diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/METADATA b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..2c9f64292504cadc37858ebfe62135c167b321a5
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/METADATA
@@ -0,0 +1,62 @@
+Metadata-Version: 2.1
+Name: mlagents
+Version: 0.30.0
+Summary: Unity Machine Learning Agents
+Home-page: https://github.com/Unity-Technologies/ml-agents
+Author: Unity Technologies
+Author-email: ML-Agents@unity3d.com
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.8.13,<=3.10.8
+Description-Content-Type: text/markdown
+Requires-Dist: grpcio >=1.11.0
+Requires-Dist: h5py >=2.9.0
+Requires-Dist: mlagents-envs ==0.30.0
+Requires-Dist: numpy <2.0,>=1.13.3
+Requires-Dist: Pillow >=4.2.1
+Requires-Dist: protobuf >=3.6
+Requires-Dist: pyyaml >=3.1.0
+Requires-Dist: tensorboard >=1.15
+Requires-Dist: attrs >=19.3.0
+Requires-Dist: torch <1.9.0,>=1.6.0 ; platform_system != "Windows" and python_version < "3.9"
+Requires-Dist: torch <=1.11.0,>=1.8.0 ; platform_system != "Windows" and python_version >= "3.9"
+Requires-Dist: pypiwin32 ==223 ; platform_system == "Windows"
+Requires-Dist: cattrs <1.1.0 ; python_version < "3.8"
+Requires-Dist: importlib-metadata ==4.4 ; python_version < "3.8"
+Requires-Dist: cattrs <1.7,>=1.1.0 ; python_version >= "3.8"
+
+# Unity ML-Agents Trainers
+
+The `mlagents` Python package is part of the
+[ML-Agents Toolkit](https://github.com/Unity-Technologies/ml-agents). `mlagents`
+provides a set of reinforcement and imitation learning algorithms designed to be
+used with Unity environments. The algorithms interface with the Python API
+provided by the `mlagents_envs` package. See [here](../docs/Python-LLAPI.md) for
+more information on `mlagents_envs`.
+
+The algorithms can be accessed using the `mlagents-learn` entry point. See
+[here](../docs/Training-ML-Agents.md) for more information on using this
+package.
+
+## Installation
+
+Install the `mlagents` package with:
+
+```sh
+python -m pip install mlagents==0.30.0
+```
+
+## Usage & More Information
+
+For more information on the ML-Agents Toolkit and how to instrument a Unity
+scene with the ML-Agents SDK, check out the main
+[ML-Agents Toolkit documentation](../docs/Readme.md).
+
+## Limitations
+
+- Resuming self-play from a checkpoint resets the reported ELO to the default
+  value.
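For reference, the `mlagents-learn` entry point named in the README above is invoked from the command line. A minimal sketch (the YAML path is an illustrative trainer config from an ml-agents repository checkout, not a file installed by this wheel; `--run-id` is an arbitrary label for the run):

```sh
# Start a training run with a PPO trainer config, then press Play in the
# Unity Editor when prompted. The config path is an example, not installed
# by this package.
mlagents-learn config/ppo/3DBall.yaml --run-id=first_3dball_run

# Continue the same run later from its saved checkpoints.
mlagents-learn config/ppo/3DBall.yaml --run-id=first_3dball_run --resume
```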
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/RECORD b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..13b5348f2f8f1c11ca9fd926d752edd98885e8a2
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/RECORD
@@ -0,0 +1,175 @@
+../../Scripts/mlagents-learn.exe,sha256=pNeSgQInMNLCUF2n3o5mBxX9oLP8Hr6qJLKNkH-YEG0,108394
+../../Scripts/mlagents-run-experiment.exe,sha256=tg0YWa3eMDIbkZXgXwroG5tpHqycRvXFEaoR3Od0f40,108403
+mlagents-0.30.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+mlagents-0.30.0.dist-info/METADATA,sha256=chdO78bUfunObqbfcJB785p7NAftlD-VPWYTu0SHpcc,2406
+mlagents-0.30.0.dist-info/RECORD,,
+mlagents-0.30.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents-0.30.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+mlagents-0.30.0.dist-info/entry_points.txt,sha256=rc8cYQbQtS5CskjJHJzZxmkCYP-yFwR2_CzOffeZL08,310
+mlagents-0.30.0.dist-info/top_level.txt,sha256=LIH88FaXtKCg_PiFin-NGZISQJ0tIXWpkeTQWtFSj5E,9
+mlagents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/__pycache__/__init__.cpython-39.pyc,,
+mlagents/plugins/__init__.py,sha256=C4AnFR_yVEbD8-iC-cDizZrRixLypvBZxc-VLRwyC8g,265
+mlagents/plugins/__pycache__/__init__.cpython-39.pyc,,
+mlagents/plugins/__pycache__/stats_writer.cpython-39.pyc,,
+mlagents/plugins/__pycache__/trainer_type.cpython-39.pyc,,
+mlagents/plugins/stats_writer.py,sha256=uS5-k2mx4vNCybBTl5WVn9G4S0mb0-cv_8rC56caS6A,2717
+mlagents/plugins/trainer_type.py,sha256=FV5byclgcP3nG0s3cdfCit98c_dJ_5ToiR6dZlDFt5g,3120
+mlagents/torch_utils/__init__.py,sha256=bawTiOuYKBXHJRs7fyyXQHTVp1QUtpji3H-lGO6KkoE,238
+mlagents/torch_utils/__pycache__/__init__.cpython-39.pyc,,
+mlagents/torch_utils/__pycache__/cpu_utils.cpython-39.pyc,,
+mlagents/torch_utils/__pycache__/globals.cpython-39.pyc,,
+mlagents/torch_utils/__pycache__/torch.cpython-39.pyc,,
+mlagents/torch_utils/cpu_utils.py,sha256=koYxRxElcLl-XcH43C7r5dZaiFpXTr7VN51rz3i5r-U,1464
+mlagents/torch_utils/globals.py,sha256=G4PDxGTi-MfDvf9KJCduoeNH9hJJyJ2a0Fbl-5r7HIY,321
+mlagents/torch_utils/torch.py,sha256=cT7cyz1Jmr1bCDvatdL0J7a_h1HsCRSjQ3yQSlBglvs,1881
+mlagents/trainers/__init__.py,sha256=ymb7Cn10uOrkBsmNF2FhbpsF0To4FXvnlH4fZYDOYc0,194
+mlagents/trainers/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/__pycache__/action_info.cpython-39.pyc,,
+mlagents/trainers/__pycache__/agent_processor.cpython-39.pyc,,
+mlagents/trainers/__pycache__/behavior_id_utils.cpython-39.pyc,,
+mlagents/trainers/__pycache__/buffer.cpython-39.pyc,,
+mlagents/trainers/__pycache__/cli_utils.cpython-39.pyc,,
+mlagents/trainers/__pycache__/demo_loader.cpython-39.pyc,,
+mlagents/trainers/__pycache__/directory_utils.cpython-39.pyc,,
+mlagents/trainers/__pycache__/env_manager.cpython-39.pyc,,
+mlagents/trainers/__pycache__/environment_parameter_manager.cpython-39.pyc,,
+mlagents/trainers/__pycache__/exception.cpython-39.pyc,,
+mlagents/trainers/__pycache__/learn.cpython-39.pyc,,
+mlagents/trainers/__pycache__/run_experiment.cpython-39.pyc,,
+mlagents/trainers/__pycache__/settings.cpython-39.pyc,,
+mlagents/trainers/__pycache__/simple_env_manager.cpython-39.pyc,,
+mlagents/trainers/__pycache__/stats.cpython-39.pyc,,
+mlagents/trainers/__pycache__/subprocess_env_manager.cpython-39.pyc,,
+mlagents/trainers/__pycache__/trainer_controller.cpython-39.pyc,,
+mlagents/trainers/__pycache__/training_analytics_side_channel.cpython-39.pyc,,
+mlagents/trainers/__pycache__/training_status.cpython-39.pyc,,
+mlagents/trainers/__pycache__/trajectory.cpython-39.pyc,,
+mlagents/trainers/__pycache__/upgrade_config.cpython-39.pyc,,
+mlagents/trainers/action_info.py,sha256=xob0TgV393WKE7JzOuGjUNMNXujr9tGxSC5PLiHjUQY,831
+mlagents/trainers/agent_processor.py,sha256=oYeV7lANba8oXDvwgvj5moT6DfzkV-iutMP8w82M_98,20474
+mlagents/trainers/behavior_id_utils.py,sha256=hJ_E1-RKYXN-8rnybkI4YUbRq_bRBbWhbupDCBNquHc,2134
+mlagents/trainers/buffer.py,sha256=CpPKTfVK1xuYQrEcmI0bx112abEHNfWl-nQ0zYV8a14,20000
+mlagents/trainers/cli_utils.py,sha256=qnWO0dcNe_hlSDtSJ49unehY0uOgcUIyKvVGfSa3MKI,11721
+mlagents/trainers/demo_loader.py,sha256=zTLXom5tF-_i42wbvdOEhrqb5HHVXTz67UVgd8H3BK8,9790
+mlagents/trainers/directory_utils.py,sha256=4q6tEO5bBfM8IPCQyhHgLZW1F8ncqW4nhgX1Y7ENgms,3027
+mlagents/trainers/env_manager.py,sha256=ouzQ3K4flHzYJHxvf_GeWS-Hh5FP8bhm8SYWHrpk0JM,5898
+mlagents/trainers/environment_parameter_manager.py,sha256=wDO_kqiG8iXqZUwWlnw0aXfdDtlEG8Q9Hpv7ebN35QU,8458
+mlagents/trainers/exception.py,sha256=EiyulULBNkHCnC85yI7TMt0zM_tFi73C6Xc2Vqu5eoA,1221
+mlagents/trainers/ghost/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/ghost/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/ghost/__pycache__/controller.cpython-39.pyc,,
+mlagents/trainers/ghost/__pycache__/trainer.cpython-39.pyc,,
+mlagents/trainers/ghost/controller.py,sha256=lMP7I7QC2JQFq2ypAJDoeeSIyLzXzNwBxHMUBFHjIWs,4347
+mlagents/trainers/ghost/trainer.py,sha256=u9Y3YZtNsKHONiLNDO1LynmvWuwxCloxXZ6LJEp6MtM,20972
+mlagents/trainers/learn.py,sha256=Lc17zalvi0LKNcD6EThBWBafJoH2Pb4HyCIcRaskbkU,10216
+mlagents/trainers/model_saver/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/model_saver/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/model_saver/__pycache__/model_saver.cpython-39.pyc,,
+mlagents/trainers/model_saver/__pycache__/torch_model_saver.cpython-39.pyc,,
+mlagents/trainers/model_saver/model_saver.py,sha256=IYKRrarr9xg44UwJf2FmHPy_qZBhtrmAcygqGJewTcs,2569
+mlagents/trainers/model_saver/torch_model_saver.py,sha256=q_fiUhd4UdQYk04uPcXSU-ai0xm-4EvsjRDMIF7kAwU,6620
+mlagents/trainers/optimizer/__init__.py,sha256=AZxx5ZQd9FrD3TPvE-eC2TuZvTy8fhKjEKPYo_JylWg,68
+mlagents/trainers/optimizer/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/optimizer/__pycache__/optimizer.cpython-39.pyc,,
+mlagents/trainers/optimizer/__pycache__/torch_optimizer.cpython-39.pyc,,
+mlagents/trainers/optimizer/optimizer.py,sha256=WX9yAm-6sqV3_DKkvl9nbY0ytueKtc77EK9--osJivc,792
+mlagents/trainers/optimizer/torch_optimizer.py,sha256=xrxVOqH4CLD3nFr32G75j9B0HTAGxAWW4vmaW-4pDJM,9420
+mlagents/trainers/poca/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/poca/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/poca/__pycache__/optimizer_torch.cpython-39.pyc,,
+mlagents/trainers/poca/__pycache__/trainer.cpython-39.pyc,,
+mlagents/trainers/poca/optimizer_torch.py,sha256=bYhhwtO7Q3YGRYinbj6I1aXsWUFKZG60CFHZ3nJR-hc,27848
+mlagents/trainers/poca/trainer.py,sha256=YKRl2BZZW4FJj3nM2oSipDQ0_AoZyDy2KPSD9VyVduU,9819
+mlagents/trainers/policy/__init__.py,sha256=gWYKy9a9WxSXDkCO-qCD1W0Fj8gndTREm8c-f-hx2eM,59
+mlagents/trainers/policy/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/policy/__pycache__/checkpoint_manager.cpython-39.pyc,,
+mlagents/trainers/policy/__pycache__/policy.cpython-39.pyc,,
+mlagents/trainers/policy/__pycache__/torch_policy.cpython-39.pyc,,
+mlagents/trainers/policy/checkpoint_manager.py,sha256=sMUNw9T5wjB1n5Zd1jGQ2uDoaDG33meX_sVZCKE2pg0,3958
+mlagents/trainers/policy/policy.py,sha256=Pa1uH49lWMqqtvf7C4YwU_fIIj609FQ2VsZZRhjARFU,5350
+mlagents/trainers/policy/torch_policy.py,sha256=YYJQGDBxuwgaN-jpbdFoo_8_UZ5WaFHP1mK-OxzoM6I,6425
+mlagents/trainers/ppo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/ppo/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/ppo/__pycache__/optimizer_torch.cpython-39.pyc,,
+mlagents/trainers/ppo/__pycache__/trainer.cpython-39.pyc,,
+mlagents/trainers/ppo/optimizer_torch.py,sha256=Uh2stK3x-gOURlousroQRp0UMWRwZRg3ViYm_Kfekw4,7812
+mlagents/trainers/ppo/trainer.py,sha256=zd7sOhm9w7GkZDp6Uymteju1tipLdcVGH_qCrl4Una0,8375
+mlagents/trainers/run_experiment.py,sha256=dTFzVv_7l3jFp-0pWCLaBTjmY6JHKk4kQ0aAU6-SrGk,1026
+mlagents/trainers/sac/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/sac/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/sac/__pycache__/optimizer_torch.cpython-39.pyc,,
+mlagents/trainers/sac/__pycache__/trainer.cpython-39.pyc,,
+mlagents/trainers/sac/optimizer_torch.py,sha256=K8j9Yvjkv0dIja1Gu958s7uwZotDZQJOQU9kRuwQ1yw,26502
+mlagents/trainers/sac/trainer.py,sha256=YtlxEw2mApEHww5d5HcYRR-wTHE3rMHHGmvwPnozFvc,6685
+mlagents/trainers/settings.py,sha256=hSc7j3W8VPS96EpJjFUkDXc8ipOikvTc1_IKcRDLHKY,37139
+mlagents/trainers/simple_env_manager.py,sha256=8WM0P_SlSWqzd3iCo_BpbHwU0bJLkY-_ARNlhxoKCKI,3377
+mlagents/trainers/stats.py,sha256=XhylF85cZd5nEKEUdyHzVOCOWMeC_GAIukMd3gqyAYk,14792
+mlagents/trainers/subprocess_env_manager.py,sha256=zCFeEcVLpWEbZfnYeZ14UfqGdkkkR3eyHPXIYs3qej8,22762
+mlagents/trainers/torch_entities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/torch_entities/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/action_flattener.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/action_log_probs.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/action_model.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/agent_action.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/attention.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/conditioning.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/decoders.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/distributions.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/encoders.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/layers.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/model_serialization.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/networks.cpython-39.pyc,,
+mlagents/trainers/torch_entities/__pycache__/utils.cpython-39.pyc,,
+mlagents/trainers/torch_entities/action_flattener.py,sha256=c1wiQzaJIsB2wuW8jZU7-o0jgEsn9y_05PxaqEoQFqQ,1713
+mlagents/trainers/torch_entities/action_log_probs.py,sha256=2NeVwier4ZZHJMsLMtgMKnE2oebZafSBRbEBrc_0b04,4703
+mlagents/trainers/torch_entities/action_model.py,sha256=wMJVT31IfeKTht3ibUAKElvDzrixKuNhOAVBg6Pm1Ic,10749
+mlagents/trainers/torch_entities/agent_action.py,sha256=7zx0iBGC-FBgJBoVh5THnzo-57iKt8BYnZFixUQoMlQ,7189
+mlagents/trainers/torch_entities/attention.py,sha256=VbzJigV7DmQIbUaG7AiqbSjtw2AsyDHtwQVi7V0OeKk,11931
+mlagents/trainers/torch_entities/components/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/torch_entities/components/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/bc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents/trainers/torch_entities/components/bc/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/bc/__pycache__/module.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/bc/module.py,sha256=yHcnRcsmVH2ygmK4uqkwjfbwxxGl1Ba8kJ-0weYU2Rc,7469
+mlagents/trainers/torch_entities/components/reward_providers/__init__.py,sha256=Q6cQK-uPVry728P23eS37j-BzwLwXodslTzdDNF4C4k,835
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/base_reward_provider.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/curiosity_reward_provider.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/extrinsic_reward_provider.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/gail_reward_provider.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/reward_provider_factory.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/__pycache__/rnd_reward_provider.cpython-39.pyc,,
+mlagents/trainers/torch_entities/components/reward_providers/base_reward_provider.py,sha256=0VHxlChwadl8o-pGBFMUL2pkB1I-UqSUmZPRbLWLjV8,2891
+mlagents/trainers/torch_entities/components/reward_providers/curiosity_reward_provider.py,sha256=xzehU1vqMcZcfpy-FWgcCV0pGuebVT8k8mNBLH0UCPI,9339
+mlagents/trainers/torch_entities/components/reward_providers/extrinsic_reward_provider.py,sha256=3rEXjC2jNsL_aCBQARh4-s0aLCMRLZv5wPeht-d3M7E,1786
+mlagents/trainers/torch_entities/components/reward_providers/gail_reward_provider.py,sha256=VzURKz50wj4quTgdTRcareeh-wvPEqU8f8eWiUQTMzo,11190
+mlagents/trainers/torch_entities/components/reward_providers/reward_provider_factory.py,sha256=9Hk4Ji6r4vIw6twoS0lMndrVvXD-0luOk4mSVLaapVM,1772
+mlagents/trainers/torch_entities/components/reward_providers/rnd_reward_provider.py,sha256=pBHbtO0rtYGjlMISyUZETqpUkTAbX-RxJodYTx8kiys,3068
+mlagents/trainers/torch_entities/conditioning.py,sha256=ZU9UAr2-AwNihGM9CzR5c5XGnaYXlmPkG2XTBQuxmVY,5134
+mlagents/trainers/torch_entities/decoders.py,sha256=qPKcCNUqAFjzGdz8sSFKwAxsJWEcbjkkIXupCe4gT1I,793
+mlagents/trainers/torch_entities/distributions.py,sha256=9W14ch5duHlsN0eSLlGS4xGfWUN4iy2Fkg18AqZrxfQ,8238
+mlagents/trainers/torch_entities/encoders.py,sha256=UVYFImSj0mw8mC-L5v7kPtwA7O3yryZOWZeWwQpdamM,11206
+mlagents/trainers/torch_entities/layers.py,sha256=HHaq4vpKS2PWLzRm7WhGp2ibzmBghe40oXklGHX9paA,7643
+mlagents/trainers/torch_entities/model_serialization.py,sha256=M8B-SrHTXHn1KnVXu_E93nFMjvsPrs9SFs37PkYDwOA,6272
+mlagents/trainers/torch_entities/networks.py,sha256=kSMGIa2dwgn1n00UiFxu2qYbS5bpk8zAYvXlozhEfYM,29280
+mlagents/trainers/torch_entities/utils.py,sha256=O9Ew46WoYHV5wHxoSfnsYyJDgv4HhPOQZTVGXw6ZU1E,18698
+mlagents/trainers/trainer/__init__.py,sha256=mkctLu3RcN5nXXev0Ca2UdQrfEmEjHaDCcnMQujyn74,139
+mlagents/trainers/trainer/__pycache__/__init__.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/off_policy_trainer.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/on_policy_trainer.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/rl_trainer.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/trainer.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/trainer_factory.cpython-39.pyc,,
+mlagents/trainers/trainer/__pycache__/trainer_utils.cpython-39.pyc,,
+mlagents/trainers/trainer/off_policy_trainer.py,sha256=bGamsOF8_X1_W6U_Mmvp9xmqxPs5wWP0RzuDlE7F66c,10489
+mlagents/trainers/trainer/on_policy_trainer.py,sha256=yiCJtsNKrusDOWEAnNyblmo7X2qDILqjRO1oeih4vFI,5851
+mlagents/trainers/trainer/rl_trainer.py,sha256=0MH5v1mWXls_pJI6dC9OR3c5rNnwfiOywe5AlqBMbyw,12542
+mlagents/trainers/trainer/trainer.py,sha256=hEIqksByARHhzA0yck_k3ZKXAZSY551VoTe0cUgASTw,6084
+mlagents/trainers/trainer/trainer_factory.py,sha256=tY1aGbw1rUIoUBc4cvEuaVsKMUyGUGgowJWQI2s88go,5007
+mlagents/trainers/trainer/trainer_utils.py,sha256=u9v-0Ccey0myFrBuslfJEQU6iezitHs9WoX2a6Qq1NY,1671
+mlagents/trainers/trainer_controller.py,sha256=E0IUqv7g4F2BjFQimhVSKrsUzPSCMJwxOmCuLp3RQvs,11391
+mlagents/trainers/training_analytics_side_channel.py,sha256=xYpSpnAlFbBxv7k7d-nOU5PcLtmOqzSwJJq-WuWZ_r8,8121
+mlagents/trainers/training_status.py,sha256=8EZ7ZqTCUPJJRo_N6jaUiwyxBlsn84pm4B0Znns_RBo,4323
+mlagents/trainers/trajectory.py,sha256=_FcCBZaxTKSirT2TpZjNGPGfE90rTPvzXBmru9_CQYU,11646
+mlagents/trainers/upgrade_config.py,sha256=c5UB4zAQBL9V7ywcVvWPRWe39onFomtQsfncO80LJVw,10319
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/REQUESTED b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/WHEEL b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..ecaf39f3c3df8b0075a2951da9b1a27fcb08a173
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (71.1.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/entry_points.txt b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..787aff827a37f703ba60a7f771699f7a69b2e0ff
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/entry_points.txt
@@ -0,0 +1,9 @@
+[console_scripts]
+mlagents-learn = mlagents.trainers.learn:main
+mlagents-run-experiment = mlagents.trainers.run_experiment:main
+
+[mlagents.stats_writer]
+default = mlagents.plugins.stats_writer:get_default_stats_writers
+
+[mlagents.trainer_type]
+default = mlagents.plugins.trainer_type:get_default_trainer_types
diff --git a/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cdf8e207464b3ed4067646e83e568a57a6a38fa1
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents-0.30.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mlagents
diff --git a/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/METADATA b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..ce236b8024d27bc9478d52c4b0aa4991f926df9a
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/METADATA
@@ -0,0 +1,25 @@
+Metadata-Version: 2.1
+Name: mlagents_envs
+Version: 0.30.0
+Summary: Unity Machine Learning Agents Interface
+Home-page: https://github.com/Unity-Technologies/ml-agents
+Author: Unity Technologies
+Author-email: ML-Agents@unity3d.com
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: >=3.8.13,<=3.10.8
+Requires-Dist: cloudpickle
+Requires-Dist: grpcio >=1.11.0
+Requires-Dist: numpy >=1.14.1
+Requires-Dist: Pillow >=4.2.1
+Requires-Dist: protobuf >=3.6
+Requires-Dist: pyyaml >=3.1.0
+Requires-Dist: gym >=0.21.0
+Requires-Dist: pettingzoo ==1.15.0
+Requires-Dist: numpy ==1.21.2
+Requires-Dist: filelock >=3.4.0
+
diff --git a/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/RECORD b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..cb8ec164e89f9897ac681858b1a2c8892f725013
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/RECORD
@@ -0,0 +1,119 @@
+mlagents_envs-0.30.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+mlagents_envs-0.30.0.dist-info/METADATA,sha256=ExFBBBp79KyKfJl4JkcI0PqFZA6oPNM72qxXPVnc3RI,922
+mlagents_envs-0.30.0.dist-info/RECORD,,
+mlagents_envs-0.30.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+mlagents_envs-0.30.0.dist-info/top_level.txt,sha256=jEe5REiGp6_VjGewVbI4CILjHZo2IYC03lUztCsSxUQ,14
+mlagents_envs/__init__.py,sha256=ymb7Cn10uOrkBsmNF2FhbpsF0To4FXvnlH4fZYDOYc0,194
+mlagents_envs/__pycache__/__init__.cpython-39.pyc,,
+mlagents_envs/__pycache__/base_env.cpython-39.pyc,,
+mlagents_envs/__pycache__/communicator.cpython-39.pyc,,
+mlagents_envs/__pycache__/env_utils.cpython-39.pyc,,
+mlagents_envs/__pycache__/environment.cpython-39.pyc,,
+mlagents_envs/__pycache__/exception.cpython-39.pyc,,
+mlagents_envs/__pycache__/logging_util.cpython-39.pyc,,
+mlagents_envs/__pycache__/mock_communicator.cpython-39.pyc,,
+mlagents_envs/__pycache__/rpc_communicator.cpython-39.pyc,,
+mlagents_envs/__pycache__/rpc_utils.cpython-39.pyc,,
+mlagents_envs/__pycache__/timers.cpython-39.pyc,,
+mlagents_envs/base_env.py,sha256=x1Y4rpJMN9vulTyVGFDDIEsqCXxEsJ7QbGQLwFZ4PgY,22625
+mlagents_envs/communicator.py,sha256=SZByKom33pvGNzu9Tmc0o8XwiTe1XGdTGnqQxV0CJWw,1887
+mlagents_envs/communicator_objects/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mlagents_envs/communicator_objects/__pycache__/__init__.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/agent_action_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/agent_info_action_pair_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/agent_info_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/brain_parameters_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/capabilities_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/command_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/custom_reset_parameters_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/demonstration_meta_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/engine_configuration_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/header_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/observation_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/space_type_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/training_analytics_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_input_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_message_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_output_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_rl_initialization_input_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_rl_initialization_output_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_rl_input_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_rl_output_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_to_external_pb2.cpython-39.pyc,,
+mlagents_envs/communicator_objects/__pycache__/unity_to_external_pb2_grpc.cpython-39.pyc,,
+mlagents_envs/communicator_objects/agent_action_pb2.py,sha256=cJLj4-1ph--PzYRdzGgYpcBFASvfp5DQdjyuOG-S4eE,3779
+mlagents_envs/communicator_objects/agent_info_action_pair_pb2.py,sha256=yW-6BcBjE4AdD1wX49VGrJUo8VnhkDRSWP89vFFFWyo,3895
+mlagents_envs/communicator_objects/agent_info_pb2.py,sha256=ryAM1iA0G8GPcM6v3HnUf2iyYM4Gpkd6DXVziKBWL-s,5851
+mlagents_envs/communicator_objects/brain_parameters_pb2.py,sha256=Doc74jZ4w8bm9vQmE89XkbofPPKr_BPZ48OiCoH687k,8083
+mlagents_envs/communicator_objects/capabilities_pb2.py,sha256=CzsSOHwdvIgzxwbiNmNHHYBfs6yW5p4fkAG_EmkvStc,5252
+mlagents_envs/communicator_objects/command_pb2.py,sha256=ywUBoxkBbXn0YYluy8xHjDQxnmdFcAV1yrpMv_vjWTk,2066
+mlagents_envs/communicator_objects/custom_reset_parameters_pb2.py,sha256=OzB2Zgi1q4IwEwXV5QwPjAqIghWHN_vIvHttrsHOAgs,2128
+mlagents_envs/communicator_objects/demonstration_meta_pb2.py,sha256=XzjYZk8P0YjsK4qeXVQRyW6witqOG_4Y460o5Z2w5dE,4255
+mlagents_envs/communicator_objects/engine_configuration_pb2.py,sha256=P60GIeybVr9-o1HlQP0nKnMd_R5n28pkVUvbRkGtYew,4641
+mlagents_envs/communicator_objects/header_pb2.py,sha256=jPl6efvaRVikkU0zWkJEbIc9Le56duEx6kwo4H_bDVo,2703
+mlagents_envs/communicator_objects/observation_pb2.py,sha256=HB1JPlJHRfXQAcZ_IcOu-8NfCPEehoSSsPGCxO8I1_Y,9620
+mlagents_envs/communicator_objects/space_type_pb2.py,sha256=NLdPFrm9zP_jnwfRfN2Xdor1yXbBthcp_wtqqgeGdc4,1972
+mlagents_envs/communicator_objects/training_analytics_pb2.py,sha256=cx887zBXzO_oYLnBWe6yqFDp8MFhdAU4dAxkv37XkV4,13623
+mlagents_envs/communicator_objects/unity_input_pb2.py,sha256=boTQvkktMWcV23M34r9N8q-Z9_st0cMUmpXvIWXNm0M,3903
+mlagents_envs/communicator_objects/unity_message_pb2.py,sha256=KEKDbZ8qDt8OOrv5CBq37B1l5dd05YQXxDGP513BrM8,4571
+mlagents_envs/communicator_objects/unity_output_pb2.py,sha256=iIwtET2fdfwCvd_QtZZ0dCWJD3VKpDLfDalc5c4zjFY,3935
+mlagents_envs/communicator_objects/unity_rl_initialization_input_pb2.py,sha256=k_iEL1KxNDMoZ8WLFm3E-KdgahSBJeTNauxZn7jpu2E,4940
+mlagents_envs/communicator_objects/unity_rl_initialization_output_pb2.py,sha256=ntOKQtE7k8iyp5xpjpzGg_v79ATY5ep-XHkxgFpiQfo,5973
+mlagents_envs/communicator_objects/unity_rl_input_pb2.py,sha256=SLyG0dYroHoBOhekwkhFjvbaC-H6-QIvBSTK9G0xiPo,8109
+mlagents_envs/communicator_objects/unity_rl_output_pb2.py,sha256=2alPQeU_bYUSEn0shht0RiXeQOzbxZ8Rti0S4KJZ2RQ,7220
+mlagents_envs/communicator_objects/unity_to_external_pb2.py,sha256=-HSKjNoqpwVu5ba4MoXFnew1s8Ot1_2FnNsoPG6hUt4,2356
+mlagents_envs/communicator_objects/unity_to_external_pb2_grpc.py,sha256=QHmH4daN6Cx-WH-PbucLMcEAVt0NrTIxKHFXVUakJzY,1786
+mlagents_envs/env_utils.py,sha256=ahAxUVh_ybN7rB0nU7lugelP-pUh3ws_8hlyhi9KPgg,5174
+mlagents_envs/environment.py,sha256=GWLgwdGZPNsauGp9PCGK6ky_ccI7fb2HXkqAuW0t2Lo,22901
+mlagents_envs/envs/__init__.py,sha256=ZbLbl2QmrG0w_k28qZruA3xztEQtU3f-QwKH7bcxGyo,578
+mlagents_envs/envs/__pycache__/__init__.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/env_helpers.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/pettingzoo_env_factory.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/unity_aec_env.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/unity_gym_env.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/unity_parallel_env.cpython-39.pyc,,
+mlagents_envs/envs/__pycache__/unity_pettingzoo_base_env.cpython-39.pyc,,
+mlagents_envs/envs/env_helpers.py,sha256=0_KCQQPDOL2UXub3Wi2p2uUOypLBK7n3xiOBk9mndXE,2860
+mlagents_envs/envs/pettingzoo_env_factory.py,sha256=BT-RZtJsEfb_UKlUIqlX4xHMi1LNmb3NCDsWgxjDEAc,1957
+mlagents_envs/envs/unity_aec_env.py,sha256=rOtvT1EYrMXpWJ8mLqdnaqHBnUHNNYx2ubPFqFRgObA,2494
+mlagents_envs/envs/unity_gym_env.py,sha256=bR_yq6pQqIkp_fA_2PF5oJGCjtOFUIOWY75u1s9V4OE,14060
+mlagents_envs/envs/unity_parallel_env.py,sha256=a2HCYdmoHX7XgT1PCyhPo-gTyibaU0Ys3TL5FnY95Jc,1594
+mlagents_envs/envs/unity_pettingzoo_base_env.py,sha256=furHR6SvHjV2PQuCydMSi-h2fmE-vO7BN0ykut_HXqc,12365
+mlagents_envs/exception.py,sha256=juuUc2myXKVakxkVnnnCDkqRbi84mewGy6CPv5N5hkE,1621
+mlagents_envs/logging_util.py,sha256=aLYz3jqg_te3jei1hrQKolxIVlF3xwFfA8lzB1CXq7o,1866
+mlagents_envs/mock_communicator.py,sha256=uvPKopMNsEnX7PRe2vRsxIeWIcbJth9IBjwSq7bLr9k,3931
+mlagents_envs/registry/__init__.py,sha256=C3D8TtWFhcZKekZIgyXd6gIcnGTozePinCCj2GxqS-k,115
+mlagents_envs/registry/__pycache__/__init__.cpython-39.pyc,,
+mlagents_envs/registry/__pycache__/base_registry_entry.cpython-39.pyc,,
+mlagents_envs/registry/__pycache__/binary_utils.cpython-39.pyc,,
+mlagents_envs/registry/__pycache__/remote_registry_entry.cpython-39.pyc,,
+mlagents_envs/registry/__pycache__/unity_env_registry.cpython-39.pyc,,
+mlagents_envs/registry/base_registry_entry.py,sha256=odZ_EElER899S9Gqo-ZA2dgdmadhhdd3MsUZhJis0CY,1856
+mlagents_envs/registry/binary_utils.py,sha256=j-URDvMeLV4x7lTvFkbi7emF0AbnhTEvJE7jHA_X8hI,9591
+mlagents_envs/registry/remote_registry_entry.py,sha256=C6UqVrit4ZmS7r3jjrcYzheLttXYJWSstw68gzU5QQI,3394
+mlagents_envs/registry/unity_env_registry.py,sha256=d0_8xrDjGTkidwTq-mtfIl2Sp_ncShTsxm5GG3FHKRc,5020
+mlagents_envs/rpc_communicator.py,sha256=OS968fiWWxOutSTJWpv8cZ25rX-VLY2FHcb9ccR7ISk,6729
+mlagents_envs/rpc_utils.py,sha256=Im5koVMpi7UqvxEUSuKVVFkhLGTtzgiEerwAkwV4arY,16415
+mlagents_envs/side_channel/__init__.py,sha256=Jks-qs8TPIPmSGCiv3_GdJ2V5225UZrvptuZoIGLyJI,381
+mlagents_envs/side_channel/__pycache__/__init__.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/default_training_analytics_side_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/engine_configuration_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/environment_parameters_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/float_properties_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/incoming_message.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/outgoing_message.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/raw_bytes_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/side_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/side_channel_manager.cpython-39.pyc,,
+mlagents_envs/side_channel/__pycache__/stats_side_channel.cpython-39.pyc,,
+mlagents_envs/side_channel/default_training_analytics_side_channel.py,sha256=1KK6EPqJcGxeKaRdqjX6dlfnNx4jFPTEOovQ1dnKjWk,1835
+mlagents_envs/side_channel/engine_configuration_channel.py,sha256=y74y9qABe3590KaCsp6JM0v-mJY29U2DWpsT5FM2iDk,4937
+mlagents_envs/side_channel/environment_parameters_channel.py,sha256=ym9hvTljdHrfNWlomzIq3FUHNKqzScAl1uS4YOaqJOI,3874
+mlagents_envs/side_channel/float_properties_channel.py,sha256=7rYK2wyBg-XISUdt_CjutqDehOtyB-i12J6700bWwEU,2227
+mlagents_envs/side_channel/incoming_message.py,sha256=scXeGH-p1-x6c4TIN9YzYERVjqrR-hqWdlOxeqC9OrY,3366
+mlagents_envs/side_channel/outgoing_message.py,sha256=gLo3a8m-L4PVINNLKV1znax33KATylgIhT9SwVTIGL8,1961
+mlagents_envs/side_channel/raw_bytes_channel.py,sha256=r2rSwKinlbQd-nJLdPy5wQ7231SB4dXI-25XweAORfs,1301
+mlagents_envs/side_channel/side_channel.py,sha256=ns5gaeVkUeP8LV8yDsGDA4vMeYI20vSX1z0hRuE7l2A,1504
+mlagents_envs/side_channel/side_channel_manager.py,sha256=4W4nVvYs5l2Ad5g5SW9JGLBEYpxlrbhjAqmonlysrE0,3601
+mlagents_envs/side_channel/stats_side_channel.py,sha256=PI5fA08AJejHAUWLd8FIMyva6MxyKbWn3V8Kbx6zTzk,1876
+mlagents_envs/timers.py,sha256=nGXmnHV6o6Trx--54lie2Lt3LJEzBDuSORtzo8qDncg,11684
diff --git a/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/WHEEL b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..ecaf39f3c3df8b0075a2951da9b1a27fcb08a173
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (71.1.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..833e77edf5f93decd1aa3f9f114174f12faad07d
--- /dev/null
+++ b/MLPY/Lib/site-packages/mlagents_envs-0.30.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mlagents_envs
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+  a. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+  b. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+  c. Neither the name of the copyright holder nor the names of its
+     contributors may be used to endorse or promote products derived
+     from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA
@@ -0,0 +1,233 @@
+Metadata-Version: 2.1
+Name: mpmath
+Version: 1.3.0
+Summary: Python library for arbitrary-precision floating-point arithmetic
+Home-page: http://mpmath.org/
+Author: Fredrik Johansson
+Author-email: fredrik.johansson@gmail.com
+License: BSD
+Project-URL: Source, https://github.com/fredrik-johansson/mpmath
+Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
+Project-URL: Documentation, http://mpmath.org/doc/current/
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+License-File: LICENSE
+Provides-Extra: develop
+Requires-Dist: pytest (>=4.6) ; extra == 'develop'
+Requires-Dist: pycodestyle ; extra == 'develop'
+Requires-Dist: pytest-cov ; extra == 'develop'
+Requires-Dist: codecov ; extra == 'develop'
+Requires-Dist: wheel ; extra == 'develop'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Provides-Extra: gmpy
+Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
+Provides-Extra: tests
+Requires-Dist: pytest (>=4.6) ; extra == 'tests'
+
+mpmath
+======
+
+|pypi version| |Build status| |Code coverage status| |Zenodo Badge|
+
+.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
+    :target: https://pypi.python.org/pypi/mpmath
+.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
+    :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
+.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
+    :target: https://codecov.io/gh/fredrik-johansson/mpmath
+.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
+    :target: https://zenodo.org/badge/latestdoi/2934512
+
+A Python library for arbitrary-precision floating-point arithmetic.
+
+Website: http://mpmath.org/
+Main author: Fredrik Johansson
+
+Mpmath is free software released under the New BSD License (see the
+LICENSE file for details).
+
+0. History and credits
+----------------------
+
+The following people (among others) have contributed major patches
+or new features to mpmath:
+
+* Pearu Peterson
+* Mario Pernici
+* Ondrej Certik
+* Vinzent Steinberg
+* Nimish Telang
+* Mike Taschuk
+* Case Van Horsen
+* Jorn Baayen
+* Chris Smith
+* Juan Arias de Reyna
+* Ioannis Tziakos
+* Aaron Meurer
+* Stefan Krastanov
+* Ken Allen
+* Timo Hartmann
+* Sergey B Kirpichev
+* Kris Kuhlman
+* Paul Masson
+* Michael Kagalenko
+* Jonathan Warner
+* Max Gaukler
+* Guillermo Navas-Palencia
+* Nike Dattani
+
+Numerous other people have contributed by reporting bugs,
+requesting new features, or suggesting improvements to the
+documentation.
+
+For a detailed changelog, including individual contributions,
+see the CHANGES file.
+
+Fredrik's work on mpmath during summer 2008 was sponsored by Google
+as part of the Google Summer of Code program.
+
+Fredrik's work on mpmath during summer 2009 was sponsored by the
+American Institute of Mathematics under the support of the National Science
+Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).
+
+Any opinions, findings, and conclusions or recommendations expressed in this
+material are those of the author(s) and do not necessarily reflect the
+views of the sponsors.
+
+Credit also goes to:
+
+* The authors of the GMP library and the Python wrapper
+  gmpy, enabling mpmath to become much faster at
+  high precision
+* The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
+  precision libraries, whose documentation has been helpful
+  for implementing many of the algorithms in mpmath
+* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
+  Wolfram Research for MathWorld and the Wolfram Functions site.
+  These are the main references used for special functions
+  implementations.
+* George Brandl for developing the Sphinx documentation tool
+  used to build mpmath's documentation
+
+Release history:
+
+* Version 1.3.0 released on March 7, 2023
+* Version 1.2.0 released on February 1, 2021
+* Version 1.1.0 released on December 11, 2018
+* Version 1.0.0 released on September 27, 2017
+* Version 0.19 released on June 10, 2014
+* Version 0.18 released on December 31, 2013
+* Version 0.17 released on February 1, 2011
+* Version 0.16 released on September 24, 2010
+* Version 0.15 released on June 6, 2010
+* Version 0.14 released on February 5, 2010
+* Version 0.13 released on August 13, 2009
+* Version 0.12 released on June 9, 2009
+* Version 0.11 released on January 26, 2009
+* Version 0.10 released on October 15, 2008
+* Version 0.9 released on August 23, 2008
+* Version 0.8 released on April 20, 2008
+* Version 0.7 released on March 12, 2008
+* Version 0.6 released on January 13, 2008
+* Version 0.5 released on November 24, 2007
+* Version 0.4 released on November 3, 2007
+* Version 0.3 released on October 5, 2007
+* Version 0.2 released on October 2, 2007
+* Version 0.1 released on September 27, 2007
+
+1. Download & installation
+--------------------------
+
+Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
+with CPython 2.7, 3.5 through 3.7 and with PyPy.
+
+The latest release of mpmath can be downloaded from the mpmath
+website and from https://github.com/fredrik-johansson/mpmath/releases
+
+It should also be available in the Python Package Index at
+https://pypi.python.org/pypi/mpmath
+
+To install the latest release of mpmath with pip, simply run
+
+``pip install mpmath``
+
+Or unpack the mpmath archive and run
+
+``python setup.py install``
+
+Mpmath can also be installed using
+
+``python -m easy_install mpmath``
+
+The latest development code is available from
+https://github.com/fredrik-johansson/mpmath
+
+See the main documentation for more detailed instructions.
+
+2. Running tests
+----------------
+
+The unit tests in mpmath/tests/ can be run via the script
+runtests.py, but it is recommended to run them with py.test
+(https://pytest.org/), especially
+to generate more useful reports in case there are failures.
+
+You may also want to check out the demo scripts in the demo
+directory.
+
+The master branch is automatically tested by Travis CI.
+
+3. Documentation
+----------------
+
+Documentation in reStructuredText format is available in the
+doc directory included with the source package. These files
+are human-readable, but can be compiled to prettier HTML using
+the build.py script (requires Sphinx, http://sphinx.pocoo.org/).
+
+See setup.txt in the documentation for more information.
+
+The most recent documentation is also available in HTML format:
+
+http://mpmath.org/doc/current/
+
+4. Known problems
+-----------------
+
+Mpmath is a work in progress. Major issues include:
+
+* Some functions may return incorrect values when given extremely
+  large arguments or arguments very close to singularities.
+
+* Directed rounding works for arithmetic operations. It is implemented
+  heuristically for other operations, and their results may be off by one
+  or two units in the last place (even if otherwise accurate).
+
+* Some IEEE 754 features are not available. Infinities and NaN are
+  partially supported; denormal rounding is currently not available
+  at all.
+
+* The interface for switching precision and rounding is not finalized.
+  The current method is not threadsafe.
+
+5. Help and bug reports
+-----------------------
+
+General questions and comments can be sent to the mpmath mailing list,
+mpmath@googlegroups.com
+
+You can also report bugs and send patches to the mpmath issue tracker,
+https://github.com/fredrik-johansson/mpmath/issues
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..f839ac4e3356b6688f0afbf53f73853ccd3737fe
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD
@@ -0,0 +1,180 @@
+mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
+mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
+mpmath-1.3.0.dist-info/RECORD,,
+mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
+mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
+mpmath/__pycache__/__init__.cpython-39.pyc,,
+mpmath/__pycache__/ctx_base.cpython-39.pyc,,
+mpmath/__pycache__/ctx_fp.cpython-39.pyc,,
+mpmath/__pycache__/ctx_iv.cpython-39.pyc,,
+mpmath/__pycache__/ctx_mp.cpython-39.pyc,,
+mpmath/__pycache__/ctx_mp_python.cpython-39.pyc,,
+mpmath/__pycache__/function_docs.cpython-39.pyc,,
+mpmath/__pycache__/identification.cpython-39.pyc,,
+mpmath/__pycache__/math2.cpython-39.pyc,,
+mpmath/__pycache__/rational.cpython-39.pyc,,
+mpmath/__pycache__/usertools.cpython-39.pyc,,
+mpmath/__pycache__/visualization.cpython-39.pyc,,
+mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
+mpmath/calculus/__pycache__/__init__.cpython-39.pyc,,
+mpmath/calculus/__pycache__/approximation.cpython-39.pyc,,
+mpmath/calculus/__pycache__/calculus.cpython-39.pyc,,
+mpmath/calculus/__pycache__/differentiation.cpython-39.pyc,,
+mpmath/calculus/__pycache__/extrapolation.cpython-39.pyc,,
+mpmath/calculus/__pycache__/inverselaplace.cpython-39.pyc,,
+mpmath/calculus/__pycache__/odes.cpython-39.pyc,,
+mpmath/calculus/__pycache__/optimization.cpython-39.pyc,,
+mpmath/calculus/__pycache__/polynomials.cpython-39.pyc,,
+mpmath/calculus/__pycache__/quadrature.cpython-39.pyc,,
+mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
+mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
+mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
+mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
+mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
+mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
+mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
+mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
+mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
+mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
+mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
+mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
+mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
+mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
+mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
+mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
+mpmath/functions/__pycache__/__init__.cpython-39.pyc,,
+mpmath/functions/__pycache__/bessel.cpython-39.pyc,,
+mpmath/functions/__pycache__/elliptic.cpython-39.pyc,,
+mpmath/functions/__pycache__/expintegrals.cpython-39.pyc,,
+mpmath/functions/__pycache__/factorials.cpython-39.pyc,,
+mpmath/functions/__pycache__/functions.cpython-39.pyc,,
+mpmath/functions/__pycache__/hypergeometric.cpython-39.pyc,,
+mpmath/functions/__pycache__/orthogonal.cpython-39.pyc,,
+mpmath/functions/__pycache__/qfunctions.cpython-39.pyc,,
+mpmath/functions/__pycache__/rszeta.cpython-39.pyc,,
+mpmath/functions/__pycache__/signals.cpython-39.pyc,,
+mpmath/functions/__pycache__/theta.cpython-39.pyc,,
+mpmath/functions/__pycache__/zeta.cpython-39.pyc,,
+mpmath/functions/__pycache__/zetazeros.cpython-39.pyc,,
+mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
+mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
+mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
+mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
+mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
+mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
+mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
+mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
+mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
+mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
+mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
+mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
+mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
+mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
+mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
+mpmath/libmp/__pycache__/__init__.cpython-39.pyc,,
+mpmath/libmp/__pycache__/backend.cpython-39.pyc,,
+mpmath/libmp/__pycache__/gammazeta.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libelefun.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libhyper.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libintmath.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libmpc.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libmpf.cpython-39.pyc,,
+mpmath/libmp/__pycache__/libmpi.cpython-39.pyc,,
+mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
+mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
+mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
+mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
+mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
+mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
+mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
+mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
+mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
+mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
+mpmath/matrices/__pycache__/__init__.cpython-39.pyc,,
+mpmath/matrices/__pycache__/calculus.cpython-39.pyc,,
+mpmath/matrices/__pycache__/eigen.cpython-39.pyc,,
+mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc,,
+mpmath/matrices/__pycache__/linalg.cpython-39.pyc,,
+mpmath/matrices/__pycache__/matrices.cpython-39.pyc,,
+mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
+mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
+mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
+mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
+mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
+mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
+mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mpmath/tests/__pycache__/__init__.cpython-39.pyc,,
+mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc,,
+mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc,,
+mpmath/tests/__pycache__/runtests.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_calculus.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_convert.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_diff.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_division.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_eigen.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_fp.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_functions.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_functions2.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_hp.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_identify.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_interval.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_levin.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_linalg.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_matrices.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_ode.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_pickle.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_power.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_quad.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_special.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_str.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_summation.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_trig.cpython-39.pyc,,
+mpmath/tests/__pycache__/test_visualization.cpython-39.pyc,,
+mpmath/tests/__pycache__/torture.cpython-39.pyc,,
+mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
+mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
+mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
+mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
+mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
+mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
+mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
+mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
+mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
+mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
+mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
+mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
+mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
+mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
+mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
+mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
+mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
+mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
+mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
+mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
+mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
+mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
+mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
+mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
+mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
+mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
+mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
+mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
+mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
+mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
+mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
+mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
+mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
+mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
+mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
+mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
+mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mpmath
diff --git a/MLPY/Lib/site-packages/mpmath/__init__.py b/MLPY/Lib/site-packages/mpmath/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..46a7c6f7c0875548f264612b604a9e1574b00a84
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/__init__.py
@@ -0,0 +1,468 @@
+__version__ = '1.3.0'
+
+from .usertools import monitor, timing
+
+from .ctx_fp import FPContext
+from .ctx_mp import MPContext
+from .ctx_iv import MPIntervalContext
+
+fp = FPContext()
+mp = MPContext()
+iv = MPIntervalContext()
+
+fp._mp = mp
+mp._mp = mp
+iv._mp = mp
+mp._fp = fp
+fp._fp = fp
+mp._iv = iv
+fp._iv = iv
+iv._iv = iv
+
+# XXX: extremely bad pickle hack
+from . import ctx_mp as _ctx_mp
+_ctx_mp._mpf_module.mpf = mp.mpf
+_ctx_mp._mpf_module.mpc = mp.mpc
+
+make_mpf = mp.make_mpf
+make_mpc = mp.make_mpc
+
+extraprec = mp.extraprec
+extradps = mp.extradps
+workprec = mp.workprec
+workdps = mp.workdps
+autoprec = mp.autoprec
+maxcalls = mp.maxcalls
+memoize = mp.memoize
+
+mag = mp.mag
+
+bernfrac = mp.bernfrac
+
+qfrom = mp.qfrom
+mfrom = mp.mfrom
+kfrom = mp.kfrom
+taufrom = mp.taufrom
+qbarfrom = mp.qbarfrom
+ellipfun = mp.ellipfun
+jtheta = mp.jtheta
+kleinj = mp.kleinj
+eta = mp.eta
+
+qp = mp.qp
+qhyper = mp.qhyper
+qgamma = mp.qgamma
+qfac = mp.qfac
+
+nint_distance = mp.nint_distance
+
+plot = mp.plot
+cplot = mp.cplot
+splot = mp.splot
+
+odefun = mp.odefun
+
+jacobian = mp.jacobian
+findroot = mp.findroot
+multiplicity = mp.multiplicity
+
+isinf = mp.isinf
+isnan = mp.isnan
+isnormal = mp.isnormal
+isint = mp.isint
+isfinite = mp.isfinite
+almosteq = mp.almosteq
+nan = mp.nan
+rand = mp.rand
+
+absmin = mp.absmin
+absmax = mp.absmax
+
+fraction = mp.fraction
+
+linspace = mp.linspace
+arange = mp.arange
+
+mpmathify = convert = mp.convert
+mpc = mp.mpc
+
+mpi = iv._mpi
+
+nstr = mp.nstr
+nprint = mp.nprint
+chop = mp.chop
+
+fneg = mp.fneg
+fadd = mp.fadd
+fsub = mp.fsub
+fmul = mp.fmul
+fdiv = mp.fdiv
+fprod = mp.fprod
+
+quad = mp.quad
+quadgl = mp.quadgl
+quadts = mp.quadts
+quadosc = mp.quadosc
+quadsubdiv = mp.quadsubdiv
+
+invertlaplace = mp.invertlaplace
+invlaptalbot = mp.invlaptalbot
+invlapstehfest = mp.invlapstehfest
+invlapdehoog = mp.invlapdehoog
+
+pslq = mp.pslq
+identify = mp.identify
+findpoly = mp.findpoly
+
+richardson = mp.richardson
+shanks = mp.shanks
+levin = mp.levin
+cohen_alt = mp.cohen_alt
+nsum = mp.nsum
+nprod = mp.nprod
+difference = mp.difference
+diff = mp.diff
+diffs = mp.diffs
+diffs_prod = mp.diffs_prod
+diffs_exp = mp.diffs_exp
+diffun = mp.diffun
+differint = mp.differint
+taylor = mp.taylor
+pade = mp.pade
+polyval = mp.polyval
+polyroots = mp.polyroots
+fourier = mp.fourier
+fourierval = mp.fourierval
+sumem = mp.sumem
+sumap = mp.sumap
+chebyfit = mp.chebyfit
+limit = mp.limit
+
+matrix = mp.matrix
+eye = mp.eye
+diag = mp.diag
+zeros = mp.zeros
+ones = mp.ones
+hilbert = mp.hilbert
+randmatrix = mp.randmatrix
+swap_row = mp.swap_row
+extend = mp.extend
+norm = mp.norm
+mnorm = mp.mnorm
+
+lu_solve = mp.lu_solve
+lu = mp.lu
+qr = mp.qr
+unitvector = mp.unitvector
+inverse = mp.inverse
+residual = mp.residual
+qr_solve = mp.qr_solve
+cholesky = mp.cholesky
+cholesky_solve = mp.cholesky_solve
+det = mp.det
+cond = mp.cond
+hessenberg = mp.hessenberg
+schur = mp.schur
+eig = mp.eig
+eig_sort = mp.eig_sort
+eigsy = mp.eigsy
+eighe = mp.eighe
+eigh = mp.eigh
+svd_r = mp.svd_r
+svd_c = mp.svd_c
+svd = mp.svd
+gauss_quadrature = mp.gauss_quadrature
+
+expm = mp.expm
+sqrtm = mp.sqrtm
+powm = mp.powm
+logm = mp.logm
+sinm = mp.sinm
+cosm = mp.cosm
+
+mpf = mp.mpf
+j = mp.j
+exp = mp.exp
+expj = mp.expj
+expjpi = mp.expjpi
+ln = mp.ln
+im = mp.im
+re = mp.re
+inf = mp.inf
+ninf = mp.ninf
+sign = mp.sign
+
+eps = mp.eps
+pi = mp.pi
+ln2 = mp.ln2
+ln10 = mp.ln10
+phi = mp.phi
+e = mp.e +euler = mp.euler +catalan = mp.catalan +khinchin = mp.khinchin +glaisher = mp.glaisher +apery = mp.apery +degree = mp.degree +twinprime = mp.twinprime +mertens = mp.mertens + +ldexp = mp.ldexp +frexp = mp.frexp + +fsum = mp.fsum +fdot = mp.fdot + +sqrt = mp.sqrt +cbrt = mp.cbrt +exp = mp.exp +ln = mp.ln +log = mp.log +log10 = mp.log10 +power = mp.power +cos = mp.cos +sin = mp.sin +tan = mp.tan +cosh = mp.cosh +sinh = mp.sinh +tanh = mp.tanh +acos = mp.acos +asin = mp.asin +atan = mp.atan +asinh = mp.asinh +acosh = mp.acosh +atanh = mp.atanh +sec = mp.sec +csc = mp.csc +cot = mp.cot +sech = mp.sech +csch = mp.csch +coth = mp.coth +asec = mp.asec +acsc = mp.acsc +acot = mp.acot +asech = mp.asech +acsch = mp.acsch +acoth = mp.acoth +cospi = mp.cospi +sinpi = mp.sinpi +sinc = mp.sinc +sincpi = mp.sincpi +cos_sin = mp.cos_sin +cospi_sinpi = mp.cospi_sinpi +fabs = mp.fabs +re = mp.re +im = mp.im +conj = mp.conj +floor = mp.floor +ceil = mp.ceil +nint = mp.nint +frac = mp.frac +root = mp.root +nthroot = mp.nthroot +hypot = mp.hypot +fmod = mp.fmod +ldexp = mp.ldexp +frexp = mp.frexp +sign = mp.sign +arg = mp.arg +phase = mp.phase +polar = mp.polar +rect = mp.rect +degrees = mp.degrees +radians = mp.radians +atan2 = mp.atan2 +fib = mp.fib +fibonacci = mp.fibonacci +lambertw = mp.lambertw +zeta = mp.zeta +altzeta = mp.altzeta +gamma = mp.gamma +rgamma = mp.rgamma +factorial = mp.factorial +fac = mp.fac +fac2 = mp.fac2 +beta = mp.beta +betainc = mp.betainc +psi = mp.psi +#psi0 = mp.psi0 +#psi1 = mp.psi1 +#psi2 = mp.psi2 +#psi3 = mp.psi3 +polygamma = mp.polygamma +digamma = mp.digamma +#trigamma = mp.trigamma +#tetragamma = mp.tetragamma +#pentagamma = mp.pentagamma +harmonic = mp.harmonic +bernoulli = mp.bernoulli +bernfrac = mp.bernfrac +stieltjes = mp.stieltjes +hurwitz = mp.hurwitz +dirichlet = mp.dirichlet +bernpoly = mp.bernpoly +eulerpoly = mp.eulerpoly +eulernum = mp.eulernum +polylog = mp.polylog +clsin = mp.clsin +clcos = mp.clcos +gammainc = mp.gammainc +gammaprod = mp.gammaprod +binomial = mp.binomial +rf = mp.rf +ff = mp.ff +hyper = mp.hyper +hyp0f1 = mp.hyp0f1 +hyp1f1 = mp.hyp1f1 +hyp1f2 = mp.hyp1f2 +hyp2f1 = mp.hyp2f1 +hyp2f2 = mp.hyp2f2 +hyp2f0 = mp.hyp2f0 +hyp2f3 = mp.hyp2f3 +hyp3f2 = mp.hyp3f2 +hyperu = mp.hyperu +hypercomb = mp.hypercomb +meijerg = mp.meijerg +appellf1 = mp.appellf1 +appellf2 = mp.appellf2 +appellf3 = mp.appellf3 +appellf4 = mp.appellf4 +hyper2d = mp.hyper2d +bihyper = mp.bihyper +erf = mp.erf +erfc = mp.erfc +erfi = mp.erfi +erfinv = mp.erfinv +npdf = mp.npdf +ncdf = mp.ncdf +expint = mp.expint +e1 = mp.e1 +ei = mp.ei +li = mp.li +ci = mp.ci +si = mp.si +chi = mp.chi +shi = mp.shi +fresnels = mp.fresnels +fresnelc = mp.fresnelc +airyai = mp.airyai +airybi = mp.airybi +airyaizero = mp.airyaizero +airybizero = mp.airybizero +scorergi = mp.scorergi +scorerhi = mp.scorerhi +ellipk = mp.ellipk +ellipe = mp.ellipe +ellipf = mp.ellipf +ellippi = mp.ellippi +elliprc = mp.elliprc +elliprj = mp.elliprj +elliprf = mp.elliprf +elliprd = mp.elliprd +elliprg = mp.elliprg +agm = mp.agm +jacobi = mp.jacobi +chebyt = mp.chebyt +chebyu = mp.chebyu +legendre = mp.legendre +legenp = mp.legenp +legenq = mp.legenq +hermite = mp.hermite +pcfd = mp.pcfd +pcfu = mp.pcfu +pcfv = mp.pcfv +pcfw = mp.pcfw +gegenbauer = mp.gegenbauer +laguerre = mp.laguerre +spherharm = mp.spherharm +besselj = mp.besselj +j0 = mp.j0 +j1 = mp.j1 +besseli = mp.besseli +bessely = mp.bessely +besselk = mp.besselk +besseljzero = mp.besseljzero +besselyzero = mp.besselyzero +hankel1 = mp.hankel1 +hankel2 
= mp.hankel2 +struveh = mp.struveh +struvel = mp.struvel +angerj = mp.angerj +webere = mp.webere +lommels1 = mp.lommels1 +lommels2 = mp.lommels2 +whitm = mp.whitm +whitw = mp.whitw +ber = mp.ber +bei = mp.bei +ker = mp.ker +kei = mp.kei +coulombc = mp.coulombc +coulombf = mp.coulombf +coulombg = mp.coulombg +barnesg = mp.barnesg +superfac = mp.superfac +hyperfac = mp.hyperfac +loggamma = mp.loggamma +siegeltheta = mp.siegeltheta +siegelz = mp.siegelz +grampoint = mp.grampoint +zetazero = mp.zetazero +riemannr = mp.riemannr +primepi = mp.primepi +primepi2 = mp.primepi2 +primezeta = mp.primezeta +bell = mp.bell +polyexp = mp.polyexp +expm1 = mp.expm1 +log1p = mp.log1p +powm1 = mp.powm1 +unitroots = mp.unitroots +cyclotomic = mp.cyclotomic +mangoldt = mp.mangoldt +secondzeta = mp.secondzeta +nzeros = mp.nzeros +backlunds = mp.backlunds +lerchphi = mp.lerchphi +stirling1 = mp.stirling1 +stirling2 = mp.stirling2 +squarew = mp.squarew +trianglew = mp.trianglew +sawtoothw = mp.sawtoothw +unit_triangle = mp.unit_triangle +sigmoid = mp.sigmoid + +# be careful when changing this name, don't use test*! +def runtests(): + """ + Run all mpmath tests and print output. + """ + import os.path + from inspect import getsourcefile + from .tests import runtests as tests + testdir = os.path.dirname(os.path.abspath(getsourcefile(tests))) + importdir = os.path.abspath(testdir + '/../..') + tests.testit(importdir, testdir) + +def doctests(filter=[]): + import sys + from timeit import default_timer as clock + for i, arg in enumerate(sys.argv): + if '__init__.py' in arg: + filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")] + break + import doctest + globs = globals().copy() + for obj in globs: #sorted(globs.keys()): + if filter: + if not sum([pat in obj for pat in filter]): + continue + sys.stdout.write(str(obj) + " ") + sys.stdout.flush() + t1 = clock() + doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv)) + t2 = clock() + print(round(t2-t1, 3)) + +if __name__ == '__main__': + doctests() diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03cf5a8be2feb01db220d671ffe1226f68ec6768 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5104de7d64ce68dd67fb0ca6d3f0f9106115500 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9210236b343af3ef9d397e6c3f90924f06e415 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eefe975d6519ffe737b908edbca0c5e1f5bd538f Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc 
b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0132029b872af57a6336f135f292b5f7596f02bd Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e0fd7b7261d79043b43de3bff9861c6f9697700 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/function_docs.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/function_docs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..021bcb25fa77e308c7a3fbc90455fbbc2ecf6aa7 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/function_docs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..059407dcc287f1b2d964ce759664b2693c72881e Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/identification.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..542224953d6891d9968ce5d028b8c3ea160f66ef Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/math2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72667db9eeb4bd2ec3c543e85401ebf005f0ed8d Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/rational.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd47bd2a7ef1daf5fbca8cc70cad154b9053af2 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/usertools.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65e68a5229a19e808d3b8add77a6e1ef0caf5a88 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/__pycache__/visualization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__init__.py b/MLPY/Lib/site-packages/mpmath/calculus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..040a3806b968f75b8d1a88ae37a4979fe83d466a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/__init__.py @@ -0,0 +1,6 @@ +from . import calculus +# XXX: hack to set methods +from . import approximation +from . import differentiation +from . import extrapolation +from . 
import polynomials diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe4d693f4ebaa89989acda57455acb89bfda009 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/approximation.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/approximation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7ca0979695a0a0fb09573bae1eda0b4cf4528cb Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/approximation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/calculus.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/calculus.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df5680971761f1a4907dd5e9236c74b42078726a Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/calculus.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fafcdc0ea5671bf0a02761972c44a943c8badade Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76dd71115f0dbf7daf66fe8b8fde004504bbba02 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae01b0e338dcccf2fbd33e15a7625a4d965b4454 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1152b42b7e0c7a9013ba79668d8973f4d56837eb Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73b5f88016e63a9cd8d790fa8b7664314aa36a2a Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..765fbbed5a4624c06f99a59deb3a5725fe16b214 Binary files /dev/null and 
b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd411bdcbf55c03ae1049b6114222a83d303d1d7 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/calculus/approximation.py b/MLPY/Lib/site-packages/mpmath/calculus/approximation.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca5cc598fb53491cb6ae4a41a40477c58544d53 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/approximation.py @@ -0,0 +1,246 @@ +from ..libmp.backend import xrange +from .calculus import defun + +#----------------------------------------------------------------------------# +# Approximation methods # +#----------------------------------------------------------------------------# + +# The Chebyshev approximation formula is given at: +# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html + +# The only major changes in the following code are that we return the +# expanded polynomial coefficients instead of Chebyshev coefficients, +# and that we automatically transform [a,b] -> [-1,1] and back +# for convenience. + +# Coefficient in Chebyshev approximation +def chebcoeff(ctx,f,a,b,j,N): + s = ctx.mpf(0) + h = ctx.mpf(0.5) + for k in range(1, N+1): + t = ctx.cospi((k-h)/N) + s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N) + return 2*s/N + +# Generate Chebyshev polynomials T_n(ax+b) in expanded form +def chebT(ctx, a=1, b=0): + Tb = [1] + yield Tb + Ta = [b, a] + while 1: + yield Ta + # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b) + Tmp = [0] + [2*a*t for t in Ta] + for i, c in enumerate(Ta): Tmp[i] += 2*b*c + for i, c in enumerate(Tb): Tmp[i] -= c + Ta, Tb = Tmp, Ta + +@defun +def chebyfit(ctx, f, interval, N, error=False): + r""" + Computes a polynomial of degree `N-1` that approximates the + given function `f` on the interval `[a, b]`. With ``error=True``, + :func:`~mpmath.chebyfit` also returns an accurate estimate of the + maximum absolute error; that is, the maximum value of + `|f(x) - P(x)|` for `x \in [a, b]`. + + :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula, + which gives a nearly optimal solution: that is, the maximum + error of the approximating polynomial is very close to + the smallest possible for any polynomial of the same degree. + + Chebyshev approximation is very useful if one needs repeated + evaluation of an expensive function, such as a function defined + implicitly by an integral or a differential equation. (For + example, it could be used to turn a slow mpmath function + into a fast machine-precision version of the same.)
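(Editor's aside, not part of the vendored file: the node-and-cosine rule that `chebcoeff` implements above is easy to check with plain floats. The sketch below assumes nothing beyond the standard library; `cheb_coeff` is a hypothetical name, not mpmath API.)

```python
import math

# Float sketch of the Chebyshev coefficient formula used by chebcoeff():
#   c_j = (2/N) * sum_{k=1..N} f(x_k) * cos(pi*j*(k - 1/2)/N)
# where the nodes x_k = cos(pi*(k - 1/2)/N) are mapped from [-1, 1] to [a, b].
def cheb_coeff(f, a, b, j, N):
    s = 0.0
    for k in range(1, N + 1):
        t = math.cos(math.pi * (k - 0.5) / N)   # Chebyshev node in [-1, 1]
        x = 0.5 * (b - a) * t + 0.5 * (b + a)   # node mapped to [a, b]
        s += f(x) * math.cos(math.pi * j * (k - 0.5) / N)
    return 2.0 * s / N

print(cheb_coeff(math.cos, 1.0, 2.0, 0, 5))  # j = 0 coefficient of cos on [1, 2]
```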
+ + **Examples** + + Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation + of `f(x) = \cos(x)`, valid on the interval `[1, 2]`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> poly, err = chebyfit(cos, [1, 2], 5, error=True) + >>> nprint(poly) + [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553] + >>> nprint(err, 12) + 1.61351758081e-5 + + The polynomial can be evaluated using ``polyval``:: + + >>> nprint(polyval(poly, 1.6), 12) + -0.0291858904138 + >>> nprint(cos(1.6), 12) + -0.0291995223013 + + Sampling the true error at 1000 points shows that the error + estimate generated by ``chebyfit`` is remarkably good:: + + >>> error = lambda x: abs(cos(x) - polyval(poly, x)) + >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12) + 1.61349954245e-5 + + **Choice of degree** + + The degree `N` can be set arbitrarily high, to obtain an + arbitrarily good approximation. As a rule of thumb, an + `N`-term Chebyshev approximation is good to `N/(b-a)` decimal + places on a unit interval (although this depends on how + well-behaved `f` is). The cost grows accordingly: ``chebyfit`` + evaluates the function `(N^2)/2` times to compute the + coefficients and an additional `N` times to estimate the error. + + **Possible issues** + + One should be careful to use a sufficiently high working + precision both when calling ``chebyfit`` and when evaluating + the resulting polynomial, as the polynomial is sometimes + ill-conditioned. It is for example difficult to reach + 15-digit accuracy when evaluating the polynomial using + machine precision floats, no matter the theoretical + accuracy of the polynomial. (The option to return the + coefficients in Chebyshev form should be made available + in the future.) + + It is important to note that Chebyshev approximation works + poorly if `f` is not smooth. A function containing singularities, + rapid oscillation, etc. can be approximated more effectively by + multiplying it by a weight function that cancels out the + nonsmooth features, or by dividing the interval into several + segments. + """ + a, b = ctx._as_points(interval) + orig = ctx.prec + try: + ctx.prec = orig + int(N**0.5) + 20 + c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)] + d = [ctx.zero] * N + d[0] = -c[0]/2 + h = ctx.mpf(0.5) + T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a)) + for (k, Tk) in zip(range(N), T): + for i in range(len(Tk)): + d[i] += c[k]*Tk[i] + d = d[::-1] + # Estimate maximum error + err = ctx.zero + for k in range(N): + x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h + err = max(err, abs(f(x) - ctx.polyval(d, x))) + finally: + ctx.prec = orig + if error: + return d, +err + else: + return d + +@defun +def fourier(ctx, f, interval, N): + r""" + Computes the Fourier series of degree `N` of the given function + on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns + two lists `(c, s)` of coefficients (the cosine series and sine + series, respectively), such that + + .. math :: + + f(x) \sim \sum_{k=0}^N + c_k \cos(k m x) + s_k \sin(k m x) + + where `m = 2 \pi / (b-a)`. + + Note that many texts define the first coefficient as `2 c_0` instead + of `c_0`. The easiest way to evaluate the computed series correctly + is to pass it to :func:`~mpmath.fourierval`. + + **Examples** + + The function `f(x) = x` has a simple Fourier series on the standard + interval `[-\pi, \pi]`.
The cosine coefficients are all zero (because + the function has odd symmetry), and the sine coefficients are + rational numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> c, s = fourier(lambda x: x, [-pi, pi], 5) + >>> nprint(c) + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint(s) + [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4] + + This computes a Fourier series of a nonsymmetric function on + a nonstandard interval:: + + >>> I = [-1, 1.5] + >>> f = lambda x: x**2 - 4*x + 1 + >>> cs = fourier(f, I, 4) + >>> nprint(cs[0]) + [0.583333, 1.12479, -1.27552, 0.904708, -0.441296] + >>> nprint(cs[1]) + [0.0, -2.6255, 0.580905, 0.219974, -0.540057] + + It is instructive to plot a function along with its truncated + Fourier series:: + + >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP + + Fourier series generally converge slowly (and may not converge + pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier + series gives an `L^2` error corresponding to 2-digit accuracy:: + + >>> I = [-1, 1] + >>> cs = fourier(cosh, I, 9) + >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2 + >>> nprint(sqrt(quad(g, I))) + 0.00467963 + + :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions, + the accuracy (and speed) can be improved by including all singular + points in the interval specification:: + + >>> nprint(fourier(abs, [-1, 1], 0), 10) + ([0.5000441648], [0.0]) + >>> nprint(fourier(abs, [-1, 0, 1], 0), 10) + ([0.5], [0.0]) + + """ + interval = ctx._as_points(interval) + a = interval[0] + b = interval[-1] + L = b-a + cos_series = [] + sin_series = [] + cutoff = ctx.eps*10 + for n in xrange(N+1): + m = 2*n*ctx.pi/L + an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L + bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L + if n == 0: + an /= 2 + if abs(an) < cutoff: an = ctx.zero + if abs(bn) < cutoff: bn = ctx.zero + cos_series.append(an) + sin_series.append(bn) + return cos_series, sin_series + +@defun +def fourierval(ctx, series, interval, x): + """ + Evaluates a Fourier series (in the format computed + by :func:`~mpmath.fourier` for the given interval) at the point `x`. + + The series should be a pair `(c, s)` where `c` is the + cosine series and `s` is the sine series. The two lists + need not have the same length.
+ """ + cs, ss = series + ab = ctx._as_points(interval) + m = 2*ctx.pi/(ab[-1]-ab[0]) + s = ctx.zero + s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n]) + s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n]) + return s diff --git a/MLPY/Lib/site-packages/mpmath/calculus/calculus.py b/MLPY/Lib/site-packages/mpmath/calculus/calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..24256f121d6c07e5ce954f2a5f5024f156f64016 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/calculus.py @@ -0,0 +1,6 @@ +class CalculusMethods(object): + pass + +def defun(f): + setattr(CalculusMethods, f.__name__, f) + return f diff --git a/MLPY/Lib/site-packages/mpmath/calculus/differentiation.py b/MLPY/Lib/site-packages/mpmath/calculus/differentiation.py new file mode 100644 index 0000000000000000000000000000000000000000..f8186bebd4476476eded9e4b2f8dc3eb23ef5ff9 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/differentiation.py @@ -0,0 +1,647 @@ +from ..libmp.backend import xrange +from .calculus import defun + +try: + iteritems = dict.iteritems +except AttributeError: + iteritems = dict.items + +#----------------------------------------------------------------------------# +# Differentiation # +#----------------------------------------------------------------------------# + +@defun +def difference(ctx, s, n): + r""" + Given a sequence `(s_k)` containing at least `n+1` items, returns the + `n`-th forward difference, + + .. math :: + + \Delta^n s_0 = \sum_{k=0}^{n} (-1)^{k+n} {n \choose k} s_k. + """ + n = int(n) + d = ctx.zero + b = (-1) ** (n & 1) + for k in xrange(n+1): + d += b * s[k] + b = (b * (k-n)) // (k+1) + return d + +def hsteps(ctx, f, x, n, prec, **options): + singular = options.get('singular') + addprec = options.get('addprec', 10) + direction = options.get('direction', 0) + workprec = (prec+2*addprec) * (n+1) + orig = ctx.prec + try: + ctx.prec = workprec + h = options.get('h') + if h is None: + if options.get('relative'): + hextramag = int(ctx.mag(x)) + else: + hextramag = 0 + h = ctx.ldexp(1, -prec-addprec-hextramag) + else: + h = ctx.convert(h) + # Directed: steps x, x+h, ... x+n*h + direction = options.get('direction', 0) + if direction: + h *= ctx.sign(direction) + steps = xrange(n+1) + norm = h + # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h + else: + steps = xrange(-n, n+1, 2) + norm = (2*h) + # Perturb + if singular: + x += 0.5*h + values = [f(x+k*h) for k in steps] + return values, norm, workprec + finally: + ctx.prec = orig + + +@defun +def diff(ctx, f, x, n=1, **options): + r""" + Numerically computes the derivative of `f`, `f'(x)`, or generally for + an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`. + A few basic examples are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> diff(lambda x: x**2 + x, 1.0) + 3.0 + >>> diff(lambda x: x**2 + x, 1.0, 2) + 2.0 + >>> diff(lambda x: x**2 + x, 1.0, 3) + 0.0 + >>> nprint([diff(exp, 3, n) for n in range(5)]) # exp'(x) = exp(x) + [20.0855, 20.0855, 20.0855, 20.0855, 20.0855] + + Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)` + and order `(n_1, \ldots, n_k)`, the partial derivative + `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated.
For example:: + + >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1)) + 2.75 + >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1)) + 3.0 + + **Options** + + The following optional keyword arguments are recognized: + + ``method`` + Supported methods are ``'step'`` or ``'quad'``: derivatives may be + computed using either a finite difference with a small step + size `h` (default), or numerical quadrature. + ``direction`` + Direction of finite difference: can be -1 for a left + difference, 0 for a central difference (default), or +1 + for a right difference; more generally can be any complex number. + ``addprec`` + Extra precision for `h` used to account for the function's + sensitivity to perturbations (default = 10). + ``relative`` + Choose `h` relative to the magnitude of `x`, rather than an + absolute value; useful for large or tiny `x` (default = False). + ``h`` + As an alternative to ``addprec`` and ``relative``, manually + select the step size `h`. + ``singular`` + If True, evaluation exactly at the point `x` is avoided; this is + useful for differentiating functions with removable singularities. + Default = False. + ``radius`` + Radius of integration contour (with ``method = 'quad'``). + Default = 0.25. A larger radius typically is faster and more + accurate, but it must be chosen so that `f` has no + singularities within the radius from the evaluation point. + + A finite difference requires `n+1` function evaluations and must be + performed at `(n+1)` times the target precision. Accordingly, `f` must + support fast evaluation at high precision. + + With integration, a larger number of function evaluations is + required, but not much extra precision is required. For high order + derivatives, this method may thus be faster if f is very expensive to + evaluate at high precision. + + **Further examples** + + The direction option is useful for computing left- or right-sided + derivatives of nonsmooth functions:: + + >>> diff(abs, 0, direction=0) + 0.0 + >>> diff(abs, 0, direction=1) + 1.0 + >>> diff(abs, 0, direction=-1) + -1.0 + + More generally, if the direction is nonzero, a right difference + is computed where the step size is multiplied by sign(direction). 
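(Editor's aside, not part of the vendored file: for a real nonzero direction, the directed scheme described above degenerates to the familiar forward/backward difference. A minimal float sketch; `one_sided_diff` is a hypothetical helper, not mpmath API.)

```python
import math

# Directed first difference: sample at x and x + h, with h carrying the
# sign of the direction, mirroring the steps x, x+h, ..., x+n*h above.
def one_sided_diff(f, x, direction=1.0, h=1e-7):
    h = math.copysign(h, direction)
    return (f(x + h) - f(x)) / h

print(one_sided_diff(abs, 0.0, direction=+1))   # 1.0, right derivative of |x|
print(one_sided_diff(abs, 0.0, direction=-1))   # -1.0, left derivative of |x|
```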
+ For example, with direction=+j, the derivative from the positive + imaginary direction will be computed:: + + >>> diff(abs, 0, direction=j) + (0.0 - 1.0j) + + With integration, the result may have a small imaginary part + even if the result is purely real:: + + >>> diff(sqrt, 1, method='quad') # doctest:+ELLIPSIS + (0.5 - 4.59...e-26j) + >>> chop(_) + 0.5 + + Adding precision to obtain an accurate value:: + + >>> diff(cos, 1e-30) + 0.0 + >>> diff(cos, 1e-30, h=0.0001) + -9.99999998328279e-31 + >>> diff(cos, 1e-30, addprec=100) + -1.0e-30 + + """ + partial = False + try: + orders = list(n) + x = list(x) + partial = True + except TypeError: + pass + if partial: + x = [ctx.convert(_) for _ in x] + return _partial_diff(ctx, f, x, orders, options) + method = options.get('method', 'step') + if n == 0 and method != 'quad' and not options.get('singular'): + return f(ctx.convert(x)) + prec = ctx.prec + try: + if method == 'step': + values, norm, workprec = hsteps(ctx, f, x, n, prec, **options) + ctx.prec = workprec + v = ctx.difference(values, n) / norm**n + elif method == 'quad': + ctx.prec += 10 + radius = ctx.convert(options.get('radius', 0.25)) + def g(t): + rei = radius*ctx.expj(t) + z = x + rei + return f(z) / rei**n + d = ctx.quadts(g, [0, 2*ctx.pi]) + v = d * ctx.factorial(n) / (2*ctx.pi) + else: + raise ValueError("unknown method: %r" % method) + finally: + ctx.prec = prec + return +v + +def _partial_diff(ctx, f, xs, orders, options): + if not orders: + return f() + if not sum(orders): + return f(*xs) + i = 0 + for i in range(len(orders)): + if orders[i]: + break + order = orders[i] + def fdiff_inner(*f_args): + def inner(t): + return f(*(f_args[:i] + (t,) + f_args[i+1:])) + return ctx.diff(inner, f_args[i], order, **options) + orders[i] = 0 + return _partial_diff(ctx, fdiff_inner, xs, orders, options) + +@defun +def diffs(ctx, f, x, n=None, **options): + r""" + Returns a generator that yields the sequence of derivatives + + .. math :: + + f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots + + With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)` + function evaluations to generate the first `k` derivatives, + rather than the roughly `O(k^2)` evaluations + required if one calls :func:`~mpmath.diff` `k` separate times. + + With `n < \infty`, the generator stops as soon as the + `n`-th derivative has been generated. If the exact number of + needed derivatives is known in advance, this is + slightly more efficient. + + Options are the same as for :func:`~mpmath.diff`. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15 + >>> nprint(list(diffs(cos, 1, 5))) + [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471] + >>> for i, d in zip(range(6), diffs(cos, 1)): + ... print("%s %s" % (i, d)) + ...
+ 0 0.54030230586814 + 1 -0.841470984807897 + 2 -0.54030230586814 + 3 0.841470984807897 + 4 0.54030230586814 + 5 -0.841470984807897 + + """ + if n is None: + n = ctx.inf + else: + n = int(n) + if options.get('method', 'step') != 'step': + k = 0 + while k < n + 1: + yield ctx.diff(f, x, k, **options) + k += 1 + return + singular = options.get('singular') + if singular: + yield ctx.diff(f, x, 0, singular=True) + else: + yield f(ctx.convert(x)) + if n < 1: + return + if n == ctx.inf: + A, B = 1, 2 + else: + A, B = 1, n+1 + while 1: + callprec = ctx.prec + y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options) + for k in xrange(A, B): + try: + ctx.prec = workprec + d = ctx.difference(y, k) / norm**k + finally: + ctx.prec = callprec + yield +d + if k >= n: + return + A, B = B, int(A*1.4+1) + B = min(B, n) + +def iterable_to_function(gen): + gen = iter(gen) + data = [] + def f(k): + for i in xrange(len(data), k+1): + data.append(next(gen)) + return data[k] + return f + +@defun +def diffs_prod(ctx, factors): + r""" + Given a list of `N` iterables or generators yielding + `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`, + generate `g(x), g'(x), g''(x), \ldots` where + `g(x) = f_1(x) f_2(x) \cdots f_N(x)`. + + At high precision and for large orders, this is typically more efficient + than numerical differentiation if the derivatives of each `f_k(x)` + admit direct computation. + + Note: This function does not increase the working precision internally, + so guard digits may have to be added externally for full accuracy. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = lambda x: exp(x)*cos(x)*sin(x) + >>> u = diffs(f, 1) + >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)]) + >>> next(u); next(v) + 1.23586333600241 + 1.23586333600241 + >>> next(u); next(v) + 0.104658952245596 + 0.104658952245596 + >>> next(u); next(v) + -5.96999877552086 + -5.96999877552086 + >>> next(u); next(v) + -12.4632923122697 + -12.4632923122697 + + """ + N = len(factors) + if N == 1: + for c in factors[0]: + yield c + else: + u = iterable_to_function(ctx.diffs_prod(factors[:N//2])) + v = iterable_to_function(ctx.diffs_prod(factors[N//2:])) + n = 0 + while 1: + #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1)) + s = u(n) * v(0) + a = 1 + for k in xrange(1,n+1): + a = a * (n-k+1) // k + s += a * u(n-k) * v(k) + yield s + n += 1 + +def dpoly(n, _cache={}): + """ + nth differentiation polynomial for exp (Faa di Bruno's formula). + + TODO: most exponents are zero, so maybe a sparse representation + would be better. + """ + if n in _cache: + return _cache[n] + if not _cache: + _cache[0] = {(0,):1} + R = dpoly(n-1) + R = dict((c+(0,),v) for (c,v) in iteritems(R)) + Ra = {} + for powers, count in iteritems(R): + powers1 = (powers[0]+1,) + powers[1:] + if powers1 in Ra: + Ra[powers1] += count + else: + Ra[powers1] = count + for powers, count in iteritems(R): + if not sum(powers): + continue + for k,p in enumerate(powers): + if p: + powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:] + if powers2 in Ra: + Ra[powers2] += p*count + else: + Ra[powers2] = p*count + _cache[n] = Ra + return _cache[n] + +@defun +def diffs_exp(ctx, fdiffs): + r""" + Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots` + generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`. + + At high precision and for large orders, this is typically more efficient + than numerical differentiation if the derivatives of `f(x)` + admit direct computation. 
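(Editor's aside, not part of the vendored file: the low-order cases of the identity behind `diffs_exp` can be verified by hand, since for `g = exp(f)` one has `g' = f'g` and `g'' = (f'' + f'^2)g`; the `dpoly` table above encodes the arbitrary-order version via Faa di Bruno's formula. A float sketch under that assumption:)

```python
import math

# For g(x) = exp(f(x)): g' = f'*g and g'' = (f'' + f'**2)*g.
# Here f = sin, so f' = cos and f'' = -sin.
f, df, d2f = math.sin, math.cos, lambda x: -math.sin(x)
x = 1.0
g = math.exp(f(x))
print(df(x) * g)                 # g'(1)
print((d2f(x) + df(x)**2) * g)   # g''(1)
```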
+ + Note: This function does not increase the working precision internally, + so guard digits may have to be added externally for full accuracy. + + **Examples** + + The derivatives of the gamma function can be computed using + logarithmic differentiation:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> + >>> def diffs_loggamma(x): + ... yield loggamma(x) + ... i = 0 + ... while 1: + ... yield psi(i,x) + ... i += 1 + ... + >>> u = diffs_exp(diffs_loggamma(3)) + >>> v = diffs(gamma, 3) + >>> next(u); next(v) + 2.0 + 2.0 + >>> next(u); next(v) + 1.84556867019693 + 1.84556867019693 + >>> next(u); next(v) + 2.49292999190269 + 2.49292999190269 + >>> next(u); next(v) + 3.44996501352367 + 3.44996501352367 + + """ + fn = iterable_to_function(fdiffs) + f0 = ctx.exp(fn(0)) + yield f0 + i = 1 + while 1: + s = ctx.mpf(0) + for powers, c in iteritems(dpoly(i)): + s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p) + yield s * f0 + i += 1 + +@defun +def differint(ctx, f, x, n=1, x0=0): + r""" + Calculates the Riemann-Liouville differintegral, or fractional + derivative, defined by + + .. math :: + + \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m} + \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt + + where `f` is a given (presumably well-behaved) function, + `x` is the evaluation point, `n` is the order, and `x_0` is + the reference point of integration (`m` is an arbitrary + parameter selected automatically). + + With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`, + the second derivative `f''(x)`, etc. With `n = -1`, it gives + `\int_{x_0}^x f(t) dt`, with `n = -2` + it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc. + + As `n` is permitted to be any number, this operator generalizes + iterated differentiation and iterated integration to a single + operator with a continuous order parameter. + + **Examples** + + There is an exact formula for the fractional derivative of a + monomial `x^p`, which may be used as a reference. For example, + the following gives a half-derivative (order 0.5):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> x = mpf(3); p = 2; n = 0.5 + >>> differint(lambda t: t**p, x, n) + 7.81764019044672 + >>> gamma(p+1)/gamma(p-n+1) * x**(p-n) + 7.81764019044672 + + Another useful test function is the exponential function, whose + integration / differentiation formula easily generalizes + to arbitrary order. Here we first compute a third derivative, + and then a triply nested integral.
(The reference point `x_0` + is set to `-\infty` to avoid nonzero endpoint terms.):: + + >>> differint(lambda x: exp(pi*x), -1.5, 3) + 0.278538406900792 + >>> exp(pi*-1.5) * pi**3 + 0.278538406900792 + >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf) + 1922.50563031149 + >>> exp(pi*3.5) / pi**3 + 1922.50563031149 + + However, for noninteger `n`, the differentiation formula for the + exponential function must be modified to give the same result as the + Riemann-Liouville differintegral:: + + >>> x = mpf(3.5) + >>> c = pi + >>> n = 1+2*j + >>> differint(lambda x: exp(c*x), x, n) + (-123295.005390743 + 140955.117867654j) + >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n) + (-123295.005390743 + 140955.117867654j) + + + """ + m = max(int(ctx.ceil(ctx.re(n)))+1, 1) + r = m-n-1 + g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x]) + return ctx.diff(g, x, m) / ctx.gamma(m-n) + +@defun +def diffun(ctx, f, n=1, **options): + r""" + Given a function `f`, returns a function `g(x)` that evaluates the nth + derivative `f^{(n)}(x)`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> cos2 = diffun(sin) + >>> sin2 = diffun(sin, 4) + >>> cos(1.3), cos2(1.3) + (0.267498828624587, 0.267498828624587) + >>> sin(1.3), sin2(1.3) + (0.963558185417193, 0.963558185417193) + + The function `f` must support arbitrary precision evaluation. + See :func:`~mpmath.diff` for additional details and supported + keyword options. + """ + if n == 0: + return f + def g(x): + return ctx.diff(f, x, n, **options) + return g + +@defun +def taylor(ctx, f, x, n, **options): + r""" + Produces a degree-`n` Taylor polynomial around the point `x` of the + given function `f`. The coefficients are returned as a list. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint(chop(taylor(sin, 0, 5))) + [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333] + + The coefficients are computed using high-order numerical + differentiation. The function must be possible to evaluate + to arbitrary precision. See :func:`~mpmath.diff` for additional details + and supported keyword options. + + Note that to evaluate the Taylor polynomial as an approximation + of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed, + and the point of the Taylor expansion must be subtracted from + the argument: + + >>> p = taylor(exp, 2.0, 10) + >>> polyval(p[::-1], 2.5 - 2.0) + 12.1824939606092 + >>> exp(2.5) + 12.1824939607035 + + """ + gen = enumerate(ctx.diffs(f, x, n, **options)) + if options.get("chop", True): + return [ctx.chop(d)/ctx.factorial(i) for i, d in gen] + else: + return [d/ctx.factorial(i) for i, d in gen] + +@defun +def pade(ctx, a, L, M): + r""" + Computes a Pade approximation of degree `(L, M)` to a function. + Given at least `L+M+1` Taylor coefficients `a` approximating + a function `A(x)`, :func:`~mpmath.pade` returns coefficients of + polynomials `P, Q` satisfying + + .. math :: + + P = \sum_{k=0}^L p_k x^k + + Q = \sum_{k=0}^M q_k x^k + + Q_0 = 1 + + A(x) Q(x) = P(x) + O(x^{L+M+1}) + + `P(x)/Q(x)` can provide a good approximation to an analytic function + beyond the radius of convergence of its Taylor series (example + from G.A. Baker 'Essentials of Pade Approximants' Academic Press, + Ch.1A):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> one = mpf(1) + >>> def f(x): + ... return sqrt((one + 2*x)/(one + x)) + ... 
+ >>> a = taylor(f, 0, 6) + >>> p, q = pade(a, 3, 3) + >>> x = 10 + >>> polyval(p[::-1], x)/polyval(q[::-1], x) + 1.38169105566806 + >>> f(x) + 1.38169855941551 + + """ + # To determine L+1 coefficients of P and M coefficients of Q + # L+M+1 coefficients of A must be provided + if len(a) < L+M+1: + raise ValueError("L+M+1 Coefficients should be provided") + + if M == 0: + if L == 0: + return [ctx.one], [ctx.one] + else: + return a[:L+1], [ctx.one] + + # Solve first + # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1] + # ... + # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M] + A = ctx.matrix(M) + for j in range(M): + for i in range(min(M, L+j+1)): + A[j, i] = a[L+j-i] + v = -ctx.matrix(a[(L+1):(L+M+1)]) + x = ctx.lu_solve(A, v) + q = [ctx.one] + list(x) + # compute p + p = [0]*(L+1) + for i in range(L+1): + s = a[i] + for j in range(1, min(M,i) + 1): + s += q[j]*a[i-j] + p[i] = s + return p, q diff --git a/MLPY/Lib/site-packages/mpmath/calculus/extrapolation.py b/MLPY/Lib/site-packages/mpmath/calculus/extrapolation.py new file mode 100644 index 0000000000000000000000000000000000000000..7df0fea3c62c9b71ee24d3f39fd9b7fd3318ed23 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/extrapolation.py @@ -0,0 +1,2115 @@ +try: + from itertools import izip +except ImportError: + izip = zip + +from ..libmp.backend import xrange +from .calculus import defun + +try: + next = next +except NameError: + next = lambda _: _.next() + +@defun +def richardson(ctx, seq): + r""" + Given a list ``seq`` of the first `N` elements of a slowly convergent + infinite sequence, :func:`~mpmath.richardson` computes the `N`-term + Richardson extrapolate for the limit. + + :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated + limit and `c` is the magnitude of the largest weight used during the + computation. The weight provides an estimate of the precision + lost to cancellation. Due to cancellation effects, the sequence must + typically be computed at a much higher precision than the target + accuracy of the extrapolation. + + **Applicability and issues** + + The `N`-step Richardson extrapolation algorithm used by + :func:`~mpmath.richardson` is described in [1]. + + Richardson extrapolation only works for a specific type of sequence, + namely one converging like partial sums of + `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials. + When the sequence does not converge at such a rate, + :func:`~mpmath.richardson` generally produces garbage. + + Richardson extrapolation has the advantage of being fast: the `N`-term + extrapolate requires only `O(N)` arithmetic operations, and usually + produces an estimate that is accurate to `O(N)` digits. Contrast with + the Shanks transformation (see :func:`~mpmath.shanks`), which requires + `O(N^2)` operations. + + :func:`~mpmath.richardson` is unable to produce an estimate for the + approximation error. One way to estimate the error is to perform + two extrapolations with slightly different `N` and compare the + results. + + Richardson extrapolation does not work for oscillating sequences. + As a simple workaround, :func:`~mpmath.richardson` detects if the last + three elements do not differ monotonically, and in that case + applies extrapolation only to the even-index elements. + + **Example** + + Applying Richardson extrapolation to the Leibniz series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ...
for m in range(1,30)] + >>> v, c = richardson(S[:10]) + >>> v + 3.2126984126984126984126984127 + >>> nprint([v-pi, c]) + [0.0711058, 2.0] + + >>> v, c = richardson(S[:30]) + >>> v + 3.14159265468624052829954206226 + >>> nprint([v-pi, c]) + [1.09645e-9, 20833.3] + + **References** + + 1. [BenderOrszag]_ pp. 375-376 + + """ + if len(seq) < 3: + raise ValueError("seq should be of minimum length 3") + if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]): + seq = seq[::2] + N = len(seq)//2-1 + s = ctx.zero + # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)! + # To avoid repeated factorials, we simplify the quotient + # of successive weights to obtain a recurrence relation + c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N)) + maxc = 1 + for k in xrange(N+1): + s += c * seq[N+k] + maxc = max(abs(c), maxc) + c *= (k-N)*ctx.mpf(k+N+1)**N + c /= ((1+k)*ctx.mpf(k+N)**N) + return s, maxc + +@defun +def shanks(ctx, seq, table=None, randomized=False): + r""" + Given a list ``seq`` of the first `N` elements of a slowly + convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated + Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks + transformation often provides strong convergence acceleration, + especially if the sequence is oscillating. + + The iterated Shanks transformation is computed using the Wynn + epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full + epsilon table generated by Wynn's algorithm, which can be read + off as follows: + + * The table is a list of lists forming a lower triangular matrix, + where higher row and column indices correspond to more accurate + values. + * The columns with even index hold dummy entries (required for the + computation) and the columns with odd index hold the actual + extrapolates. + * The last element in the last row is typically the most + accurate estimate of the limit. + * The difference to the third last element in the last row + provides an estimate of the approximation error. + * The magnitude of the second last element provides an estimate + of the numerical accuracy lost to cancellation. + + For convenience, the extrapolation is stopped at an odd index + so that ``shanks(seq)[-1][-1]`` always gives an estimate of the + limit. + + Optionally, an existing table can be passed to :func:`~mpmath.shanks`. + This can be used to efficiently extend a previous computation after + new elements have been appended to the sequence. The table will + then be updated in-place. + + **The Shanks transformation** + + The Shanks transformation is defined as follows (see [2]): given + the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is + given by + + .. math :: + + S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k} + + The Shanks transformation gives the exact limit `A_{\infty}` in a + single step if `A_k = A + a q^k`. Note in particular that it + extrapolates the exact sum of a geometric series in a single step. + + Applying the Shanks transformation once often improves convergence + substantially for an arbitrary sequence, but the optimal effect is + obtained by applying it iteratively: + `S(S(A_k)), S(S(S(A_k))), \ldots`. + + Wynn's epsilon algorithm provides an efficient way to generate + the table of iterated Shanks transformations. It reduces the + computation of each element to essentially a single division, at + the cost of requiring dummy elements in the table. See [1] for + details.
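(Editor's aside, not part of the vendored file: a single application of the Shanks formula quoted above is easy to sketch in plain floats; `shanks_once` is a hypothetical name and is unrelated to mpmath's table-building `shanks`.)

```python
# One Shanks step, S(A_k) = (A[k+1]*A[k-1] - A[k]**2) / (A[k+1] + A[k-1] - 2*A[k]),
# applied to partial sums of the Leibniz series 4*(1 - 1/3 + 1/5 - ...) for pi.
def shanks_once(A):
    return [(A[k+1]*A[k-1] - A[k]**2) / (A[k+1] + A[k-1] - 2*A[k])
            for k in range(1, len(A) - 1)]

partial, s = [], 0.0
for n in range(8):
    s += 4.0 * (-1)**n / (2*n + 1)
    partial.append(s)
print(partial[-1])               # ~3.0171, still far from pi
print(shanks_once(partial)[-1])  # ~3.1409, much closer after one step
```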
+ + **Precision issues** + + Due to cancellation effects, the sequence must typically be + computed at a much higher precision than the target accuracy + of the extrapolation. + + If the Shanks transformation converges to the exact limit (such + as if the sequence is a geometric series), then a division by + zero occurs. By default, :func:`~mpmath.shanks` handles this case by + terminating the iteration and returning the table it has + generated so far. With *randomized=True*, it will instead + replace the zero by a pseudorandom number close to zero. + (TODO: find a better solution to this problem.) + + **Examples** + + We illustrate by applying the Shanks transformation to the Leibniz + series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 50 + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ... for m in range(1,30)] + >>> + >>> T = shanks(S[:7]) + >>> for row in T: + ... nprint(row) + ... + [-0.75] + [1.25, 3.16667] + [-1.75, 3.13333, -28.75] + [2.25, 3.14524, 82.25, 3.14234] + [-2.75, 3.13968, -177.75, 3.14139, -969.937] + [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161] + + The extrapolated accuracy is about 4 digits, and about 4 digits + may have been lost due to cancellation:: + + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [2.22532e-5, 4.78309e-5, 3515.06] + + Now we extend the computation:: + + >>> T = shanks(S[:25], T) + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [3.75527e-19, 1.48478e-19, 2.96014e+17] + + The value for pi is now accurate to 18 digits. About 18 digits may + also have been lost to cancellation. + + Here is an example with a geometric series, where the convergence + is immediate (the sum is exactly 1):: + + >>> mp.dps = 15 + >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]): + ... nprint(row) + [4.0] + [8.0, 1.0] + + **References** + + 1. [GravesMorris]_ + + 2. [BenderOrszag]_ pp. 368-375 + + """ + if len(seq) < 2: + raise ValueError("seq should be of minimum length 2") + if table: + START = len(table) + else: + START = 0 + table = [] + STOP = len(seq) - 1 + if STOP & 1: + STOP -= 1 + one = ctx.one + eps = +ctx.eps + if randomized: + from random import Random + rnd = Random() + rnd.seed(START) + for i in xrange(START, STOP): + row = [] + for j in xrange(i+1): + if j == 0: + a, b = 0, seq[i+1]-seq[i] + else: + if j == 1: + a = seq[i] + else: + a = table[i-1][j-2] + b = row[j-1] - table[i-1][j-1] + if not b: + if randomized: + b = (1 + rnd.getrandbits(10))*eps + elif i & 1: + return table[:-1] + else: + return table + row.append(a + one/b) + table.append(row) + return table + + +class levin_class: + # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) + r""" + This interface implements Levin's (nonlinear) sequence transformation for + convergence acceleration and summation of divergent series. It performs + better than the Shanks/Wynn-epsilon algorithm for logarithmically convergent + or alternating divergent series. + + Let *A* be the series we want to sum: + + .. math :: + + A = \sum_{k=0}^{\infty} a_k + + Attention: all `a_k` must be non-zero! + + Let `s_n` be the partial sums of this series: + + .. math :: + + s_n = \sum_{k=0}^n a_k. + + **Methods** + + Calling ``levin`` returns an object with the following methods. + + ``update(...)`` works with the list of individual terms `a_k` of *A*, and + ``update_psum(...)`` works with the list of partial sums `s_k` of *A*: + + ..
code :: + + v, e = ...update([a_0, a_1,..., a_k]) + v, e = ...update_psum([s_0, s_1,..., s_k]) + + ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)`` + works with the partial sums `s_k`: + + .. code :: + + v, e = ...step(a_k) + v, e = ...step_psum(s_k) + + *v* is the current estimate for *A*, and *e* is an error estimate which is + simply the difference between the current estimate and the last estimate. + One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``. + + **A word of caution** + + One can only hope for good results (i.e. convergence acceleration or + resummation) if the `s_n` have some well defined asymptotic behavior for + large `n` and are not erratic or random. Furthermore one usually needs very + high working precision because of the numerical cancellation. If the working + precision is insufficient, levin may silently produce numerical garbage. + Furthermore even if the Levin-transformation converges, in the general case + there is no proof that the result is mathematically sound. Only for very + special classes of problems one can prove that the Levin-transformation + converges to the expected result (for example Stieltjes-type integrals). + Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison + to Shanks/Wynn-epsilon, Richardson & co. + In summary one can say that the Levin-transformation is powerful but + unreliable and that it may need a copious amount of working precision. + + The Levin transform has several variants differing in the choice of weights. + Some variants are better suited for the possible flavours of convergence + behaviour of *A* than other variants: + + .. code :: + + convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon + + logarithmic + - + - + linear + + + + + alternating divergent + + + + + + "+" means the variant is suitable, "-" means the variant is not suitable; + for comparison the Shanks/Wynn-epsilon transform is listed, too. + + The variant is controlled through the variant keyword (i.e. ``variant="u"``, + ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice. + + Finally it is possible to use the Sidi-S transform instead of the Levin transform + by using the keyword ``method='sidi'``. The Sidi-S transform works better than the + Levin transformation for some divergent series (see the examples). + + Parameters: + + .. code :: + + method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation + variant "u","t" or "v" chooses the weight variant. + + The Levin transform is also accessible through the nsum interface. + ``method="l"`` or ``method="levin"`` select the normal Levin transform while + ``method="sidi"`` + selects the Sidi-S transform. The variant is in both cases selected through the + levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise + it will miss the point where the Levin transform converges resulting in numerical + overflow/garbage. For highly divergent series a copious amount of working precision + must be chosen. + + **Examples** + + First we sum the zeta function:: + + >>> from mpmath import mp + >>> mp.prec = 53 + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "u") + ... S, s, n = [], 0, 1 + ... while 1: + ... s += mp.one / (n * n) + ... n += 1 + ... S.append(s) + ... v, e = L.update_psum(S) + ... if e < eps: + ... break + ...
if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 6)) + 0.0 + >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the zeta function outside its range of convergence + (attention: This does not work at the negative integers!):: + + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "v") + ... A, n = [], 1 + ... while 1: + ... s = mp.mpf(n) ** (2 + 3j) + ... n += 1 + ... A.append(s) + ... v, e = L.update(A) + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.zeta(-2-3j))) + 0.0 + >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the divergent asymptotic expansion of an integral related to the + exponential integral (see also [2] p.373). The Sidi-S transform works best here:: + + >>> z = mp.mpf(10) + >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation + ... L = mp.levin(method = "sidi", variant = "t") + ... n = 0 + ... while 1: + ... s = (-1)**n * mp.fac(n) * z ** (-n) + ... v, e = L.step(s) + ... n += 1 + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + >>> print(mp.chop(v - w)) + 0.0 + + Another highly divergent integral is also summable:: + + >>> z = mp.mpf(2) + >>> eps = mp.mpf(mp.eps) + >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral + >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series + ... L = mp.levin(method = "levin", variant = "t") + ... n, s = 0, 0 + ... while 1: + ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) + ... n += 1 + ... v, e = L.step_psum(s) + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), + ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + >>> print(mp.chop(v - w)) + 0.0 + + These examples run with 15-20 decimal digits precision. For higher precision the + working precision must be raised. + + **Examples for nsum** + + Here we calculate Euler's constant as the constant term in the Laurent + expansion of `\zeta(s)` at `s=1`. 
This sum converges extremely slowly because of
+    the logarithmic convergence behaviour of the Dirichlet series for zeta::
+
+    >>> mp.dps = 30
+    >>> z = mp.mpf(10) ** (-10)
+    >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
+    >>> print(mp.chop(a - mp.euler, tol = 1e-10))
+    0.0
+
+    The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
+
+    >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
+    >>> print(mp.chop(a - mp.log(2)))
+    0.0
+
+    Hypergeometric series can also be summed outside their range of convergence.
+    The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
+    point where the Levin transform converges, resulting in numerical overflow/garbage::
+
+    >>> z = 2 + 1j
+    >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
+    >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
+    >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
+    >>> print(mp.chop(exact-v))
+    0.0
+
+    References:
+
+      [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
+          Convergence and the Summation of Divergent Series" arXiv:math/0306302
+
+      [2] A. Sidi - "Practical Extrapolation Methods"
+
+      [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
+
+    """
+
+    def __init__(self, method = "levin", variant = "u"):
+        self.variant = variant
+        self.n = 0
+        self.a0 = 0
+        self.theta = 1
+        self.A = []
+        self.B = []
+        self.last = 0
+        self.last_s = False
+
+        if method == "levin":
+            self.factor = self.factor_levin
+        elif method == "sidi":
+            self.factor = self.factor_sidi
+        else:
+            raise ValueError("levin: unknown method \"%s\"" % method)
+
+    def factor_levin(self, i):
+        # original levin
+        # [1] p.50,e.7.5-7 (with n-j replaced by i)
+        return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
+
+    def factor_sidi(self, i):
+        # sidi analogon to levin (factorial series)
+        # [1] p.59,e.8.3-16 (with n-j replaced by i)
+        return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
+
+    def run(self, s, a0, a1 = 0):
+        if self.variant=="t":
+            # levin t
+            w=a0
+        elif self.variant=="u":
+            # levin u
+            w=a0*(self.theta+self.n)
+        elif self.variant=="v":
+            # levin v
+            w=a0*a1/(a0-a1)
+        else:
+            assert False, "unknown variant"
+
+        if w==0:
+            raise ValueError("levin: zero weight")
+
+        self.A.append(s/w)
+        self.B.append(1/w)
+
+        for i in range(self.n-1,-1,-1):
+            if i==self.n-1:
+                f=1
+            else:
+                f=self.factor(i)
+
+            self.A[i]=self.A[i+1]-f*self.A[i]
+            self.B[i]=self.B[i+1]-f*self.B[i]
+
+        self.n+=1
+
+    ###########################################################################
+
+    def update_psum(self,S):
+        """
+        This routine applies the convergence acceleration to the list of partial sums.
+
+        A = sum(a_k, k = 0..infinity)
+        s_n = sum(a_k, k = 0..n)
+
+        v, e = ...update_psum([s_0, s_1,..., s_k])
+
+        output:
+          v      current estimate of the series A
+          e      an error estimate which is simply the difference between the current
+                 estimate and the last estimate.
+        """
+
+        if self.variant!="v":
+            if self.n==0:
+                self.run(S[0],S[0])
+            while self.n<len(S):
+                self.run(S[self.n],S[self.n]-S[self.n-1])
+        else:
+            if len(S)==1:
+                self.last=0
+                return S[0],abs(S[0])
+
+            if self.n==0:
+                self.a1=S[1]-S[0]
+                self.run(S[0],S[0],self.a1)
+
+            while self.n<len(S)-1:
+                na1=S[self.n+1]-S[self.n]
+                self.run(S[self.n],self.a1,na1)
+                self.a1=na1
+
+        value=self.A[0]/self.B[0]
+        err=abs(value-self.last)
+        self.last=value
+
+        return value,err
+
+    def update(self,X):
+        """
+        This routine applies the convergence acceleration to the list of individual terms.
+
+        A = sum(a_k, k = 0..infinity)
+
+        v, e = ...update([a_0, a_1,..., a_k])
+
+        output:
+          v      current estimate of the series A
+          e      an error estimate which is simply the difference between the current
+                 estimate and the last estimate.
+        """
+
+        if self.variant!="v":
+            if self.n==0:
+                self.s=X[0]
+                self.run(self.s,X[0])
+            while self.n<len(X):
+                self.s+=X[self.n]
+                self.run(self.s,X[self.n])
+        else:
+            if len(X)==1:
+                self.last=0
+                return X[0],abs(X[0])
+
+            if self.n==0:
+                self.s=X[0]
+                self.run(self.s,X[0],X[1])
+
+            while self.n<len(X)-1:
+                self.s+=X[self.n]
+                self.run(self.s,X[self.n],X[self.n+1])
+
+        value=self.A[0]/self.B[0]
+        err=abs(value-self.last)
+        self.last=value
+
+        return value,err
+
+    ###########################################################################
+
+    def step_psum(self,s):
+        """
+        This routine applies the convergence acceleration to the partial sums.
+
+        A = sum(a_k, k = 0..infinity)
+        s_n = sum(a_k, k = 0..n)
+
+        v, e = ...step_psum(s_k)
+
+        output:
+          v      current estimate of the series A
+          e      an error estimate which is simply the difference between the current
+                 estimate and the last estimate.
+        """
+
+        if self.variant!="v":
+            if self.n==0:
+                self.run(s,s)
+            else:
+                self.run(s,s-self.last_s)
+        else:
+            if isinstance(self.last_s,bool):
+                self.last_s=s
+                self.last_w=s
+                self.last=0
+                return s,abs(s)
+
+            na1=s-self.last_s
+            self.run(self.last_s,self.last_w,na1)
+            self.last_w=na1
+
+        self.last_s=s
+
+        value=self.A[0]/self.B[0]
+        err=abs(value-self.last)
+        self.last=value
+
+        return value,err
+
+    def step(self,x):
+        """
+        This routine applies the convergence acceleration to the individual terms.
+
+        A = sum(a_k, k = 0..infinity)
+
+        v, e = ...step(a_k)
+
+        output:
+          v      current estimate of the series A
+          e      an error estimate which is simply the difference between the current
+                 estimate and the last estimate.
+        """
+
+        if self.variant!="v":
+            if self.n==0:
+                self.s=x
+                self.run(self.s,x)
+            else:
+                self.s+=x
+                self.run(self.s,x)
+        else:
+            if isinstance(self.last_s,bool):
+                self.last_s=x
+                self.s=0
+                self.last=0
+                return x,abs(x)
+
+            self.s+=self.last_s
+            self.run(self.s,self.last_s,x)
+            self.last_s=x
+
+        value=self.A[0]/self.B[0]
+        err=abs(value-self.last)
+        self.last=value
+
+        return value,err
+
+def levin(ctx, method = "levin", variant = "u"):
+    L = levin_class(method = method, variant = variant)
+    L.ctx = ctx
+    return L
+
+levin.__doc__ = levin_class.__doc__
+defun(levin)
+
+
+class cohen_alt_class:
+    # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
+    r"""
+    This interface implements the convergence acceleration of alternating series
+    as described in H. Cohen, F. Rodriguez Villegas, D. Zagier - "Convergence
+    Acceleration of Alternating Series". This series transformation works well
+    only if the individual terms of the series have an alternating sign. In
+    contrast to the Shanks/Wynn-epsilon or Levin transforms it is a linear
+    transformation, and it is also able to sum some divergent alternating series.
+
+    **Methods**
+
+    Calling ``cohen_alt`` returns an object with the following methods.
+
+    ``update(...)`` works with the list of individual terms `a_k` and
+    ``update_psum(...)`` works with the list of partial sums `s_k`:
+
+    .. code ::
+
+        v, e = ...update([a_0, a_1,..., a_k])
+        v, e = ...update_psum([s_0, s_1,..., s_k])
+
+    *v* is the current estimate for the series, and *e* is an error estimate
+    which is simply the difference between the current estimate and the last
+    estimate.
+
+    **Examples**
+
+    Here we compute the alternating zeta function::
+
+    >>> from mpmath import mp
+    >>> AC = mp.cohen_alt()
+    >>> S, s, n = [], 0, 1
+    >>> while 1:
+    ...     s += -((-1) ** n) * mp.one / (n * n)
+    ...     n += 1
+    ...     S.append(s)
+    ...     
v, e = AC.update_psum(S) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 12)) + 0.0 + + Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`:: + + >>> A = [] + >>> AC = mp.cohen_alt() + >>> n = 1 + >>> while 1: + ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) + ... A.append(-mp.loggamma(1 + mp.one / (2 * n))) + ... n += 1 + ... v, e = AC.update(A) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> v = mp.exp(v) + >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12)) + 0.0 + + ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface:: + + >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.log(2))) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a") + >>> print(mp.chop(v - mp.pi / 4)) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) + 0.0 + + """ + + def __init__(self): + self.last=0 + + def update(self, A): + """ + This routine applies the convergence acceleration to the list of individual terms. + + A = sum(a_k, k = 0..infinity) + + v, e = ...update([a_0, a_1,..., a_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + n = len(A) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = -self.ctx.one + c = -d + s = 0 + + for k in xrange(n): + c = b - c + if k % 2 == 0: + s = s + c * A[k] + else: + s = s - c * A[k] + b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + + def update_psum(self, S): + """ + This routine applies the convergence acceleration to the list of partial sums. + + A = sum(a_k, k = 0..infinity) + s_n = sum(a_k ,k = 0..n) + + v, e = ...update_psum([s_0, s_1,..., s_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + n = len(S) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = self.ctx.one + s = 0 + + for k in xrange(n): + b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one)) + s += b * S[k] + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + +def cohen_alt(ctx): + L = cohen_alt_class() + L.ctx = ctx + return L + +cohen_alt.__doc__ = cohen_alt_class.__doc__ +defun(cohen_alt) + + +@defun +def sumap(ctx, f, interval, integral=None, error=False): + r""" + Evaluates an infinite series of an analytic summand *f* using the + Abel-Plana formula + + .. math :: + + \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) + + i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt. + + Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`), + the Abel-Plana formula does not require derivatives. However, + it only works when `|f(it)-f(-it)|` does not + increase too rapidly with `t`. 
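+
+    As a rough sketch (an illustration of the formula above using only
+    public mpmath functions, not additional API of this routine), the
+    evaluation amounts to two quadratures plus an endpoint term:
+
+    .. code ::
+
+        i1 = quad(f, [0, inf])                 # integral of f along the real axis
+        i2 = quad(lambda t: j*(f(j*t) - f(-j*t))/expm1(2*pi*t), [0, inf])
+        s = i1 + f(0)/2 + i2                   # Abel-Plana value of sum(f(k), k >= 0)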
+ + **Examples** + + The Abel-Plana formula is particularly useful when the summand + decreases like a power of `k`; for example when the sum is a pure + zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sumap(lambda k: 1/k**2.5, [1,inf]) + 1.34148725725091717975677 + >>> zeta(2.5) + 1.34148725725091717975677 + >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf]) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + >>> zeta(2.5+2.5j, 1+1j) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + + If the series is alternating, numerical quadrature along the real + line is likely to give poor results, so it is better to evaluate + the first term symbolically whenever possible: + + >>> n=3; z=-0.75 + >>> I = expint(n,-log(z)) + >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I)) + -0.6917036036904594510141448 + >>> polylog(n,z) + -0.6917036036904594510141448 + + """ + prec = ctx.prec + try: + ctx.prec += 10 + a, b = interval + if b != ctx.inf: + raise ValueError("b should be equal to ctx.inf") + g = lambda x: f(x+a) + if integral is None: + i1, err1 = ctx.quad(g, [0,ctx.inf], error=True) + else: + i1, err1 = integral, 0 + j = ctx.j + p = ctx.pi * 2 + if ctx._is_real_type(i1): + h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t) + else: + h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t) + i2, err2 = ctx.quad(h, [0,ctx.inf], error=True) + err = err1+err2 + v = i1+i2+0.5*g(ctx.mpf(0)) + finally: + ctx.prec = prec + if error: + return +v, err + return +v + + +@defun +def sumem(ctx, f, interval, tol=None, reject=10, integral=None, + adiffs=None, bdiffs=None, verbose=False, error=False, + _fast_abort=False): + r""" + Uses the Euler-Maclaurin formula to compute an approximation accurate + to within ``tol`` (which defaults to the present epsilon) of the sum + + .. math :: + + S = \sum_{k=a}^b f(k) + + where `(a,b)` are given by ``interval`` and `a` or `b` may be + infinite. The approximation is + + .. math :: + + S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} + + \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} + \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right). + + The last sum in the Euler-Maclaurin formula is not generally + convergent (a notable exception is if `f` is a polynomial, in + which case Euler-Maclaurin actually gives an exact result). + + The summation is stopped as soon as the quotient between two + consecutive terms falls below *reject*. That is, by default + (*reject* = 10), the summation is continued as long as each + term adds at least one decimal. + + Although not convergent, convergence to a given tolerance can + often be "forced" if `b = \infty` by summing up to `a+N` and then + applying the Euler-Maclaurin formula to the sum over the range + `(a+N+1, \ldots, \infty)`. This procedure is implemented by + :func:`~mpmath.nsum`. + + By default numerical quadrature and differentiation is used. + If the symbolic values of the integral and endpoint derivatives + are known, it is more efficient to pass the value of the + integral explicitly as ``integral`` and the derivatives + explicitly as ``adiffs`` and ``bdiffs``. The derivatives + should be given as iterables that yield + `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`). 
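+
+    For instance, for `f(x) = x^{-2}` one has
+    `f^{(n)}(a) = (-1)^n (n+1)! \, a^{-2-n}`, so a suitable derivative
+    iterable is a generator expression (a sketch; the same idea appears
+    in the first example below)::
+
+        adiffs = ((-1)**n * fac(n+1) * a**(-2-n) for n in range(999))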
+ + **Examples** + + Summation of an infinite series, with automatic and symbolic + integral and derivative values (the second should be much faster):: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> sumem(lambda n: 1/n**2, [32, inf]) + 0.03174336652030209012658168043874142714132886413417 + >>> I = mpf(1)/32 + >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999)) + >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D) + 0.03174336652030209012658168043874142714132886413417 + + An exact evaluation of a finite polynomial sum:: + + >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000]) + 10500155000624963999742499550000.0 + >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001))) + 10500155000624963999742499550000 + + """ + tol = tol or +ctx.eps + interval = ctx._as_points(interval) + a = ctx.convert(interval[0]) + b = ctx.convert(interval[-1]) + err = ctx.zero + prev = 0 + M = 10000 + if a == ctx.ninf: adiffs = (0 for n in xrange(M)) + else: adiffs = adiffs or ctx.diffs(f, a) + if b == ctx.inf: bdiffs = (0 for n in xrange(M)) + else: bdiffs = bdiffs or ctx.diffs(f, b) + orig = ctx.prec + #verbose = 1 + try: + ctx.prec += 10 + s = ctx.zero + for k, (da, db) in enumerate(izip(adiffs, bdiffs)): + if k & 1: + term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1) + mag = abs(term) + if verbose: + print("term", k, "magnitude =", ctx.nstr(mag)) + if k > 4 and mag < tol: + s += term + break + elif k > 4 and abs(prev) / mag < reject: + err += mag + if _fast_abort: + return [s, (s, err)][error] + if verbose: + print("Failed to converge") + break + else: + s += term + prev = term + # Endpoint correction + if a != ctx.ninf: s += f(a)/2 + if b != ctx.inf: s += f(b)/2 + # Tail integral + if verbose: + print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b))) + if integral: + s += integral + else: + integral, ierr = ctx.quad(f, interval, error=True) + if verbose: + print("Integration error:", ierr) + s += integral + err += ierr + finally: + ctx.prec = orig + if error: + return s, err + else: + return s + +@defun +def adaptive_extrapolation(ctx, update, emfun, kwargs): + option = kwargs.get + if ctx._fixed_precision: + tol = option('tol', ctx.eps*2**10) + else: + tol = option('tol', ctx.eps/2**10) + verbose = option('verbose', False) + maxterms = option('maxterms', ctx.dps*10) + method = set(option('method', 'r+s').split('+')) + skip = option('skip', 0) + steps = iter(option('steps', xrange(10, 10**9, 10))) + strict = option('strict') + #steps = (10 for i in xrange(1000)) + summer=[] + if 'd' in method or 'direct' in method: + TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False + else: + TRY_RICHARDSON = ('r' in method) or ('richardson' in method) + TRY_SHANKS = ('s' in method) or ('shanks' in method) + TRY_EULER_MACLAURIN = ('e' in method) or \ + ('euler-maclaurin' in method) + + def init_levin(m): + variant = kwargs.get("levin_variant", "u") + if isinstance(variant, str): + if variant == "all": + variant = ["u", "v", "t"] + else: + variant = [variant] + for s in variant: + L = levin_class(method = m, variant = s) + L.ctx = ctx + L.name = m + "(" + s + ")" + summer.append(L) + + if ('l' in method) or ('levin' in method): + init_levin("levin") + + if ('sidi' in method): + init_levin("sidi") + + if ('a' in method) or ('alternating' in method): + L = cohen_alt_class() + L.ctx = ctx + L.name = "alternating" + summer.append(L) + + last_richardson_value = 0 + shanks_table = [] + index = 0 + step = 10 + partial = [] + best = ctx.zero + orig 
= ctx.prec + try: + if 'workprec' in kwargs: + ctx.prec = kwargs['workprec'] + elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0: + ctx.prec = (ctx.prec+10) * 4 + else: + ctx.prec += 30 + while 1: + if index >= maxterms: + break + + # Get new batch of terms + try: + step = next(steps) + except StopIteration: + pass + if verbose: + print("-"*70) + print("Adding terms #%i-#%i" % (index, index+step)) + update(partial, xrange(index, index+step)) + index += step + + # Check direct error + best = partial[-1] + error = abs(best - partial[-2]) + if verbose: + print("Direct error: %s" % ctx.nstr(error)) + if error <= tol: + return best + + # Check each extrapolation method + if TRY_RICHARDSON: + value, maxc = ctx.richardson(partial) + # Convergence + richardson_error = abs(value - last_richardson_value) + if verbose: + print("Richardson error: %s" % ctx.nstr(richardson_error)) + # Convergence + if richardson_error <= tol: + return value + last_richardson_value = value + # Unreliable due to cancellation + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Richardson") + TRY_RICHARDSON = False + if richardson_error < error: + error = richardson_error + best = value + if TRY_SHANKS: + shanks_table = ctx.shanks(partial, shanks_table, randomized=True) + row = shanks_table[-1] + if len(row) == 2: + est1 = row[-1] + shanks_error = 0 + else: + est1, maxc, est2 = row[-1], abs(row[-2]), row[-3] + shanks_error = abs(est1-est2) + if verbose: + print("Shanks error: %s" % ctx.nstr(shanks_error)) + if shanks_error <= tol: + return est1 + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Shanks") + TRY_SHANKS = False + if shanks_error < error: + error = shanks_error + best = est1 + for L in summer: + est, lerror = L.update_psum(partial) + if verbose: + print("%s error: %s" % (L.name, ctx.nstr(lerror))) + if lerror <= tol: + return est + if lerror < error: + error = lerror + best = est + if TRY_EULER_MACLAURIN: + if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1): + if verbose: + print ("NOT using Euler-Maclaurin: the series appears" + " to be alternating, so numerical\n quadrature" + " will most likely fail") + TRY_EULER_MACLAURIN = False + else: + value, em_error = emfun(index, tol) + value += partial[-1] + if verbose: + print("Euler-Maclaurin error: %s" % ctx.nstr(em_error)) + if em_error <= tol: + return value + if em_error < error: + best = value + finally: + ctx.prec = orig + if strict: + raise ctx.NoConvergence + if verbose: + print("Warning: failed to converge to target accuracy") + return best + +@defun +def nsum(ctx, f, *intervals, **options): + r""" + Computes the sum + + .. math :: S = \sum_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed, or more generally + + .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots + \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n) + + if multiple intervals are given. + + Two examples of infinite series that can be summed by :func:`~mpmath.nsum`, + where the first converges rapidly and the second converges slowly, + are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nsum(lambda n: 1/fac(n), [0, inf]) + 2.71828182845905 + >>> nsum(lambda n: 1/n**2, [1, inf]) + 1.64493406684823 + + When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to + accurately estimate the sums of slowly convergent series. 
If the series is
+    finite, :func:`~mpmath.nsum` currently does not attempt to perform any
+    extrapolation, and simply calls :func:`~mpmath.fsum`.
+
+    Multidimensional infinite series are reduced to a single-dimensional
+    series over expanding hypercubes; if both infinite and finite dimensions
+    are present, the finite ranges are moved innermost. For more advanced
+    control over the summation order, use nested calls to :func:`~mpmath.nsum`,
+    or manually rewrite the sum as a single-dimensional series.
+
+    **Options**
+
+    *tol*
+        Desired maximum final error. Defaults roughly to the
+        epsilon of the working precision.
+
+    *method*
+        Which summation algorithm to use (described below).
+        Default: ``'richardson+shanks'``.
+
+    *maxterms*
+        Cancel after at most this many terms. Default: 10*dps.
+
+    *steps*
+        An iterable giving the number of terms to add between
+        each extrapolation attempt. The default sequence is
+        [10, 20, 30, 40, ...]. For example, if you know that
+        approximately 100 terms will be required, efficiency might be
+        improved by setting this to [100, 10]. Then the first
+        extrapolation will be performed after 100 terms, the second
+        after 110, etc.
+
+    *verbose*
+        Print details about progress.
+
+    *ignore*
+        If enabled, any term that raises ``ArithmeticError``
+        or ``ValueError`` (e.g. through division by zero) is replaced
+        by a zero. This is convenient for lattice sums with
+        a singular term near the origin.
+
+    **Methods**
+
+    Unfortunately, an algorithm that can efficiently sum any infinite
+    series does not exist. :func:`~mpmath.nsum` implements several different
+    algorithms that each work well in different cases. The *method*
+    keyword argument selects a method.
+
+    The default method is ``'r+s'``, i.e. both Richardson extrapolation
+    and the Shanks transformation are attempted. A slower method that
+    handles more cases is ``'r+s+e'``. For very high precision
+    summation, or if the summation needs to be fast (for example if
+    multiple sums need to be evaluated), it is a good idea to
+    investigate which method works best and use only that.
+
+    ``'richardson'`` / ``'r'``:
+        Uses Richardson extrapolation. Provides useful extrapolation
+        when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
+        for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
+        additional information.
+
+    ``'shanks'`` / ``'s'``:
+        Uses the Shanks transformation. Typically provides useful
+        extrapolation when `f(k) \sim c^k` or when successive terms
+        alternate signs. It is able to sum some divergent series.
+        See :func:`~mpmath.shanks` for additional information.
+
+    ``'levin'`` / ``'l'``:
+        Uses the Levin transformation. It performs better than the Shanks
+        transformation for logarithmically convergent or alternating divergent
+        series. The ``'levin_variant'``-keyword selects the variant. Valid
+        choices are "u", "t", "v" and "all" whereby "all" uses all three
+        variants u, t and v simultaneously (this is good for performance
+        comparison in conjunction with ``verbose=True``). Instead of the Levin
+        transform one can also use the Sidi-S transform by selecting the
+        method ``'sidi'``.
+        See :func:`~mpmath.levin` for additional details.
+
+    ``'alternating'`` / ``'a'``:
+        This is the convergence acceleration of alternating series developed
+        by Cohen, Rodriguez Villegas and Zagier.
+        See :func:`~mpmath.cohen_alt` for additional details.
+
+    ``'euler-maclaurin'`` / ``'e'``:
+        Uses the Euler-Maclaurin summation formula to approximate
+        the remainder sum by an integral. 
This requires high-order + numerical derivatives and numerical integration. The advantage + of this algorithm is that it works regardless of the + decay rate of `f`, as long as `f` is sufficiently smooth. + See :func:`~mpmath.sumem` for additional information. + + ``'direct'`` / ``'d'``: + Does not perform any extrapolation. This can be used + (and should only be used for) rapidly convergent series. + The summation automatically stops when the terms + decrease below the target tolerance. + + **Basic examples** + + A finite sum:: + + >>> nsum(lambda k: 1/k, [1, 6]) + 2.45 + + Summation of a series going to negative infinity and a doubly + infinite series:: + + >>> nsum(lambda k: 1/k**2, [-inf, -1]) + 1.64493406684823 + >>> nsum(lambda k: 1/(1+k**2), [-inf, inf]) + 3.15334809493716 + + :func:`~mpmath.nsum` handles sums of complex numbers:: + + >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf]) + (1.6 + 0.8j) + + The following sum converges very rapidly, so it is most + efficient to sum it by disabling convergence acceleration:: + + >>> mp.dps = 1000 + >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf], + ... method='direct') + >>> b = (cos(1)+sin(1))/4 + >>> abs(a-b) < mpf('1e-998') + True + + **Examples with Richardson extrapolation** + + Richardson extrapolation works well for sums over rational + functions, as well as their alternating counterparts:: + + >>> mp.dps = 50 + >>> nsum(lambda k: 1 / k**3, [1, inf], + ... method='richardson') + 1.2020569031595942853997381615114499907649862923405 + >>> zeta(3) + 1.2020569031595942853997381615114499907649862923405 + + >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf], + ... method='richardson') + 2.9348022005446793094172454999380755676568497036204 + >>> pi**2/2-2 + 2.9348022005446793094172454999380755676568497036204 + + >>> nsum(lambda k: (-1)**k / k**3, [1, inf], + ... method='richardson') + -0.90154267736969571404980362113358749307373971925537 + >>> -3*zeta(3)/4 + -0.90154267736969571404980362113358749307373971925538 + + **Examples with Shanks transformation** + + The Shanks transformation works well for geometric series + and typically provides excellent acceleration for Taylor + series near the border of their disk of convergence. + Here we apply it to a series for `\log(2)`, which can be + seen as the Taylor series for `\log(1+x)` with `x = 1`:: + + >>> nsum(lambda k: -(-1)**k/k, [1, inf], + ... method='shanks') + 0.69314718055994530941723212145817656807550013436025 + >>> log(2) + 0.69314718055994530941723212145817656807550013436025 + + Here we apply it to a slowly convergent geometric series:: + + >>> nsum(lambda k: mpf('0.995')**k, [0, inf], + ... method='shanks') + 200.0 + + Finally, Shanks' method works very well for alternating series + where `f(k) = (-1)^k g(k)`, and often does so regardless of + the exact decay rate of `g(k)`:: + + >>> mp.dps = 15 + >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf], + ... method='shanks') + 0.765147024625408 + >>> (2-sqrt(2))*zeta(1.5)/2 + 0.765147024625408 + + The following slowly convergent alternating series has no known + closed-form value. Evaluating the sum a second time at higher + precision indicates that the value is probably correct:: + + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... method='shanks') + 0.924299897222939 + >>> mp.dps = 30 + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... 
method='shanks')
+        0.92429989722293885595957018136
+
+    **Examples with Levin transformation**
+
+    The following example calculates Euler's constant as the constant term in
+    the Laurent expansion of zeta(s) at s=1. This sum converges extremely slowly
+    because of the logarithmic convergence behaviour of the Dirichlet series
+    for zeta.
+
+    >>> mp.dps = 30
+    >>> z = mp.mpf(10) ** (-10)
+    >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
+    >>> print(mp.chop(a - mp.euler, tol = 1e-10))
+    0.0
+
+    Now we sum the zeta function outside its range of convergence
+    (attention: this does not work at the negative integers!):
+
+    >>> mp.dps = 15
+    >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
+    >>> print(mp.chop(w - mp.zeta(-2-3j)))
+    0.0
+
+    The next example resums an asymptotic series expansion of an integral
+    related to the exponential integral.
+
+    >>> mp.dps = 15
+    >>> z = mp.mpf(10)
+    >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
+    >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
+    >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
+    >>> print(mp.chop(w - exact))
+    0.0
+
+    The following highly divergent asymptotic expansion needs some care. Firstly,
+    we need a copious amount of working precision. Secondly, the stepsize must not
+    be chosen too large, otherwise nsum may miss the point where the Levin transform
+    converges and reach the region where only numerical garbage is produced due to
+    numerical cancellation.
+
+    >>> mp.dps = 15
+    >>> z = mp.mpf(2)
+    >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
+    >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
+    >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
+    ...             [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
+    >>> print(mp.chop(w - exact))
+    0.0
+
+    The hypergeometric function can also be summed outside its range of convergence:
+
+    >>> mp.dps = 15
+    >>> z = 2 + 1j
+    >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
+    >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
+    >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
+    >>> print(mp.chop(exact-v))
+    0.0
+
+    **Examples with Cohen's alternating series resummation**
+
+    The next example sums the alternating zeta function:
+
+    >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
+    >>> print(mp.chop(v - mp.log(2)))
+    0.0
+
+    The derivative of the alternating zeta function outside its range of
+    convergence:
+
+    >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
+    >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
+    0.0
+
+    **Examples with Euler-Maclaurin summation**
+
+    The sum in the following example has the wrong rate of convergence
+    for either Richardson or Shanks to be effective. 
+ + >>> f = lambda k: log(k)/k**2.5 + >>> mp.dps = 15 + >>> nsum(f, [1, inf], method='euler-maclaurin') + 0.38734195032621 + >>> -diff(zeta, 2.5) + 0.38734195032621 + + Increasing ``steps`` improves speed at higher precision:: + + >>> mp.dps = 50 + >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250]) + 0.38734195032620997271199237593105101319948228874688 + >>> -diff(zeta, 2.5) + 0.38734195032620997271199237593105101319948228874688 + + **Divergent series** + + The Shanks transformation is able to sum some *divergent* + series. In particular, it is often able to sum Taylor series + beyond their radius of convergence (this is due to a relation + between the Shanks transformation and Pade approximations; + see :func:`~mpmath.pade` for an alternative way to evaluate divergent + Taylor series). Furthermore the Levin-transform examples above + contain some divergent series resummation. + + Here we apply it to `\log(1+x)` far outside the region of + convergence:: + + >>> mp.dps = 50 + >>> nsum(lambda k: -(-9)**k/k, [1, inf], + ... method='shanks') + 2.3025850929940456840179914546843642076011014886288 + >>> log(10) + 2.3025850929940456840179914546843642076011014886288 + + A particular type of divergent series that can be summed + using the Shanks transformation is geometric series. + The result is the same as using the closed-form formula + for an infinite geometric series:: + + >>> mp.dps = 15 + >>> for n in range(-8, 8): + ... if n == 1: + ... continue + ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n), + ... nsum(lambda k: n**k, [0, inf], method='shanks'))) + ... + -8.0 0.111111111111111 0.111111111111111 + -7.0 0.125 0.125 + -6.0 0.142857142857143 0.142857142857143 + -5.0 0.166666666666667 0.166666666666667 + -4.0 0.2 0.2 + -3.0 0.25 0.25 + -2.0 0.333333333333333 0.333333333333333 + -1.0 0.5 0.5 + 0.0 1.0 1.0 + 2.0 -1.0 -1.0 + 3.0 -0.5 -0.5 + 4.0 -0.333333333333333 -0.333333333333333 + 5.0 -0.25 -0.25 + 6.0 -0.2 -0.2 + 7.0 -0.166666666666667 -0.166666666666667 + + **Multidimensional sums** + + Any combination of finite and infinite ranges is allowed for the + summation indices:: + + >>> mp.dps = 15 + >>> nsum(lambda x,y: x+y, [2,3], [4,5]) + 28.0 + >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf]) + 6.0 + >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3]) + 6.0 + >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4]) + 7.0 + >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf]) + 7.0 + >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf]) + 7.0 + + Some nice examples of double series with analytic solutions or + reductions to single-dimensional series (see [1]):: + + >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf]) + 1.60669515241529 + >>> nsum(lambda n: 1/(2**n-1), [1,inf]) + 1.60669515241529 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf]) + 0.278070510848213 + >>> pi*(pi-3*ln2)/12 + 0.278070510848213 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf]) + 0.129319852864168 + >>> altzeta(2) - altzeta(1) + 0.129319852864168 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf]) + 0.0790756439455825 + >>> altzeta(3) - altzeta(2) + 0.0790756439455825 + + >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)), + ... [1,inf], [1,inf]) + 0.28125 + >>> mpf(9)/32 + 0.28125 + + >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j), + ... [1,inf], [1,inf], workprec=400) + 1.64493406684823 + >>> zeta(2) + 1.64493406684823 + + A hard example of a multidimensional sum is the Madelung constant + in three dimensions (see [2]). 
The defining sum converges very + slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to + obtain an accurate value through convergence acceleration. The + second evaluation below uses a much more efficient, rapidly + convergent 2D sum:: + + >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5, + ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True) + -1.74756459463318 + >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \ + ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf]) + -1.74756459463318 + + Another example of a lattice sum in 2D:: + + >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf], + ... [-inf,inf], ignore=True) + -2.1775860903036 + >>> -pi*ln2 + -2.1775860903036 + + An example of an Eisenstein series:: + + >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf], + ... ignore=True) + (3.1512120021539 + 0.0j) + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html, + 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html + + """ + infinite, g = standardize(ctx, f, intervals, options) + if not infinite: + return +g() + + def update(partial_sums, indices): + if partial_sums: + psum = partial_sums[-1] + else: + psum = ctx.zero + for k in indices: + psum = psum + g(ctx.mpf(k)) + partial_sums.append(psum) + + prec = ctx.prec + + def emfun(point, tol): + workprec = ctx.prec + ctx.prec = prec + 10 + v = ctx.sumem(g, [point, ctx.inf], tol, error=1) + ctx.prec = workprec + return v + + return +ctx.adaptive_extrapolation(update, emfun, options) + + +def wrapsafe(f): + def g(*args): + try: + return f(*args) + except (ArithmeticError, ValueError): + return 0 + return g + +def standardize(ctx, f, intervals, options): + if options.get("ignore"): + f = wrapsafe(f) + finite = [] + infinite = [] + for k, points in enumerate(intervals): + a, b = ctx._as_points(points) + if b < a: + return False, (lambda: ctx.zero) + if a == ctx.ninf or b == ctx.inf: + infinite.append((k, (a,b))) + else: + finite.append((k, (int(a), int(b)))) + if finite: + f = fold_finite(ctx, f, finite) + if not infinite: + return False, lambda: f(*([0]*len(intervals))) + if infinite: + f = standardize_infinite(ctx, f, infinite) + f = fold_infinite(ctx, f, infinite) + args = [0] * len(intervals) + d = infinite[0][0] + def g(k): + args[d] = k + return f(*args) + return True, g + +# backwards compatible itertools.product +def cartesian_product(args): + pools = map(tuple, args) + result = [[]] + for pool in pools: + result = [x+[y] for x in result for y in pool] + for prod in result: + yield tuple(prod) + +def fold_finite(ctx, f, intervals): + if not intervals: + return f + indices = [v[0] for v in intervals] + points = [v[1] for v in intervals] + ranges = [xrange(a, b+1) for (a,b) in points] + def g(*args): + args = list(args) + s = ctx.zero + for xs in cartesian_product(ranges): + for dim, x in zip(indices, xs): + args[dim] = ctx.mpf(x) + s += f(*args) + return s + #print "Folded finite", indices + return g + +# Standardize each interval to [0,inf] +def standardize_infinite(ctx, f, intervals): + if not intervals: + return f + dim, [a,b] = intervals[-1] + if a == ctx.ninf: + if b == ctx.inf: + def g(*args): + args = list(args) + k = args[dim] + if k: + s = f(*args) + args[dim] = -k + s += f(*args) + return s + else: + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] = b - args[dim] + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] += a + return f(*args) + #print "Standardized infinity along dimension", dim, a, b + return 
standardize_infinite(ctx, g, intervals[:-1]) + +def fold_infinite(ctx, f, intervals): + if len(intervals) < 2: + return f + dim1 = intervals[-2][0] + dim2 = intervals[-1][0] + # Assume intervals are [0,inf] x [0,inf] x ... + def g(*args): + args = list(args) + #args.insert(dim2, None) + n = int(args[dim1]) + s = ctx.zero + #y = ctx.mpf(n) + args[dim2] = ctx.mpf(n) #y + for x in xrange(n+1): + args[dim1] = ctx.mpf(x) + s += f(*args) + args[dim1] = ctx.mpf(n) #ctx.mpf(n) + for y in xrange(n): + args[dim2] = ctx.mpf(y) + s += f(*args) + return s + #print "Folded infinite from", len(intervals), "to", (len(intervals)-1) + return fold_infinite(ctx, g, intervals[:-1]) + +@defun +def nprod(ctx, f, interval, nsum=False, **kwargs): + r""" + Computes the product + + .. math :: + + P = \prod_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed. + + By default, :func:`~mpmath.nprod` uses the same extrapolation methods as + :func:`~mpmath.nsum`, except applied to the partial products rather than + partial sums, and the same keyword options as for :func:`~mpmath.nsum` are + supported. If ``nsum=True``, the product is instead computed via + :func:`~mpmath.nsum` as + + .. math :: + + P = \exp\left( \sum_{k=a}^b \log(f(k)) \right). + + This is slower, but can sometimes yield better results. It is + also required (and used automatically) when Euler-Maclaurin + summation is requested. + + **Examples** + + A simple finite product:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> nprod(lambda k: k, [1, 4]) + 24.0 + + A large number of infinite products have known exact values, + and can therefore be used as a reference. Most of the following + examples are taken from MathWorld [1]. + + A few infinite products with simple values are:: + + >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf]) + 3.141592653589793238462643 + >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf]) + 2.0 + >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf]) + 0.6666666666666666666666667 + >>> nprod(lambda k: (1-1/k**2), [2, inf]) + 0.5 + + Next, several more infinite products with more complicated + values:: + + >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6) + 5.180668317897115748416626 + 5.180668317897115748416626 + + >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi) + 0.2720290549821331629502366 + 0.2720290549821331629502366 + + >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf]) + 0.8480540493529003921296502 + >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi)) + 0.8480540493529003921296502 + + >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf]) + 1.848936182858244485224927 + >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi + 1.848936182858244485224927 + + >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi) + 0.9190194775937444301739244 + 0.9190194775937444301739244 + + >>> nprod(lambda k: (1-1/k**6), [2, inf]) + 0.9826842777421925183244759 + >>> (1+cosh(pi*sqrt(3)))/(12*pi**2) + 0.9826842777421925183244759 + + >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi) + 1.838038955187488860347849 + 1.838038955187488860347849 + + >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf]) + 1.447255926890365298959138 + >>> exp(1+euler/2)/sqrt(2*pi) + 1.447255926890365298959138 + + The following two products are equivalent and can be evaluated in + terms of a Jacobi theta function. 
Pi can be replaced by any value + (as long as convergence is preserved):: + + >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf]) + 0.3838451207481672404778686 + >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf]) + 0.3838451207481672404778686 + >>> jtheta(4,0,1/pi) + 0.3838451207481672404778686 + + This product does not have a known closed form value:: + + >>> nprod(lambda k: (1-1/2**k), [1, inf]) + 0.2887880950866024212788997 + + A product taken from `-\infty`:: + + >>> nprod(lambda k: 1-k**(-3), [-inf,-2]) + 0.8093965973662901095786805 + >>> cosh(pi*sqrt(3)/2)/(3*pi) + 0.8093965973662901095786805 + + A doubly infinite product:: + + >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf]) + 23.41432688231864337420035 + >>> exp(pi/tanh(pi)) + 23.41432688231864337420035 + + A product requiring the use of Euler-Maclaurin summation to compute + an accurate value:: + + >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e') + 0.696155111336231052898125 + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html + + """ + if nsum or ('e' in kwargs.get('method', '')): + orig = ctx.prec + try: + # TODO: we are evaluating log(1+eps) -> eps, which is + # inaccurate. This currently works because nsum greatly + # increases the working precision. But we should be + # more intelligent and handle the precision here. + ctx.prec += 10 + v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs) + finally: + ctx.prec = orig + return +ctx.exp(v) + + a, b = ctx._as_points(interval) + if a == ctx.ninf: + if b == ctx.inf: + return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs) + return ctx.nprod(f, [-b, ctx.inf], **kwargs) + elif b != ctx.inf: + return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1)) + + a = int(a) + + def update(partial_products, indices): + if partial_products: + pprod = partial_products[-1] + else: + pprod = ctx.one + for k in indices: + pprod = pprod * f(a + ctx.mpf(k)) + partial_products.append(pprod) + + return +ctx.adaptive_extrapolation(update, None, kwargs) + + +@defun +def limit(ctx, f, x, direction=1, exp=False, **kwargs): + r""" + Computes an estimate of the limit + + .. math :: + + \lim_{t \to x} f(t) + + where `x` may be finite or infinite. + + For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for + consecutive integer values of `n`, where the approach direction + `d` may be specified using the *direction* keyword argument. + For infinite `x`, :func:`~mpmath.limit` evaluates values of + `f(\mathrm{sign}(x) \cdot n)`. + + If the approach to the limit is not sufficiently fast to give + an accurate estimate directly, :func:`~mpmath.limit` attempts to find + the limit using Richardson extrapolation or the Shanks + transformation. You can select between these methods using + the *method* keyword (see documentation of :func:`~mpmath.nsum` for + more information). + + **Options** + + The following options are available with essentially the + same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*, + *steps*, *verbose*. + + If the option *exp=True* is set, `f` will be + sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots` + instead of the linearly spaced points `n = 1, 2, 3, \ldots`. + This can sometimes improve the rate of convergence so that + :func:`~mpmath.limit` may return a more accurate answer (and faster). 
+ However, do note that this can only be used if `f` + supports fast and accurate evaluation for arguments that + are extremely close to the limit point (or if infinite, + very large arguments). + + **Examples** + + A basic evaluation of a removable singularity:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> limit(lambda x: (x-sin(x))/x**3, 0) + 0.166666666666666666666666666667 + + Computing the exponential function using its limit definition:: + + >>> limit(lambda n: (1+3/n)**n, inf) + 20.0855369231876677409285296546 + >>> exp(3) + 20.0855369231876677409285296546 + + A limit for `\pi`:: + + >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2 + >>> limit(f, inf) + 3.14159265358979323846264338328 + + Calculating the coefficient in Stirling's formula:: + + >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf) + 2.50662827463100050241576528481 + >>> sqrt(2*pi) + 2.50662827463100050241576528481 + + Evaluating Euler's constant `\gamma` using the limit representation + + .. math :: + + \gamma = \lim_{n \rightarrow \infty } \left[ \left( + \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right] + + (which converges notoriously slowly):: + + >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n) + >>> limit(f, inf) + 0.577215664901532860606512090082 + >>> +euler + 0.577215664901532860606512090082 + + With default settings, the following limit converges too slowly + to be evaluated accurately. Changing to exponential sampling + however gives a perfect result:: + + >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x) + >>> limit(f, inf) + 0.992831158558330281129249686491 + >>> limit(f, inf, exp=True) + 1.0 + + """ + + if ctx.isinf(x): + direction = ctx.sign(x) + g = lambda k: f(ctx.mpf(k+1)*direction) + else: + direction *= ctx.one + g = lambda k: f(x + direction/(k+1)) + if exp: + h = g + g = lambda k: h(2**k) + + def update(values, indices): + for k in indices: + values.append(g(k+1)) + + # XXX: steps used by nsum don't work well + if not 'steps' in kwargs: + kwargs['steps'] = [10] + + return +ctx.adaptive_extrapolation(update, None, kwargs) diff --git a/MLPY/Lib/site-packages/mpmath/calculus/inverselaplace.py b/MLPY/Lib/site-packages/mpmath/calculus/inverselaplace.py new file mode 100644 index 0000000000000000000000000000000000000000..d2206b05c1601ee781b09dcbedf3c0fcd89cfa59 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/inverselaplace.py @@ -0,0 +1,973 @@ +# contributed to mpmath by Kristopher L. Kuhlman, February 2017 +# contributed to mpmath by Guillermo Navas-Palencia, February 2022 + +class InverseLaplaceTransform(object): + r""" + Inverse Laplace transform methods are implemented using this + class, in order to simplify the code and provide a common + infrastructure. + + Implement a custom inverse Laplace transform algorithm by + subclassing :class:`InverseLaplaceTransform` and implementing the + appropriate methods. The subclass can then be used by + :func:`~mpmath.invertlaplace` by passing it as the *method* + argument. + """ + + def __init__(self, ctx): + self.ctx = ctx + + def calc_laplace_parameter(self, t, **kwargs): + r""" + Determine the vector of Laplace parameter values needed for an + algorithm, this will depend on the choice of algorithm (de + Hoog is default), the algorithm-specific parameters passed (or + default ones), and desired time. 
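+
+        As a sketch, the concrete implementations below all follow the
+        same pattern (the attribute names are those used by the classes
+        in this module):
+
+        .. code ::
+
+            self.t = self.ctx.convert(t)  # requested time
+            self.degree = ...             # number of terms, M
+            self.dps_orig = self.ctx.dps  # remember caller's precision
+            self.ctx.dps = self.dps_goal  # raise working precision
+            self.p = ...                  # vector of Laplace-space abscissa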
+
+        """
+        raise NotImplementedError
+
+    def calc_time_domain_solution(self, fp):
+        r"""
+        Compute the time domain solution, after computing the
+        Laplace-space function evaluations at the abscissa required
+        for the algorithm. Abscissa computed for one algorithm are
+        typically not useful for another algorithm.
+        """
+        raise NotImplementedError
+
+
+class FixedTalbot(InverseLaplaceTransform):
+
+    def calc_laplace_parameter(self, t, **kwargs):
+        r"""The "fixed" Talbot method deforms the Bromwich contour towards
+        `-\infty` in the shape of a parabola. Traditionally the Talbot
+        algorithm has adjustable parameters, but the "fixed" version
+        does not. The `r` parameter could be passed in as a parameter,
+        if you want to override the default given by (Abate & Valko,
+        2004).
+
+        The Laplace parameter is sampled along a parabola opening
+        along the negative imaginary axis, with the base of the
+        parabola along the real axis at
+        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
+        the approximation (degree) grows, the abscissa required for
+        function evaluation tend towards `-\infty`, requiring high
+        precision to prevent overflow. If any poles, branch cuts or
+        other singularities exist such that the deformed Bromwich
+        contour lies to the left of the singularity, the method will
+        fail.
+
+        **Optional arguments**
+
+        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
+        recognizes the following keywords
+
+        *tmax*
+            maximum time associated with vector of times
+            (typically just the time requested)
+        *degree*
+            integer order of approximation (M = number of terms)
+        *r*
+            abscissa for `p_0` (otherwise computed using rule
+            of thumb `2M/5`)
+
+        The working precision will be increased according to a rule of
+        thumb. If 'degree' is not specified, the working precision and
+        degree are chosen to hopefully achieve the dps of the calling
+        context. If 'degree' is specified, the working precision is
+        chosen to achieve maximum resulting precision for the
+        specified degree.
+
+        .. math ::
+
+            p_0=\frac{r}{t}
+
+        .. math ::
+
+            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
+            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i<M
+
+        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
+        maximum specified time.
+
+        """
+
+        # required
+        # ------------------------------
+        # time of desired approximation
+        self.t = self.ctx.convert(t)
+
+        # optional
+        # ------------------------------
+        # maximum time desired (used for scaling);
+        # default is the requested time
+        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))
+
+        # empirical relationships used here based on a linear fit of
+        # requested and delivered dps for exponentially decaying time
+        # functions for requested dps up to 512.
+
+        if 'degree' in kwargs:
+            self.degree = kwargs['degree']
+            self.dps_goal = self.degree
+        else:
+            self.dps_goal = int(1.72*self.ctx.dps)
+            self.degree = max(12, int(1.38*self.dps_goal))
+
+        M = self.degree
+
+        # this is adjusting the dps of the calling context
+        # hopefully the caller doesn't monkey around with it
+        # between calling this routine and calc_time_domain_solution()
+        self.dps_orig = self.ctx.dps
+        self.ctx.dps = self.dps_goal
+
+        # Abate & Valko rule of thumb for the r parameter
+        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)
+
+        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)
+
+        self.cot_theta = self.ctx.matrix(M, 1)
+        self.cot_theta[0] = 0  # not used
+
+        # all but time-dependent part of p
+        self.delta = self.ctx.matrix(M, 1)
+        self.delta[0] = self.r
+
+        for i in range(1, M):
+            self.cot_theta[i] = self.ctx.cot(self.theta[i])
+            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)
+
+        self.p = self.ctx.matrix(M, 1)
+        self.p = self.delta/self.tmax
+
+        # NB: p is complex (mpc)
+
+    def calc_time_domain_solution(self, fp, t, manual_prec=False):
+        r"""The fixed Talbot time-domain solution is computed from the
+        Laplace-space function evaluations using
+
+        .. math ::
+
+            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
+            \gamma_k \bar{f}(p_k)\right]
+
+        where
+
+        .. math ::
+
+            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)
+
+        .. math ::
+
+            \gamma_k = e^{t p_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
+            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
+            \frac{k \pi}{M}\right)\right\rbrace \qquad 1\le k<M.
+
+        Again, `j=\sqrt{-1}`.
+
+        **References**
+
+        1. Abate, J., P. Valko (2004). Multi-precision Laplace transform
+           inversion. *International Journal for Numerical Methods in
+           Engineering* 60:979-993,
+           http://dx.doi.org/10.1002/nme.995
+        2. Talbot, A. (1979). The accurate numerical inversion of Laplace
+           transforms. *IMA Journal of Applied Mathematics* 23(1):97,
+           http://dx.doi.org/10.1093/imamat/23.1.97
+
+        """
+
+        # required
+        self.t = self.ctx.convert(t)
+
+        # these were computed in the previous call to
+        # calc_laplace_parameter()
+        theta = self.theta
+        delta = self.delta
+        M = self.degree
+
+        ans = self.ctx.matrix(M, 1)
+        ans[0] = self.ctx.exp(delta[0])*fp[0]/2
+
+        for i in range(1, M):
+            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
+                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
+                1j*self.cot_theta[i])
+
+        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t
+
+        # setting dps back to value when calc_laplace_parameter was called
+        if not manual_prec:
+            self.ctx.dps = self.dps_orig
+
+        # ignore any small imaginary part
+        return result.real
+
+
+# ****************************************
+
+class Stehfest(InverseLaplaceTransform):
+
+    def calc_laplace_parameter(self, t, **kwargs):
+        r"""The Gaver-Stehfest method is a discrete approximation of the
+        Widder-Post inversion algorithm, rather than a direct
+        approximation of the Bromwich contour integral.
+
+        The method only uses abscissa along the real axis, and
+        therefore has issues inverting oscillatory functions (which
+        have poles in pairs away from the real axis).
+
+        .. math ::
+
+            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
+
+        **Optional arguments**
+
+        :class:`~mpmath.calculus.inverselaplace.Stehfest.calc_laplace_parameter`
+        recognizes the following keywords
+
+        *degree*
+            integer order of approximation (M = number of terms)
+
+        The working precision will be increased according to a rule of
+        thumb. If 'degree' is not specified, the working precision and
+        degree are chosen to hopefully achieve the dps of the calling
+        context. If 'degree' is specified, the working precision is
+        chosen to achieve maximum resulting precision for the
+        specified degree.
+
+        """
+
+        # required
+        self.t = self.ctx.convert(t)
+
+        # empirical relationships used here based on a linear fit of
+        # requested and delivered dps for exponentially decaying time
+        # functions for requested dps up to 512.
+
+        if 'degree' in kwargs:
+            self.degree = kwargs['degree']
+            self.dps_goal = int(1.38*self.degree)
+        else:
+            self.dps_goal = int(2.93*self.ctx.dps)
+            self.degree = max(16, self.dps_goal)
+
+        # _coeff routine requires even degree
+        if self.degree % 2 > 0:
+            self.degree += 1
+
+        M = self.degree
+
+        # this is adjusting the dps of the calling context
+        # hopefully the caller doesn't monkey around with it
+        # between calling this routine and calc_time_domain_solution()
+        self.dps_orig = self.ctx.dps
+        self.ctx.dps = self.dps_goal
+
+        self.V = self._coeff()
+        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t
+
+        # NB: p is real (mpf)
+
+    def _coeff(self):
+        r"""Salzer summation weights (aka, "Stehfest coefficients")
+        only depend on the approximation order (M) and the precision"""
+
+        M = self.degree
+        M2 = int(M/2) # checked earlier that M is even
+
+        V = self.ctx.matrix(M, 1)
+
+        # Salzer summation weights
+        # get very large in magnitude and oscillate in sign,
+        # if the precision is not high enough, there will be
+        # catastrophic cancellation
+        for k in range(1, M+1):
+            z = self.ctx.matrix(min(k, M2)+1, 1)
+            for j in range(int((k+1)/2), min(k, M2)+1):
+                z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
+                        (self.ctx.fac(M2-j)*self.ctx.fac(j)*
+                         self.ctx.fac(j-1)*self.ctx.fac(k-j)*
+                         self.ctx.fac(2*j-k)))
+            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)
+
+        return V
+
+    def calc_time_domain_solution(self, fp, t, manual_prec=False):
+        r"""Compute time-domain Stehfest algorithm solution.
+
+        .. math ::
+
+            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
+            p_k \right)
+
+        where
+
+        .. math ::
+
+            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
+            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
+            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}
+
+        As the degree increases, the abscissa (`p_k`) only increase
+        linearly towards `\infty`, but the Stehfest coefficients
+        (`V_k`) alternate in sign and increase rapidly in magnitude,
+        requiring high precision to prevent overflow or loss of
+        significance when evaluating the sum.
+
+        **References**
+
+        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
+        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
+           Laplace transforms. *Communications of the ACM* 13(1):47-49,
+           http://dx.doi.org/10.1145/361953.361969
+
+        """
+
+        # required
+        self.t = self.ctx.convert(t)
+
+        # assume fp was computed from p matrix returned from
+        # calc_laplace_parameter(), so is already
+        # a list or matrix of mpmath 'mpf' types
+
+        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t
+
+        # setting dps back to value when calc_laplace_parameter was called
+        if not manual_prec:
+            self.ctx.dps = self.dps_orig
+
+        # ignore any small imaginary part
+        return result.real
+
+
+# ****************************************
+
+class deHoog(InverseLaplaceTransform):
+
+    def calc_laplace_parameter(self, t, **kwargs):
+        r"""The de Hoog, Knight & Stokes algorithm is an
+        accelerated form of the Fourier series numerical
+        inverse Laplace transform algorithm.
+
+        .. math ::
+
+            p_k = \gamma + \frac{jk\pi}{T} \qquad 0 \le k < 2M+1
+
+        where
+
+        .. math ::
+
+            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},
+
+        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
+        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
+        rightmost pole or singularity, which is chosen based on the
+        desired accuracy (assuming the rightmost singularity is 0),
+        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
+        chosen in relation to `\alpha`.
+
+        When increasing the degree, the abscissa increase towards
+        `j\infty`, but more slowly than for the fixed Talbot
+        algorithm. The de Hoog et al. algorithm typically does better
+        with oscillatory functions of time, and less well-behaved
+        functions. The method tends to be slower than the Talbot and
+        Stehfest algorithms, especially so at very high precision
+        (e.g., `>500` digits of precision).
+
+        """
+
+        # required
+        # ------------------------------
+        self.t = self.ctx.convert(t)
+
+        # optional
+        # ------------------------------
+        self.tmax = kwargs.get('tmax', self.t)
+
+        # empirical relationships used here based on a linear fit of
+        # requested and delivered dps for exponentially decaying time
+        # functions for requested dps up to 512.
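+        # (illustrative worked example) with mp.dps = 15 and no 'degree'
+        # given, the branch below yields dps_goal = int(15*1.36) = 20 and
+        # degree = max(10, 20) = 20, hence np = 2*20+1 = 41 terms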
+
+        if 'degree' in kwargs:
+            self.degree = kwargs['degree']
+            self.dps_goal = int(1.38*self.degree)
+        else:
+            self.dps_goal = int(self.ctx.dps*1.36)
+            self.degree = max(10, self.dps_goal)
+
+        # 2*M+1 terms in approximation
+        M = self.degree
+
+        # adjust alpha component of abscissa of convergence for higher
+        # precision
+        tmp = self.ctx.power(10.0, -self.dps_goal)
+        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
+
+        # desired tolerance (here simply related to alpha)
+        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
+        self.np = 2*self.degree+1 # number of terms in approximation
+
+        # this is adjusting the dps of the calling context
+        # hopefully the caller doesn't monkey around with it
+        # between calling this routine and calc_time_domain_solution()
+        self.dps_orig = self.ctx.dps
+        self.ctx.dps = self.dps_goal
+
+        # scaling factor (likely tunable, but 2 is typical)
+        self.scale = kwargs.get('scale', 2)
+        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))
+
+        self.p = self.ctx.matrix(2*M+1, 1)
+        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
+        self.p = (self.gamma + self.ctx.pi*
+                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)
+
+        # NB: p is complex (mpc)
+
+    def calc_time_domain_solution(self, fp, t, manual_prec=False):
+        r"""Calculate time-domain solution for
+        de Hoog, Knight & Stokes algorithm.
+
+        The un-accelerated Fourier series approach is:
+
+        .. math ::
+
+            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
+            \Re\left[\bar{f}\left( p_k \right)
+            e^{ik\pi t/T} \right],
+
+        where the prime on the summation indicates the first term is halved.
+
+        This simplistic approach requires so many function evaluations
+        that it is not practical. Non-linear acceleration is
+        accomplished via Pade-approximation and an analytic expression
+        for the remainder of the continued fraction. See the original
+        paper (reference 2 below) for a detailed description of the
+        numerical approach.
+
+        **References**
+
+        1. Davies, B. (2005). *Integral Transforms and their
+           Applications*, Third Edition. Springer.
+        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
+           method for numerical inversion of Laplace transforms. *SIAM
+           Journal of Scientific and Statistical Computing* 3:357-366,
+           http://dx.doi.org/10.1137/0903022
+
+        """
+
+        M = self.degree
+        np = self.np
+        T = self.T
+
+        self.t = self.ctx.convert(t)
+
+        # would it be useful to try re-using
+        # space between e&q and A&B? 
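+        # descriptive note: e and q hold the triangular quotient-difference
+        # table, d the continued-fraction coefficients extracted from its
+        # first row, and A and B the numerator/denominator recurrences of
+        # the diagonal Pade approximant evaluated below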
+ e = self.ctx.zeros(np, M+1) + q = self.ctx.matrix(2*M, M) + d = self.ctx.matrix(np, 1) + A = self.ctx.zeros(np+1, 1) + B = self.ctx.ones(np+1, 1) + + # initialize Q-D table + e[:, 0] = 0.0 + 0j + q[0, 0] = fp[1]/(fp[0]/2) + for i in range(1, 2*M): + q[i, 0] = fp[i+1]/fp[i] + + # rhombus rule for filling triangular Q-D table (e & q) + for r in range(1, M+1): + # start with e, column 1, 0:2*M-2 + mr = 2*(M-r) + 1 + e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1] + if not r == M: + rq = r+1 + mr = 2*(M-rq)+1 + 2 + for i in range(mr): + q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1] + + # build up continued fraction coefficients (d) + d[0] = fp[0]/2 + for r in range(1, M+1): + d[2*r-1] = -q[0, r-1] # even terms + d[2*r] = -e[0, r] # odd terms + + # seed A and B for recurrence + A[0] = 0.0 + 0.0j + A[1] = d[0] + B[0:2] = 1.0 + 0.0j + + # base of the power series + z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn + + # coefficients of Pade approximation (A & B) + # using recurrence for all but last term + for i in range(1, 2*M): + A[i+1] = A[i] + d[i]*A[i-1]*z + B[i+1] = B[i] + d[i]*B[i-1]*z + + # "improved remainder" to continued fraction + brem = (1 + (d[2*M-1] - d[2*M])*z)/2 + # powm1(x,y) computes x^y - 1 more accurately near zero + rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem, + self.ctx.fraction(1, 2)) + + # last term of recurrence using new remainder + A[np] = A[2*M] + rem*A[2*M-1] + B[np] = B[2*M] + rem*B[2*M-1] + + # diagonal Pade approximation + # F=A/B represents accelerated trapezoid rule + result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real + + # setting dps back to value when calc_laplace_parameter was called + if not manual_prec: + self.ctx.dps = self.dps_orig + + return result + + +# **************************************** + +class Cohen(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""The Cohen algorithm accelerates the convergence of the nearly + alternating series resulting from the application of the trapezoidal + rule to the Bromwich contour inversion integral. + + .. math :: + + p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M + + where + + .. math :: + + \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)), + + `d = \mathrm{dps\_goal}`, which is chosen based on the desired + accuracy using the method developed in [1] to improve numerical + stability. The Cohen algorithm shows robustness similar to the de Hoog + et al. algorithm, but it is faster than the fixed Talbot algorithm. + + **Optional arguments** + + *degree* + integer order of the approximation (M = number of terms) + *alpha* + abscissa for `p_0` (controls the discretization error) + + The working precision will be increased according to a rule of + thumb. If 'degree' is not specified, the working precision and + degree are chosen to hopefully achieve the dps of the calling + context. If 'degree' is specified, the working precision is + chosen to achieve maximum resulting precision for the + specified degree. + + **References** + + 1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss + distribution in the Gaussian copula model: a comparison of methods. 
+ *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057
+
+ """
+ self.t = self.ctx.convert(t)
+
+ if 'degree' in kwargs:
+ self.degree = kwargs['degree']
+ self.dps_goal = int(1.5 * self.degree)
+ else:
+ self.dps_goal = int(self.ctx.dps * 1.74)
+ self.degree = max(22, int(1.31 * self.dps_goal))
+
+ M = self.degree + 1
+
+ # this is adjusting the dps of the calling context; hopefully
+ # the caller doesn't monkey around with it between calling
+ # this routine and calc_time_domain_solution()
+ self.dps_orig = self.ctx.dps
+ self.ctx.dps = self.dps_goal
+
+ ttwo = 2 * self.t
+ tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
+ tmp = self.ctx.fraction(2, 3) * tmp
+ self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))
+
+ # all but time-dependent part of p
+ a_t = self.alpha / ttwo
+ p_t = self.ctx.pi * 1j / self.t
+
+ self.p = self.ctx.matrix(M, 1)
+ self.p[0] = a_t
+
+ for i in range(1, M):
+ self.p[i] = a_t + i * p_t
+
+ def calc_time_domain_solution(self, fp, t, manual_prec=False):
+ r"""Calculate time-domain solution for Cohen algorithm.
+
+ The accelerated nearly alternating series is:
+
+ .. math ::
+
+ f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
+ \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
+ \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
+ \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],
+
+ where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].
+
+ 1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
+ acceleration of alternating series. *Experiment. Math.* 9(1):3-12
+
+ """
+ self.t = self.ctx.convert(t)
+
+ n = self.degree
+ M = n + 1
+
+ A = self.ctx.matrix(M, 1)
+ for i in range(M):
+ A[i] = fp[i].real
+
+ d = (3 + self.ctx.sqrt(8)) ** n
+ d = (d + 1 / d) / 2
+ b = -self.ctx.one
+ c = -d
+ s = 0
+
+ for k in range(n):
+ c = b - c
+ s = s + c * A[k + 1]
+ b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))
+
+ result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)
+
+ # setting dps back to value when calc_laplace_parameter was
+ # called, unless flag is set.
+ if not manual_prec:
+ self.ctx.dps = self.dps_orig
+
+ return result
+
+
+# ****************************************
+
+class LaplaceTransformInversionMethods(object):
+ def __init__(ctx, *args, **kwargs):
+ ctx._fixed_talbot = FixedTalbot(ctx)
+ ctx._stehfest = Stehfest(ctx)
+ ctx._de_hoog = deHoog(ctx)
+ ctx._cohen = Cohen(ctx)
+
+ def invertlaplace(ctx, f, t, **kwargs):
+ r"""Computes the numerical inverse Laplace transform for a
+ Laplace-space function at a given time. The function being
+ evaluated is assumed to be a real-valued function of time.
+
+ The user must supply a Laplace-space function `\bar{f}(p)`,
+ and a desired time at which to estimate the time-domain
+ solution `f(t)`.
+
+ A few basic examples of Laplace-space functions with known
+ inverses (see references [1,2]):
+
+ .. math ::
+
+ \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
+
+ .. math ::
+
+ \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
+
+ .. math ::
+
+ \bar{f}(p) = \frac{1}{(p+1)^2}
+
+ .. math ::
+
+ f(t) = t e^{-t}
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> tt = [0.001, 0.01, 0.1, 1, 10]
+ >>> fp = lambda p: 1/(p+1)**2
+ >>> ft = lambda t: t*exp(-t)
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
+ (0.000999000499833375, 8.57923043561212e-20)
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
+ (0.00990049833749168, 3.27007646698047e-19)
+ >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
+ (0.090483741803596, -1.75215800052168e-18)
+ >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
+ (0.367879441171442, 1.2428864009344e-17)
+ >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
+ (0.000453999297624849, 4.04513489306658e-20)
+
+ The methods also work for higher precision:
+
+ >>> mp.dps = 100; mp.pretty = True
+ >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
+ ('0.000999000499833375', '-4.96868310693356e-105')
+ >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
+ ('0.00990049833749168', '1.23032291513122e-104')
+
+ .. math ::
+
+ \bar{f}(p) = \frac{1}{\sqrt{p^2+1}}
+
+ .. math ::
+
+ f(t) = \mathrm{J}_0(t)
+
+ >>> mp.dps = 15; mp.pretty = True
+ >>> fp = lambda p: 1/sqrt(p*p + 1)
+ >>> ft = lambda t: besselj(0,t)
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
+ (0.999999750000016, -6.09717765032273e-18)
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
+ (0.99997500015625, -5.61756281076169e-17)
+
+ .. math ::
+
+ \bar{f}(p) = \frac{\log p}{p}
+
+ .. math ::
+
+ f(t) = -\gamma -\log t
+
+ >>> mp.dps = 15; mp.pretty = True
+ >>> fp = lambda p: log(p)/p
+ >>> ft = lambda t: -euler-log(t)
+ >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
+ (6.3305396140806, -1.92126634837863e-16)
+ >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
+ (4.02795452108656, -4.81486093200704e-16)
+
+ **Options**
+
+ :func:`~mpmath.invertlaplace` recognizes the following optional
+ keywords valid for all methods:
+
+ *method*
+ Chooses numerical inverse Laplace transform algorithm
+ (described below).
+ *degree*
+ Number of terms used in the approximation
+
+ **Algorithms**
+
+ Mpmath implements four numerical inverse Laplace transform
+ algorithms, attributed to: Talbot, Stehfest, de Hoog, Knight
+ and Stokes, and Cohen. These can be selected by using
+ *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
+ *method='cohen'* or by passing the classes *method=FixedTalbot*,
+ *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
+ :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
+ :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
+ are also available as shortcuts.
+
+ All four algorithms implement a heuristic balance between the
+ requested precision and the precision used internally for the
+ calculations. This has been tuned for a typical exponentially
+ decaying function and precision up to a few hundred decimal
+ digits.
+
+ The Laplace transform converts the variable time (i.e., along
+ a line) into a parameter given by the right half of the
+ complex `p`-plane. Singularities, poles, and branch cuts in
+ the complex `p`-plane contain all the information regarding
+ the time behavior of the corresponding function. Any numerical
+ method must therefore sample the `p`-plane "close enough" to the
+ singularities to accurately characterize them, while not
+ getting too close to have catastrophic cancellation, overflow,
+ or underflow issues.
+ Most significantly, if one or more of the
+ singularities in the `p`-plane is not on the left side of the
+ Bromwich contour, its effects will be left out of the computed
+ solution, and the answer will be completely wrong.
+
+ *Talbot*
+
+ The fixed Talbot method is accurate and fast, but the
+ method can catastrophically fail for certain classes of time-domain
+ behavior, including a Heaviside step function for positive
+ time (e.g., `H(t-2)`), or some oscillatory behaviors. The
+ Talbot method usually has adjustable parameters, but the
+ "fixed" variety implemented here does not. This method
+ deforms the Bromwich integral contour in the shape of a
+ parabola towards `-\infty`, which leads to problems
+ when the solution has a decaying exponential in it (e.g., a
+ Heaviside step function is equivalent to multiplying by a
+ decaying exponential in Laplace space).
+
+ *Stehfest*
+
+ The Stehfest algorithm only uses abscissae along the real axis
+ of the complex `p`-plane to estimate the time-domain
+ function. Oscillatory time-domain functions have poles away
+ from the real axis, so this method does not work well with
+ oscillatory functions, especially high-frequency ones. This
+ method also depends on summation of terms in a series that
+ grows very large, and will have catastrophic cancellation
+ during summation if the working precision is too low.
+
+ *de Hoog et al.*
+
+ The de Hoog, Knight, and Stokes method is essentially a
+ Fourier-series quadrature-type approximation to the Bromwich
+ contour integral, with non-linear series acceleration and an
+ analytical expression for the remainder term. This method is
+ typically one of the most robust. This method also involves the
+ greatest amount of overhead, so it is typically the slowest of the
+ four methods at high precision.
+
+ *Cohen*
+
+ The Cohen method is a trapezoidal rule approximation to the Bromwich
+ contour integral, with linear acceleration for alternating
+ series. This method is as robust as the de Hoog et al. method and the
+ fastest of the four methods at high precision, and is therefore the
+ default method.
+
+ **Singularities**
+
+ All numerical inverse Laplace transform methods have problems
+ at large time when the Laplace-space function has poles,
+ singularities, or branch cuts to the right of the origin in
+ the complex plane. For simple poles in `\bar{f}(p)` at the
+ `p`-plane origin, the time function is constant in time (e.g.,
+ `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
+ `p=0`). A pole in `\bar{f}(p)` to the left of the origin
+ corresponds to a decreasing function of time (e.g.,
+ `\mathcal{L}\left\lbrace e^{-t/2} \right\rbrace=1/(p+1/2)` has a
+ pole at `p=-1/2`), and a pole to the right of the origin leads to
+ an increasing function in time (e.g., `\mathcal{L}\left\lbrace
+ t e^{t/4} \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
+ singularities occur off the real `p` axis, the time-domain
+ function is oscillatory. For example `\mathcal{L}\left\lbrace
+ \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
+ starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
+ function. This range of behaviors is illustrated in Duffy [3]
+ Figure 4.10.4, p. 228.
+
+ In general, as `p \rightarrow \infty`, `t \rightarrow 0` and
+ vice-versa. All numerical inverse Laplace transform methods
+ require their abscissae to shift closer to the origin for
+ larger times.
+ If the abscissae shift to the left of the rightmost
+ singularity in the Laplace domain, the answer will be
+ completely wrong (the effects of singularities to the right of
+ the Bromwich contour are not included in the results).
+
+ For example, the following exponentially growing function has
+ a pole at `p=3`:
+
+ .. math ::
+
+ \bar{f}(p)=\frac{1}{p^2-9}
+
+ .. math ::
+
+ f(t)=\frac{1}{3}\sinh 3t
+
+ >>> mp.dps = 15; mp.pretty = True
+ >>> fp = lambda p: 1/(p*p-9)
+ >>> ft = lambda t: sinh(3*t)/3
+ >>> tt = [0.01,0.1,1.0,10.0]
+ >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
+ (0.0100015000675014, 0.0100015000675014)
+ >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
+ (0.101506764482381, 0.101506764482381)
+ >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
+ (3.33929164246997, 3.33929164246997)
+ >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
+ (1781079096920.74, -1.61331069624091e-14)
+
+ **References**
+
+ 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
+ 2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
+ Inversion, Springer.
+ 3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
+
+ **Numerical Inverse Laplace Transform Reviews**
+
+ 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
+ inversion of the Laplace transform: Applications to Biology,
+ Economics, Engineering, and Physics*. Elsevier.
+ 2. Davies, B., B. Martin (1979). Numerical inversion of the
+ Laplace transform: a survey and comparison of methods. *Journal
+ of Computational Physics* 33:1-32,
+ http://dx.doi.org/10.1016/0021-9991(79)90025-1
+ 3. Duffy, D.G. (1993). On the numerical inversion of Laplace
+ transforms: Comparison of three new methods on characteristic
+ problems from applications. *ACM Transactions on Mathematical
+ Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
+ 4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
+ Algorithms for Laplace-Space Numerical Approaches, *Numerical
+ Algorithms*, 63(2):339-355.
+ http://dx.doi.org/10.1007/s11075-012-9625-3
+
+ """
+
+ rule = kwargs.get('method', 'cohen')
+ if type(rule) is str:
+ lrule = rule.lower()
+ if lrule == 'talbot':
+ rule = ctx._fixed_talbot
+ elif lrule == 'stehfest':
+ rule = ctx._stehfest
+ elif lrule == 'dehoog':
+ rule = ctx._de_hoog
+ elif lrule == 'cohen':
+ rule = ctx._cohen
+ else:
+ raise ValueError("unknown invlap algorithm: %s" % rule)
+ else:
+ rule = rule(ctx)
+
+ # determine the vector of Laplace-space parameters
+ # needed for the requested method and desired time
+ rule.calc_laplace_parameter(t, **kwargs)
+
+ # compute the Laplace-space function evaluations
+ # at the required abscissae.
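+ # rule.p is a column vector of (generally complex) sample points,
+ # so the user-supplied f must accept a single mpc argument.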
+ fp = [f(p) for p in rule.p] + + # compute the time-domain solution from the + # Laplace-space function evaluations + return rule.calc_time_domain_solution(fp, t) + + # shortcuts for the above function for specific methods + def invlaptalbot(ctx, *args, **kwargs): + kwargs['method'] = 'talbot' + return ctx.invertlaplace(*args, **kwargs) + + def invlapstehfest(ctx, *args, **kwargs): + kwargs['method'] = 'stehfest' + return ctx.invertlaplace(*args, **kwargs) + + def invlapdehoog(ctx, *args, **kwargs): + kwargs['method'] = 'dehoog' + return ctx.invertlaplace(*args, **kwargs) + + def invlapcohen(ctx, *args, **kwargs): + kwargs['method'] = 'cohen' + return ctx.invertlaplace(*args, **kwargs) + + +# **************************************** + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/MLPY/Lib/site-packages/mpmath/calculus/odes.py b/MLPY/Lib/site-packages/mpmath/calculus/odes.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6bd3cb056c8841e6cf77ba4d08a0b1abb2f2c9 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/odes.py @@ -0,0 +1,288 @@ +from bisect import bisect +from ..libmp.backend import xrange + +class ODEMethods(object): + pass + +def ode_taylor(ctx, derivs, x0, y0, tol_prec, n): + h = tol = ctx.ldexp(1, -tol_prec) + dim = len(y0) + xs = [x0] + ys = [y0] + x = x0 + y = y0 + orig = ctx.prec + try: + ctx.prec = orig*(1+n) + # Use n steps with Euler's method to get + # evaluation points for derivatives + for i in range(n): + fxy = derivs(x, y) + y = [y[i]+h*fxy[i] for i in xrange(len(y))] + x += h + xs.append(x) + ys.append(y) + # Compute derivatives + ser = [[] for d in range(dim)] + for j in range(n+1): + s = [0]*dim + b = (-1) ** (j & 1) + k = 1 + for i in range(j+1): + for d in range(dim): + s[d] += b * ys[i][d] + b = (b * (j-k+1)) // (-k) + k += 1 + scale = h**(-j) / ctx.fac(j) + for d in range(dim): + s[d] = s[d] * scale + ser[d].append(s[d]) + finally: + ctx.prec = orig + # Estimate radius for which we can get full accuracy. + # XXX: do this right for zeros + radius = ctx.one + for ts in ser: + if ts[-1]: + radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n)) + radius /= 2 # XXX + return ser, x0+radius + +def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False): + r""" + Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]` + that is a numerical solution of the `n+1`-dimensional first-order + ordinary differential equation (ODE) system + + .. math :: + + y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + \vdots + + y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)]) + + The derivatives are specified by the vector-valued function + *F* that evaluates + `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`. + The initial point `x_0` is specified by the scalar argument *x0*, + and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is + specified by the vector argument *y0*. + + For convenience, if the system is one-dimensional, you may optionally + provide just a scalar value for *y0*. In this case, *F* should accept + a scalar *y* argument and return a scalar. The solution function + *y* will return scalar values instead of length-1 vectors. + + Evaluation of the solution function `y(x)` is permitted + for any `x \ge x_0`. + + A high-order ODE can be solved by transforming it into first-order + vector form. This transformation is described in standard texts + on ODEs. Examples will also be given below. 
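+
+ As a quick sketch of that transformation (the damped oscillator
+ `y'' + y' + y = 0` is just an illustrative choice), take `[y, y']`
+ as the unknown vector and compare against the closed-form solution
+ `e^{-x/2}(\cos(\sqrt{3}x/2) + \sin(\sqrt{3}x/2)/\sqrt{3})`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> f = odefun(lambda x, y: [y[1], -y[1] - y[0]], 0, [1, 0])
+ >>> y_exact = lambda x: exp(-x/2)*(cos(sqrt(3)*x/2) + sin(sqrt(3)*x/2)/sqrt(3))
+ >>> chop(f(1)[0] - y_exact(1), tol=1e-12)
+ 0.0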
+
+ **Options, speed and accuracy**
+
+ By default, :func:`~mpmath.odefun` uses a high-order Taylor series
+ method. For reasonably well-behaved problems, the solution will
+ be fully accurate to within the working precision. Note that
+ it must be possible to evaluate *F* to very high precision
+ for the generation of Taylor series to work.
+
+ To get a faster but less accurate solution, you can set a large
+ value for *tol* (which defaults roughly to *eps*). If you just
+ want to plot the solution or perform a basic simulation,
+ *tol = 0.01* is likely sufficient.
+
+ The *degree* argument controls the degree of the solver (with
+ *method='taylor'*, this is the degree of the Taylor series
+ expansion). A higher degree means that a longer step can be taken
+ before a new local solution must be generated from *F*,
+ meaning that fewer steps are required to get from `x_0` to a given
+ `x_1`. On the other hand, a higher degree also means that each
+ local solution becomes more expensive (i.e., more evaluations of
+ *F* are required per step, and at higher precision).
+
+ The optimal setting therefore involves a tradeoff. Generally,
+ decreasing the *degree* for Taylor series is likely to give a faster
+ solution at low precision, while increasing it is likely to be better
+ at higher precision.
+
+ The function object returned by :func:`~mpmath.odefun` caches the
+ solutions at all step points and uses polynomial interpolation between
+ step points. Therefore, once `y(x_1)` has been evaluated for some `x_1`,
+ `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`,
+ and continuing the evaluation up to `x_2 > x_1` is also fast.
+
+ **Examples of first-order ODEs**
+
+ We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
+ which has explicit solution `y(x) = \exp(x)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> f = odefun(lambda x, y: y, 0, 1)
+ >>> for x in [0, 1, 2.5]:
+ ... print((f(x), exp(x)))
+ ...
+ (1.0, 1.0)
+ (2.71828182845905, 2.71828182845905)
+ (12.1824939607035, 12.1824939607035)
+
+ The solution with high precision::
+
+ >>> mp.dps = 50
+ >>> f = odefun(lambda x, y: y, 0, 1)
+ >>> f(1)
+ 2.7182818284590452353602874713526624977572470937
+ >>> exp(1)
+ 2.7182818284590452353602874713526624977572470937
+
+ Using the more general vectorized form, the test problem
+ can be input as (note that *f* returns a 1-element vector)::
+
+ >>> mp.dps = 15
+ >>> f = odefun(lambda x, y: [y[0]], 0, [1])
+ >>> f(1)
+ [2.71828182845905]
+
+ :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
+ impossible (and at best difficult) to solve analytically. As
+ an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
+ for `y(0) = \pi/2`. An exact solution happens to be known
+ for this problem, and is given by
+ `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::
+
+ >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
+ >>> for x in [2, 5, 10]:
+ ... print((f(x), 2*atan(exp(mpf(x)**2/2))))
+ ...
+ (2.87255666284091, 2.87255666284091)
+ (3.14158520028345, 3.14158520028345)
+ (3.14159265358979, 3.14159265358979)
+
+ If `F` is independent of `y`, an ODE can be solved using direct
+ integration. We can therefore obtain a reference solution with
+ :func:`~mpmath.quad`::
+
+ >>> f = lambda x: (1+x**2)/(1+x**3)
+ >>> g = odefun(lambda x, y: f(x), pi, 0)
+ >>> g(2*pi)
+ 0.72128263801696
+ >>> quad(f, [pi, 2*pi])
+ 0.72128263801696
+
+ **Examples of second-order ODEs**
+
+ We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
+
+ To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`,
+ whereby the original equation can be written as `y_1' - y_0 = 0`. Put
+ together, we get the first-order, two-dimensional vector ODE
+
+ .. math ::
+
+ \begin{cases}
+ y_0' = -y_1 \\
+ y_1' = y_0
+ \end{cases}
+
+ To get a well-defined IVP, we need two initial values. With
+ `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
+ course be solved by `y(x) = y_0(x) = \cos(x)` and
+ `-y'(x) = y_1(x) = \sin(x)`. We check this::
+
+ >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
+ >>> for x in [0, 1, 2.5, 10]:
+ ... nprint(f(x), 15)
+ ... nprint([cos(x), sin(x)], 15)
+ ... print("---")
+ ...
+ [1.0, 0.0]
+ [1.0, 0.0]
+ ---
+ [0.54030230586814, 0.841470984807897]
+ [0.54030230586814, 0.841470984807897]
+ ---
+ [-0.801143615546934, 0.598472144103957]
+ [-0.801143615546934, 0.598472144103957]
+ ---
+ [-0.839071529076452, -0.54402111088937]
+ [-0.839071529076452, -0.54402111088937]
+ ---
+
+ Note that we get both the sine and the cosine solutions
+ simultaneously.
+
+ **TODO**
+
+ * Better automatic choice of degree and step size
+ * Make determination of Taylor series convergence radius
+ more robust
+ * Allow solution for `x < x_0`
+ * Allow solution for complex `x`
+ * Test for difficult (ill-conditioned) problems
+ * Implement Runge-Kutta and other algorithms
+
+ """
+ if tol:
+ tol_prec = int(-ctx.log(tol, 2))+10
+ else:
+ tol_prec = ctx.prec+10
+ degree = degree or (3 + int(3*ctx.dps/2.))
+ workprec = ctx.prec + 40
+ try:
+ len(y0)
+ return_vector = True
+ except TypeError:
+ F_ = F
+ F = lambda x, y: [F_(x, y[0])]
+ y0 = [y0]
+ return_vector = False
+ ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
+ series_boundaries = [x0, xb]
+ series_data = [(ser, x0, xb)]
+ # We will be working with vectors of Taylor series
+ def mpolyval(ser, a):
+ return [ctx.polyval(s[::-1], a) for s in ser]
+ # Find nearest expansion point; compute if necessary
+ def get_series(x):
+ if x < x0:
+ raise ValueError
+ n = bisect(series_boundaries, x)
+ if n < len(series_boundaries):
+ return series_data[n-1]
+ while 1:
+ ser, xa, xb = series_data[-1]
+ if verbose:
+ print("Computing Taylor series for [%f, %f]" % (xa, xb))
+ y = mpolyval(ser, xb-xa)
+ xa = xb
+ ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
+ series_boundaries.append(xb)
+ series_data.append((ser, xa, xb))
+ if x <= xb:
+ return series_data[-1]
+ # Evaluation function
+ def interpolant(x):
+ x = ctx.convert(x)
+ orig = ctx.prec
+ try:
+ ctx.prec = workprec
+ ser, xa, xb = get_series(x)
+ y = mpolyval(ser, x-xa)
+ finally:
+ ctx.prec = orig
+ if return_vector:
+ return [+yk for yk in y]
+ else:
+ return +y[0]
+ return interpolant
+
+ODEMethods.odefun = odefun
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/MLPY/Lib/site-packages/mpmath/calculus/optimization.py b/MLPY/Lib/site-packages/mpmath/calculus/optimization.py
new file mode 100644
index 0000000000000000000000000000000000000000..69724f78336ee24856366db48917a47108b1b416
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/calculus/optimization.py
@@ -0,0 +1,1102 @@
+from __future__ import print_function
+
+from copy import copy
+
+from ..libmp.backend import xrange
+
+class OptimizationMethods(object):
+ def __init__(ctx):
+ pass
+
+##############
+# 1D-SOLVERS #
+##############
+
+class Newton:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Needs a starting point x0 close to the root.
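+
+ A short sketch of typical use, through the generic
+ :func:`~mpmath.findroot` driver (these solver classes are normally
+ not instantiated directly)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> findroot(lambda x: x**2 - 2, 1.5, solver='newton')
+ 1.4142135623731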
+
+ Pro:
+
+ * converges fast
+ * sometimes more robust than secant with bad second starting point
+
+ Contra:
+
+ * converges slowly for multiple roots
+ * needs first derivative
+ * 2 function evaluations per iteration
+ """
+ maxsteps = 20
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if len(x0) == 1:
+ self.x0 = x0[0]
+ else:
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
+ self.f = f
+ if not 'df' in kwargs:
+ def df(x):
+ return self.ctx.diff(f, x)
+ else:
+ df = kwargs['df']
+ self.df = df
+
+ def __iter__(self):
+ f = self.f
+ df = self.df
+ x0 = self.x0
+ while True:
+ x1 = x0 - f(x0) / df(x0)
+ error = abs(x1 - x0)
+ x0 = x1
+ yield (x1, error)
+
+class Secant:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Needs starting points x0 and x1 close to the root.
+ x1 defaults to x0 + 0.25.
+
+ Pro:
+
+ * converges fast
+
+ Contra:
+
+ * converges slowly for multiple roots
+ """
+ maxsteps = 30
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if len(x0) == 1:
+ self.x0 = x0[0]
+ self.x1 = self.x0 + 0.25
+ elif len(x0) == 2:
+ self.x0 = x0[0]
+ self.x1 = x0[1]
+ else:
+ raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
+ self.f = f
+
+ def __iter__(self):
+ f = self.f
+ x0 = self.x0
+ x1 = self.x1
+ f0 = f(x0)
+ while True:
+ f1 = f(x1)
+ l = x1 - x0
+ if not l:
+ break
+ s = (f1 - f0) / l
+ if not s:
+ break
+ x0, x1 = x1, x1 - f1/s
+ f0 = f1
+ yield x1, abs(l)
+
+class MNewton:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Needs a starting point x0 close to the root.
+ Uses modified Newton's method that converges fast regardless of the
+ multiplicity of the root.
+
+ Pro:
+
+ * converges fast for multiple roots
+
+ Contra:
+
+ * needs first and second derivative of f
+ * 3 function evaluations per iteration
+ """
+ maxsteps = 20
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if not len(x0) == 1:
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
+ self.x0 = x0[0]
+ self.f = f
+ if not 'df' in kwargs:
+ def df(x):
+ return self.ctx.diff(f, x)
+ else:
+ df = kwargs['df']
+ self.df = df
+ if not 'd2f' in kwargs:
+ def d2f(x):
+ return self.ctx.diff(df, x)
+ else:
+ d2f = kwargs['d2f']
+ self.d2f = d2f
+
+ def __iter__(self):
+ x = self.x0
+ f = self.f
+ df = self.df
+ d2f = self.d2f
+ while True:
+ prevx = x
+ fx = f(x)
+ if fx == 0:
+ break
+ dfx = df(x)
+ d2fx = d2f(x)
+ # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
+ x -= fx / (dfx - fx * d2fx / dfx)
+ error = abs(x - prevx)
+ yield x, error
+
+class Halley:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Needs a starting point x0 close to the root.
+ Uses Halley's method with cubic convergence rate.
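+
+ For instance (an illustrative sketch via the generic
+ :func:`~mpmath.findroot` driver; the derivatives are approximated
+ numerically when *df*/*d2f* are not supplied)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> findroot(lambda x: x**3 - 2, 1.5, solver='halley')
+ 1.25992104989487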
+
+ Pro:
+
+ * converges even faster than Newton's method
+ * useful when computing with *many* digits
+
+ Contra:
+
+ * needs first and second derivative of f
+ * 3 function evaluations per iteration
+ * converges slowly for multiple roots
+ """
+
+ maxsteps = 20
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if not len(x0) == 1:
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
+ self.x0 = x0[0]
+ self.f = f
+ if not 'df' in kwargs:
+ def df(x):
+ return self.ctx.diff(f, x)
+ else:
+ df = kwargs['df']
+ self.df = df
+ if not 'd2f' in kwargs:
+ def d2f(x):
+ return self.ctx.diff(df, x)
+ else:
+ d2f = kwargs['d2f']
+ self.d2f = d2f
+
+ def __iter__(self):
+ x = self.x0
+ f = self.f
+ df = self.df
+ d2f = self.d2f
+ while True:
+ prevx = x
+ fx = f(x)
+ dfx = df(x)
+ d2fx = d2f(x)
+ x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
+ error = abs(x - prevx)
+ yield x, error
+
+class Muller:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Needs starting points x0, x1 and x2 close to the root.
+ x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
+ Uses Muller's method that converges towards complex roots.
+
+ Pro:
+
+ * converges fast (somewhat faster than secant)
+ * can find complex roots
+
+ Contra:
+
+ * converges slowly for multiple roots
+ * may have complex values for real starting points and real roots
+
+ http://en.wikipedia.org/wiki/Muller's_method
+ """
+ maxsteps = 30
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if len(x0) == 1:
+ self.x0 = x0[0]
+ self.x1 = self.x0 + 0.25
+ self.x2 = self.x1 + 0.25
+ elif len(x0) == 2:
+ self.x0 = x0[0]
+ self.x1 = x0[1]
+ self.x2 = self.x1 + 0.25
+ elif len(x0) == 3:
+ self.x0 = x0[0]
+ self.x1 = x0[1]
+ self.x2 = x0[2]
+ else:
+ raise ValueError('expected 1, 2 or 3 starting points, got %i'
+ % len(x0))
+ self.f = f
+ self.verbose = kwargs['verbose']
+
+ def __iter__(self):
+ f = self.f
+ x0 = self.x0
+ x1 = self.x1
+ x2 = self.x2
+ fx0 = f(x0)
+ fx1 = f(x1)
+ fx2 = f(x2)
+ while True:
+ # TODO: maybe refactoring with function for divided differences
+ # calculate divided differences
+ fx2x1 = (fx1 - fx2) / (x1 - x2)
+ fx2x0 = (fx0 - fx2) / (x0 - x2)
+ fx1x0 = (fx0 - fx1) / (x0 - x1)
+ w = fx2x1 + fx2x0 - fx1x0
+ fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
+ if w == 0 and fx2x1x0 == 0:
+ if self.verbose:
+ print('canceled with')
+ print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
+ break
+ x0 = x1
+ fx0 = fx1
+ x1 = x2
+ fx1 = fx2
+ # denominator should be as large as possible => choose sign
+ r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
+ if abs(w - r) > abs(w + r):
+ r = -r
+ x2 -= 2*fx2 / (w + r)
+ fx2 = f(x2)
+ error = abs(x2 - x1)
+ yield x2, error
+
+# TODO: consider raising a ValueError when there's no sign change in a and b
+class Bisection:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Uses the bisection method to find a root of f in [a, b].
+ Might fail for multiple roots (needs sign change).
+
+ Pro:
+
+ * robust and reliable
+
+ Contra:
+
+ * converges slowly
+ * needs sign change
+ """
+ maxsteps = 100
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if len(x0) != 2:
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
+ self.f = f
+ self.a = x0[0]
+ self.b = x0[1]
+
+ def __iter__(self):
+ f = self.f
+ a = self.a
+ b = self.b
+ l = b - a
+ fb = f(b)
+ while True:
+ m = self.ctx.ldexp(a + b, -1)
+ fm = f(m)
+ sign = fm * fb
+ if sign < 0:
+ a = m
+ elif sign > 0:
+ b = m
+ fb = fm
+ else:
+ yield m, self.ctx.zero
+ l /= 2
+ yield (a + b)/2, abs(l)
+
+def _getm(method):
+ """
+ Return a function to calculate m for Illinois-like methods.
+ """
+ if method == 'illinois':
+ def getm(fz, fb):
+ return 0.5
+ elif method == 'pegasus':
+ def getm(fz, fb):
+ return fb/(fb + fz)
+ elif method == 'anderson':
+ def getm(fz, fb):
+ m = 1 - fz/fb
+ if m > 0:
+ return m
+ else:
+ return 0.5
+ else:
+ raise ValueError("method '%s' not recognized" % method)
+ return getm
+
+class Illinois:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Uses the Illinois method or similar to find a root of f in [a, b].
+ Might fail for multiple roots (needs sign change).
+ Combines bisection with the secant method (improved regula falsi).
+
+ The only difference between the methods is the scaling factor m, which is
+ used to ensure convergence (you can choose one using the 'method' keyword):
+
+ Illinois method ('illinois'):
+ m = 0.5
+
+ Pegasus method ('pegasus'):
+ m = fb/(fb + fz)
+
+ Anderson-Bjoerk method ('anderson'):
+ m = 1 - fz/fb if positive else 0.5
+
+ Pro:
+
+ * converges very fast
+
+ Contra:
+
+ * has problems with multiple roots
+ * needs sign change
+ """
+ maxsteps = 30
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if len(x0) != 2:
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
+ self.a = x0[0]
+ self.b = x0[1]
+ self.f = f
+ self.tol = kwargs['tol']
+ self.verbose = kwargs['verbose']
+ self.method = kwargs.get('method', 'illinois')
+ self.getm = _getm(self.method)
+ if self.verbose:
+ print('using %s method' % self.method)
+
+ def __iter__(self):
+ method = self.method
+ f = self.f
+ a = self.a
+ b = self.b
+ fa = f(a)
+ fb = f(b)
+ m = None
+ while True:
+ l = b - a
+ if l == 0:
+ break
+ s = (fb - fa) / l
+ z = a - fa/s
+ fz = f(z)
+ if abs(fz) < self.tol:
+ # TODO: better condition (when f is very flat)
+ if self.verbose:
+ print('canceled with z =', z)
+ yield z, l
+ break
+ if fz * fb < 0: # root in [z, b]
+ a = b
+ fa = fb
+ b = z
+ fb = fz
+ else: # root in [a, z]
+ m = self.getm(fz, fb)
+ b = z
+ fb = fz
+ fa = m*fa # scale down to ensure convergence
+ if self.verbose and m and not method == 'illinois':
+ print('m:', m)
+ yield (a + b)/2, abs(l)
+
+def Pegasus(*args, **kwargs):
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Uses the Pegasus method to find a root of f in [a, b].
+ Wrapper for Illinois to use method='pegasus'.
+ """
+ kwargs['method'] = 'pegasus'
+ return Illinois(*args, **kwargs)
+
+def Anderson(*args, **kwargs):
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Uses the Anderson-Bjoerk method to find a root of f in [a, b].
+ Wrapper for Illinois to use method='anderson'.
+ """
+ kwargs['method'] = 'anderson'
+ return Illinois(*args, **kwargs)
+
+# TODO: check whether it's possible to combine it with Illinois stuff
+class Ridder:
+ """
+ 1d-solver generating pairs of approximative root and error.
+
+ Ridders' method to find a root of f in [a, b].
+ It is said to perform as well as Brent's method while being simpler.
+
+ Pro:
+
+ * very fast
+ * simpler than Brent's method
+
+ Contra:
+
+ * two function evaluations per step
+ * has problems with multiple roots
+ * needs sign change
+
+ http://en.wikipedia.org/wiki/Ridders'_method
+ """
+ maxsteps = 30
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ self.f = f
+ if len(x0) != 2:
+ raise ValueError('expected interval of 2 points, got %i' % len(x0))
+ self.x1 = x0[0]
+ self.x2 = x0[1]
+ self.verbose = kwargs['verbose']
+ self.tol = kwargs['tol']
+
+ def __iter__(self):
+ ctx = self.ctx
+ f = self.f
+ x1 = self.x1
+ fx1 = f(x1)
+ x2 = self.x2
+ fx2 = f(x2)
+ while True:
+ x3 = 0.5*(x1 + x2)
+ fx3 = f(x3)
+ x4 = x3 + (x3 - x1) * ctx.sign(fx1 - fx2) * fx3 / ctx.sqrt(fx3**2 - fx1*fx2)
+ fx4 = f(x4)
+ if abs(fx4) < self.tol:
+ # TODO: better condition (when f is very flat)
+ if self.verbose:
+ print('canceled with f(x4) =', fx4)
+ yield x4, abs(x1 - x2)
+ break
+ if fx4 * fx2 < 0: # root in [x4, x2]
+ x1 = x4
+ fx1 = fx4
+ else: # root in [x1, x4]
+ x2 = x4
+ fx2 = fx4
+ error = abs(x1 - x2)
+ yield (x1 + x2)/2, error
+
+class ANewton:
+ """
+ EXPERIMENTAL 1d-solver generating pairs of approximative root and error.
+
+ Uses Newton's method modified to use Steffensen's method when convergence
+ is slow (i.e. for multiple roots).
+ """
+ maxsteps = 20
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ if not len(x0) == 1:
+ raise ValueError('expected 1 starting point, got %i' % len(x0))
+ self.x0 = x0[0]
+ self.f = f
+ if not 'df' in kwargs:
+ def df(x):
+ return self.ctx.diff(f, x)
+ else:
+ df = kwargs['df']
+ self.df = df
+ def phi(x):
+ return x - f(x) / df(x)
+ self.phi = phi
+ self.verbose = kwargs['verbose']
+
+ def __iter__(self):
+ x0 = self.x0
+ f = self.f
+ df = self.df
+ phi = self.phi
+ error = 0
+ counter = 0
+ while True:
+ prevx = x0
+ try:
+ x0 = phi(x0)
+ except ZeroDivisionError:
+ if self.verbose:
+ print('ZeroDivisionError: canceled with x =', x0)
+ break
+ preverror = error
+ error = abs(prevx - x0)
+ # TODO: decide not to use convergence acceleration
+ if error and abs(error - preverror) / error < 1:
+ if self.verbose:
+ print('converging slowly')
+ counter += 1
+ if counter >= 3:
+ # accelerate convergence
+ phi = steffensen(phi)
+ counter = 0
+ if self.verbose:
+ print('accelerating convergence')
+ yield x0, error
+
+# TODO: add Brent
+
+############################
+# MULTIDIMENSIONAL SOLVERS #
+############################
+
+def jacobian(ctx, f, x):
+ """
+ Calculate the Jacobian matrix of a function at the point x.
+
+ This is the first derivative of a vectorial function:
+
+ f : R^n -> R^m with m >= n
+ """
+ x = ctx.matrix(x)
+ h = ctx.sqrt(ctx.eps)
+ fx = ctx.matrix(f(*x))
+ m = len(fx)
+ n = len(x)
+ J = ctx.matrix(m, n)
+ for j in xrange(n):
+ xj = x.copy()
+ xj[j] += h
+ Jj = (ctx.matrix(f(*xj)) - fx) / h
+ for i in xrange(m):
+ J[i,j] = Jj[i]
+ return J
+
+# TODO: test with user-specified jacobian matrix
+class MDNewton:
+ """
+ Find the root of a vector function numerically using Newton's method.
+
+ f is a vector function representing a nonlinear equation system.
+
+ x0 is the starting point close to the root.
+
+ J is a function returning the Jacobian matrix for a point.
+
+ Supports overdetermined systems.
+
+ Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
+ The function to calculate the Jacobian matrix can be given using the
+ keyword 'J'. Otherwise it will be calculated numerically.
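+
+ A small sketch through :func:`~mpmath.findroot` (which dispatches to
+ this solver for vector-valued systems; the system here is an
+ arbitrary illustrative choice with exact root (2, 1))::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> f = [lambda x, y: x**2 + y**2 - 5, lambda x, y: x*y - 2]
+ >>> r = findroot(f, (2.5, 0.8), solver='mdnewton')
+ >>> nprint([r[0], r[1]], 5)
+ [2.0, 1.0]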
+
+ Please note that this method converges only locally. Especially for high-
+ dimensional systems it is not trivial to find a good starting point being
+ close enough to the root.
+
+ It is recommended to use a faster, low-precision solver from SciPy [1] or
+ OpenOpt [2] to get an initial guess. Afterwards you can use this method for
+ root-polishing to any precision.
+
+ [1] http://scipy.org
+
+ [2] http://openopt.org/Welcome
+ """
+ maxsteps = 10
+
+ def __init__(self, ctx, f, x0, **kwargs):
+ self.ctx = ctx
+ self.f = f
+ if isinstance(x0, (tuple, list)):
+ x0 = ctx.matrix(x0)
+ assert x0.cols == 1, 'need a vector'
+ self.x0 = x0
+ if 'J' in kwargs:
+ self.J = kwargs['J']
+ else:
+ def J(*x):
+ return ctx.jacobian(f, x)
+ self.J = J
+ self.norm = kwargs['norm']
+ self.verbose = kwargs['verbose']
+
+ def __iter__(self):
+ f = self.f
+ x0 = self.x0
+ norm = self.norm
+ J = self.J
+ fx = self.ctx.matrix(f(*x0))
+ fxnorm = norm(fx)
+ cancel = False
+ while not cancel:
+ # get direction of descent
+ fxn = -fx
+ Jx = J(*x0)
+ s = self.ctx.lu_solve(Jx, fxn)
+ if self.verbose:
+ print('Jx:')
+ print(Jx)
+ print('s:', s)
+ # damping step size TODO: better strategy (hard task)
+ l = self.ctx.one
+ x1 = x0 + s
+ while True:
+ if x1 == x0:
+ if self.verbose:
+ print("canceled, won't get more exact")
+ cancel = True
+ break
+ fx = self.ctx.matrix(f(*x1))
+ newnorm = norm(fx)
+ if newnorm < fxnorm:
+ # new x accepted
+ fxnorm = newnorm
+ x0 = x1
+ break
+ l /= 2
+ x1 = x0 + l*s
+ yield (x0, fxnorm)
+
+#############
+# UTILITIES #
+#############
+
+str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
+ 'halley':Halley, 'muller':Muller, 'bisect':Bisection,
+ 'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
+ 'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
+
+def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
+ r"""
+ Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
+ interval for *x*.
+
+ Multidimensional overdetermined systems are supported.
+ You can specify them using a function or a list of functions.
+
+ Mathematically speaking, this function returns `x` such that
+ `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
+ If the computed value does not meet this criterion, an exception is raised.
+ This exception can be disabled with *verify=False*.
+
+ For interval arithmetic (``iv.findroot()``), please note that
+ the returned interval ``x`` is not guaranteed to contain a point where
+ `f(x)=0`! It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}`
+ certainly holds regardless of numerical error. This may be improved
+ in the future.
+
+ **Arguments**
+
+ *f*
+ one-dimensional function
+ *x0*
+ starting point, several starting points or interval (depends on solver)
+ *tol*
+ the returned solution has an error smaller than this
+ *verbose*
+ print additional information for each iteration if true
+ *verify*
+ verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
+ *solver*
+ a generator for *f* and *x0* returning approximative solution and error
+ *maxsteps*
+ after how many steps the solver will cancel
+ *df*
+ first derivative of *f* (used by some solvers)
+ *d2f*
+ second derivative of *f* (used by some solvers)
+ *multidimensional*
+ force multidimensional solving
+ *J*
+ Jacobian matrix of *f* (used by multidimensional solvers)
+ *norm*
+ used vector norm (used by multidimensional solvers)
+
+ *solver* has to be callable with ``(f, x0, **kwargs)`` and return a
+ generator yielding pairs of approximative solution and estimated error
+ (which is expected to be positive).
+ You can use the following string aliases:
+ 'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
+ 'ridder', 'anewton', 'bisect'
+
+ See mpmath.calculus.optimization for their documentation.
+
+ **Examples**
+
+ The function :func:`~mpmath.findroot` locates a root of a given function using the
+ secant method by default. A simple example use of the secant method is to
+ compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 30; mp.pretty = True
+ >>> findroot(sin, 3)
+ 3.14159265358979323846264338328
+
+ The secant method can be used to find complex roots of analytic functions,
+ although it must in that case generally be given a nonreal starting value
+ (or else it will never leave the real line)::
+
+ >>> mp.dps = 15
+ >>> findroot(lambda x: x**3 + 2*x + 1, j)
+ (0.226698825758202 + 1.46771150871022j)
+
+ A nice application is to compute nontrivial roots of the Riemann zeta
+ function with many digits (good initial values are needed for convergence)::
+
+ >>> mp.dps = 30
+ >>> findroot(zeta, 0.5+14j)
+ (0.5 + 14.1347251417346937904572519836j)
+
+ The secant method can also be used as an optimization algorithm, by passing
+ it a derivative of a function. The following example locates the positive
+ minimum of the gamma function::
+
+ >>> mp.dps = 20
+ >>> findroot(lambda x: diff(gamma, x), 1)
+ 1.4616321449683623413
+
+ Finally, a useful application is to compute inverse functions, such as the
+ Lambert W function which is the inverse of `w e^w`, given the first
+ term of the solution's asymptotic expansion as the initial value. In basic
+ cases, this gives identical results to mpmath's built-in ``lambertw``
+ function::
+
+ >>> def lambert(x):
+ ... return findroot(lambda w: w*exp(w) - x, log(1+x))
+ ...
+ >>> mp.dps = 15
+ >>> lambert(1); lambertw(1)
+ 0.567143290409784
+ 0.567143290409784
+ >>> lambert(1000); lambertw(1000)
+ 5.2496028524016
+ 5.2496028524016
+
+ Multidimensional functions are also supported::
+
+ >>> f = [lambda x1, x2: x1**2 + x2,
+ ... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
+ >>> findroot(f, (0, 0))
+ [-0.618033988749895]
+ [-0.381966011250105]
+ >>> findroot(f, (10, 10))
+ [ 1.61803398874989]
+ [-2.61803398874989]
+
+ You can verify this by solving the system manually.
+
+ Please note that the following (more general) syntax also works::
+
+ >>> def f(x1, x2):
+ ... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
+ ...
+ >>> findroot(f, (0, 0))
+ [-0.618033988749895]
+ [-0.381966011250105]
+
+
+ **Multiple roots**
+
+ For multiple roots all methods of the Newtonian family (including secant)
+ converge slowly. Consider this example::
+
+ >>> f = lambda x: (x - 1)**99
+ >>> findroot(f, 0.9, verify=False)
+ 0.918073542444929
+
+ Even for a very close starting point the secant method converges very
+ slowly. Use ``verbose=True`` to illustrate this.
+
+ It is possible to modify Newton's method to make it converge regardless of
+ the root's multiplicity::
+
+ >>> findroot(f, -10, solver='mnewton')
+ 1.0
+
+ This variant uses the first and second derivative of the function, which is
+ not very efficient.
+
+ Alternatively you can use an experimental Newtonian solver that keeps track
+ of the speed of convergence and accelerates it using Steffensen's method if
+ necessary::
+
+ >>> findroot(f, -10, solver='anewton', verbose=True)
+ x: -9.88888888888888888889
+ error: 0.111111111111111111111
+ converging slowly
+ x: -9.77890011223344556678
+ error: 0.10998877665544332211
+ converging slowly
+ x: -9.67002233332199662166
+ error: 0.108877778911448945119
+ converging slowly
+ accelerating convergence
+ x: -9.5622443299551077669
+ error: 0.107778003366888854764
+ converging slowly
+ x: 0.99999999999999999214
+ error: 10.562244329955107759
+ x: 1.0
+ error: 7.8598304758094664213e-18
+ ZeroDivisionError: canceled with x = 1.0
+ 1.0
+
+ **Complex roots**
+
+ For complex roots it's recommended to use Muller's method as it converges
+ even for real starting points very fast::
+
+ >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
+ (0.727136084491197 + 0.934099289460529j)
+
+
+ **Intersection methods**
+
+ When you need to find a root in a known interval, it's highly recommended to
+ use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
+ Usually they converge faster and more reliably. They have, however, problems
+ with multiple roots and usually need a sign change to find a root::
+
+ >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
+ 0.0
+
+ Be careful with symmetric functions::
+
+ >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError
+
+ It fails even for better starting points, because there is no sign change::
+
+ >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
+ Traceback (most recent call last):
+ ...
+ ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
+ Try another starting point or tweak arguments.
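+
+ With a sign change present, an interval solver is straightforward (a
+ small illustrative sketch, printed here to 10 digits since bisection
+ stops slightly short of full precision)::
+
+ >>> nprint(findroot(lambda x: cos(x), (1, 2), solver='bisect'), 10)
+ 1.570796327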
+
+ """
+ prec = ctx.prec
+ try:
+ ctx.prec += 20
+
+ # initialize arguments
+ if tol is None:
+ tol = ctx.eps * 2**10
+
+ kwargs['verbose'] = kwargs.get('verbose', verbose)
+
+ if 'd1f' in kwargs:
+ kwargs['df'] = kwargs['d1f']
+
+ kwargs['tol'] = tol
+ if isinstance(x0, (list, tuple)):
+ x0 = [ctx.convert(x) for x in x0]
+ else:
+ x0 = [ctx.convert(x0)]
+
+ if isinstance(solver, str):
+ try:
+ solver = str2solver[solver]
+ except KeyError:
+ raise ValueError('could not recognize solver')
+
+ # accept list of functions
+ if isinstance(f, (list, tuple)):
+ f2 = copy(f)
+ def tmp(*args):
+ return [fn(*args) for fn in f2]
+ f = tmp
+
+ # detect multidimensional functions
+ try:
+ fx = f(*x0)
+ multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
+ except TypeError:
+ fx = f(x0[0])
+ multidimensional = False
+ if 'multidimensional' in kwargs:
+ multidimensional = kwargs['multidimensional']
+ if multidimensional:
+ # only one multidimensional solver available at the moment
+ solver = MDNewton
+ if not 'norm' in kwargs:
+ norm = lambda x: ctx.norm(x, 'inf')
+ kwargs['norm'] = norm
+ else:
+ norm = kwargs['norm']
+ else:
+ norm = abs
+
+ # happily return starting point if it's a root
+ if norm(fx) == 0:
+ if multidimensional:
+ return ctx.matrix(x0)
+ else:
+ return x0[0]
+
+ # use solver
+ iterations = solver(ctx, f, x0, **kwargs)
+ if 'maxsteps' in kwargs:
+ maxsteps = kwargs['maxsteps']
+ else:
+ maxsteps = iterations.maxsteps
+ i = 0
+ for x, error in iterations:
+ if verbose:
+ print('x: ', x)
+ print('error:', error)
+ i += 1
+ if error < tol * max(1, norm(x)) or i >= maxsteps:
+ break
+ else:
+ if not i:
+ raise ValueError('Could not find root using the given solver.\n'
+ 'Try another starting point or tweak arguments.')
+ if not isinstance(x, (list, tuple, ctx.matrix)):
+ xl = [x]
+ else:
+ xl = x
+ if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
+ raise ValueError('Could not find root within given tolerance. '
+ '(%s > %s)\n'
+ 'Try another starting point or tweak arguments.'
+ % (norm(f(*xl))**2, tol))
+ return x
+ finally:
+ ctx.prec = prec
+
+
+def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
+ """
+ Return the multiplicity of a given root of f.
+
+ Internally, numerical derivatives are used. This might be inefficient for
+ higher order derivatives. Due to this, ``multiplicity`` cancels after
+ evaluating 10 derivatives by default. You can specify the n-th derivative
+ using the dnf keyword.
+
+ >>> from mpmath import *
+ >>> multiplicity(lambda x: sin(x) - 1, pi/2)
+ 2
+
+ """
+ if tol is None:
+ tol = ctx.eps ** 0.8
+ kwargs['d0f'] = f
+ for i in xrange(maxsteps):
+ dfstr = 'd' + str(i) + 'f'
+ if dfstr in kwargs:
+ df = kwargs[dfstr]
+ else:
+ df = lambda x: ctx.diff(f, x, i)
+ if not abs(df(root)) < tol:
+ break
+ return i
+
+def steffensen(f):
+ """
+ linearly convergent function -> quadratically convergent function
+
+ Steffensen's method for quadratic convergence of a linearly converging
+ sequence.
+ Do not use it for higher rates of convergence.
+ It may even work for divergent sequences.
+
+ Definition:
+ F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)
+
+ Example
+ .......
+
+ You can use Steffensen's method to accelerate a fixpoint iteration of linear
+ (or less) convergence.
+
+ x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
+ phi(x) = x**2 there are two fixpoints: 0 and 1.
+
+ Let's try Steffensen's method:
+
+ >>> f = lambda x: x**2
+ >>> from mpmath.calculus.optimization import steffensen
+ >>> F = steffensen(f)
+ >>> for x in [0.5, 0.9, 2.0]:
+ ... fx = Fx = x
+ ... for i in xrange(9):
+ ... try:
+ ... fx = f(fx)
+ ... except OverflowError:
+ ... pass
+ ... try:
+ ... Fx = F(Fx)
+ ... except ZeroDivisionError:
+ ... pass
+ ... print('%20g %20g' % (fx, Fx))
+ 0.25 -0.5
+ 0.0625 0.1
+ 0.00390625 -0.0011236
+ 1.52588e-05 1.41691e-09
+ 2.32831e-10 -2.84465e-27
+ 5.42101e-20 2.30189e-80
+ 2.93874e-39 -1.2197e-239
+ 8.63617e-78 0
+ 7.45834e-155 0
+ 0.81 1.02676
+ 0.6561 1.00134
+ 0.430467 1
+ 0.185302 1
+ 0.0343368 1
+ 0.00117902 1
+ 1.39008e-06 1
+ 1.93233e-12 1
+ 3.73392e-24 1
+ 4 1.6
+ 16 1.2962
+ 256 1.10194
+ 65536 1.01659
+ 4.29497e+09 1.00053
+ 1.84467e+19 1
+ 3.40282e+38 1
+ 1.15792e+77 1
+ 1.34078e+154 1
+
+ Unmodified, the iteration converges only towards 0. Modified, it not
+ only converges much faster, it even converges to the repelling
+ fixpoint 1.
+ """
+ def F(x):
+ fx = f(x)
+ ffx = f(fx)
+ return (x*ffx - fx**2) / (ffx - 2*fx + x)
+ return F
+
+OptimizationMethods.jacobian = jacobian
+OptimizationMethods.findroot = findroot
+OptimizationMethods.multiplicity = multiplicity
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/MLPY/Lib/site-packages/mpmath/calculus/polynomials.py b/MLPY/Lib/site-packages/mpmath/calculus/polynomials.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba75c1e88cbc5d40aa590a786c0af5229f193103
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/calculus/polynomials.py
@@ -0,0 +1,213 @@
+from ..libmp.backend import xrange
+from .calculus import defun
+
+#----------------------------------------------------------------------------#
+#                              Polynomials                                    #
+#----------------------------------------------------------------------------#
+
+# XXX: extra precision
+@defun
+def polyval(ctx, coeffs, x, derivative=False):
+ r"""
+ Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
+ :func:`~mpmath.polyval` evaluates the polynomial
+
+ .. math ::
+
+ P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.
+
+ If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
+ evaluates `P(x)` with the derivative, `P'(x)`, and returns the
+ tuple `(P(x), P'(x))`.
+
+ >>> from mpmath import *
+ >>> mp.pretty = True
+ >>> polyval([3, 0, 2], 0.5)
+ 2.75
+ >>> polyval([3, 0, 2], 0.5, derivative=True)
+ (2.75, 3.0)
+
+ The coefficients and the evaluation point may be any combination
+ of real or complex numbers.
+ """
+ if not coeffs:
+ return ctx.zero
+ p = ctx.convert(coeffs[0])
+ q = ctx.zero
+ for c in coeffs[1:]:
+ if derivative:
+ q = p + x*q
+ p = c + x*p
+ if derivative:
+ return p, q
+ else:
+ return p
+
+@defun
+def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
+ error=False, roots_init=None):
+ """
+ Computes all roots (real or complex) of a given polynomial.
+
+ The roots are returned as a sorted list, where real roots appear first
+ followed by complex conjugate roots as adjacent elements. The polynomial
+ should be given as a list of coefficients, in the format used by
+ :func:`~mpmath.polyval`. The leading coefficient must be nonzero.
+
+ With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
+ where *err* is an estimate of the maximum error among the computed roots.
+
+ **Examples**
+
+ Finding the three real roots of `x^3 - x^2 - 14x + 24`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint(polyroots([1,-1,-14,24]), 4)
+ [-4.0, 2.0, 3.0]
+
+ Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
+ error estimate::
+
+ >>> roots, err = polyroots([4,3,2], error=True)
+ >>> for r in roots:
+ ... print(r)
+ ...
+ (-0.375 + 0.59947894041409j)
+ (-0.375 - 0.59947894041409j)
+ >>>
+ >>> err
+ 2.22044604925031e-16
+ >>>
+ >>> polyval([4,3,2], roots[0])
+ (2.22044604925031e-16 + 0.0j)
+ >>> polyval([4,3,2], roots[1])
+ (2.22044604925031e-16 + 0.0j)
+
+ The following example computes all the 5th roots of unity; that is,
+ the roots of `x^5 - 1`::
+
+ >>> mp.dps = 20
+ >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
+ ... print(r)
+ ...
+ 1.0
+ (-0.8090169943749474241 + 0.58778525229247312917j)
+ (-0.8090169943749474241 - 0.58778525229247312917j)
+ (0.3090169943749474241 + 0.95105651629515357212j)
+ (0.3090169943749474241 - 0.95105651629515357212j)
+
+ **Precision and conditioning**
+
+ The roots are computed to the current working precision accuracy. If this
+ accuracy cannot be achieved in ``maxsteps`` steps, then a
+ ``NoConvergence`` exception is raised. Internally, the algorithm uses
+ the current working precision extended by ``extraprec``. If
+ ``NoConvergence`` was raised, that is caused either by not having enough
+ extra precision to achieve convergence (in which case increasing
+ ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
+ case increasing ``maxsteps`` should fix the problem), or a combination of
+ both.
+
+ The user should always do a convergence study with regard to
+ ``extraprec`` to ensure accurate results. It is possible to get
+ convergence to a wrong answer with too low ``extraprec``.
+
+ Provided there are no repeated roots, :func:`~mpmath.polyroots` can
+ typically compute all roots of an arbitrary polynomial to high precision::
+
+ >>> mp.dps = 60
+ >>> for r in polyroots([1, 0, -10, 0, 1]):
+ ... print(r)
+ ...
+ -3.14626436994197234232913506571557044551247712918732870123249
+ -0.317837245195782244725757617296174288373133378433432554879127
+ 0.317837245195782244725757617296174288373133378433432554879127
+ 3.14626436994197234232913506571557044551247712918732870123249
+ >>>
+ >>> sqrt(3) + sqrt(2)
+ 3.14626436994197234232913506571557044551247712918732870123249
+ >>> sqrt(3) - sqrt(2)
+ 0.317837245195782244725757617296174288373133378433432554879127
+
+ **Algorithm**
+
+ :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
+ uses complex arithmetic to locate all roots simultaneously.
+ The Durand-Kerner method can be viewed as approximately performing
+ simultaneous Newton iteration for all the roots. In particular,
+ the convergence to simple roots is quadratic, just like Newton's
+ method.
+
+ Although all roots are internally calculated using complex arithmetic, any
+ root found to have an imaginary part smaller than the estimated numerical
+ error is truncated to a real number (small real parts are also chopped).
+ Real roots are placed first in the returned list, sorted by value. The
+ remaining complex roots are sorted by their real parts so that conjugate
+ roots end up next to each other.
+
+ **References**
+
http://en.wikipedia.org/wiki/Durand-Kerner_method + + """ + if len(coeffs) <= 1: + if not coeffs or not coeffs[0]: + raise ValueError("Input to polyroots must not be the zero polynomial") + # Constant polynomial with no roots + return [] + + orig = ctx.prec + tol = +ctx.eps + with ctx.extraprec(extraprec): + deg = len(coeffs) - 1 + # Must be monic + lead = ctx.convert(coeffs[0]) + if lead == 1: + coeffs = [ctx.convert(c) for c in coeffs] + else: + coeffs = [c/lead for c in coeffs] + f = lambda x: ctx.polyval(coeffs, x) + if roots_init is None: + roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)] + else: + roots = [None]*deg; + deg_init = min(deg, len(roots_init)) + roots[:deg_init] = list(roots_init[:deg_init]) + roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n + in xrange(deg_init,deg)] + err = [ctx.one for n in xrange(deg)] + # Durand-Kerner iteration until convergence + for step in xrange(maxsteps): + if abs(max(err)) < tol: + break + for i in xrange(deg): + p = roots[i] + x = f(p) + for j in range(deg): + if i != j: + try: + x /= (p-roots[j]) + except ZeroDivisionError: + continue + roots[i] = p - x + err[i] = abs(x) + if abs(max(err)) >= tol: + raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \ + % maxsteps) + # Remove small real or imaginary parts + if cleanup: + for i in xrange(deg): + if abs(roots[i]) < tol: + roots[i] = ctx.zero + elif abs(ctx._im(roots[i])) < tol: + roots[i] = roots[i].real + elif abs(ctx._re(roots[i])) < tol: + roots[i] = roots[i].imag * 1j + roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x))) + if error: + err = max(err) + err = max(err, ctx.ldexp(1, -orig+1)) + return [+r for r in roots], +err + else: + return [+r for r in roots] diff --git a/MLPY/Lib/site-packages/mpmath/calculus/quadrature.py b/MLPY/Lib/site-packages/mpmath/calculus/quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..0545b3ad3e6f70a44f4ec31a0c3bcfb4ffde422b --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/calculus/quadrature.py @@ -0,0 +1,1115 @@ +import math + +from ..libmp.backend import xrange + +class QuadratureRule(object): + """ + Quadrature rules are implemented using this class, in order to + simplify the code and provide a common infrastructure + for tasks such as error estimation and node caching. + + You can implement a custom quadrature rule by subclassing + :class:`QuadratureRule` and implementing the appropriate + methods. The subclass can then be used by :func:`~mpmath.quad` by + passing it as the *method* argument. + + :class:`QuadratureRule` instances are supposed to be singletons. + :class:`QuadratureRule` therefore implements instance caching + in :func:`~mpmath.__new__`. + """ + + def __init__(self, ctx): + self.ctx = ctx + self.standard_cache = {} + self.transformed_cache = {} + self.interval_count = {} + + def clear(self): + """ + Delete cached node data. + """ + self.standard_cache = {} + self.transformed_cache = {} + self.interval_count = {} + + def calc_nodes(self, degree, prec, verbose=False): + r""" + Compute nodes for the standard interval `[-1, 1]`. Subclasses + should probably implement only this method, and use + :func:`~mpmath.get_nodes` method to retrieve the nodes. + """ + raise NotImplementedError + + def get_nodes(self, a, b, degree, prec, verbose=False): + """ + Return nodes for given interval, degree and precision. The + nodes are retrieved from a cache if already computed; + otherwise they are computed by calling :func:`~mpmath.calc_nodes` + and are then cached. 
+ + Subclasses should probably not implement this method, + but just implement :func:`~mpmath.calc_nodes` for the actual + node computation. + """ + key = (a, b, degree, prec) + if key in self.transformed_cache: + return self.transformed_cache[key] + orig = self.ctx.prec + try: + self.ctx.prec = prec+20 + # Get nodes on standard interval + if (degree, prec) in self.standard_cache: + nodes = self.standard_cache[degree, prec] + else: + nodes = self.calc_nodes(degree, prec, verbose) + self.standard_cache[degree, prec] = nodes + # Transform to general interval + nodes = self.transform_nodes(nodes, a, b, verbose) + if key in self.interval_count: + self.transformed_cache[key] = nodes + else: + self.interval_count[key] = True + finally: + self.ctx.prec = orig + return nodes + + def transform_nodes(self, nodes, a, b, verbose=False): + r""" + Rescale standardized nodes (for `[-1, 1]`) to a general + interval `[a, b]`. For a finite interval, a simple linear + change of variables is used. Otherwise, the following + transformations are used: + + .. math :: + + \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1) + + \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x} + + \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}} + + """ + ctx = self.ctx + a = ctx.convert(a) + b = ctx.convert(b) + one = ctx.one + if (a, b) == (-one, one): + return nodes + half = ctx.mpf(0.5) + new_nodes = [] + if ctx.isinf(a) or ctx.isinf(b): + if (a, b) == (ctx.ninf, ctx.inf): + p05 = -half + for x, w in nodes: + x2 = x*x + px1 = one-x2 + spx1 = px1**p05 + x = x*spx1 + w *= spx1/px1 + new_nodes.append((x, w)) + elif a == ctx.ninf: + b1 = b+1 + for x, w in nodes: + u = 2/(x+one) + x = b1-u + w *= half*u**2 + new_nodes.append((x, w)) + elif b == ctx.inf: + a1 = a-1 + for x, w in nodes: + u = 2/(x+one) + x = a1+u + w *= half*u**2 + new_nodes.append((x, w)) + elif a == ctx.inf or b == ctx.ninf: + return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)] + else: + raise NotImplementedError + else: + # Simple linear change of variables + C = (b-a)/2 + D = (b+a)/2 + for x, w in nodes: + new_nodes.append((D+C*x, C*w)) + return new_nodes + + def guess_degree(self, prec): + """ + Given a desired precision `p` in bits, estimate the degree `m` + of the quadrature required to accomplish full accuracy for + typical integrals. By default, :func:`~mpmath.quad` will perform up + to `m` iterations. The value of `m` should be a slight + overestimate, so that "slightly bad" integrals can be dealt + with automatically using a few extra iterations. On the + other hand, it should not be too big, so :func:`~mpmath.quad` can + quit within a reasonable amount of time when it is given + an "unsolvable" integral. + + The default formula used by :func:`~mpmath.guess_degree` is tuned + for both :class:`TanhSinh` and :class:`GaussLegendre`. + The output is roughly as follows: + + +---------+---------+ + | `p` | `m` | + +=========+=========+ + | 50 | 6 | + +---------+---------+ + | 100 | 7 | + +---------+---------+ + | 500 | 10 | + +---------+---------+ + | 3000 | 12 | + +---------+---------+ + + This formula is based purely on a limited amount of + experimentation and will sometimes be wrong. 
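+
+        For reference, the default implementation below computes
+        ``m = int(4 + max(0, log(p/30, 2))) + 2``; for example, `p = 500`
+        gives ``int(4 + log(500/30, 2)) = 8`` plus the "worst case"
+        margin of 2, i.e. `m = 10`, in agreement with the table above.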
+        """
+        # Expected degree
+        # XXX: use mag
+        g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
+        # Reasonable "worst case"
+        g += 2
+        return g
+
+    def estimate_error(self, results, prec, epsilon):
+        r"""
+        Given results from integrations `[I_1, I_2, \ldots, I_k]` done
+        with a quadrature rule of degree `1, 2, \ldots, k`, estimate
+        the error of `I_k`.
+
+        For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
+
+        For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
+        from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
+        that each degree increment roughly doubles the accuracy of
+        the quadrature rule (this is true for both :class:`TanhSinh`
+        and :class:`GaussLegendre`). The extrapolation formula is given
+        by Borwein, Bailey & Girgensohn. Although not very conservative,
+        this method seems to be very robust in practice.
+        """
+        if len(results) == 2:
+            return abs(results[0]-results[1])
+        try:
+            if results[-1] == results[-2] == results[-3]:
+                return self.ctx.zero
+            D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
+            D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
+        except ValueError:
+            return epsilon
+        D3 = -prec
+        D4 = min(0, max(D1**2/D2, 2*D1, D3))
+        return self.ctx.mpf(10) ** int(D4)
+
+    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
+        """
+        Main integration function. Computes the 1D integral over
+        the interval specified by *points*. For each subinterval,
+        performs quadrature of degree from 1 up to *max_degree*
+        until :func:`~mpmath.estimate_error` signals convergence.
+
+        :func:`~mpmath.summation` transforms each subintegration to
+        the standard interval and then calls :func:`~mpmath.sum_next`.
+        """
+        ctx = self.ctx
+        I = total_err = ctx.zero
+        for i in xrange(len(points)-1):
+            a, b = points[i], points[i+1]
+            if a == b:
+                continue
+            # XXX: we could use a single variable transformation,
+            # but this is not good in practice. We get better accuracy
+            # by having 0 as an endpoint.
+            if (a, b) == (ctx.ninf, ctx.inf):
+                _f = f
+                f = lambda x: _f(-x) + _f(x)
+                a, b = (ctx.zero, ctx.inf)
+            results = []
+            err = ctx.zero
+            for degree in xrange(1, max_degree+1):
+                nodes = self.get_nodes(a, b, degree, prec, verbose)
+                if verbose:
+                    print("Integrating from %s to %s (degree %s of %s)" % \
+                        (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
+                result = self.sum_next(f, nodes, degree, prec, results, verbose)
+                results.append(result)
+                if degree > 1:
+                    err = self.estimate_error(results, prec, epsilon)
+                    if verbose:
+                        print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
+                    if err <= epsilon:
+                        break
+            I += results[-1]
+            total_err += err
+        if total_err > epsilon:
+            if verbose:
+                print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
+        return I, total_err
+
+    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
+        r"""
+        Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
+        contains the `(x_k, w_k)` pairs.
+
+        :func:`~mpmath.summation` will supply the list *previous* of
+        values computed by :func:`~mpmath.sum_next` at previous degrees, in
+        case the quadrature rule is able to reuse them.
+        """
+        return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
+
+
+class TanhSinh(QuadratureRule):
+    r"""
+    This class implements "tanh-sinh" or "doubly exponential"
+    quadrature. This quadrature rule is based on the Euler-Maclaurin
+    integral formula. 
By performing a change of variables involving + nested exponentials / hyperbolic functions (hence the name), the + derivatives at the endpoints vanish rapidly. Since the error term + in the Euler-Maclaurin formula depends on the derivatives at the + endpoints, a simple step sum becomes extremely accurate. In + practice, this means that doubling the number of evaluation + points roughly doubles the number of accurate digits. + + Comparison to Gauss-Legendre: + * Initial computation of nodes is usually faster + * Handles endpoint singularities better + * Handles infinite integration intervals better + * Is slower for smooth integrands once nodes have been computed + + The implementation of the tanh-sinh algorithm is based on the + description given in Borwein, Bailey & Girgensohn, "Experimentation + in Mathematics - Computational Paths to Discovery", A K Peters, + 2003, pages 312-313. In the present implementation, a few + improvements have been made: + + * A more efficient scheme is used to compute nodes (exploiting + recurrence for the exponential function) + * The nodes are computed successively instead of all at once + + **References** + + * [Bailey]_ + * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf + + """ + + def sum_next(self, f, nodes, degree, prec, previous, verbose=False): + """ + Step sum for tanh-sinh quadrature of degree `m`. We exploit the + fact that half of the abscissas at degree `m` are precisely the + abscissas from degree `m-1`. Thus reusing the result from + the previous level allows a 2x speedup. + """ + h = self.ctx.mpf(2)**(-degree) + # Abscissas overlap, so reusing saves half of the time + if previous: + S = previous[-1]/(h*2) + else: + S = self.ctx.zero + S += self.ctx.fdot((w,f(x)) for (x,w) in nodes) + return h*S + + def calc_nodes(self, degree, prec, verbose=False): + r""" + The abscissas and weights for tanh-sinh quadrature of degree + `m` are given by + + .. math:: + + x_k = \tanh(\pi/2 \sinh(t_k)) + + w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2 + + where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The + list of nodes is actually infinite, but the weights die off so + rapidly that only a few are needed. + """ + ctx = self.ctx + nodes = [] + + extra = 20 + ctx.prec += extra + tol = ctx.ldexp(1, -prec-10) + pi4 = ctx.pi/4 + + # For simplicity, we work in steps h = 1/2^n, with the first point + # offset so that we can reuse the sum from the previous degree + + # We define degree 1 to include the "degree 0" steps, including + # the point x = 0. (It doesn't work well otherwise; not sure why.) + t0 = ctx.ldexp(1, -degree) + if degree == 1: + #nodes.append((mpf(0), pi4)) + #nodes.append((-mpf(0), pi4)) + nodes.append((ctx.zero, ctx.pi/2)) + h = t0 + else: + h = t0*2 + + # Since h is fixed, we can compute the next exponential + # by simply multiplying by exp(h) + expt0 = ctx.exp(t0) + a = pi4 * expt0 + b = pi4 / expt0 + udelta = ctx.exp(h) + urdelta = 1/udelta + + for k in xrange(0, 20*2**degree+1): + # Reference implementation: + # t = t0 + k*h + # x = tanh(pi/2 * sinh(t)) + # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2 + + # Fast implementation. Note that c = exp(pi/2 * sinh(t)) + c = ctx.exp(a-b) + d = 1/c + co = (c+d)/2 + si = (c-d)/2 + x = si / co + w = (a+b) / co**2 + diff = abs(x-1) + if diff <= tol: + break + + nodes.append((x, w)) + nodes.append((-x, w)) + + a *= udelta + b *= urdelta + + if verbose and k % 300 == 150: + # Note: the number displayed is rather arbitrary. 
Should
+                # figure out how to print something that looks more like a
+                # percentage
+                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))
+
+        ctx.prec -= extra
+        return nodes
+
+
+class GaussLegendre(QuadratureRule):
+    r"""
+    This class implements Gauss-Legendre quadrature, which is
+    exceptionally efficient for polynomials and polynomial-like (i.e.
+    very smooth) integrands.
+
+    The abscissas and weights are given by roots and values of
+    Legendre polynomials, which are the orthogonal polynomials
+    on `[-1, 1]` with respect to the unit weight
+    (see :func:`~mpmath.legendre`).
+
+    In this implementation, we take the "degree" `m` of the quadrature
+    to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
+    Borwein, Bailey & Girgensohn). This way we get quadratic, rather
+    than linear, convergence as the degree is incremented.
+
+    Comparison to tanh-sinh quadrature:
+      * Is faster for smooth integrands once nodes have been computed
+      * Initial computation of nodes is usually slower
+      * Handles endpoint singularities worse
+      * Handles infinite integration intervals worse
+
+    """
+
+    def calc_nodes(self, degree, prec, verbose=False):
+        r"""
+        Calculates the abscissas and weights for Gauss-Legendre
+        quadrature of the given degree `m` (which actually denotes a
+        rule of degree `3 \cdot 2^m`).
+        """
+        ctx = self.ctx
+        # It is important that the epsilon is set lower than the
+        # "real" epsilon
+        epsilon = ctx.ldexp(1, -prec-8)
+        # Fairly high precision might be required for accurate
+        # evaluation of the roots
+        orig = ctx.prec
+        ctx.prec = int(prec*1.5)
+        if degree == 1:
+            x = ctx.sqrt(ctx.mpf(3)/5)
+            w = ctx.mpf(5)/9
+            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
+            ctx.prec = orig
+            return nodes
+        nodes = []
+        n = 3*2**(degree-1)
+        upto = n//2 + 1
+        for j in xrange(1, upto):
+            # Asymptotic formula for the roots
+            r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
+            # Newton iteration
+            while 1:
+                t1, t2 = 1, 0
+                # Evaluates the Legendre polynomial using its defining
+                # recurrence relation
+                for j1 in xrange(1,n+1):
+                    t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
+                t4 = n*(r*t1-t2)/(r**2-1)
+                a = t1/t4
+                r = r - a
+                if abs(a) < epsilon:
+                    break
+            x = r
+            w = 2/((1-r**2)*t4**2)
+            if verbose and j % 30 == 15:
+                print("Computing nodes (%i of %i)" % (j, upto))
+            nodes.append((x, w))
+            nodes.append((-x, w))
+        ctx.prec = orig
+        return nodes
+
+class QuadratureMethods(object):
+
+    def __init__(ctx, *args, **kwargs):
+        ctx._gauss_legendre = GaussLegendre(ctx)
+        ctx._tanh_sinh = TanhSinh(ctx)
+
+    def quad(ctx, f, *points, **kwargs):
+        r"""
+        Computes a single, double or triple integral over a given
+        1D interval, 2D rectangle, or 3D cuboid. A basic example::
+
+            >>> from mpmath import *
+            >>> mp.dps = 15; mp.pretty = True
+            >>> quad(sin, [0, pi])
+            2.0
+
+        A basic 2D integral::
+
+            >>> f = lambda x, y: cos(x+y/2)
+            >>> quad(f, [-pi/2, pi/2], [0, pi])
+            4.0
+
+        **Interval format**
+
+        The integration range for each dimension may be specified
+        using a list or tuple. Arguments are interpreted as follows:
+
+        ``quad(f, [x1, x2])`` -- calculates
+        `\int_{x_1}^{x_2} f(x) \, dx`
+
+        ``quad(f, [x1, x2], [y1, y2])`` -- calculates
+        `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
+
+        ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
+        `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
+        \, dz \, dy \, dx`
+
+        Endpoints may be finite or infinite. An interval descriptor
+        may also contain more than two points. In this
+        case, the integration is split into subintervals, between
+        each pair of consecutive points. This is useful for
+        dealing with mid-interval discontinuities, or integrating
+        over large intervals where the function is irregular or
+        oscillates.
+
+        **Options**
+
+        :func:`~mpmath.quad` recognizes the following keyword arguments:
+
+        *method*
+            Chooses integration algorithm (described below).
+        *error*
+            If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
+            integral and `e` is the estimated error.
+        *maxdegree*
+            Maximum degree of the quadrature rule to try before
+            quitting.
+        *verbose*
+            Print details about progress.
+
+        **Algorithms**
+
+        Mpmath presently implements two integration algorithms: tanh-sinh
+        quadrature and Gauss-Legendre quadrature. These can be selected
+        using *method='tanh-sinh'* or *method='gauss-legendre'* or by
+        passing the classes *method=TanhSinh*, *method=GaussLegendre*.
+        The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
+        as shortcuts.
+
+        Both algorithms have the property that doubling the number of
+        evaluation points roughly doubles the accuracy, so both are ideal
+        for high precision quadrature (hundreds or thousands of digits).
+
+        At high precision, computing the nodes and weights for the
+        integration can be expensive (more expensive than computing the
+        function values). To make repeated integrations fast, nodes
+        are automatically cached.
+
+        The advantages of the tanh-sinh algorithm are that it tends to
+        handle endpoint singularities well, and that the nodes are cheap
+        to compute on the first run. For these reasons, it is used by
+        :func:`~mpmath.quad` as the default algorithm.
+
+        Gauss-Legendre quadrature often requires fewer function
+        evaluations, and is therefore often faster for repeated use, but
+        the algorithm does not handle endpoint singularities as well and
+        the nodes are more expensive to compute. Gauss-Legendre quadrature
+        can be a better choice if the integrand is smooth and repeated
+        integrations are required (e.g. for multiple integrals).
+
+        See the documentation for :class:`TanhSinh` and
+        :class:`GaussLegendre` for additional details.
+
+        **Examples of 1D integrals**
+
+        Intervals may be infinite or half-infinite. The following two
+        examples evaluate the limits of the inverse tangent function
+        (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
+        `\int_{-\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
+
+            >>> mp.dps = 15
+            >>> quad(lambda x: 2/(x**2+1), [0, inf])
+            3.14159265358979
+            >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
+            3.14159265358979
+
+        Integrals can typically be resolved to high precision.
+        The following computes 50 digits of `\pi` by integrating the
+        area of the half-circle defined by `x^2 + y^2 \le 1`,
+        `-1 \le x \le 1`, `y \ge 0`::
+
+            >>> mp.dps = 50
+            >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
+            3.1415926535897932384626433832795028841971693993751
+
+        One can just as well compute 1000 digits (output truncated)::
+
+            >>> mp.dps = 1000
+            >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])  #doctest:+ELLIPSIS
+            3.141592653589793238462643383279502884...216420199
+
+        Complex integrals are supported. The following computes
+        a residue at `z = 0` by integrating counterclockwise along the
+        diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
+
+            >>> mp.dps = 15
+            >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
+            (0.0 + 6.28318530717959j)
+
+        **Examples of 2D and 3D integrals**
+
+        Here are several nice examples of analytically solvable
+        2D integrals (taken from MathWorld [1]) that can be evaluated
+        to high precision fairly rapidly by :func:`~mpmath.quad`::
+
+            >>> mp.dps = 30
+            >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
+            >>> quad(f, [0, 1], [0, 1])
+            0.577215664901532860606512090082
+            >>> +euler
+            0.577215664901532860606512090082
+
+            >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
+            >>> quad(f, [-1, 1], [-1, 1])
+            3.17343648530607134219175646705
+            >>> 4*log(2+sqrt(3))-2*pi/3
+            3.17343648530607134219175646705
+
+            >>> f = lambda x, y: 1/(1-x**2 * y**2)
+            >>> quad(f, [0, 1], [0, 1])
+            1.23370055013616982735431137498
+            >>> pi**2 / 8
+            1.23370055013616982735431137498
+
+            >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
+            1.64493406684822643647241516665
+            >>> pi**2 / 6
+            1.64493406684822643647241516665
+
+        Multiple integrals may be done over infinite ranges::
+
+            >>> mp.dps = 15
+            >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
+            0.367879441171442
+            >>> print(1/e)
+            0.367879441171442
+
+        For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
+        For example, we can replicate the earlier example of calculating
+        `\pi` by integrating over the unit circle, this time using double
+        quadrature to measure the area of the circle directly::
+
+            >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
+            >>> quad(f, [-1, 1])
+            3.14159265358979
+
+        Here is a simple triple integral::
+
+            >>> mp.dps = 15
+            >>> f = lambda x,y,z: x*y/(1+z)
+            >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
+            0.101366277027041
+            >>> (log(3)-log(2))/4
+            0.101366277027041
+
+        **Singularities**
+
+        Both tanh-sinh and Gauss-Legendre quadrature are designed to
+        integrate smooth (infinitely differentiable) functions. Neither
+        algorithm copes well with mid-interval singularities (such as
+        mid-interval discontinuities in `f(x)` or `f'(x)`).
+        The best solution is to split the integral into parts::
+
+            >>> mp.dps = 15
+            >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
+            3.99900894176779
+            >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
+            4.0
+
+        The tanh-sinh rule often works well for integrands having a
+        singularity at one or both endpoints::
+
+            >>> mp.dps = 15
+            >>> quad(log, [0, 1], method='tanh-sinh') # Good
+            -1.0
+            >>> quad(log, [0, 1], method='gauss-legendre') # Bad
+            -0.999932197413801
+
+        However, the result may still be inaccurate for some functions::
+
+            >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
+            1.99999999946942
+
+        This problem is not due to the quadrature rule per se, but to
+        numerical amplification of errors in the nodes. The problem can be
+        circumvented by temporarily increasing the precision::
+
+            >>> mp.dps = 30
+            >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
+            >>> mp.dps = 15
+            >>> +a
+            2.0
+
+        **Highly variable functions**
+
+        For functions that are smooth (in the sense of being infinitely
+        differentiable) but contain sharp mid-interval peaks or many
+        "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
+        example, with default settings, :func:`~mpmath.quad` is able to integrate
+        `\sin(x)` accurately over an interval of length 100 but not over
+        length 1000::
+
+            >>> quad(sin, [0, 100]); 1-cos(100) # Good
+            0.137681127712316
+            0.137681127712316
+            >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
+            -37.8587612408485
+            0.437620923709297
+
+        One solution is to break the integration range into a number
+        of shorter intervals::
+
+            >>> quad(sin, linspace(0, 1000, 10)) # Good
+            0.437620923709297
+
+        Another is to increase the degree of the quadrature::
+
+            >>> quad(sin, [0, 1000], maxdegree=10) # Also good
+            0.437620923709297
+
+        Whether splitting the interval or increasing the degree is
+        more efficient differs from case to case. Another example is the
+        function `1/(1+x^2)`, which has a sharp peak centered around
+        `x = 0`::
+
+            >>> f = lambda x: 1/(1+x**2)
+            >>> quad(f, [-100, 100]) # Bad
+            3.64804647105268
+            >>> quad(f, [-100, 100], maxdegree=10) # Good
+            3.12159332021646
+            >>> quad(f, [-100, 0, 100]) # Also good
+            3.12159332021646
+
+        **References**
+
+        1. http://mathworld.wolfram.com/DoubleIntegral.html
+
+        """
+        rule = kwargs.get('method', 'tanh-sinh')
+        if type(rule) is str:
+            if rule == 'tanh-sinh':
+                rule = ctx._tanh_sinh
+            elif rule == 'gauss-legendre':
+                rule = ctx._gauss_legendre
+            else:
+                raise ValueError("unknown quadrature rule: %s" % rule)
+        else:
+            rule = rule(ctx)
+        verbose = kwargs.get('verbose')
+        dim = len(points)
+        orig = prec = ctx.prec
+        epsilon = ctx.eps/8
+        m = kwargs.get('maxdegree') or rule.guess_degree(prec)
+        points = [ctx._as_points(p) for p in points]
+        try:
+            ctx.prec += 20
+            if dim == 1:
+                v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
+            elif dim == 2:
+                v, err = rule.summation(lambda x: \
+                        rule.summation(lambda y: f(x,y), \
+                        points[1], prec, epsilon, m)[0],
+                    points[0], prec, epsilon, m, verbose)
+            elif dim == 3:
+                v, err = rule.summation(lambda x: \
+                        rule.summation(lambda y: \
+                            rule.summation(lambda z: f(x,y,z), \
+                            points[2], prec, epsilon, m)[0],
+                        points[1], prec, epsilon, m)[0],
+                    points[0], prec, epsilon, m, verbose)
+            else:
+                raise NotImplementedError("quadrature must have dim 1, 2 or 3")
+        finally:
+            ctx.prec = orig
+        if kwargs.get("error"):
+            return +v, err
+        return +v
+
+    def quadts(ctx, *args, **kwargs):
+        """
+        Performs tanh-sinh quadrature. The call
+
+            quadts(func, *points, ...)
+
+        is simply a shortcut for:
+
+            quad(func, *points, ..., method=TanhSinh)
+
+        For example, a single integral and a double integral:
+
+            quadts(lambda x: exp(cos(x)), [0, 1])
+            quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
+
+        See the documentation for quad for information about how points
+        arguments and keyword arguments are parsed.
+
+        See documentation for TanhSinh for algorithmic information about
+        tanh-sinh quadrature.
+        """
+        kwargs['method'] = 'tanh-sinh'
+        return ctx.quad(*args, **kwargs)
+
+    def quadgl(ctx, *args, **kwargs):
+        """
+        Performs Gauss-Legendre quadrature. The call
+
+            quadgl(func, *points, ...)
+
+        is simply a shortcut for:
+
+            quad(func, *points, ..., method=GaussLegendre)
+
+        For example, a single integral and a double integral:
+
+            quadgl(lambda x: exp(cos(x)), [0, 1])
+            quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
+
+        See the documentation for quad for information about how points
+        arguments and keyword arguments are parsed.
+
+        See documentation for GaussLegendre for algorithmic information
+        about Gauss-Legendre quadrature.
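+
+        As a quick sanity check, here is a smooth integrand for which
+        Gauss-Legendre quadrature converges rapidly (compare the
+        corresponding example for :func:`~mpmath.quad`)::
+
+            >>> from mpmath import *
+            >>> mp.dps = 15; mp.pretty = True
+            >>> quadgl(sin, [0, pi])
+            2.0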
+ """ + kwargs['method'] = 'gauss-legendre' + return ctx.quad(*args, **kwargs) + + def quadosc(ctx, f, interval, omega=None, period=None, zeros=None): + r""" + Calculates + + .. math :: + + I = \int_a^b f(x) dx + + where at least one of `a` and `b` is infinite and where + `f(x) = g(x) \cos(\omega x + \phi)` for some slowly + decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc` + can also handle oscillatory integrals where the oscillation + rate is different from a pure sine or cosine wave. + + In the standard case when `|a| < \infty, b = \infty`, + :func:`~mpmath.quadosc` works by evaluating the infinite series + + .. math :: + + I = \int_a^{x_1} f(x) dx + + \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx + + where `x_k` are consecutive zeros (alternatively + some other periodic reference point) of `f(x)`. + Accordingly, :func:`~mpmath.quadosc` requires information about the + zeros of `f(x)`. For a periodic function, you can specify + the zeros by either providing the angular frequency `\omega` + (*omega*) or the *period* `2 \pi/\omega`. In general, you can + specify the `n`-th zero by providing the *zeros* arguments. + Below is an example of each:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = lambda x: sin(3*x)/(x**2+1) + >>> quadosc(f, [0,inf], omega=3) + 0.37833007080198 + >>> quadosc(f, [0,inf], period=2*pi/3) + 0.37833007080198 + >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3) + 0.37833007080198 + >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica + 0.37833007080198 + + Note that *zeros* was specified to multiply `n` by the + *half-period*, not the full period. In theory, it does not matter + whether each partial integral is done over a half period or a full + period. However, if done over half-periods, the infinite series + passed to :func:`~mpmath.nsum` becomes an *alternating series* and this + typically makes the extrapolation much more efficient. + + Here is an example of an integration over the entire real line, + and a half-infinite integration starting at `-\infty`:: + + >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1) + 1.15572734979092 + >>> pi/e + 1.15572734979092 + >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi) + -0.0844109505595739 + >>> cos(1)+si(1)-pi/2 + -0.0844109505595738 + + Of course, the integrand may contain a complex exponential just as + well as a real sine or cosine:: + + >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3) + (0.156410688228254 + 0.0j) + >>> pi/e**3 + 0.156410688228254 + >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3) + (0.00317486988463794 - 0.0447701735209082j) + >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2) + (0.00317486988463794 - 0.0447701735209082j) + + **Non-periodic functions** + + If `f(x) = g(x) h(x)` for some function `h(x)` that is not + strictly periodic, *omega* or *period* might not work, and it might + be necessary to use *zeros*. + + A notable exception can be made for Bessel functions which, though not + periodic, are "asymptotically periodic" in a sufficiently strong sense + that the sum extrapolation will work out:: + + >>> quadosc(j0, [0, inf], period=2*pi) + 1.0 + >>> quadosc(j1, [0, inf], period=2*pi) + 1.0 + + More properly, one should provide the exact Bessel function zeros:: + + >>> j0zero = lambda n: findroot(j0, pi*(n-0.25)) + >>> quadosc(j0, [0, inf], zeros=j0zero) + 1.0 + + For an example where *zeros* becomes necessary, consider the + complete Fresnel integrals + + .. 
math :: + + \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx + = \sqrt{\frac{\pi}{8}}. + + Although the integrands do not decrease in magnitude as + `x \to \infty`, the integrals are convergent since the oscillation + rate increases (causing consecutive periods to asymptotically + cancel out). These integrals are virtually impossible to calculate + to any kind of accuracy using standard quadrature rules. However, + if one provides the correct asymptotic distribution of zeros + (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works:: + + >>> mp.dps = 30 + >>> f = lambda x: cos(x**2) + >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) + 0.626657068657750125603941321203 + >>> f = lambda x: sin(x**2) + >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n)) + 0.626657068657750125603941321203 + >>> sqrt(pi/8) + 0.626657068657750125603941321203 + + (Interestingly, these integrals can still be evaluated if one + places some other constant than `\pi` in the square root sign.) + + In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow + the inverse-function distribution `h^{-1}(x)`:: + + >>> mp.dps = 15 + >>> f = lambda x: sin(exp(x)) + >>> quadosc(f, [1,inf], zeros=lambda n: log(n)) + -0.25024394235267 + >>> pi/2-si(e) + -0.250243942352671 + + **Non-alternating functions** + + If the integrand oscillates around a positive value, without + alternating signs, the extrapolation might fail. A simple trick + that sometimes works is to multiply or divide the frequency by 2:: + + >>> f = lambda x: 1/x**2+sin(x)/x**4 + >>> quadosc(f, [1,inf], omega=1) # Bad + 1.28642190869861 + >>> quadosc(f, [1,inf], omega=0.5) # Perfect + 1.28652953559617 + >>> 1+(cos(1)+ci(1)+sin(1))/6 + 1.28652953559617 + + **Fast decay** + + :func:`~mpmath.quadosc` is primarily useful for slowly decaying + integrands. If the integrand decreases exponentially or faster, + :func:`~mpmath.quad` will likely handle it without trouble (and generally be + much faster than :func:`~mpmath.quadosc`):: + + >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1) + 0.5 + >>> quad(lambda x: cos(x)/exp(x), [0, inf]) + 0.5 + + """ + a, b = ctx._as_points(interval) + a = ctx.convert(a) + b = ctx.convert(b) + if [omega, period, zeros].count(None) != 2: + raise ValueError( \ + "must specify exactly one of omega, period, zeros") + if a == ctx.ninf and b == ctx.inf: + s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period) + s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period) + return s1 + s2 + if a == ctx.ninf: + if zeros: + return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n)) + else: + return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period) + if b != ctx.inf: + raise ValueError("quadosc requires an infinite integration interval") + if not zeros: + if omega: + period = 2*ctx.pi/omega + zeros = lambda n: n*period/2 + #for n in range(1,10): + # p = zeros(n) + # if p > a: + # break + #if n >= 9: + # raise ValueError("zeros do not appear to be correctly indexed") + n = 1 + s = ctx.quadgl(f, [a, zeros(n)]) + def term(k): + return ctx.quadgl(f, [zeros(k), zeros(k+1)]) + s += ctx.nsum(term, [n, ctx.inf]) + return s + + def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs): + """ + Computes the integral of *f* over the interval or path specified + by *interval*, using :func:`~mpmath.quad` together with adaptive + subdivision of the interval. 
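+
+        Whenever the error estimate reported by :func:`~mpmath.quad` on a
+        subinterval exceeds the tolerance, that subinterval is bisected at
+        its midpoint (when an endpoint is infinite, a finite splitting
+        point is chosen instead) and the halves are examined again, until
+        every subinterval converges or *maxintervals* is exceeded.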
+
+        This function gives an accurate answer for some integrals where
+        :func:`~mpmath.quad` fails::
+
+            >>> from mpmath import *
+            >>> mp.dps = 15; mp.pretty = True
+            >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
+            3.99900894176779
+            >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
+            4.0
+            >>> quadsubdiv(sin, [0, 1000])
+            0.437620923709297
+            >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
+            3.12159332021646
+            >>> quadsubdiv(lambda x: ceil(x), [0, 100])
+            5050.0
+            >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
+            0.347400172657248
+
+        The argument *maxintervals* can be set to limit the permissible
+        subdivision::
+
+            >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
+            (-5.40487904307774, 5.011)
+            >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
+            (0.631417921866934, 1.10101120134116e-17)
+
+        Subdivision does not guarantee a correct answer, since the error
+        estimate on subintervals may be inaccurate::
+
+            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
+            (0.210802735500549, 1.0001111101e-17)
+            >>> mp.dps = 20
+            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
+            (0.21080273550054927738, 2.200000001e-24)
+
+        The second answer is correct. We can get an accurate result at lower
+        precision by forcing a finer initial subdivision::
+
+            >>> mp.dps = 15
+            >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
+            0.210802735500549
+
+        The following integral is too oscillatory for convergence, but we can get a
+        reasonable estimate::
+
+            >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
+            >>> round(v, 6), round(err, 6)
+            (0.504067, 1e-06)
+            >>> sin(1) - ci(1)
+            0.504067061906928
+
+        """
+        queue = []
+        for i in range(len(interval)-1):
+            queue.append((interval[i], interval[i+1]))
+        total = ctx.zero
+        total_error = ctx.zero
+        if maxintervals is None:
+            maxintervals = 10 * ctx.prec
+        count = 0
+        quad_args = kwargs.copy()
+        quad_args["verbose"] = False
+        quad_args["error"] = True
+        if tol is None:
+            tol = +ctx.eps
+        orig = ctx.prec
+        try:
+            ctx.prec += 5
+            while queue:
+                a, b = queue.pop()
+                s, err = ctx.quad(f, [a, b], **quad_args)
+                if kwargs.get("verbose"):
+                    print("subinterval", count, a, b, err)
+                if err < tol or count > maxintervals:
+                    total += s
+                    total_error += err
+                else:
+                    count += 1
+                    if count == maxintervals and kwargs.get("verbose"):
+                        print("warning: number of intervals exceeded maxintervals")
+                    if a == -ctx.inf and b == ctx.inf:
+                        m = 0
+                    elif a == -ctx.inf:
+                        m = min(b-1, 2*b)
+                    elif b == ctx.inf:
+                        m = max(a+1, 2*a)
+                    else:
+                        m = a + (b - a) / 2
+                    queue.append((a, m))
+                    queue.append((m, b))
+        finally:
+            ctx.prec = orig
+        if kwargs.get("error"):
+            return +total, +total_error
+        else:
+            return +total
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/MLPY/Lib/site-packages/mpmath/ctx_base.py b/MLPY/Lib/site-packages/mpmath/ctx_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1946f8daf4dbe165b3943be09af361812828aab1
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/ctx_base.py
@@ -0,0 +1,494 @@
+from operator import gt, lt
+
+from .libmp.backend import xrange
+
+from .functions.functions import SpecialFunctions
+from .functions.rszeta import RSCache
+from .calculus.quadrature import QuadratureMethods
+from .calculus.inverselaplace import LaplaceTransformInversionMethods
+from .calculus.calculus import CalculusMethods
+from .calculus.optimization import OptimizationMethods +from .calculus.odes import ODEMethods +from .matrices.matrices import MatrixMethods +from .matrices.calculus import MatrixCalculusMethods +from .matrices.linalg import LinearAlgebraMethods +from .matrices.eigen import Eigen +from .identification import IdentificationMethods +from .visualization import VisualizationMethods + +from . import libmp + +class Context(object): + pass + +class StandardBaseContext(Context, + SpecialFunctions, + RSCache, + QuadratureMethods, + LaplaceTransformInversionMethods, + CalculusMethods, + MatrixMethods, + MatrixCalculusMethods, + LinearAlgebraMethods, + Eigen, + IdentificationMethods, + OptimizationMethods, + ODEMethods, + VisualizationMethods): + + NoConvergence = libmp.NoConvergence + ComplexResult = libmp.ComplexResult + + def __init__(ctx): + ctx._aliases = {} + # Call those that need preinitialization (e.g. for wrappers) + SpecialFunctions.__init__(ctx) + RSCache.__init__(ctx) + QuadratureMethods.__init__(ctx) + LaplaceTransformInversionMethods.__init__(ctx) + CalculusMethods.__init__(ctx) + MatrixMethods.__init__(ctx) + + def _init_aliases(ctx): + for alias, value in ctx._aliases.items(): + try: + setattr(ctx, alias, getattr(ctx, value)) + except AttributeError: + pass + + _fixed_precision = False + + # XXX + verbose = False + + def warn(ctx, msg): + print("Warning:", msg) + + def bad_domain(ctx, msg): + raise ValueError(msg) + + def _re(ctx, x): + if hasattr(x, "real"): + return x.real + return x + + def _im(ctx, x): + if hasattr(x, "imag"): + return x.imag + return ctx.zero + + def _as_points(ctx, x): + return x + + def fneg(ctx, x, **kwargs): + return -ctx.convert(x) + + def fadd(ctx, x, y, **kwargs): + return ctx.convert(x)+ctx.convert(y) + + def fsub(ctx, x, y, **kwargs): + return ctx.convert(x)-ctx.convert(y) + + def fmul(ctx, x, y, **kwargs): + return ctx.convert(x)*ctx.convert(y) + + def fdiv(ctx, x, y, **kwargs): + return ctx.convert(x)/ctx.convert(y) + + def fsum(ctx, args, absolute=False, squared=False): + if absolute: + if squared: + return sum((abs(x)**2 for x in args), ctx.zero) + return sum((abs(x) for x in args), ctx.zero) + if squared: + return sum((x**2 for x in args), ctx.zero) + return sum(args, ctx.zero) + + def fdot(ctx, xs, ys=None, conjugate=False): + if ys is not None: + xs = zip(xs, ys) + if conjugate: + cf = ctx.conj + return sum((x*cf(y) for (x,y) in xs), ctx.zero) + else: + return sum((x*y for (x,y) in xs), ctx.zero) + + def fprod(ctx, args): + prod = ctx.one + for arg in args: + prod *= arg + return prod + + def nprint(ctx, x, n=6, **kwargs): + """ + Equivalent to ``print(nstr(x, n))``. + """ + print(ctx.nstr(x, n, **kwargs)) + + def chop(ctx, x, tol=None): + """ + Chops off small real or imaginary parts, or converts + numbers close to zero to exact zeros. The input can be a + single number or an iterable:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> chop(5+1e-10j, tol=1e-9) + mpf('5.0') + >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2])) + [1.0, 0.0, 3.0, -4.0, 2.0] + + The tolerance defaults to ``100*eps``. 
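+
+        For complex input, the real and imaginary parts are also chopped
+        individually: a part is dropped if its absolute value is smaller
+        than ``tol*max(1, abs(x))``.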
+        """
+        if tol is None:
+            tol = 100*ctx.eps
+        try:
+            x = ctx.convert(x)
+            absx = abs(x)
+            if abs(x) < tol:
+                return ctx.zero
+            if ctx._is_complex_type(x):
+                #part_tol = min(tol, absx*tol)
+                part_tol = max(tol, absx*tol)
+                if abs(x.imag) < part_tol:
+                    return x.real
+                if abs(x.real) < part_tol:
+                    return ctx.mpc(0, x.imag)
+        except TypeError:
+            if isinstance(x, ctx.matrix):
+                return x.apply(lambda a: ctx.chop(a, tol))
+            if hasattr(x, "__iter__"):
+                return [ctx.chop(a, tol) for a in x]
+        return x
+
+    def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
+        r"""
+        Determine whether the difference between `s` and `t` is smaller
+        than a given epsilon, either relatively or absolutely.
+
+        Both a maximum relative difference and a maximum difference
+        ('epsilons') may be specified. The absolute difference is
+        defined as `|s-t|` and the relative difference is defined
+        as `|s-t|/\max(|s|, |t|)`.
+
+        If only one epsilon is given, both are set to the same value.
+        If none is given, both epsilons are set to `2^{-p+m}` where
+        `p` is the current working precision and `m` is a small
+        integer. The default setting typically allows :func:`~mpmath.almosteq`
+        to be used to check for mathematical equality
+        in the presence of small rounding errors.
+
+        **Examples**
+
+        >>> from mpmath import *
+        >>> mp.dps = 15
+        >>> almosteq(3.141592653589793, 3.141592653589790)
+        True
+        >>> almosteq(3.141592653589793, 3.141592653589700)
+        False
+        >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
+        True
+        >>> almosteq(1e-20, 2e-20)
+        True
+        >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
+        False
+
+        """
+        t = ctx.convert(t)
+        if abs_eps is None and rel_eps is None:
+            rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4)
+        if abs_eps is None:
+            abs_eps = rel_eps
+        elif rel_eps is None:
+            rel_eps = abs_eps
+        diff = abs(s-t)
+        if diff <= abs_eps:
+            return True
+        abss = abs(s)
+        abst = abs(t)
+        if abss < abst:
+            err = diff/abst
+        else:
+            err = diff/abss
+        return err <= rel_eps
+
+    def arange(ctx, *args):
+        r"""
+        This is a generalized version of Python's :func:`~mpmath.range` function
+        that accepts fractional endpoints and step sizes and
+        returns a list of ``mpf`` instances. Like :func:`~mpmath.range`,
+        :func:`~mpmath.arange` can be called with 1, 2 or 3 arguments:
+
+        ``arange(b)``
+            `[0, 1, 2, \ldots, x]`
+        ``arange(a, b)``
+            `[a, a+1, a+2, \ldots, x]`
+        ``arange(a, b, h)``
+            `[a, a+h, a+2h, \ldots, x]`
+
+        where `b-1 \le x < b` (in the third case, `b-h \le x < b`).
+
+        Like Python's :func:`~mpmath.range`, the endpoint is not included. To
+        produce ranges where the endpoint is included, :func:`~mpmath.linspace`
+        is more convenient.
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> arange(4) + [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')] + >>> arange(1, 2, 0.25) + [mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')] + >>> arange(1, -1, -0.75) + [mpf('1.0'), mpf('0.25'), mpf('-0.5')] + + """ + if not len(args) <= 3: + raise TypeError('arange expected at most 3 arguments, got %i' + % len(args)) + if not len(args) >= 1: + raise TypeError('arange expected at least 1 argument, got %i' + % len(args)) + # set default + a = 0 + dt = 1 + # interpret arguments + if len(args) == 1: + b = args[0] + elif len(args) >= 2: + a = args[0] + b = args[1] + if len(args) == 3: + dt = args[2] + a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt) + assert a + dt != a, 'dt is too small and would cause an infinite loop' + # adapt code for sign of dt + if a > b: + if dt > 0: + return [] + op = gt + else: + if dt < 0: + return [] + op = lt + # create list + result = [] + i = 0 + t = a + while 1: + t = a + dt*i + i += 1 + if op(t, b): + result.append(t) + else: + break + return result + + def linspace(ctx, *args, **kwargs): + """ + ``linspace(a, b, n)`` returns a list of `n` evenly spaced + samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)`` + is also valid. + + This function is often more convenient than :func:`~mpmath.arange` + for partitioning an interval into subintervals, since + the endpoint is included:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> linspace(1, 4, 4) + [mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')] + + You may also provide the keyword argument ``endpoint=False``:: + + >>> linspace(1, 4, 4, endpoint=False) + [mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')] + + """ + if len(args) == 3: + a = ctx.mpf(args[0]) + b = ctx.mpf(args[1]) + n = int(args[2]) + elif len(args) == 2: + assert hasattr(args[0], '_mpi_') + a = args[0].a + b = args[0].b + n = int(args[1]) + else: + raise TypeError('linspace expected 2 or 3 arguments, got %i' \ + % len(args)) + if n < 1: + raise ValueError('n must be greater than 0') + if not 'endpoint' in kwargs or kwargs['endpoint']: + if n == 1: + return [ctx.mpf(a)] + step = (b - a) / ctx.mpf(n - 1) + y = [i*step + a for i in xrange(n)] + y[-1] = b + else: + step = (b - a) / ctx.mpf(n) + y = [i*step + a for i in xrange(n)] + return y + + def cos_sin(ctx, z, **kwargs): + return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs) + + def cospi_sinpi(ctx, z, **kwargs): + return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs) + + def _default_hyper_maxprec(ctx, p): + return int(1000 * p**0.25 + 4*p) + + _gcd = staticmethod(libmp.gcd) + list_primes = staticmethod(libmp.list_primes) + isprime = staticmethod(libmp.isprime) + bernfrac = staticmethod(libmp.bernfrac) + moebius = staticmethod(libmp.moebius) + _ifac = staticmethod(libmp.ifac) + _eulernum = staticmethod(libmp.eulernum) + _stirling1 = staticmethod(libmp.stirling1) + _stirling2 = staticmethod(libmp.stirling2) + + def sum_accurately(ctx, terms, check_step=1): + prec = ctx.prec + try: + extraprec = 10 + while 1: + ctx.prec = prec + extraprec + 5 + max_mag = ctx.ninf + s = ctx.zero + k = 0 + for term in terms(): + s += term + if (not k % check_step) and term: + term_mag = ctx.mag(term) + max_mag = max(max_mag, term_mag) + sum_mag = ctx.mag(s) + if sum_mag - term_mag > ctx.prec: + break + k += 1 + cancellation = max_mag - sum_mag + if cancellation != cancellation: + break + if cancellation < extraprec or ctx._fixed_precision: + break + extraprec += min(ctx.prec, cancellation) + return s + 
finally: + ctx.prec = prec + + def mul_accurately(ctx, factors, check_step=1): + prec = ctx.prec + try: + extraprec = 10 + while 1: + ctx.prec = prec + extraprec + 5 + max_mag = ctx.ninf + one = ctx.one + s = one + k = 0 + for factor in factors(): + s *= factor + term = factor - one + if (not k % check_step): + term_mag = ctx.mag(term) + max_mag = max(max_mag, term_mag) + sum_mag = ctx.mag(s-one) + #if sum_mag - term_mag > ctx.prec: + # break + if -term_mag > ctx.prec: + break + k += 1 + cancellation = max_mag - sum_mag + if cancellation != cancellation: + break + if cancellation < extraprec or ctx._fixed_precision: + break + extraprec += min(ctx.prec, cancellation) + return s + finally: + ctx.prec = prec + + def power(ctx, x, y): + r"""Converts `x` and `y` to mpmath numbers and evaluates + `x^y = \exp(y \log(x))`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> power(2, 0.5) + 1.41421356237309504880168872421 + + This shows the leading few digits of a large Mersenne prime + (performing the exact calculation ``2**43112609-1`` and + displaying the result in Python would be very slow):: + + >>> power(2, 43112609)-1 + 3.16470269330255923143453723949e+12978188 + """ + return ctx.convert(x) ** ctx.convert(y) + + def _zeta_int(ctx, n): + return ctx.zeta(n) + + def maxcalls(ctx, f, N): + """ + Return a wrapped copy of *f* that raises ``NoConvergence`` when *f* + has been called more than *N* times:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> f = maxcalls(sin, 10) + >>> print(sum(f(n) for n in range(10))) + 1.95520948210738 + >>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: maxcalls: function evaluated 10 times + + """ + counter = [0] + def f_maxcalls_wrapped(*args, **kwargs): + counter[0] += 1 + if counter[0] > N: + raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N) + return f(*args, **kwargs) + return f_maxcalls_wrapped + + def memoize(ctx, f): + """ + Return a wrapped copy of *f* that caches computed values, i.e. + a memoized copy of *f*. Values are only reused if the cached precision + is equal to or higher than the working precision:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = memoize(maxcalls(sin, 1)) + >>> f(2) + 0.909297426825682 + >>> f(2) + 0.909297426825682 + >>> mp.dps = 25 + >>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: maxcalls: function evaluated 1 times + + """ + f_cache = {} + def f_cached(*args, **kwargs): + if kwargs: + key = args, tuple(kwargs.items()) + else: + key = args + prec = ctx.prec + if key in f_cache: + cprec, cvalue = f_cache[key] + if cprec >= prec: + return +cvalue + value = f(*args, **kwargs) + f_cache[key] = (prec, value) + return value + f_cached.__name__ = f.__name__ + f_cached.__doc__ = f.__doc__ + return f_cached diff --git a/MLPY/Lib/site-packages/mpmath/ctx_fp.py b/MLPY/Lib/site-packages/mpmath/ctx_fp.py new file mode 100644 index 0000000000000000000000000000000000000000..aa72ea5b03fde4da66b0d8fbf8ffa4012e3f6178 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/ctx_fp.py @@ -0,0 +1,253 @@ +from .ctx_base import StandardBaseContext + +import math +import cmath +from . import math2 + +from . import function_docs + +from .libmp import mpf_bernoulli, to_float, int_types +from . 
import libmp + +class FPContext(StandardBaseContext): + """ + Context for fast low-precision arithmetic (53-bit precision, giving at most + about 15-digit accuracy), using Python's builtin float and complex. + """ + + def __init__(ctx): + StandardBaseContext.__init__(ctx) + + # Override SpecialFunctions implementation + ctx.loggamma = math2.loggamma + ctx._bernoulli_cache = {} + ctx.pretty = False + + ctx._init_aliases() + + _mpq = lambda cls, x: float(x[0])/x[1] + + NoConvergence = libmp.NoConvergence + + def _get_prec(ctx): return 53 + def _set_prec(ctx, p): return + def _get_dps(ctx): return 15 + def _set_dps(ctx, p): return + + _fixed_precision = True + + prec = property(_get_prec, _set_prec) + dps = property(_get_dps, _set_dps) + + zero = 0.0 + one = 1.0 + eps = math2.EPS + inf = math2.INF + ninf = math2.NINF + nan = math2.NAN + j = 1j + + # Called by SpecialFunctions.__init__() + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + return f(ctx, *args, **kwargs) + else: + f_wrapped = f + f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) + setattr(cls, name, f_wrapped) + + def bernoulli(ctx, n): + cache = ctx._bernoulli_cache + if n in cache: + return cache[n] + cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True) + return cache[n] + + pi = math2.pi + e = math2.e + euler = math2.euler + sqrt2 = 1.4142135623730950488 + sqrt5 = 2.2360679774997896964 + phi = 1.6180339887498948482 + ln2 = 0.69314718055994530942 + ln10 = 2.302585092994045684 + euler = 0.57721566490153286061 + catalan = 0.91596559417721901505 + khinchin = 2.6854520010653064453 + apery = 1.2020569031595942854 + glaisher = 1.2824271291006226369 + + absmin = absmax = abs + + def is_special(ctx, x): + return x - x != 0.0 + + def isnan(ctx, x): + return x != x + + def isinf(ctx, x): + return abs(x) == math2.INF + + def isnormal(ctx, x): + if x: + return x - x == 0.0 + return False + + def isnpint(ctx, x): + if type(x) is complex: + if x.imag: + return False + x = x.real + return x <= 0.0 and round(x) == x + + mpf = float + mpc = complex + + def convert(ctx, x): + try: + return float(x) + except: + return complex(x) + + power = staticmethod(math2.pow) + sqrt = staticmethod(math2.sqrt) + exp = staticmethod(math2.exp) + ln = log = staticmethod(math2.log) + cos = staticmethod(math2.cos) + sin = staticmethod(math2.sin) + tan = staticmethod(math2.tan) + cos_sin = staticmethod(math2.cos_sin) + acos = staticmethod(math2.acos) + asin = staticmethod(math2.asin) + atan = staticmethod(math2.atan) + cosh = staticmethod(math2.cosh) + sinh = staticmethod(math2.sinh) + tanh = staticmethod(math2.tanh) + gamma = staticmethod(math2.gamma) + rgamma = staticmethod(math2.rgamma) + fac = factorial = staticmethod(math2.factorial) + floor = staticmethod(math2.floor) + ceil = staticmethod(math2.ceil) + cospi = staticmethod(math2.cospi) + sinpi = staticmethod(math2.sinpi) + cbrt = staticmethod(math2.cbrt) + _nthroot = staticmethod(math2.nthroot) + _ei = staticmethod(math2.ei) + _e1 = staticmethod(math2.e1) + _zeta = _zeta_int = staticmethod(math2.zeta) + + # XXX: math2 + def arg(ctx, z): + z = complex(z) + return math.atan2(z.imag, z.real) + + def expj(ctx, x): + return ctx.exp(ctx.j*x) + + def expjpi(ctx, x): + return ctx.exp(ctx.j*ctx.pi*x) + + ldexp = math.ldexp + frexp = math.frexp + + def mag(ctx, z): + if z: + return ctx.frexp(abs(z))[1] + return ctx.ninf + + def isint(ctx, z): + if hasattr(z, "imag"): # float/int 
don't have .real/.imag in py2.5 + if z.imag: + return False + z = z.real + try: + return z == int(z) + except: + return False + + def nint_distance(ctx, z): + if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 + n = round(z.real) + else: + n = round(z) + if n == z: + return n, ctx.ninf + return n, ctx.mag(abs(z-n)) + + def _convert_param(ctx, z): + if type(z) is tuple: + p, q = z + return ctx.mpf(p) / q, 'R' + if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 + intz = int(z.real) + else: + intz = int(z) + if z == intz: + return intz, 'Z' + return z, 'R' + + def _is_real_type(ctx, z): + return isinstance(z, float) or isinstance(z, int_types) + + def _is_complex_type(ctx, z): + return isinstance(z, complex) + + def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): + coeffs = list(coeffs) + num = range(p) + den = range(p,p+q) + tol = ctx.eps + s = t = 1.0 + k = 0 + while 1: + for i in num: t *= (coeffs[i]+k) + for i in den: t /= (coeffs[i]+k) + k += 1; t /= k; t *= z; s += t + if abs(t) < tol: + return s + if k > maxterms: + raise ctx.NoConvergence + + def atan2(ctx, x, y): + return math.atan2(x, y) + + def psi(ctx, m, z): + m = int(m) + if m == 0: + return ctx.digamma(z) + return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z) + + digamma = staticmethod(math2.digamma) + + def harmonic(ctx, x): + x = ctx.convert(x) + if x == 0 or x == 1: + return x + return ctx.digamma(x+1) + ctx.euler + + nstr = str + + def to_fixed(ctx, x, prec): + return int(math.ldexp(x, prec)) + + def rand(ctx): + import random + return random.random() + + _erf = staticmethod(math2.erf) + _erfc = staticmethod(math2.erfc) + + def sum_accurately(ctx, terms, check_step=1): + s = ctx.zero + k = 0 + for term in terms(): + s += term + if (not k % check_step) and term: + if abs(term) <= 1e-18*abs(s): + break + k += 1 + return s diff --git a/MLPY/Lib/site-packages/mpmath/ctx_iv.py b/MLPY/Lib/site-packages/mpmath/ctx_iv.py new file mode 100644 index 0000000000000000000000000000000000000000..c038e00a5677e318d222b63c22d225e3045e1c2b --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/ctx_iv.py @@ -0,0 +1,551 @@ +import operator + +from . import libmp + +from .libmp.backend import basestring + +from .libmp import ( + int_types, MPZ_ONE, + prec_to_dps, dps_to_prec, repr_dps, + round_floor, round_ceiling, + fzero, finf, fninf, fnan, + mpf_le, mpf_neg, + from_int, from_float, from_str, from_rational, + mpi_mid, mpi_delta, mpi_str, + mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub, + mpi_mul, mpi_div, mpi_pow_int, mpi_pow, + mpi_from_str, + mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow, + mpci_abs, mpci_pow, mpci_exp, mpci_log, + ComplexResult, + mpf_hash, mpc_hash) +from .matrices.matrices import _matrix + +mpi_zero = (fzero, fzero) + +from .ctx_base import StandardBaseContext + +new = object.__new__ + +def convert_mpf_(x, prec, rounding): + if hasattr(x, "_mpf_"): return x._mpf_ + if isinstance(x, int_types): return from_int(x, prec, rounding) + if isinstance(x, float): return from_float(x, prec, rounding) + if isinstance(x, basestring): return from_str(x, prec, rounding) + raise NotImplementedError + + +class ivmpf(object): + """ + Interval arithmetic class. Precision is controlled by iv.prec. 
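+
+    An illustrative example (output shown at the default precision)::
+
+        >>> from mpmath import iv
+        >>> x = iv.mpf([1, 2])
+        >>> print(x + 1)
+        [2.0, 3.0]
+    """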
+ """ + + def __new__(cls, x=0): + return cls.ctx.convert(x) + + def cast(self, cls, f_convert): + a, b = self._mpi_ + if a == b: + return cls(f_convert(a)) + raise ValueError + + def __int__(self): + return self.cast(int, libmp.to_int) + + def __float__(self): + return self.cast(float, libmp.to_float) + + def __complex__(self): + return self.cast(complex, libmp.to_float) + + def __hash__(self): + a, b = self._mpi_ + if a == b: + return mpf_hash(a) + else: + return hash(self._mpi_) + + @property + def real(self): return self + + @property + def imag(self): return self.ctx.zero + + def conjugate(self): return self + + @property + def a(self): + a, b = self._mpi_ + return self.ctx.make_mpf((a, a)) + + @property + def b(self): + a, b = self._mpi_ + return self.ctx.make_mpf((b, b)) + + @property + def mid(self): + ctx = self.ctx + v = mpi_mid(self._mpi_, ctx.prec) + return ctx.make_mpf((v, v)) + + @property + def delta(self): + ctx = self.ctx + v = mpi_delta(self._mpi_, ctx.prec) + return ctx.make_mpf((v,v)) + + @property + def _mpci_(self): + return self._mpi_, mpi_zero + + def _compare(*args): + raise TypeError("no ordering relation is defined for intervals") + + __gt__ = _compare + __le__ = _compare + __gt__ = _compare + __ge__ = _compare + + def __contains__(self, t): + t = self.ctx.mpf(t) + return (self.a <= t.a) and (t.b <= self.b) + + def __str__(self): + return mpi_str(self._mpi_, self.ctx.prec) + + def __repr__(self): + if self.ctx.pretty: + return str(self) + a, b = self._mpi_ + n = repr_dps(self.ctx.prec) + a = libmp.to_str(a, n) + b = libmp.to_str(b, n) + return "mpi(%r, %r)" % (a, b) + + def _compare(s, t, cmpfun): + if not hasattr(t, "_mpi_"): + try: + t = s.ctx.convert(t) + except: + return NotImplemented + return cmpfun(s._mpi_, t._mpi_) + + def __eq__(s, t): return s._compare(t, libmp.mpi_eq) + def __ne__(s, t): return s._compare(t, libmp.mpi_ne) + def __lt__(s, t): return s._compare(t, libmp.mpi_lt) + def __le__(s, t): return s._compare(t, libmp.mpi_le) + def __gt__(s, t): return s._compare(t, libmp.mpi_gt) + def __ge__(s, t): return s._compare(t, libmp.mpi_ge) + + def __abs__(self): + return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec)) + def __pos__(self): + return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec)) + def __neg__(self): + return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec)) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.ctx.almosteq(s, t, rel_eps, abs_eps) + +class ivmpc(object): + + def __new__(cls, re=0, im=0): + re = cls.ctx.convert(re) + im = cls.ctx.convert(im) + y = new(cls) + y._mpci_ = re._mpi_, im._mpi_ + return y + + def __hash__(self): + (a, b), (c,d) = self._mpci_ + if a == b and c == d: + return mpc_hash((a, c)) + else: + return hash(self._mpci_) + + def __repr__(s): + if s.ctx.pretty: + return str(s) + return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag)) + + def __str__(s): + return "(%s + %s*j)" % (str(s.real), str(s.imag)) + + @property + def a(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((a, a)) + + @property + def b(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((b, b)) + + @property + def c(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((c, c)) + + @property + def d(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((d, d)) + + @property + def real(s): + return s.ctx.make_mpf(s._mpci_[0]) + + @property + def imag(s): + return s.ctx.make_mpf(s._mpci_[1]) + + def conjugate(s): + a, b = s._mpci_ + return s.ctx.make_mpc((a, mpf_neg(b))) + + def 
overlap(s, t): + t = s.ctx.convert(t) + real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b) + imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d) + return real_overlap and imag_overlap + + def __contains__(s, t): + t = s.ctx.convert(t) + return t.real in s.real and t.imag in s.imag + + def _compare(s, t, ne=False): + if not isinstance(t, s.ctx._types): + try: + t = s.ctx.convert(t) + except: + return NotImplemented + if hasattr(t, '_mpi_'): + tval = t._mpi_, mpi_zero + elif hasattr(t, '_mpci_'): + tval = t._mpci_ + if ne: + return s._mpci_ != tval + return s._mpci_ == tval + + def __eq__(s, t): return s._compare(t) + def __ne__(s, t): return s._compare(t, True) + + def __lt__(s, t): raise TypeError("complex intervals cannot be ordered") + __le__ = __gt__ = __ge__ = __lt__ + + def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec)) + def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec)) + def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec)) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.ctx.almosteq(s, t, rel_eps, abs_eps) + +def _binary_op(f_real, f_complex): + def g_complex(ctx, sval, tval): + return ctx.make_mpc(f_complex(sval, tval, ctx.prec)) + def g_real(ctx, sval, tval): + try: + return ctx.make_mpf(f_real(sval, tval, ctx.prec)) + except ComplexResult: + sval = (sval, mpi_zero) + tval = (tval, mpi_zero) + return g_complex(ctx, sval, tval) + def lop_real(s, t): + if isinstance(t, _matrix): return NotImplemented + ctx = s.ctx + if not isinstance(t, ctx._types): t = ctx.convert(t) + if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_) + if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_) + return NotImplemented + def rop_real(s, t): + ctx = s.ctx + if not isinstance(t, ctx._types): t = ctx.convert(t) + if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_) + if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero)) + return NotImplemented + def lop_complex(s, t): + if isinstance(t, _matrix): return NotImplemented + ctx = s.ctx + if not isinstance(t, s.ctx._types): + try: + t = s.ctx.convert(t) + except (ValueError, TypeError): + return NotImplemented + return g_complex(ctx, s._mpci_, t._mpci_) + def rop_complex(s, t): + ctx = s.ctx + if not isinstance(t, s.ctx._types): + t = s.ctx.convert(t) + return g_complex(ctx, t._mpci_, s._mpci_) + return lop_real, rop_real, lop_complex, rop_complex + +ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add) +ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub) +ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul) +ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div) +ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow) + +ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__ +ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__ + +class ivmpf_constant(ivmpf): + def __new__(cls, f): + self = new(cls) + self._f = f + return self + def _get_mpi_(self): + prec = self.ctx._prec[0] + a = self._f(prec, round_floor) + b = self._f(prec, round_ceiling) + return a, b + _mpi_ = property(_get_mpi_) + +class MPIntervalContext(StandardBaseContext): + + def __init__(ctx): + ctx.mpf = type('ivmpf', (ivmpf,), {}) + 
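+        # Note: each context instance creates fresh subclasses of the
+        # interval types here, so the class-level back-references assigned
+        # a few lines below (ctx and _ctxdata) remain private to this
+        # particular context instance.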
ctx.mpc = type('ivmpc', (ivmpc,), {}) + ctx._types = (ctx.mpf, ctx.mpc) + ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {}) + ctx._prec = [53] + ctx._set_prec(53) + ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec] + ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx + ctx.pretty = False + StandardBaseContext.__init__(ctx) + ctx._init_builtins() + + def _mpi(ctx, a, b=None): + if b is None: + return ctx.mpf(a) + return ctx.mpf((a,b)) + + def _init_builtins(ctx): + ctx.one = ctx.mpf(1) + ctx.zero = ctx.mpf(0) + ctx.inf = ctx.mpf('inf') + ctx.ninf = -ctx.inf + ctx.nan = ctx.mpf('nan') + ctx.j = ctx.mpc(0,1) + ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp) + ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt) + ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log) + ctx.cos = ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos) + ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin) + ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan) + ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma) + ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma) + ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma) + ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial) + ctx.fac = ctx.factorial + + ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1)) + ctx.pi = ctx._constant(libmp.mpf_pi) + ctx.e = ctx._constant(libmp.mpf_e) + ctx.ln2 = ctx._constant(libmp.mpf_ln2) + ctx.ln10 = ctx._constant(libmp.mpf_ln10) + ctx.phi = ctx._constant(libmp.mpf_phi) + ctx.euler = ctx._constant(libmp.mpf_euler) + ctx.catalan = ctx._constant(libmp.mpf_catalan) + ctx.glaisher = ctx._constant(libmp.mpf_glaisher) + ctx.khinchin = ctx._constant(libmp.mpf_khinchin) + ctx.twinprime = ctx._constant(libmp.mpf_twinprime) + + def _wrap_mpi_function(ctx, f_real, f_complex=None): + def g(x, **kwargs): + if kwargs: + prec = kwargs.get('prec', ctx._prec[0]) + else: + prec = ctx._prec[0] + x = ctx.convert(x) + if hasattr(x, "_mpi_"): + return ctx.make_mpf(f_real(x._mpi_, prec)) + if hasattr(x, "_mpci_"): + return ctx.make_mpc(f_complex(x._mpci_, prec)) + raise ValueError + return g + + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + prec = ctx.prec + try: + ctx.prec += 10 + retval = f(ctx, *args, **kwargs) + finally: + ctx.prec = prec + return +retval + else: + f_wrapped = f + setattr(cls, name, f_wrapped) + + def _set_prec(ctx, n): + ctx._prec[0] = max(1, int(n)) + ctx._dps = prec_to_dps(n) + + def _set_dps(ctx, n): + ctx._prec[0] = dps_to_prec(n) + ctx._dps = max(1, int(n)) + + prec = property(lambda ctx: ctx._prec[0], _set_prec) + dps = property(lambda ctx: ctx._dps, _set_dps) + + def make_mpf(ctx, v): + a = new(ctx.mpf) + a._mpi_ = v + return a + + def make_mpc(ctx, v): + a = new(ctx.mpc) + a._mpci_ = v + return a + + def _mpq(ctx, pq): + p, q = pq + a = libmp.from_rational(p, q, ctx.prec, round_floor) + b = libmp.from_rational(p, q, ctx.prec, round_ceiling) + return ctx.make_mpf((a, b)) + + def convert(ctx, x): + if isinstance(x, (ctx.mpf, ctx.mpc)): + return x + if isinstance(x, ctx._constant): + return +x + if isinstance(x, complex) or hasattr(x, "_mpc_"): + re = ctx.convert(x.real) + im = ctx.convert(x.imag) + return ctx.mpc(re,im) + if isinstance(x, basestring): + v = mpi_from_str(x, ctx.prec) + return ctx.make_mpf(v) + if hasattr(x, 
"_mpi_"): + a, b = x._mpi_ + else: + try: + a, b = x + except (TypeError, ValueError): + a = b = x + if hasattr(a, "_mpi_"): + a = a._mpi_[0] + else: + a = convert_mpf_(a, ctx.prec, round_floor) + if hasattr(b, "_mpi_"): + b = b._mpi_[1] + else: + b = convert_mpf_(b, ctx.prec, round_ceiling) + if a == fnan or b == fnan: + a = fninf + b = finf + assert mpf_le(a, b), "endpoints must be properly ordered" + return ctx.make_mpf((a, b)) + + def nstr(ctx, x, n=5, **kwargs): + x = ctx.convert(x) + if hasattr(x, "_mpi_"): + return libmp.mpi_to_str(x._mpi_, n, **kwargs) + if hasattr(x, "_mpci_"): + re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs) + im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs) + return "(%s + %s*j)" % (re, im) + + def mag(ctx, x): + x = ctx.convert(x) + if isinstance(x, ctx.mpc): + return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1 + a, b = libmp.mpi_abs(x._mpi_) + sign, man, exp, bc = b + if man: + return exp+bc + if b == fzero: + return ctx.ninf + if b == fnan: + return ctx.nan + return ctx.inf + + def isnan(ctx, x): + return False + + def isinf(ctx, x): + return x == ctx.inf + + def isint(ctx, x): + x = ctx.convert(x) + a, b = x._mpi_ + if a == b: + sign, man, exp, bc = a + if man: + return exp >= 0 + return a == fzero + return None + + def ldexp(ctx, x, n): + a, b = ctx.convert(x)._mpi_ + a = libmp.mpf_shift(a, n) + b = libmp.mpf_shift(b, n) + return ctx.make_mpf((a,b)) + + def absmin(ctx, x): + return abs(ctx.convert(x)).a + + def absmax(ctx, x): + return abs(ctx.convert(x)).b + + def atan2(ctx, y, x): + y = ctx.convert(y)._mpi_ + x = ctx.convert(x)._mpi_ + return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec)) + + def _convert_param(ctx, x): + if isinstance(x, libmp.int_types): + return x, 'Z' + if isinstance(x, tuple): + p, q = x + return (ctx.mpf(p) / ctx.mpf(q), 'R') + x = ctx.convert(x) + if isinstance(x, ctx.mpf): + return x, 'R' + if isinstance(x, ctx.mpc): + return x, 'C' + raise ValueError + + def _is_real_type(ctx, z): + return isinstance(z, ctx.mpf) or isinstance(z, int_types) + + def _is_complex_type(ctx, z): + return isinstance(z, ctx.mpc) + + def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): + coeffs = list(coeffs) + num = range(p) + den = range(p,p+q) + #tol = ctx.eps + s = t = ctx.one + k = 0 + while 1: + for i in num: t *= (coeffs[i]+k) + for i in den: t /= (coeffs[i]+k) + k += 1; t /= k; t *= z; s += t + if t == 0: + return s + #if abs(t) < tol: + # return s + if k > maxterms: + raise ctx.NoConvergence + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods. +try: + import numbers + numbers.Complex.register(ivmpc) + numbers.Real.register(ivmpf) +except ImportError: + pass diff --git a/MLPY/Lib/site-packages/mpmath/ctx_mp.py b/MLPY/Lib/site-packages/mpmath/ctx_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..93594dd44474a415c74e4b0beb83bd7012666c9d --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/ctx_mp.py @@ -0,0 +1,1339 @@ +""" +This module defines the mpf, mpc classes, and standard functions for +operating with them. +""" +__docformat__ = 'plaintext' + +import functools + +import re + +from .ctx_base import StandardBaseContext + +from .libmp.backend import basestring, BACKEND + +from . 
import libmp
+
+from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
+    round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
+    ComplexResult, to_pickable, from_pickable, normalize,
+    from_int, from_float, from_str, to_int, to_float, to_str,
+    from_rational, from_man_exp,
+    fone, fzero, finf, fninf, fnan,
+    mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
+    mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
+    mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
+    mpf_hash, mpf_rand,
+    mpf_sum,
+    bitcount, to_fixed,
+    mpc_to_str,
+    mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
+    mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
+    mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
+    mpc_mpf_div,
+    mpf_pow,
+    mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
+    mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
+    mpf_glaisher, mpf_twinprime, mpf_mertens,
+    int_types)
+
+from . import function_docs
+from . import rational
+
+new = object.__new__
+
+get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?)??'
+                         r'(?P<im>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?j)?\)?$')
+
+if BACKEND == 'sage':
+    from sage.libs.mpmath.ext_main import Context as BaseMPContext
+    # pickle hack
+    import sage.libs.mpmath.ext_main as _mpf_module
+else:
+    from .ctx_mp_python import PythonMPContext as BaseMPContext
+    from . import ctx_mp_python as _mpf_module
+
+from .ctx_mp_python import _mpf, _mpc, mpnumeric
+
+class MPContext(BaseMPContext, StandardBaseContext):
+    """
+    Context for multiprecision arithmetic with a global precision.
+    """
+
+    def __init__(ctx):
+        BaseMPContext.__init__(ctx)
+        ctx.trap_complex = False
+        ctx.pretty = False
+        ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
+        ctx._mpq = rational.mpq
+        ctx.default()
+        StandardBaseContext.__init__(ctx)
+
+        ctx.mpq = rational.mpq
+        ctx.init_builtins()
+
+        ctx.hyp_summators = {}
+
+        ctx._init_aliases()
+
+        # XXX: automate
+        try:
+            ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
+            ctx.primepi.im_func.func_doc = function_docs.primepi
+            ctx.psi.im_func.func_doc = function_docs.psi
+            ctx.atan2.im_func.func_doc = function_docs.atan2
+        except AttributeError:
+            # python 3
+            ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
+            ctx.primepi.__func__.func_doc = function_docs.primepi
+            ctx.psi.__func__.func_doc = function_docs.psi
+            ctx.atan2.__func__.func_doc = function_docs.atan2
+
+        ctx.digamma.func_doc = function_docs.digamma
+        ctx.cospi.func_doc = function_docs.cospi
+        ctx.sinpi.func_doc = function_docs.sinpi
+
+    def init_builtins(ctx):
+
+        mpf = ctx.mpf
+        mpc = ctx.mpc
+
+        # Exact constants
+        ctx.one = ctx.make_mpf(fone)
+        ctx.zero = ctx.make_mpf(fzero)
+        ctx.j = ctx.make_mpc((fzero,fone))
+        ctx.inf = ctx.make_mpf(finf)
+        ctx.ninf = ctx.make_mpf(fninf)
+        ctx.nan = ctx.make_mpf(fnan)
+
+        eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
+                           "epsilon of working precision", "eps")
+        ctx.eps = eps
+
+        # Approximate constants
+        ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
+        ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
+        ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
+        ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
+        ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
+        ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
+        ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
+        ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
+        ctx.glaisher = ctx.constant(mpf_glaisher,
"Glaisher's constant", "glaisher") + ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery") + ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree") + ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime") + ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens") + + # Standard functions + ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt) + ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt) + ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log) + ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) + ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp) + ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj) + ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi) + ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin) + ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos) + ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan) + ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh) + ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh) + ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh) + ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin) + ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos) + ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) + ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh) + ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh) + ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh) + ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi) + ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi) + ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor) + ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil) + ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint) + ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac) + ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci) + + ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma) + ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma) + ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma) + ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial) + + ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0) + ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic) + ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei) + ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1) + ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci) + ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si) + ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk) + ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe) + ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1) + ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None) + ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None) + ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta) + ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta) + + # Faster versions + ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt) + ctx.exp = getattr(ctx, 
"_sage_exp", ctx.exp) + ctx.ln = getattr(ctx, "_sage_ln", ctx.ln) + ctx.cos = getattr(ctx, "_sage_cos", ctx.cos) + ctx.sin = getattr(ctx, "_sage_sin", ctx.sin) + + def to_fixed(ctx, x, prec): + return x.to_fixed(prec) + + def hypot(ctx, x, y): + r""" + Computes the Euclidean norm of the vector `(x, y)`, equal + to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real.""" + x = ctx.convert(x) + y = ctx.convert(y) + return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding)) + + def _gamma_upper_int(ctx, n, z): + n = int(ctx._re(n)) + if n == 0: + return ctx.e1(z) + if not hasattr(z, '_mpf_'): + raise NotImplementedError + prec, rounding = ctx._prec_rounding + real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True) + if imag is None: + return ctx.make_mpf(real) + else: + return ctx.make_mpc((real, imag)) + + def _expint_int(ctx, n, z): + n = int(n) + if n == 1: + return ctx.e1(z) + if not hasattr(z, '_mpf_'): + raise NotImplementedError + prec, rounding = ctx._prec_rounding + real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding) + if imag is None: + return ctx.make_mpf(real) + else: + return ctx.make_mpc((real, imag)) + + def _nthroot(ctx, x, n): + if hasattr(x, '_mpf_'): + try: + return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding)) + except ComplexResult: + if ctx.trap_complex: + raise + x = (x._mpf_, libmp.fzero) + else: + x = x._mpc_ + return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding)) + + def _besselj(ctx, n, z): + prec, rounding = ctx._prec_rounding + if hasattr(z, '_mpf_'): + return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding)) + elif hasattr(z, '_mpc_'): + return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding)) + + def _agm(ctx, a, b=1): + prec, rounding = ctx._prec_rounding + if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'): + try: + v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding) + return ctx.make_mpf(v) + except ComplexResult: + pass + if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero) + else: a = a._mpc_ + if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero) + else: b = b._mpc_ + return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding)) + + def bernoulli(ctx, n): + return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding)) + + def _zeta_int(ctx, n): + return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding)) + + def atan2(ctx, y, x): + x = ctx.convert(x) + y = ctx.convert(y) + return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding)) + + def psi(ctx, m, z): + z = ctx.convert(z) + m = int(m) + if ctx._is_real_type(z): + return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding)) + else: + return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding)) + + def cos_sin(ctx, x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._parse_prec(kwargs) + if hasattr(x, '_mpf_'): + c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding) + return ctx.make_mpf(c), ctx.make_mpf(s) + elif hasattr(x, '_mpc_'): + c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding) + return ctx.make_mpc(c), ctx.make_mpc(s) + else: + return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs) + + def cospi_sinpi(ctx, x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._parse_prec(kwargs) + if hasattr(x, '_mpf_'): + c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding) + return ctx.make_mpf(c), ctx.make_mpf(s) + elif hasattr(x, '_mpc_'): + c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding) + return ctx.make_mpc(c), 
ctx.make_mpc(s)
+        else:
+            return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
+
+    def clone(ctx):
+        """
+        Create a copy of the context, with the same working precision.
+        """
+        a = ctx.__class__()
+        a.prec = ctx.prec
+        return a
+
+    # Several helper methods
+    # TODO: add more of these, make consistent, write docstrings, ...
+
+    def _is_real_type(ctx, x):
+        if hasattr(x, '_mpc_') or type(x) is complex:
+            return False
+        return True
+
+    def _is_complex_type(ctx, x):
+        if hasattr(x, '_mpc_') or type(x) is complex:
+            return True
+        return False
+
+    def isnan(ctx, x):
+        """
+        Return *True* if *x* is a NaN (not-a-number), or for a complex
+        number, whether either the real or imaginary part is NaN;
+        otherwise return *False*::
+
+            >>> from mpmath import *
+            >>> isnan(3.14)
+            False
+            >>> isnan(nan)
+            True
+            >>> isnan(mpc(3.14,2.72))
+            False
+            >>> isnan(mpc(3.14,nan))
+            True
+
+        """
+        if hasattr(x, "_mpf_"):
+            return x._mpf_ == fnan
+        if hasattr(x, "_mpc_"):
+            return fnan in x._mpc_
+        if isinstance(x, int_types) or isinstance(x, rational.mpq):
+            return False
+        x = ctx.convert(x)
+        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
+            return ctx.isnan(x)
+        raise TypeError("isnan() needs a number as input")
+
+    def isfinite(ctx, x):
+        """
+        Return *True* if *x* is a finite number, i.e. neither
+        an infinity nor a NaN.
+
+        >>> from mpmath import *
+        >>> isfinite(inf)
+        False
+        >>> isfinite(-inf)
+        False
+        >>> isfinite(3)
+        True
+        >>> isfinite(nan)
+        False
+        >>> isfinite(3+4j)
+        True
+        >>> isfinite(mpc(3,inf))
+        False
+        >>> isfinite(mpc(nan,3))
+        False
+
+        """
+        if ctx.isinf(x) or ctx.isnan(x):
+            return False
+        return True
+
+    def isnpint(ctx, x):
+        """
+        Determine if *x* is a nonpositive integer.
+        """
+        if not x:
+            return True
+        if hasattr(x, '_mpf_'):
+            sign, man, exp, bc = x._mpf_
+            return sign and exp >= 0
+        if hasattr(x, '_mpc_'):
+            return not x.imag and ctx.isnpint(x.real)
+        if type(x) in int_types:
+            return x <= 0
+        if isinstance(x, ctx.mpq):
+            p, q = x._mpq_
+            if not p:
+                return True
+            return q == 1 and p <= 0
+        return ctx.isnpint(ctx.convert(x))
+
+    def __str__(ctx):
+        lines = ["Mpmath settings:",
+            ("  mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
+            ("  mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
+            ("  mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
+        ]
+        return "\n".join(lines)
+
+    @property
+    def _repr_digits(ctx):
+        return repr_dps(ctx._prec)
+
+    @property
+    def _str_digits(ctx):
+        return ctx._dps
+
+    def extraprec(ctx, n, normalize_output=False):
+        """
+        The block
+
+            with extraprec(n):
+                <code>
+
+        increases the precision n bits, executes <code>, and then
+        restores the precision.
+
+        extraprec(n)(f) returns a decorated version of the function f
+        that increases the working precision by n bits before execution,
+        and restores the parent precision afterwards. With
+        normalize_output=True, it rounds the return value to the parent
+        precision.
+        """
+        return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
+
+    def extradps(ctx, n, normalize_output=False):
+        """
+        This function is analogous to extraprec (see documentation)
+        but changes the decimal precision instead of the number of bits.
+        """
+        return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
+
+    def workprec(ctx, n, normalize_output=False):
+        """
+        The block
+
+            with workprec(n):
+                <code>
+
+        sets the precision to n bits, executes <code>, and then restores
+        the precision.
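+
+        A minimal sketch of the block form (assuming the global ``mp``
+        context at its default 53-bit precision):
+
+            >>> from mpmath import mp, sqrt
+            >>> with mp.workprec(100):
+            ...     s = sqrt(2)          # computed with 100 bits
+            >>> mp.prec                  # parent precision is restored
+            53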
+ + workprec(n)(f) returns a decorated version of the function f + that sets the precision to n bits before execution, + and restores the precision afterwards. With normalize_output=True, + it rounds the return value to the parent precision. + """ + return PrecisionManager(ctx, lambda p: n, None, normalize_output) + + def workdps(ctx, n, normalize_output=False): + """ + This function is analogous to workprec (see documentation) + but changes the decimal precision instead of the number of bits. + """ + return PrecisionManager(ctx, None, lambda d: n, normalize_output) + + def autoprec(ctx, f, maxprec=None, catch=(), verbose=False): + r""" + Return a wrapped copy of *f* that repeatedly evaluates *f* + with increasing precision until the result converges to the + full precision used at the point of the call. + + This heuristically protects against rounding errors, at the cost of + roughly a 2x slowdown compared to manually setting the optimal + precision. This method can, however, easily be fooled if the results + from *f* depend "discontinuously" on the precision, for instance + if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec` + should be used judiciously. + + **Examples** + + Many functions are sensitive to perturbations of the input arguments. + If the arguments are decimal numbers, they may have to be converted + to binary at a much higher precision. If the amount of required + extra precision is unknown, :func:`~mpmath.autoprec` is convenient:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> mp.pretty = True + >>> besselj(5, 125 * 10**28) # Exact input + -8.03284785591801e-17 + >>> besselj(5, '1.25e30') # Bad + 7.12954868316652e-16 + >>> autoprec(besselj)(5, '1.25e30') # Good + -8.03284785591801e-17 + + The following fails to converge because `\sin(\pi) = 0` whereas all + finite-precision approximations of `\pi` give nonzero values:: + + >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: autoprec: prec increased to 2910 without convergence + + As the following example shows, :func:`~mpmath.autoprec` can protect against + cancellation, but is fooled by too severe cancellation:: + + >>> x = 1e-10 + >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) + 1.00000008274037e-10 + 1.00000000005e-10 + 1.00000000005e-10 + >>> x = 1e-50 + >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) + 0.0 + 1.0e-50 + 0.0 + + With *catch*, an exception or list of exceptions to intercept + may be specified. The raised exception is interpreted + as signaling insufficient precision. This permits, for example, + evaluating a function where a too low precision results in a + division by zero:: + + >>> f = lambda x: 1/(exp(x)-1) + >>> f(1e-30) + Traceback (most recent call last): + ... 
+ ZeroDivisionError + >>> autoprec(f, catch=ZeroDivisionError)(1e-30) + 1.0e+30 + + + """ + def f_autoprec_wrapped(*args, **kwargs): + prec = ctx.prec + if maxprec is None: + maxprec2 = ctx._default_hyper_maxprec(prec) + else: + maxprec2 = maxprec + try: + ctx.prec = prec + 10 + try: + v1 = f(*args, **kwargs) + except catch: + v1 = ctx.nan + prec2 = prec + 20 + while 1: + ctx.prec = prec2 + try: + v2 = f(*args, **kwargs) + except catch: + v2 = ctx.nan + if v1 == v2: + break + err = ctx.mag(v2-v1) - ctx.mag(v2) + if err < (-prec): + break + if verbose: + print("autoprec: target=%s, prec=%s, accuracy=%s" \ + % (prec, prec2, -err)) + v1 = v2 + if prec2 >= maxprec2: + raise ctx.NoConvergence(\ + "autoprec: prec increased to %i without convergence"\ + % prec2) + prec2 += int(prec2*2) + prec2 = min(prec2, maxprec2) + finally: + ctx.prec = prec + return +v2 + return f_autoprec_wrapped + + def nstr(ctx, x, n=6, **kwargs): + """ + Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n* + significant digits. The small default value for *n* is chosen to + make this function useful for printing collections of numbers + (lists, matrices, etc). + + If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively + to each element. For unrecognized classes, :func:`~mpmath.nstr` + simply returns ``str(x)``. + + The companion function :func:`~mpmath.nprint` prints the result + instead of returning it. + + The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed* + and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`. + + The number will be printed in fixed-point format if the position + of the leading digit is strictly between min_fixed + (default = min(-dps/3,-5)) and max_fixed (default = dps). + + To force fixed-point format always, set min_fixed = -inf, + max_fixed = +inf. To force floating-point format, set + min_fixed >= max_fixed. 
+ + >>> from mpmath import * + >>> nstr([+pi, ldexp(1,-500)]) + '[3.14159, 3.05494e-151]' + >>> nprint([+pi, ldexp(1,-500)]) + [3.14159, 3.05494e-151] + >>> nstr(mpf("5e-10"), 5) + '5.0e-10' + >>> nstr(mpf("5e-10"), 5, strip_zeros=False) + '5.0000e-10' + >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11) + '0.00000000050000' + >>> nstr(mpf(0), 5, show_zero_exponent=True) + '0.0e+0' + + """ + if isinstance(x, list): + return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) + if isinstance(x, tuple): + return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) + if hasattr(x, '_mpf_'): + return to_str(x._mpf_, n, **kwargs) + if hasattr(x, '_mpc_'): + return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")" + if isinstance(x, basestring): + return repr(x) + if isinstance(x, ctx.matrix): + return x.__nstr__(n, **kwargs) + return str(x) + + def _convert_fallback(ctx, x, strings): + if strings and isinstance(x, basestring): + if 'j' in x.lower(): + x = x.lower().replace(' ', '') + match = get_complex.match(x) + re = match.group('re') + if not re: + re = 0 + im = match.group('im').rstrip('j') + return ctx.mpc(ctx.convert(re), ctx.convert(im)) + if hasattr(x, "_mpi_"): + a, b = x._mpi_ + if a == b: + return ctx.make_mpf(a) + else: + raise ValueError("can only create mpf from zero-width interval") + raise TypeError("cannot create mpf from " + repr(x)) + + def mpmathify(ctx, *args, **kwargs): + return ctx.convert(*args, **kwargs) + + def _parse_prec(ctx, kwargs): + if kwargs: + if kwargs.get('exact'): + return 0, 'f' + prec, rounding = ctx._prec_rounding + if 'rounding' in kwargs: + rounding = kwargs['rounding'] + if 'prec' in kwargs: + prec = kwargs['prec'] + if prec == ctx.inf: + return 0, 'f' + else: + prec = int(prec) + elif 'dps' in kwargs: + dps = kwargs['dps'] + if dps == ctx.inf: + return 0, 'f' + prec = dps_to_prec(dps) + return prec, rounding + return ctx._prec_rounding + + _exact_overflow_msg = "the exact result does not fit in memory" + + _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy +using a working precision of %i bits. Try with a higher maxprec, +maxterms, or set zeroprec.""" + + def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs): + if hasattr(z, "_mpf_"): + key = p, q, flags, 'R' + v = z._mpf_ + elif hasattr(z, "_mpc_"): + key = p, q, flags, 'C' + v = z._mpc_ + if key not in ctx.hyp_summators: + ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1] + summator = ctx.hyp_summators[key] + prec = ctx.prec + maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec)) + extraprec = 50 + epsshift = 25 + # Jumps in magnitude occur when parameters are close to negative + # integers. 
We must ensure that these terms are included in + # the sum and added accurately + magnitude_check = {} + max_total_jump = 0 + for i, c in enumerate(coeffs): + if flags[i] == 'Z': + if i >= p and c <= 0: + ok = False + for ii, cc in enumerate(coeffs[:p]): + # Note: c <= cc or c < cc, depending on convention + if flags[ii] == 'Z' and cc <= 0 and c <= cc: + ok = True + if not ok: + raise ZeroDivisionError("pole in hypergeometric series") + continue + n, d = ctx.nint_distance(c) + n = -int(n) + d = -d + if i >= p and n >= 0 and d > 4: + if n in magnitude_check: + magnitude_check[n] += d + else: + magnitude_check[n] = d + extraprec = max(extraprec, d - prec + 60) + max_total_jump += abs(d) + while 1: + if extraprec > maxprec: + raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec)) + wp = prec + extraprec + if magnitude_check: + mag_dict = dict((n,None) for n in magnitude_check) + else: + mag_dict = {} + zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \ + epsshift, mag_dict, **kwargs) + cancel = -magnitude + jumps_resolved = True + if extraprec < max_total_jump: + for n in mag_dict.values(): + if (n is None) or (n < prec): + jumps_resolved = False + break + accurate = (cancel < extraprec-25-5 or not accurate_small) + if jumps_resolved: + if accurate: + break + # zero? + zeroprec = kwargs.get('zeroprec') + if zeroprec is not None: + if cancel > zeroprec: + if have_complex: + return ctx.mpc(0) + else: + return ctx.zero + + # Some near-singularities were not included, so increase + # precision and repeat until they are + extraprec *= 2 + # Possible workaround for bad roundoff in fixed-point arithmetic + epsshift += 5 + extraprec += 5 + + if type(zv) is tuple: + if have_complex: + return ctx.make_mpc(zv) + else: + return ctx.make_mpf(zv) + else: + return zv + + def ldexp(ctx, x, n): + r""" + Computes `x 2^n` efficiently. No rounding is performed. + The argument `x` must be a real floating-point number (or + possible to convert into one) and `n` must be a Python ``int``. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> ldexp(1, 10) + mpf('1024.0') + >>> ldexp(1, -3) + mpf('0.125') + + """ + x = ctx.convert(x) + return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n)) + + def frexp(ctx, x): + r""" + Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`, + `n` a Python integer, and such that `x = y 2^n`. No rounding is + performed. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> frexp(7.5) + (mpf('0.9375'), 3) + + """ + x = ctx.convert(x) + y, n = libmp.mpf_frexp(x._mpf_) + return ctx.make_mpf(y), n + + def fneg(ctx, x, **kwargs): + """ + Negates the number *x*, giving a floating-point result, optionally + using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. 
+ + **Examples** + + An mpmath number is returned:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fneg(2.5) + mpf('-2.5') + >>> fneg(-5+2j) + mpc(real='5.0', imag='-2.0') + + Precise control over rounding is possible:: + + >>> x = fadd(2, 1e-100, exact=True) + >>> fneg(x) + mpf('-2.0') + >>> fneg(x, rounding='f') + mpf('-2.0000000000000004') + + Negating with and without roundoff:: + + >>> n = 200000000000000000000001 + >>> print(int(-mpf(n))) + -200000000000000016777216 + >>> print(int(fneg(n))) + -200000000000000016777216 + >>> print(int(fneg(n, prec=log(n,2)+1))) + -200000000000000000000001 + >>> print(int(fneg(n, dps=log(n,10)+1))) + -200000000000000000000001 + >>> print(int(fneg(n, prec=inf))) + -200000000000000000000001 + >>> print(int(fneg(n, dps=inf))) + -200000000000000000000001 + >>> print(int(fneg(n, exact=True))) + -200000000000000000000001 + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + if hasattr(x, '_mpf_'): + return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding)) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fadd(ctx, x, y, **kwargs): + """ + Adds the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + The default precision is the working precision of the context. + You can specify a custom precision in bits by passing the *prec* keyword + argument, or by providing an equivalent decimal precision with the *dps* + keyword argument. If the precision is set to ``+inf``, or if the flag + *exact=True* is passed, an exact addition with no rounding is performed. + + When the precision is finite, the optional *rounding* keyword argument + specifies the direction of rounding. Valid options are ``'n'`` for + nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'`` + for down, ``'u'`` for up. + + **Examples** + + Using :func:`~mpmath.fadd` with precision and rounding control:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fadd(2, 1e-20) + mpf('2.0') + >>> fadd(2, 1e-20, rounding='u') + mpf('2.0000000000000004') + >>> nprint(fadd(2, 1e-20, prec=100), 25) + 2.00000000000000000001 + >>> nprint(fadd(2, 1e-20, dps=15), 25) + 2.0 + >>> nprint(fadd(2, 1e-20, dps=25), 25) + 2.00000000000000000001 + >>> nprint(fadd(2, 1e-20, exact=True), 25) + 2.00000000000000000001 + + Exact addition avoids cancellation errors, enforcing familiar laws + of numbers such as `x+y-x = y`, which don't hold in floating-point + arithmetic with finite precision:: + + >>> x, y = mpf(2), mpf('1e-1000') + >>> print(x + y - x) + 0.0 + >>> print(fadd(x, y, prec=inf) - x) + 1.0e-1000 + >>> print(fadd(x, y, exact=True) - x) + 1.0e-1000 + + Exact addition can be inefficient and may be impossible to perform + with large magnitude differences:: + + >>> fadd(1, '1e-100000000000000000000', prec=inf) + Traceback (most recent call last): + ... 
+ OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fsub(ctx, x, y, **kwargs): + """ + Subtracts the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. + + **Examples** + + Using :func:`~mpmath.fsub` with precision and rounding control:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fsub(2, 1e-20) + mpf('2.0') + >>> fsub(2, 1e-20, rounding='d') + mpf('1.9999999999999998') + >>> nprint(fsub(2, 1e-20, prec=100), 25) + 1.99999999999999999999 + >>> nprint(fsub(2, 1e-20, dps=15), 25) + 2.0 + >>> nprint(fsub(2, 1e-20, dps=25), 25) + 1.99999999999999999999 + >>> nprint(fsub(2, 1e-20, exact=True), 25) + 1.99999999999999999999 + + Exact subtraction avoids cancellation errors, enforcing familiar laws + of numbers such as `x-y+y = x`, which don't hold in floating-point + arithmetic with finite precision:: + + >>> x, y = mpf(2), mpf('1e1000') + >>> print(x - y + y) + 0.0 + >>> print(fsub(x, y, prec=inf) + y) + 2.0 + >>> print(fsub(x, y, exact=True) + y) + 2.0 + + Exact addition can be inefficient and may be impossible to perform + with large magnitude differences:: + + >>> fsub(1, '1e-100000000000000000000', prec=inf) + Traceback (most recent call last): + ... + OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fmul(ctx, x, y, **kwargs): + """ + Multiplies the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. 
+ + **Examples** + + The result is an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fmul(2, 5.0) + mpf('10.0') + >>> fmul(0.5j, 0.5) + mpc(real='0.0', imag='0.25') + + Avoiding roundoff:: + + >>> x, y = 10**10+1, 10**15+1 + >>> print(x*y) + 10000000001000010000000001 + >>> print(mpf(x) * mpf(y)) + 1.0000000001e+25 + >>> print(int(mpf(x) * mpf(y))) + 10000000001000011026399232 + >>> print(int(fmul(x, y))) + 10000000001000011026399232 + >>> print(int(fmul(x, y, dps=25))) + 10000000001000010000000001 + >>> print(int(fmul(x, y, exact=True))) + 10000000001000010000000001 + + Exact multiplication with complex numbers can be inefficient and may + be impossible to perform with large magnitude differences between + real and imaginary parts:: + + >>> x = 1+2j + >>> y = mpc(2, '1e-100000000000000000000') + >>> fmul(x, y) + mpc(real='2.0', imag='4.0') + >>> fmul(x, y, rounding='u') + mpc(real='2.0', imag='4.0000000000000009') + >>> fmul(x, y, exact=True) + Traceback (most recent call last): + ... + OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fdiv(ctx, x, y, **kwargs): + """ + Divides the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. + + **Examples** + + The result is an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fdiv(3, 2) + mpf('1.5') + >>> fdiv(2, 3) + mpf('0.66666666666666663') + >>> fdiv(2+4j, 0.5) + mpc(real='4.0', imag='8.0') + + The rounding direction and precision can be controlled:: + + >>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits + mpf('0.6666259765625') + >>> fdiv(2, 3, rounding='d') + mpf('0.66666666666666663') + >>> fdiv(2, 3, prec=60) + mpf('0.66666666666666667') + >>> fdiv(2, 3, rounding='u') + mpf('0.66666666666666674') + + Checking the error of a division by performing it at higher precision:: + + >>> fdiv(2, 3) - fdiv(2, 3, prec=100) + mpf('-3.7007434154172148e-17') + + Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not + allowed since the quotient of two floating-point numbers generally + does not have an exact floating-point representation. (In the + future this might be changed to allow the case where the division + is actually exact.) + + >>> fdiv(2, 3, exact=True) + Traceback (most recent call last): + ... 
+        ValueError: division is not an exact operation
+
+        """
+        prec, rounding = ctx._parse_prec(kwargs)
+        if not prec:
+            raise ValueError("division is not an exact operation")
+        x = ctx.convert(x)
+        y = ctx.convert(y)
+        if hasattr(x, '_mpf_'):
+            if hasattr(y, '_mpf_'):
+                return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
+            if hasattr(y, '_mpc_'):
+                return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
+        if hasattr(x, '_mpc_'):
+            if hasattr(y, '_mpf_'):
+                return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
+            if hasattr(y, '_mpc_'):
+                return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
+        raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
+    def nint_distance(ctx, x):
+        r"""
+        Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
+        an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
+        (measured in bits) lost to cancellation when computing `x-n`.
+
+        >>> from mpmath import *
+        >>> n, d = nint_distance(5)
+        >>> print(n); print(d)
+        5
+        -inf
+        >>> n, d = nint_distance(mpf(5))
+        >>> print(n); print(d)
+        5
+        -inf
+        >>> n, d = nint_distance(mpf(5.00000001))
+        >>> print(n); print(d)
+        5
+        -26
+        >>> n, d = nint_distance(mpf(4.99999999))
+        >>> print(n); print(d)
+        5
+        -26
+        >>> n, d = nint_distance(mpc(5,10))
+        >>> print(n); print(d)
+        5
+        4
+        >>> n, d = nint_distance(mpc(5,0.000001))
+        >>> print(n); print(d)
+        5
+        -19
+
+        """
+        typx = type(x)
+        if typx in int_types:
+            return int(x), ctx.ninf
+        elif typx is rational.mpq:
+            p, q = x._mpq_
+            n, r = divmod(p, q)
+            if 2*r >= q:
+                n += 1
+            elif not r:
+                return n, ctx.ninf
+            # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
+            d = bitcount(abs(p-n*q)) - bitcount(q)
+            return n, d
+        if hasattr(x, "_mpf_"):
+            re = x._mpf_
+            im_dist = ctx.ninf
+        elif hasattr(x, "_mpc_"):
+            re, im = x._mpc_
+            isign, iman, iexp, ibc = im
+            if iman:
+                im_dist = iexp + ibc
+            elif im == fzero:
+                im_dist = ctx.ninf
+            else:
+                raise ValueError("requires a finite number")
+        else:
+            x = ctx.convert(x)
+            if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
+                return ctx.nint_distance(x)
+            else:
+                raise TypeError("requires an mpf/mpc")
+        sign, man, exp, bc = re
+        mag = exp+bc
+        # |x| < 0.5
+        if mag < 0:
+            n = 0
+            re_dist = mag
+        elif man:
+            # exact integer
+            if exp >= 0:
+                n = man << exp
+                re_dist = ctx.ninf
+            # exact half-integer
+            elif exp == -1:
+                n = (man>>1)+1
+                re_dist = 0
+            else:
+                d = (-exp-1)
+                t = man >> d
+                if t & 1:
+                    t += 1
+                    man = (t<<d) - man
+                else:
+                    man = man - (t<<d)
+                n = int(t)>>1 # int(t)>>1
+                re_dist = exp+bitcount(man)
+            if sign:
+                n = -n
+        elif re == fzero:
+            re_dist = ctx.ninf
+            n = 0
+        else:
+            raise ValueError("requires a finite number")
+        return n, max(re_dist, im_dist)
+
+    def fprod(ctx, factors):
+        r"""
+        Calculates a product containing a finite number of factors (for
+        infinite products, see :func:`~mpmath.nprod`). The factors will be
+        converted to mpmath numbers.
+
+        >>> from mpmath import *
+        >>> mp.dps = 15; mp.pretty = False
+        >>> fprod([1, 2, 0.5, 7])
+        mpf('7.0')
+
+        """
+        orig = ctx.prec
+        try:
+            v = ctx.one
+            for p in factors:
+                v *= p
+        finally:
+            ctx.prec = orig
+        return +v
+
+    def rand(ctx):
+        """
+        Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
+        The number of randomly generated bits in the mantissa is equal
+        to the working precision.
+        """
+        return ctx.make_mpf(mpf_rand(ctx._prec))
+
+    def fraction(ctx, p, q):
+        """
+        Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
+        the fraction `p/q`. The value is updated with the precision.
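+        (It is implemented as a context ``constant``, so the value is
+        recomputed from `(p, q)` at whatever precision is active when the
+        object is evaluated.)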
+ + >>> from mpmath import * + >>> mp.dps = 15 + >>> a = fraction(1,100) + >>> b = mpf(1)/100 + >>> print(a); print(b) + 0.01 + 0.01 + >>> mp.dps = 30 + >>> print(a); print(b) # a will be accurate + 0.01 + 0.0100000000000000002081668171172 + >>> mp.dps = 15 + """ + return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd), + '%s/%s' % (p, q)) + + def absmin(ctx, x): + return abs(ctx.convert(x)) + + def absmax(ctx, x): + return abs(ctx.convert(x)) + + def _as_points(ctx, x): + # XXX: remove this? + if hasattr(x, '_mpi_'): + a, b = x._mpi_ + return [ctx.make_mpf(a), ctx.make_mpf(b)] + return x + + ''' + def _zetasum(ctx, s, a, b): + """ + Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small + integers. + """ + a = int(a) + b = int(b) + s = ctx.convert(s) + prec, rounding = ctx._prec_rounding + if hasattr(s, '_mpf_'): + v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec)) + elif hasattr(s, '_mpc_'): + v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec)) + return v + ''' + + def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False): + if not (ctx.isint(a) and hasattr(s, "_mpc_")): + raise NotImplementedError + a = int(a) + prec = ctx._prec + xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec) + xs = [ctx.make_mpc(x) for x in xs] + ys = [ctx.make_mpc(y) for y in ys] + return xs, ys + +class PrecisionManager: + def __init__(self, ctx, precfun, dpsfun, normalize_output=False): + self.ctx = ctx + self.precfun = precfun + self.dpsfun = dpsfun + self.normalize_output = normalize_output + def __call__(self, f): + @functools.wraps(f) + def g(*args, **kwargs): + orig = self.ctx.prec + try: + if self.precfun: + self.ctx.prec = self.precfun(self.ctx.prec) + else: + self.ctx.dps = self.dpsfun(self.ctx.dps) + if self.normalize_output: + v = f(*args, **kwargs) + if type(v) is tuple: + return tuple([+a for a in v]) + return +v + else: + return f(*args, **kwargs) + finally: + self.ctx.prec = orig + return g + def __enter__(self): + self.origp = self.ctx.prec + if self.precfun: + self.ctx.prec = self.precfun(self.ctx.prec) + else: + self.ctx.dps = self.dpsfun(self.ctx.dps) + def __exit__(self, exc_type, exc_val, exc_tb): + self.ctx.prec = self.origp + return False + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/MLPY/Lib/site-packages/mpmath/ctx_mp_python.py b/MLPY/Lib/site-packages/mpmath/ctx_mp_python.py new file mode 100644 index 0000000000000000000000000000000000000000..cfbd72fb8300bf840069c38529b7b41418d26eeb --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/ctx_mp_python.py @@ -0,0 +1,1149 @@ +#from ctx_base import StandardBaseContext + +from .libmp.backend import basestring, exec_ + +from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps, + round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps, + ComplexResult, to_pickable, from_pickable, normalize, + from_int, from_float, from_npfloat, from_Decimal, from_str, to_int, to_float, to_str, + from_rational, from_man_exp, + fone, fzero, finf, fninf, fnan, + mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, + mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod, + mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge, + mpf_hash, mpf_rand, + mpf_sum, + bitcount, to_fixed, + mpc_to_str, + mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate, + mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf, + mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int, + mpc_mpf_div, + mpf_pow, + mpf_pi, 
mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10, + mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin, + mpf_glaisher, mpf_twinprime, mpf_mertens, + int_types) + +from . import rational +from . import function_docs + +new = object.__new__ + +class mpnumeric(object): + """Base class for mpf and mpc.""" + __slots__ = [] + def __new__(cls, val): + raise NotImplementedError + +class _mpf(mpnumeric): + """ + An mpf instance holds a real-valued floating-point number. mpf:s + work analogously to Python floats, but support arbitrary-precision + arithmetic. + """ + __slots__ = ['_mpf_'] + + def __new__(cls, val=fzero, **kwargs): + """A new mpf can be created from a Python float, an int, a + or a decimal string representing a number in floating-point + format.""" + prec, rounding = cls.context._prec_rounding + if kwargs: + prec = kwargs.get('prec', prec) + if 'dps' in kwargs: + prec = dps_to_prec(kwargs['dps']) + rounding = kwargs.get('rounding', rounding) + if type(val) is cls: + sign, man, exp, bc = val._mpf_ + if (not man) and exp: + return val + v = new(cls) + v._mpf_ = normalize(sign, man, exp, bc, prec, rounding) + return v + elif type(val) is tuple: + if len(val) == 2: + v = new(cls) + v._mpf_ = from_man_exp(val[0], val[1], prec, rounding) + return v + if len(val) == 4: + if val not in (finf, fninf, fnan): + sign, man, exp, bc = val + val = normalize(sign, MPZ(man), exp, bc, prec, rounding) + v = new(cls) + v._mpf_ = val + return v + raise ValueError + else: + v = new(cls) + v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding) + return v + + @classmethod + def mpf_convert_arg(cls, x, prec, rounding): + if isinstance(x, int_types): return from_int(x) + if isinstance(x, float): return from_float(x) + if isinstance(x, basestring): return from_str(x, prec, rounding) + if isinstance(x, cls.context.constant): return x.func(prec, rounding) + if hasattr(x, '_mpf_'): return x._mpf_ + if hasattr(x, '_mpmath_'): + t = cls.context.convert(x._mpmath_(prec, rounding)) + if hasattr(t, '_mpf_'): + return t._mpf_ + if hasattr(x, '_mpi_'): + a, b = x._mpi_ + if a == b: + return a + raise ValueError("can only create mpf from zero-width interval") + raise TypeError("cannot create mpf from " + repr(x)) + + @classmethod + def mpf_convert_rhs(cls, x): + if isinstance(x, int_types): return from_int(x) + if isinstance(x, float): return from_float(x) + if isinstance(x, complex_types): return cls.context.mpc(x) + if isinstance(x, rational.mpq): + p, q = x._mpq_ + return from_rational(p, q, cls.context.prec) + if hasattr(x, '_mpf_'): return x._mpf_ + if hasattr(x, '_mpmath_'): + t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding)) + if hasattr(t, '_mpf_'): + return t._mpf_ + return t + return NotImplemented + + @classmethod + def mpf_convert_lhs(cls, x): + x = cls.mpf_convert_rhs(x) + if type(x) is tuple: + return cls.context.make_mpf(x) + return x + + man_exp = property(lambda self: self._mpf_[1:3]) + man = property(lambda self: self._mpf_[1]) + exp = property(lambda self: self._mpf_[2]) + bc = property(lambda self: self._mpf_[3]) + + real = property(lambda self: self) + imag = property(lambda self: self.context.zero) + + conjugate = lambda self: self + + def __getstate__(self): return to_pickable(self._mpf_) + def __setstate__(self, val): self._mpf_ = from_pickable(val) + + def __repr__(s): + if s.context.pretty: + return str(s) + return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits) + + def __str__(s): return to_str(s._mpf_, s.context._str_digits) + def __hash__(s): return 
mpf_hash(s._mpf_) + def __int__(s): return int(to_int(s._mpf_)) + def __long__(s): return long(to_int(s._mpf_)) + def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1]) + def __complex__(s): return complex(float(s)) + def __nonzero__(s): return s._mpf_ != fzero + + __bool__ = __nonzero__ + + def __abs__(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpf_ = mpf_abs(s._mpf_, prec, rounding) + return v + + def __pos__(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpf_ = mpf_pos(s._mpf_, prec, rounding) + return v + + def __neg__(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpf_ = mpf_neg(s._mpf_, prec, rounding) + return v + + def _cmp(s, t, func): + if hasattr(t, '_mpf_'): + t = t._mpf_ + else: + t = s.mpf_convert_rhs(t) + if t is NotImplemented: + return t + return func(s._mpf_, t) + + def __cmp__(s, t): return s._cmp(t, mpf_cmp) + def __lt__(s, t): return s._cmp(t, mpf_lt) + def __gt__(s, t): return s._cmp(t, mpf_gt) + def __le__(s, t): return s._cmp(t, mpf_le) + def __ge__(s, t): return s._cmp(t, mpf_ge) + + def __ne__(s, t): + v = s.__eq__(t) + if v is NotImplemented: + return v + return not v + + def __rsub__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if type(t) in int_types: + v = new(cls) + v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding) + return v + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t - s + + def __rdiv__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding) + return v + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t / s + + def __rpow__(s, t): + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t ** s + + def __rmod__(s, t): + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t % s + + def sqrt(s): + return s.context.sqrt(s) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.context.almosteq(s, t, rel_eps, abs_eps) + + def to_fixed(self, prec): + return to_fixed(self._mpf_, prec) + + def __round__(self, *args): + return round(float(self), *args) + +mpf_binary_op = """ +def %NAME%(self, other): + mpf, new, (prec, rounding) = self._ctxdata + sval = self._mpf_ + if hasattr(other, '_mpf_'): + tval = other._mpf_ + %WITH_MPF% + ttype = type(other) + if ttype in int_types: + %WITH_INT% + elif ttype is float: + tval = from_float(other) + %WITH_MPF% + elif hasattr(other, '_mpc_'): + tval = other._mpc_ + mpc = type(other) + %WITH_MPC% + elif ttype is complex: + tval = from_float(other.real), from_float(other.imag) + mpc = self.context.mpc + %WITH_MPC% + if isinstance(other, mpnumeric): + return NotImplemented + try: + other = mpf.context.convert(other, strings=False) + except TypeError: + return NotImplemented + return self.%NAME%(other) +""" + +return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj" +return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj" + +mpf_pow_same = """ + try: + val = mpf_pow(sval, tval, prec, rounding) %s + except ComplexResult: + if mpf.context.trap_complex: + raise + mpc = mpf.context.mpc + val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s +""" % (return_mpf, return_mpc) + +def binary_op(name, with_mpf='', with_int='', with_mpc=''): + code = mpf_binary_op + code = code.replace("%WITH_INT%", with_int) + code = code.replace("%WITH_MPC%", with_mpc) + code = code.replace("%WITH_MPF%", with_mpf) + code = code.replace("%NAME%", name) + np = {} + 
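+    # The %NAME%/%WITH_*% placeholders in the template above have already
+    # been substituted textually; exec'ing the generated source builds one
+    # specialized dunder method per operation, so each mpf operator
+    # dispatches on the operand type without a generic conversion layer.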
exec_(code, globals(), np) + return np[name] + +_mpf.__eq__ = binary_op('__eq__', + 'return mpf_eq(sval, tval)', + 'return mpf_eq(sval, from_int(other))', + 'return (tval[1] == fzero) and mpf_eq(tval[0], sval)') + +_mpf.__add__ = binary_op('__add__', + 'val = mpf_add(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc) + +_mpf.__sub__ = binary_op('__sub__', + 'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc) + +_mpf.__mul__ = binary_op('__mul__', + 'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf, + 'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc) + +_mpf.__div__ = binary_op('__div__', + 'val = mpf_div(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc) + +_mpf.__mod__ = binary_op('__mod__', + 'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf, + 'raise NotImplementedError("complex modulo")') + +_mpf.__pow__ = binary_op('__pow__', + mpf_pow_same, + 'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf, + 'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc) + +_mpf.__radd__ = _mpf.__add__ +_mpf.__rmul__ = _mpf.__mul__ +_mpf.__truediv__ = _mpf.__div__ +_mpf.__rtruediv__ = _mpf.__rdiv__ + + +class _constant(_mpf): + """Represents a mathematical constant with dynamic precision. + When printed or used in an arithmetic operation, a constant + is converted to a regular mpf at the working precision. A + regular mpf can also be obtained using the operation +x.""" + + def __new__(cls, func, name, docname=''): + a = object.__new__(cls) + a.name = name + a.func = func + a.__doc__ = getattr(function_docs, docname, '') + return a + + def __call__(self, prec=None, dps=None, rounding=None): + prec2, rounding2 = self.context._prec_rounding + if not prec: prec = prec2 + if not rounding: rounding = rounding2 + if dps: prec = dps_to_prec(dps) + return self.context.make_mpf(self.func(prec, rounding)) + + @property + def _mpf_(self): + prec, rounding = self.context._prec_rounding + return self.func(prec, rounding) + + def __repr__(self): + return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15))) + + +class _mpc(mpnumeric): + """ + An mpc represents a complex number using a pair of mpf:s (one + for the real part and another for the imaginary part.) The mpc + class behaves fairly similarly to Python's complex type. 
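+
+ One difference is that ordering comparisons are not defined for
+ complex numbers; for instance::
+
+ >>> from mpmath import mpc
+ >>> mpc(1, 0) > mpc(2, 0)
+ Traceback (most recent call last):
+ ...
+ TypeError: no ordering relation is defined for complex numbers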
+ """
+
+ __slots__ = ['_mpc_']
+
+ def __new__(cls, real=0, imag=0):
+ s = object.__new__(cls)
+ if isinstance(real, complex_types):
+ real, imag = real.real, real.imag
+ elif hasattr(real, '_mpc_'):
+ s._mpc_ = real._mpc_
+ return s
+ real = cls.context.mpf(real)
+ imag = cls.context.mpf(imag)
+ s._mpc_ = (real._mpf_, imag._mpf_)
+ return s
+
+ real = property(lambda self: self.context.make_mpf(self._mpc_[0]))
+ imag = property(lambda self: self.context.make_mpf(self._mpc_[1]))
+
+ def __getstate__(self):
+ return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1])
+
+ def __setstate__(self, val):
+ self._mpc_ = from_pickable(val[0]), from_pickable(val[1])
+
+ def __repr__(s):
+ if s.context.pretty:
+ return str(s)
+ r = repr(s.real)[4:-1]
+ i = repr(s.imag)[4:-1]
+ return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i)
+
+ def __str__(s):
+ return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits)
+
+ def __complex__(s):
+ return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1])
+
+ def __pos__(s):
+ cls, new, (prec, rounding) = s._ctxdata
+ v = new(cls)
+ v._mpc_ = mpc_pos(s._mpc_, prec, rounding)
+ return v
+
+ def __abs__(s):
+ prec, rounding = s.context._prec_rounding
+ v = new(s.context.mpf)
+ v._mpf_ = mpc_abs(s._mpc_, prec, rounding)
+ return v
+
+ def __neg__(s):
+ cls, new, (prec, rounding) = s._ctxdata
+ v = new(cls)
+ v._mpc_ = mpc_neg(s._mpc_, prec, rounding)
+ return v
+
+ def conjugate(s):
+ cls, new, (prec, rounding) = s._ctxdata
+ v = new(cls)
+ v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding)
+ return v
+
+ def __nonzero__(s):
+ return mpc_is_nonzero(s._mpc_)
+
+ __bool__ = __nonzero__
+
+ def __hash__(s):
+ return mpc_hash(s._mpc_)
+
+ @classmethod
+ def mpc_convert_lhs(cls, x):
+ try:
+ y = cls.context.convert(x)
+ return y
+ except TypeError:
+ return NotImplemented
+
+ def __eq__(s, t):
+ if not hasattr(t, '_mpc_'):
+ if isinstance(t, str):
+ return False
+ t = s.mpc_convert_lhs(t)
+ if t is NotImplemented:
+ return t
+ return s.real == t.real and s.imag == t.imag
+
+ def __ne__(s, t):
+ b = s.__eq__(t)
+ if b is NotImplemented:
+ return b
+ return not b
+
+ def _compare(*args):
+ raise TypeError("no ordering relation is defined for complex numbers")
+
+ __gt__ = _compare
+ __le__ = _compare
+ __lt__ = _compare
+ __ge__ = _compare
+
+ def __add__(s, t):
+ cls, new, (prec, rounding) = s._ctxdata
+ if not hasattr(t, '_mpc_'):
+ t = s.mpc_convert_lhs(t)
+ if t is NotImplemented:
+ return t
+ if hasattr(t, '_mpf_'):
+ v = new(cls)
+ v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding)
+ return v
+ v = new(cls)
+ v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding)
+ return v
+
+ def __sub__(s, t):
+ cls, new, (prec, rounding) = s._ctxdata
+ if not hasattr(t, '_mpc_'):
+ t = s.mpc_convert_lhs(t)
+ if t is NotImplemented:
+ return t
+ if hasattr(t, '_mpf_'):
+ v = new(cls)
+ v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding)
+ return v
+ v = new(cls)
+ v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding)
+ return v
+
+ def __mul__(s, t):
+ cls, new, (prec, rounding) = s._ctxdata
+ if not hasattr(t, '_mpc_'):
+ if isinstance(t, int_types):
+ v = new(cls)
+ v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
+ return v
+ t = s.mpc_convert_lhs(t)
+ if t is NotImplemented:
+ return t
+ if hasattr(t, '_mpf_'):
+ v = new(cls)
+ v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding)
+ return v
+ t = s.mpc_convert_lhs(t)
+ v = new(cls)
+ v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding)
+ return v
+
+ def __div__(s, t):
+ cls, new, (prec, rounding) =
s._ctxdata + if not hasattr(t, '_mpc_'): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + if hasattr(t, '_mpf_'): + v = new(cls) + v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding) + return v + v = new(cls) + v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding) + return v + + def __pow__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding) + return v + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + v = new(cls) + if hasattr(t, '_mpf_'): + v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding) + else: + v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding) + return v + + __radd__ = __add__ + + def __rsub__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t - s + + def __rmul__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) + return v + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t * s + + def __rdiv__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t / s + + def __rpow__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t ** s + + __truediv__ = __div__ + __rtruediv__ = __rdiv__ + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.context.almosteq(s, t, rel_eps, abs_eps) + + +complex_types = (complex, _mpc) + + +class PythonMPContext(object): + + def __init__(ctx): + ctx._prec_rounding = [53, round_nearest] + ctx.mpf = type('mpf', (_mpf,), {}) + ctx.mpc = type('mpc', (_mpc,), {}) + ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding] + ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding] + ctx.mpf.context = ctx + ctx.mpc.context = ctx + ctx.constant = type('constant', (_constant,), {}) + ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding] + ctx.constant.context = ctx + + def make_mpf(ctx, v): + a = new(ctx.mpf) + a._mpf_ = v + return a + + def make_mpc(ctx, v): + a = new(ctx.mpc) + a._mpc_ = v + return a + + def default(ctx): + ctx._prec = ctx._prec_rounding[0] = 53 + ctx._dps = 15 + ctx.trap_complex = False + + def _set_prec(ctx, n): + ctx._prec = ctx._prec_rounding[0] = max(1, int(n)) + ctx._dps = prec_to_dps(n) + + def _set_dps(ctx, n): + ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n) + ctx._dps = max(1, int(n)) + + prec = property(lambda ctx: ctx._prec, _set_prec) + dps = property(lambda ctx: ctx._dps, _set_dps) + + def convert(ctx, x, strings=True): + """ + Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``, + ``mpc``, ``int``, ``float``, ``complex``, the conversion + will be performed losslessly. + + If *x* is a string, the result will be rounded to the present + working precision. Strings representing fractions or complex + numbers are permitted. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> mpmathify(3.5) + mpf('3.5') + >>> mpmathify('2.1') + mpf('2.1000000000000001') + >>> mpmathify('3/4') + mpf('0.75') + >>> mpmathify('2+3j') + mpc(real='2.0', imag='3.0') + + """ + if type(x) in ctx.types: return x + if isinstance(x, int_types): return ctx.make_mpf(from_int(x)) + if isinstance(x, float): return ctx.make_mpf(from_float(x)) + if isinstance(x, complex): + return ctx.make_mpc((from_float(x.real), from_float(x.imag))) + if type(x).__module__ == 'numpy': return ctx.npconvert(x) + if isinstance(x, numbers.Rational): # e.g. 
Fraction + try: x = rational.mpq(int(x.numerator), int(x.denominator)) + except: pass + prec, rounding = ctx._prec_rounding + if isinstance(x, rational.mpq): + p, q = x._mpq_ + return ctx.make_mpf(from_rational(p, q, prec)) + if strings and isinstance(x, basestring): + try: + _mpf_ = from_str(x, prec, rounding) + return ctx.make_mpf(_mpf_) + except ValueError: + pass + if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_) + if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_) + if hasattr(x, '_mpmath_'): + return ctx.convert(x._mpmath_(prec, rounding)) + if type(x).__module__ == 'decimal': + try: return ctx.make_mpf(from_Decimal(x, prec, rounding)) + except: pass + return ctx._convert_fallback(x, strings) + + def npconvert(ctx, x): + """ + Converts *x* to an ``mpf`` or ``mpc``. *x* should be a numpy + scalar. + """ + import numpy as np + if isinstance(x, np.integer): return ctx.make_mpf(from_int(int(x))) + if isinstance(x, np.floating): return ctx.make_mpf(from_npfloat(x)) + if isinstance(x, np.complexfloating): + return ctx.make_mpc((from_npfloat(x.real), from_npfloat(x.imag))) + raise TypeError("cannot create mpf from " + repr(x)) + + def isnan(ctx, x): + """ + Return *True* if *x* is a NaN (not-a-number), or for a complex + number, whether either the real or complex part is NaN; + otherwise return *False*:: + + >>> from mpmath import * + >>> isnan(3.14) + False + >>> isnan(nan) + True + >>> isnan(mpc(3.14,2.72)) + False + >>> isnan(mpc(3.14,nan)) + True + + """ + if hasattr(x, "_mpf_"): + return x._mpf_ == fnan + if hasattr(x, "_mpc_"): + return fnan in x._mpc_ + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return False + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isnan(x) + raise TypeError("isnan() needs a number as input") + + def isinf(ctx, x): + """ + Return *True* if the absolute value of *x* is infinite; + otherwise return *False*:: + + >>> from mpmath import * + >>> isinf(inf) + True + >>> isinf(-inf) + True + >>> isinf(3) + False + >>> isinf(3+4j) + False + >>> isinf(mpc(3,inf)) + True + >>> isinf(mpc(inf,3)) + True + + """ + if hasattr(x, "_mpf_"): + return x._mpf_ in (finf, fninf) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + return re in (finf, fninf) or im in (finf, fninf) + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return False + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isinf(x) + raise TypeError("isinf() needs a number as input") + + def isnormal(ctx, x): + """ + Determine whether *x* is "normal" in the sense of floating-point + representation; that is, return *False* if *x* is zero, an + infinity or NaN; otherwise return *True*. 
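+ (For a real ``mpf`` value, this simply tests whether the mantissa
+ is nonzero.)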
By extension, a + complex number *x* is considered "normal" if its magnitude is + normal:: + + >>> from mpmath import * + >>> isnormal(3) + True + >>> isnormal(0) + False + >>> isnormal(inf); isnormal(-inf); isnormal(nan) + False + False + False + >>> isnormal(0+0j) + False + >>> isnormal(0+3j) + True + >>> isnormal(mpc(2,nan)) + False + """ + if hasattr(x, "_mpf_"): + return bool(x._mpf_[1]) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + re_normal = bool(re[1]) + im_normal = bool(im[1]) + if re == fzero: return im_normal + if im == fzero: return re_normal + return re_normal and im_normal + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return bool(x) + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isnormal(x) + raise TypeError("isnormal() needs a number as input") + + def isint(ctx, x, gaussian=False): + """ + Return *True* if *x* is integer-valued; otherwise return + *False*:: + + >>> from mpmath import * + >>> isint(3) + True + >>> isint(mpf(3)) + True + >>> isint(3.2) + False + >>> isint(inf) + False + + Optionally, Gaussian integers can be checked for:: + + >>> isint(3+0j) + True + >>> isint(3+2j) + False + >>> isint(3+2j, gaussian=True) + True + + """ + if isinstance(x, int_types): + return True + if hasattr(x, "_mpf_"): + sign, man, exp, bc = xval = x._mpf_ + return bool((man and exp >= 0) or xval == fzero) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + rsign, rman, rexp, rbc = re + isign, iman, iexp, ibc = im + re_isint = (rman and rexp >= 0) or re == fzero + if gaussian: + im_isint = (iman and iexp >= 0) or im == fzero + return re_isint and im_isint + return re_isint and im == fzero + if isinstance(x, rational.mpq): + p, q = x._mpq_ + return p % q == 0 + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isint(x, gaussian) + raise TypeError("isint() needs a number as input") + + def fsum(ctx, terms, absolute=False, squared=False): + """ + Calculates a sum containing a finite number of terms (for infinite + series, see :func:`~mpmath.nsum`). The terms will be converted to + mpmath numbers. For len(terms) > 2, this function is generally + faster and produces more accurate results than the builtin + Python function :func:`sum`. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fsum([1, 2, 0.5, 7]) + mpf('10.5') + + With squared=True each term is squared, and with absolute=True + the absolute value of each term is used. 
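+
+ For instance, continuing the example above::
+
+ >>> fsum([1, -2, 3], absolute=True)
+ mpf('6.0')
+ >>> fsum([1, -2, 3], squared=True)
+ mpf('14.0')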
+ """ + prec, rnd = ctx._prec_rounding + real = [] + imag = [] + for term in terms: + reval = imval = 0 + if hasattr(term, "_mpf_"): + reval = term._mpf_ + elif hasattr(term, "_mpc_"): + reval, imval = term._mpc_ + else: + term = ctx.convert(term) + if hasattr(term, "_mpf_"): + reval = term._mpf_ + elif hasattr(term, "_mpc_"): + reval, imval = term._mpc_ + else: + raise NotImplementedError + if imval: + if squared: + if absolute: + real.append(mpf_mul(reval,reval)) + real.append(mpf_mul(imval,imval)) + else: + reval, imval = mpc_pow_int((reval,imval),2,prec+10) + real.append(reval) + imag.append(imval) + elif absolute: + real.append(mpc_abs((reval,imval), prec)) + else: + real.append(reval) + imag.append(imval) + else: + if squared: + reval = mpf_mul(reval, reval) + elif absolute: + reval = mpf_abs(reval) + real.append(reval) + s = mpf_sum(real, prec, rnd, absolute) + if imag: + s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) + else: + s = ctx.make_mpf(s) + return s + + def fdot(ctx, A, B=None, conjugate=False): + r""" + Computes the dot product of the iterables `A` and `B`, + + .. math :: + + \sum_{k=0} A_k B_k. + + Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs. + In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent. + The elements are automatically converted to mpmath numbers. + + With ``conjugate=True``, the elements in the second vector + will be conjugated: + + .. math :: + + \sum_{k=0} A_k \overline{B_k} + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> A = [2, 1.5, 3] + >>> B = [1, -1, 2] + >>> fdot(A, B) + mpf('6.5') + >>> list(zip(A, B)) + [(2, 1), (1.5, -1), (3, 2)] + >>> fdot(_) + mpf('6.5') + >>> A = [2, 1.5, 3j] + >>> B = [1+j, 3, -1-j] + >>> fdot(A, B) + mpc(real='9.5', imag='-1.0') + >>> fdot(A, B, conjugate=True) + mpc(real='3.5', imag='-5.0') + + """ + if B is not None: + A = zip(A, B) + prec, rnd = ctx._prec_rounding + real = [] + imag = [] + hasattr_ = hasattr + types = (ctx.mpf, ctx.mpc) + for a, b in A: + if type(a) not in types: a = ctx.convert(a) + if type(b) not in types: b = ctx.convert(b) + a_real = hasattr_(a, "_mpf_") + b_real = hasattr_(b, "_mpf_") + if a_real and b_real: + real.append(mpf_mul(a._mpf_, b._mpf_)) + continue + a_complex = hasattr_(a, "_mpc_") + b_complex = hasattr_(b, "_mpc_") + if a_real and b_complex: + aval = a._mpf_ + bre, bim = b._mpc_ + if conjugate: + bim = mpf_neg(bim) + real.append(mpf_mul(aval, bre)) + imag.append(mpf_mul(aval, bim)) + elif b_real and a_complex: + are, aim = a._mpc_ + bval = b._mpf_ + real.append(mpf_mul(are, bval)) + imag.append(mpf_mul(aim, bval)) + elif a_complex and b_complex: + #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20) + are, aim = a._mpc_ + bre, bim = b._mpc_ + if conjugate: + bim = mpf_neg(bim) + real.append(mpf_mul(are, bre)) + real.append(mpf_neg(mpf_mul(aim, bim))) + imag.append(mpf_mul(are, bim)) + imag.append(mpf_mul(aim, bre)) + else: + raise NotImplementedError + s = mpf_sum(real, prec, rnd) + if imag: + s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) + else: + s = ctx.make_mpf(s) + return s + + def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc=""): + """ + Given a low-level mpf_ function, and optionally similar functions + for mpc_ and mpi_, defines the function as a context method. + + It is assumed that the return type is the same as that of + the input; the exception is that propagation from mpf to mpc is possible + by raising ComplexResult. 
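+ For example, if the mpf_ function hits a point where the true value
+ is complex (the square root of a negative real number, say), it
+ raises ComplexResult; unless ``trap_complex`` is set, the wrapper
+ falls back to the mpc_ version and returns an mpc.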
+ + """ + def f(x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._prec_rounding + if kwargs: + prec = kwargs.get('prec', prec) + if 'dps' in kwargs: + prec = dps_to_prec(kwargs['dps']) + rounding = kwargs.get('rounding', rounding) + if hasattr(x, '_mpf_'): + try: + return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding)) + except ComplexResult: + # Handle propagation to complex + if ctx.trap_complex: + raise + return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding)) + elif hasattr(x, '_mpc_'): + return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding)) + raise NotImplementedError("%s of a %s" % (name, type(x))) + name = mpf_f.__name__[4:] + f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc) + return f + + # Called by SpecialFunctions.__init__() + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + prec = ctx.prec + try: + ctx.prec += 10 + retval = f(ctx, *args, **kwargs) + finally: + ctx.prec = prec + return +retval + else: + f_wrapped = f + f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) + setattr(cls, name, f_wrapped) + + def _convert_param(ctx, x): + if hasattr(x, "_mpc_"): + v, im = x._mpc_ + if im != fzero: + return x, 'C' + elif hasattr(x, "_mpf_"): + v = x._mpf_ + else: + if type(x) in int_types: + return int(x), 'Z' + p = None + if isinstance(x, tuple): + p, q = x + elif hasattr(x, '_mpq_'): + p, q = x._mpq_ + elif isinstance(x, basestring) and '/' in x: + p, q = x.split('/') + p = int(p) + q = int(q) + if p is not None: + if not p % q: + return p // q, 'Z' + return ctx.mpq(p,q), 'Q' + x = ctx.convert(x) + if hasattr(x, "_mpc_"): + v, im = x._mpc_ + if im != fzero: + return x, 'C' + elif hasattr(x, "_mpf_"): + v = x._mpf_ + else: + return x, 'U' + sign, man, exp, bc = v + if man: + if exp >= -4: + if sign: + man = -man + if exp >= 0: + return int(man) << exp, 'Z' + if exp >= -4: + p, q = int(man), (1<<(-exp)) + return ctx.mpq(p,q), 'Q' + x = ctx.make_mpf(v) + return x, 'R' + elif not exp: + return 0, 'Z' + else: + return x, 'U' + + def _mpf_mag(ctx, x): + sign, man, exp, bc = x + if man: + return exp+bc + if x == fzero: + return ctx.ninf + if x == finf or x == fninf: + return ctx.inf + return ctx.nan + + def mag(ctx, x): + """ + Quick logarithmic magnitude estimate of a number. Returns an + integer or infinity `m` such that `|x| <= 2^m`. It is not + guaranteed that `m` is an optimal bound, but it will never + be too large by more than 2 (and probably not more than 1). 
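+ (For a finite nonzero ``mpf`` with raw representation
+ ``(sign, man, exp, bc)``, the value returned is simply ``exp + bc``.)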
+ + **Examples** + + >>> from mpmath import * + >>> mp.pretty = True + >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2))) + (4, 4, 4, 4) + >>> mag(10j), mag(10+10j) + (4, 5) + >>> mag(0.01), int(ceil(log(0.01,2))) + (-6, -6) + >>> mag(0), mag(inf), mag(-inf), mag(nan) + (-inf, +inf, +inf, nan) + + """ + if hasattr(x, "_mpf_"): + return ctx._mpf_mag(x._mpf_) + elif hasattr(x, "_mpc_"): + r, i = x._mpc_ + if r == fzero: + return ctx._mpf_mag(i) + if i == fzero: + return ctx._mpf_mag(r) + return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i)) + elif isinstance(x, int_types): + if x: + return bitcount(abs(x)) + return ctx.ninf + elif isinstance(x, rational.mpq): + p, q = x._mpq_ + if p: + return 1 + bitcount(abs(p)) - bitcount(q) + return ctx.ninf + else: + x = ctx.convert(x) + if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): + return ctx.mag(x) + else: + raise TypeError("requires an mpf/mpc") + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods. +try: + import numbers + numbers.Complex.register(_mpc) + numbers.Real.register(_mpf) +except ImportError: + pass diff --git a/MLPY/Lib/site-packages/mpmath/function_docs.py b/MLPY/Lib/site-packages/mpmath/function_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..73c071dc30a25c0ea1366e06a407a20206bd18a2 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/function_docs.py @@ -0,0 +1,10201 @@ +""" +Extended docstrings for functions.py +""" + + +pi = r""" +`\pi`, roughly equal to 3.141592654, represents the area of the unit +circle, the half-period of trigonometric functions, and many other +things in mathematics. + +Mpmath can evaluate `\pi` to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +pi + 3.1415926535897932384626433832795028841971693993751 + +This shows digits 99991-100000 of `\pi` (the last digit is actually +a 4 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 100000 + >>> str(pi)[-10:] + '5549362465' + +**Possible issues** + +:data:`pi` always rounds to the nearest floating-point +number when used. This means that exact mathematical identities +involving `\pi` will generally not be preserved in floating-point +arithmetic. In particular, multiples of :data:`pi` (except for +the trivial case ``0*pi``) are *not* the exact roots of +:func:`~mpmath.sin`, but differ roughly by the current epsilon:: + + >>> mp.dps = 15 + >>> sin(pi) + 1.22464679914735e-16 + +One solution is to use the :func:`~mpmath.sinpi` function instead:: + + >>> sinpi(1) + 0.0 + +See the documentation of trigonometric functions for additional +details. + +**References** + +* [BorweinBorwein]_ + +""" + +degree = r""" +Represents one degree of angle, `1^{\circ} = \pi/180`, or +about 0.01745329. This constant may be evaluated to arbitrary +precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +degree + 0.017453292519943295769236907684886127134428718885417 + +The :data:`degree` object is convenient for conversion +to radians:: + + >>> sin(30 * degree) + 0.5 + >>> asin(0.5) / degree + 30.0 +""" + +e = r""" +The transcendental number `e` = 2.718281828... is the base of the +natural logarithm (:func:`~mpmath.ln`) and of the exponential function +(:func:`~mpmath.exp`). 
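+It may equivalently be defined by the limit `e = \lim_{n\to\infty} (1+1/n)^n`.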
+
+Mpmath can evaluate `e` to arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +e
+ 2.7182818284590452353602874713526624977572470937
+
+This shows digits 99991-100000 of `e` (the last digit is actually
+a 5 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 100000
+ >>> str(e)[-10:]
+ '2100427166'
+
+**Possible issues**
+
+:data:`e` always rounds to the nearest floating-point number
+when used, and mathematical identities involving `e` may not
+hold in floating-point arithmetic. For example, ``ln(e)``
+might not evaluate exactly to 1.
+
+In particular, don't use ``e**x`` to compute the exponential
+function. Use ``exp(x)`` instead; this is both faster and more
+accurate.
+"""
+
+phi = r"""
+Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
+approximately equal to 1.6180339887. To high precision,
+its value is::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +phi
+ 1.6180339887498948482045868343656381177203091798058
+
+Formulas for the golden ratio include the following::
+
+ >>> (1+sqrt(5))/2
+ 1.6180339887498948482045868343656381177203091798058
+ >>> findroot(lambda x: x**2-x-1, 1)
+ 1.6180339887498948482045868343656381177203091798058
+ >>> limit(lambda n: fib(n+1)/fib(n), inf)
+ 1.6180339887498948482045868343656381177203091798058
+"""
+
+euler = r"""
+Euler's constant or the Euler-Mascheroni constant `\gamma`
+= 0.57721566... is a number of central importance to
+number theory and special functions. It is defined as the limit
+
+.. math ::
+
+ \gamma = \lim_{n\to\infty} H_n - \log n
+
+where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
+number (see :func:`~mpmath.harmonic`).
+
+Evaluation of `\gamma` is supported at arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +euler
+ 0.57721566490153286060651209008240243104215933593992
+
+We can also compute `\gamma` directly from the definition,
+although this is less efficient::
+
+ >>> limit(lambda n: harmonic(n)-log(n), inf)
+ 0.57721566490153286060651209008240243104215933593992
+
+This shows digits 9991-10000 of `\gamma` (the last digit is actually
+a 5 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 10000
+ >>> str(euler)[-10:]
+ '4679858166'
+
+Integrals, series, and representations for `\gamma` in terms of
+special functions include the following (there are many others)::
+
+ >>> mp.dps = 25
+ >>> -quad(lambda x: exp(-x)*log(x), [0,inf])
+ 0.5772156649015328606065121
+ >>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
+ 0.5772156649015328606065121
+ >>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
+ 0.5772156649015328606065121
+ >>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
+ 0.5772156649015328606065121
+ >>> -diff(gamma, 1)
+ 0.5772156649015328606065121
+ >>> limit(lambda x: 1/x-gamma(x), 0)
+ 0.5772156649015328606065121
+ >>> limit(lambda x: zeta(x)-1/(x-1), 1)
+ 0.5772156649015328606065121
+ >>> (log(2*pi*nprod(lambda n:
+ ... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
+ 0.5772156649015328606065121
+
+For generalizations of the identities `\gamma = -\Gamma'(1)`
+and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
+:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
+
+**References**
+
+* [BorweinBailey]_
+
+"""
+
+catalan = r"""
+Catalan's constant `K` = 0.91596559... is given by the infinite
+series
+
+.. math ::
+
+ K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
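+
+Equivalently, `K = \beta(2)`, where `\beta(s)` denotes the Dirichlet
+beta function.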
+ +Mpmath can evaluate it to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +catalan + 0.91596559417721901505460351493238411077414937428167 + +One can also compute `K` directly from the definition, although +this is significantly less efficient:: + + >>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf]) + 0.91596559417721901505460351493238411077414937428167 + +This shows digits 9991-10000 of `K` (the last digit is actually +a 3 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 10000 + >>> str(catalan)[-10:] + '9537871504' + +Catalan's constant has numerous integral representations:: + + >>> mp.dps = 50 + >>> quad(lambda x: -log(x)/(1+x**2), [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x: atan(x)/x, [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x: ellipk(x**2)/2, [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + +As well as series representations:: + + >>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n: + ... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8 + 0.91596559417721901505460351493238411077414937428167 + >>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf]) + 0.91596559417721901505460351493238411077414937428167 +""" + +khinchin = r""" +Khinchin's constant `K` = 2.68542... is a number that +appears in the theory of continued fractions. Mpmath can evaluate +it to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +khinchin + 2.6854520010653064453097148354817956938203822939945 + +An integral representation is:: + + >>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1]) + >>> 2*exp(1/log(2)*I) + 2.6854520010653064453097148354817956938203822939945 + +The computation of ``khinchin`` is based on an efficient +implementation of the following series:: + + >>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k) + ... for k in range(1,2*int(n))) + >>> exp(nsum(f, [1,inf])/log(2)) + 2.6854520010653064453097148354817956938203822939945 +""" + +glaisher = r""" +Glaisher's constant `A`, also known as the Glaisher-Kinkelin +constant, is a number approximately equal to 1.282427129 that +sometimes appears in formulas related to gamma and zeta functions. +It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`). + +The constant is defined as `A = \exp(1/12-\zeta'(-1))` where +`\zeta'(s)` denotes the derivative of the Riemann zeta function +(see :func:`~mpmath.zeta`). + +Mpmath can evaluate Glaisher's constant to arbitrary precision: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +glaisher + 1.282427129100622636875342568869791727767688927325 + +We can verify that the value computed by :data:`glaisher` is +correct using mpmath's facilities for numerical +differentiation and arbitrary evaluation of the zeta function: + + >>> exp(mpf(1)/12 - diff(zeta, -1)) + 1.282427129100622636875342568869791727767688927325 + +Here is an example of an integral that can be evaluated in +terms of Glaisher's constant: + + >>> mp.dps = 15 + >>> quad(lambda x: log(gamma(x)), [1, 1.5]) + -0.0428537406502909 + >>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2 + -0.042853740650291 + +Mpmath computes Glaisher's constant by applying Euler-Maclaurin +summation to a slowly convergent series. The implementation is +reasonably efficient up to about 10,000 digits. 
See the source +code for additional details. + +References: +http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html +""" + +apery = r""" +Represents Apery's constant, which is the irrational number +approximately equal to 1.2020569 given by + +.. math :: + + \zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}. + +The calculation is based on an efficient hypergeometric +series. To 50 decimal places, the value is given by:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +apery + 1.2020569031595942853997381615114499907649862923405 + +Other ways to evaluate Apery's constant using mpmath +include:: + + >>> zeta(3) + 1.2020569031595942853997381615114499907649862923405 + >>> -psi(2,1)/2 + 1.2020569031595942853997381615114499907649862923405 + >>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7 + 1.2020569031595942853997381615114499907649862923405 + >>> f = lambda k: 2/k**3/(exp(2*pi*k)-1) + >>> 7*pi**3/180 - nsum(f, [1,inf]) + 1.2020569031595942853997381615114499907649862923405 + +This shows digits 9991-10000 of Apery's constant:: + + >>> mp.dps = 10000 + >>> str(apery)[-10:] + '3189504235' +""" + +mertens = r""" +Represents the Mertens or Meissel-Mertens constant, which is the +prime number analog of Euler's constant: + +.. math :: + + B_1 = \lim_{N\to\infty} + \left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right) + +Here `p_k` denotes the `k`-th prime number. Other names for this +constant include the Hadamard-de la Vallee-Poussin constant or +the prime reciprocal constant. + +The following gives the Mertens constant to 50 digits:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +mertens + 0.2614972128476427837554268386086958590515666482612 + +References: +http://mathworld.wolfram.com/MertensConstant.html +""" + +twinprime = r""" +Represents the twin prime constant, which is the factor `C_2` +featuring in the Hardy-Littlewood conjecture for the growth of the +twin prime counting function, + +.. math :: + + \pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}. + +It is given by the product over primes + +.. math :: + + C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016 + +Computing `C_2` to 50 digits:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +twinprime + 0.66016181584686957392781211001455577843262336028473 + +References: +http://mathworld.wolfram.com/TwinPrimesConstant.html +""" + +ln = r""" +Computes the natural logarithm of `x`, `\ln x`. +See :func:`~mpmath.log` for additional documentation.""" + +sqrt = r""" +``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`. +For positive real numbers, the principal root is simply the +positive square root. For arbitrary complex numbers, the principal +square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`. +The function thus has a branch cut along the negative half real axis. + +For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to +performing ``x**0.5``. 
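+(For positive real `x`, the second, negative square root can be obtained
+as ``-sqrt(x)`` or as ``root(x, 2, 1)``; see :func:`~mpmath.root`.)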
+ +**Examples** + +Basic examples and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sqrt(10) + 3.16227766016838 + >>> sqrt(100) + 10.0 + >>> sqrt(-4) + (0.0 + 2.0j) + >>> sqrt(1+1j) + (1.09868411346781 + 0.455089860562227j) + >>> sqrt(inf) + +inf + +Square root evaluation is fast at huge precision:: + + >>> mp.dps = 50000 + >>> a = sqrt(3) + >>> str(a)[-10:] + '9329332815' + +:func:`mpmath.iv.sqrt` supports interval arguments:: + + >>> iv.dps = 15; iv.pretty = True + >>> iv.sqrt([16,100]) + [4.0, 10.0] + >>> iv.sqrt(2) + [1.4142135623730949234, 1.4142135623730951455] + >>> iv.sqrt(2) ** 2 + [1.9999999999999995559, 2.0000000000000004441] + +""" + +cbrt = r""" +``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This +function is faster and more accurate than raising to a floating-point +fraction:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> 125**(mpf(1)/3) + mpf('4.9999999999999991') + >>> cbrt(125) + mpf('5.0') + +Every nonzero complex number has three cube roots. This function +returns the cube root defined by `\exp(\log(x)/3)` where the +principal branch of the natural logarithm is used. Note that this +does not give a real cube root for negative real numbers:: + + >>> mp.pretty = True + >>> cbrt(-1) + (0.5 + 0.866025403784439j) +""" + +exp = r""" +Computes the exponential function, + +.. math :: + + \exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}. + +For complex numbers, the exponential function also satisfies + +.. math :: + + \exp(x+yi) = e^x (\cos y + i \sin y). + +**Basic examples** + +Some values of the exponential function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> exp(0) + 1.0 + >>> exp(1) + 2.718281828459045235360287 + >>> exp(-1) + 0.3678794411714423215955238 + >>> exp(inf) + +inf + >>> exp(-inf) + 0.0 + +Arguments can be arbitrarily large:: + + >>> exp(10000) + 8.806818225662921587261496e+4342 + >>> exp(-10000) + 1.135483865314736098540939e-4343 + +Evaluation is supported for interval arguments via +:func:`mpmath.iv.exp`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.exp([-inf,0]) + [0.0, 1.0] + >>> iv.exp([0,1]) + [1.0, 2.71828182845904523536028749558] + +The exponential function can be evaluated efficiently to arbitrary +precision:: + + >>> mp.dps = 10000 + >>> exp(pi) #doctest: +ELLIPSIS + 23.140692632779269005729...8984304016040616 + +**Functional properties** + +Numerical verification of Euler's identity for the complex +exponential function:: + + >>> mp.dps = 15 + >>> exp(j*pi)+1 + (0.0 + 1.22464679914735e-16j) + >>> chop(exp(j*pi)+1) + 0.0 + +This recovers the coefficients (reciprocal factorials) in the +Maclaurin series expansion of exp:: + + >>> nprint(taylor(exp, 0, 5)) + [1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333] + +The exponential function is its own derivative and antiderivative:: + + >>> exp(pi) + 23.1406926327793 + >>> diff(exp, pi) + 23.1406926327793 + >>> quad(exp, [-inf, pi]) + 23.1406926327793 + +The exponential function can be evaluated using various methods, +including direct summation of the series, limits, and solving +the defining differential equation:: + + >>> nsum(lambda k: pi**k/fac(k), [0,inf]) + 23.1406926327793 + >>> limit(lambda k: (1+pi/k)**k, inf) + 23.1406926327793 + >>> odefun(lambda t, x: x, 0, 1)(pi) + 23.1406926327793 +""" + +cosh = r""" +Computes the hyperbolic cosine of `x`, +`\cosh(x) = (e^x + e^{-x})/2`. 
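+It is the even part of the exponential function, so that
+`e^x = \cosh(x) + \sinh(x)`.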
Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cosh(0) + 1.0 + >>> cosh(1) + 1.543080634815243778477906 + >>> cosh(-inf), cosh(+inf) + (+inf, +inf) + +The hyperbolic cosine is an even, convex function with +a global minimum at `x = 0`, having a Maclaurin series +that starts:: + + >>> nprint(chop(taylor(cosh, 0, 5))) + [1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0] + +Generalized to complex numbers, the hyperbolic cosine is +equivalent to a cosine with the argument rotated +in the imaginary direction, or `\cosh x = \cos ix`:: + + >>> cosh(2+3j) + (-3.724545504915322565473971 + 0.5118225699873846088344638j) + >>> cos(3-2j) + (-3.724545504915322565473971 + 0.5118225699873846088344638j) +""" + +sinh = r""" +Computes the hyperbolic sine of `x`, +`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sinh(0) + 0.0 + >>> sinh(1) + 1.175201193643801456882382 + >>> sinh(-inf), sinh(+inf) + (-inf, +inf) + +The hyperbolic sine is an odd function, with a Maclaurin +series that starts:: + + >>> nprint(chop(taylor(sinh, 0, 5))) + [0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333] + +Generalized to complex numbers, the hyperbolic sine is +essentially a sine with a rotation `i` applied to +the argument; more precisely, `\sinh x = -i \sin ix`:: + + >>> sinh(2+3j) + (-3.590564589985779952012565 + 0.5309210862485198052670401j) + >>> j*sin(3-2j) + (-3.590564589985779952012565 + 0.5309210862485198052670401j) +""" + +tanh = r""" +Computes the hyperbolic tangent of `x`, +`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> tanh(0) + 0.0 + >>> tanh(1) + 0.7615941559557648881194583 + >>> tanh(-inf), tanh(inf) + (-1.0, 1.0) + +The hyperbolic tangent is an odd, sigmoidal function, similar +to the inverse tangent and error function. Its Maclaurin +series is:: + + >>> nprint(chop(taylor(tanh, 0, 5))) + [0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333] + +Generalized to complex numbers, the hyperbolic tangent is +essentially a tangent with a rotation `i` applied to +the argument; more precisely, `\tanh x = -i \tan ix`:: + + >>> tanh(2+3j) + (0.9653858790221331242784803 - 0.009884375038322493720314034j) + >>> j*tan(3-2j) + (0.9653858790221331242784803 - 0.009884375038322493720314034j) +""" + +cos = r""" +Computes the cosine of `x`, `\cos(x)`. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cos(pi/3) + 0.5 + >>> cos(100000001) + -0.9802850113244713353133243 + >>> cos(2+3j) + (-4.189625690968807230132555 - 9.109227893755336597979197j) + >>> cos(inf) + nan + >>> nprint(chop(taylor(cos, 0, 6))) + [1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889] + +Intervals are supported via :func:`mpmath.iv.cos`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.cos([0,1]) + [0.540302305868139717400936602301, 1.0] + >>> iv.cos([0,2]) + [-0.41614683654714238699756823214, 1.0] +""" + +sin = r""" +Computes the sine of `x`, `\sin(x)`. 
+ + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sin(pi/3) + 0.8660254037844386467637232 + >>> sin(100000001) + 0.1975887055794968911438743 + >>> sin(2+3j) + (9.1544991469114295734673 - 4.168906959966564350754813j) + >>> sin(inf) + nan + >>> nprint(chop(taylor(sin, 0, 6))) + [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0] + +Intervals are supported via :func:`mpmath.iv.sin`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.sin([0,1]) + [0.0, 0.841470984807896506652502331201] + >>> iv.sin([0,2]) + [0.0, 1.0] +""" + +tan = r""" +Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`. +The tangent function is singular at `x = (n+1/2)\pi`, but +``tan(x)`` always returns a finite result since `(n+1/2)\pi` +cannot be represented exactly using floating-point arithmetic. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> tan(pi/3) + 1.732050807568877293527446 + >>> tan(100000001) + -0.2015625081449864533091058 + >>> tan(2+3j) + (-0.003764025641504248292751221 + 1.003238627353609801446359j) + >>> tan(inf) + nan + >>> nprint(chop(taylor(tan, 0, 6))) + [0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0] + +Intervals are supported via :func:`mpmath.iv.tan`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.tan([0,1]) + [0.0, 1.55740772465490223050697482944] + >>> iv.tan([0,2]) # Interval includes a singularity + [-inf, +inf] +""" + +sec = r""" +Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`. +The secant function is singular at `x = (n+1/2)\pi`, but +``sec(x)`` always returns a finite result since `(n+1/2)\pi` +cannot be represented exactly using floating-point arithmetic. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sec(pi/3) + 2.0 + >>> sec(10000001) + -1.184723164360392819100265 + >>> sec(2+3j) + (-0.04167496441114427004834991 + 0.0906111371962375965296612j) + >>> sec(inf) + nan + >>> nprint(chop(taylor(sec, 0, 6))) + [1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222] + +Intervals are supported via :func:`mpmath.iv.sec`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.sec([0,1]) + [1.0, 1.85081571768092561791175326276] + >>> iv.sec([0,2]) # Interval includes a singularity + [-inf, +inf] +""" + +csc = r""" +Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`. +This cosecant function is singular at `x = n \pi`, but with the +exception of the point `x = 0`, ``csc(x)`` returns a finite result +since `n \pi` cannot be represented exactly using floating-point +arithmetic. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> csc(pi/3) + 1.154700538379251529018298 + >>> csc(10000001) + -1.864910497503629858938891 + >>> csc(2+3j) + (0.09047320975320743980579048 + 0.04120098628857412646300981j) + >>> csc(inf) + nan + +Intervals are supported via :func:`mpmath.iv.csc`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.csc([0,1]) # Interval includes a singularity + [1.18839510577812121626159943988, +inf] + >>> iv.csc([0,2]) + [1.0, +inf] +""" + +cot = r""" +Computes the cotangent of `x`, +`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`. +This cotangent function is singular at `x = n \pi`, but with the +exception of the point `x = 0`, ``cot(x)`` returns a finite result +since `n \pi` cannot be represented exactly using floating-point +arithmetic. 
+ + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cot(pi/3) + 0.5773502691896257645091488 + >>> cot(10000001) + 1.574131876209625656003562 + >>> cot(2+3j) + (-0.003739710376336956660117409 - 0.9967577965693583104609688j) + >>> cot(inf) + nan + +Intervals are supported via :func:`mpmath.iv.cot`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.cot([0,1]) # Interval includes a singularity + [0.642092615934330703006419974862, +inf] + >>> iv.cot([1,2]) + [-inf, +inf] +""" + +acos = r""" +Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`. +Since `-1 \le \cos(x) \le 1` for real `x`, the inverse +cosine is real-valued only for `-1 \le x \le 1`. On this interval, +:func:`~mpmath.acos` is defined to be a monotonically decreasing +function assuming values between `+\pi` and `0`. + +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> acos(-1) + 3.141592653589793238462643 + >>> acos(0) + 1.570796326794896619231322 + >>> acos(1) + 0.0 + >>> nprint(chop(taylor(acos, 0, 6))) + [1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0] + +:func:`~mpmath.acos` is defined so as to be a proper inverse function of +`\cos(\theta)` for `0 \le \theta < \pi`. +We have `\cos(\cos^{-1}(x)) = x` for all `x`, but +`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`:: + + >>> for x in [1, 10, -1, 2+3j, 10+3j]: + ... print("%s %s" % (cos(acos(x)), acos(cos(x)))) + ... + 1.0 1.0 + (10.0 + 0.0j) 2.566370614359172953850574 + -1.0 1.0 + (2.0 + 3.0j) (2.0 + 3.0j) + (10.0 + 3.0j) (2.566370614359172953850574 - 3.0j) + +The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos` +places the branch cuts along the line segments `(-\infty, -1)` and +`(+1, +\infty)`. In general, + +.. math :: + + \cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right) + +where the principal-branch log and square root are implied. +""" + +asin = r""" +Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`. +Since `-1 \le \sin(x) \le 1` for real `x`, the inverse +sine is real-valued only for `-1 \le x \le 1`. +On this interval, it is defined to be a monotonically increasing +function assuming values between `-\pi/2` and `\pi/2`. + +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> asin(-1) + -1.570796326794896619231322 + >>> asin(0) + 0.0 + >>> asin(1) + 1.570796326794896619231322 + >>> nprint(chop(taylor(asin, 0, 6))) + [0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0] + +:func:`~mpmath.asin` is defined so as to be a proper inverse function of +`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`. +We have `\sin(\sin^{-1}(x)) = x` for all `x`, but +`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: + + >>> for x in [1, 10, -1, 1+3j, -2+3j]: + ... print("%s %s" % (chop(sin(asin(x))), asin(sin(x)))) + ... + 1.0 1.0 + 10.0 -0.5752220392306202846120698 + -1.0 -1.0 + (1.0 + 3.0j) (1.0 + 3.0j) + (-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j) + +The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin` +places the branch cuts along the line segments `(-\infty, -1)` and +`(+1, +\infty)`. In general, + +.. math :: + + \sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right) + +where the principal-branch log and square root are implied. +""" + +atan = r""" +Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`. +This is a real-valued function for all real `x`, with range +`(-\pi/2, \pi/2)`. 
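+Its derivative is `1/(1+x^2)`, and it may equivalently be defined by the
+integral `\tan^{-1}(x) = \int_0^x \frac{dt}{1+t^2}`.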
+ +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> atan(-inf) + -1.570796326794896619231322 + >>> atan(-1) + -0.7853981633974483096156609 + >>> atan(0) + 0.0 + >>> atan(1) + 0.7853981633974483096156609 + >>> atan(inf) + 1.570796326794896619231322 + >>> nprint(chop(taylor(atan, 0, 6))) + [0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0] + +The inverse tangent is often used to compute angles. However, +the atan2 function is often better for this as it preserves sign +(see :func:`~mpmath.atan2`). + +:func:`~mpmath.atan` is defined so as to be a proper inverse function of +`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`. +We have `\tan(\tan^{-1}(x)) = x` for all `x`, but +`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: + + >>> mp.dps = 25 + >>> for x in [1, 10, -1, 1+3j, -2+3j]: + ... print("%s %s" % (tan(atan(x)), atan(tan(x)))) + ... + 1.0 1.0 + 10.0 0.5752220392306202846120698 + -1.0 -1.0 + (1.0 + 3.0j) (1.000000000000000000000001 + 3.0j) + (-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j) + +The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan` +places the branch cuts along the line segments `(-i \infty, -i)` and +`(+i, +i \infty)`. In general, + +.. math :: + + \tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right) + +where the principal-branch log is implied. +""" + +acot = r"""Computes the inverse cotangent of `x`, +`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`.""" + +asec = r"""Computes the inverse secant of `x`, +`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`.""" + +acsc = r"""Computes the inverse cosecant of `x`, +`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`.""" + +coth = r"""Computes the hyperbolic cotangent of `x`, +`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`. +""" + +sech = r"""Computes the hyperbolic secant of `x`, +`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`. +""" + +csch = r"""Computes the hyperbolic cosecant of `x`, +`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`. +""" + +acosh = r"""Computes the inverse hyperbolic cosine of `x`, +`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`. +""" + +asinh = r"""Computes the inverse hyperbolic sine of `x`, +`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`. +""" + +atanh = r"""Computes the inverse hyperbolic tangent of `x`, +`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`. +""" + +acoth = r"""Computes the inverse hyperbolic cotangent of `x`, +`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`.""" + +asech = r"""Computes the inverse hyperbolic secant of `x`, +`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`.""" + +acsch = r"""Computes the inverse hyperbolic cosecant of `x`, +`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`.""" + + + +sinpi = r""" +Computes `\sin(\pi x)`, more accurately than the expression +``sin(pi*x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sinpi(10**10), sin(pi*(10**10)) + (0.0, -2.23936276195592e-6) + >>> sinpi(10**10+0.5), sin(pi*(10**10+0.5)) + (1.0, 0.999999999998721) +""" + +cospi = r""" +Computes `\cos(\pi x)`, more accurately than the expression +``cos(pi*x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> cospi(10**10), cos(pi*(10**10)) + (1.0, 0.999999999997493) + >>> cospi(10**10+0.5), cos(pi*(10**10+0.5)) + (0.0, 1.59960492420134e-6) +""" + +sinc = r""" +``sinc(x)`` computes the unnormalized sinc function, defined as + +.. math :: + + \mathrm{sinc}(x) = \begin{cases} + \sin(x)/x, & \mbox{if } x \ne 0 \\ + 1, & \mbox{if } x = 0. 
+ \end{cases} + +See :func:`~mpmath.sincpi` for the normalized sinc function. + +Simple values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sinc(0) + 1.0 + >>> sinc(1) + 0.841470984807897 + >>> sinc(inf) + 0.0 + +The integral of the sinc function is the sine integral Si:: + + >>> quad(sinc, [0, 1]) + 0.946083070367183 + >>> si(1) + 0.946083070367183 +""" + +sincpi = r""" +``sincpi(x)`` computes the normalized sinc function, defined as + +.. math :: + + \mathrm{sinc}_{\pi}(x) = \begin{cases} + \sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\ + 1, & \mbox{if } x = 0. + \end{cases} + +Equivalently, we have +`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`. + +The normalization entails that the function integrates +to unity over the entire real line:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quadosc(sincpi, [-inf, inf], period=2.0) + 1.0 + +Like, :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately +at its roots:: + + >>> sincpi(10) + 0.0 +""" + +expj = r""" +Convenience function for computing `e^{ix}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expj(0) + (1.0 + 0.0j) + >>> expj(-1) + (0.5403023058681397174009366 - 0.8414709848078965066525023j) + >>> expj(j) + (0.3678794411714423215955238 + 0.0j) + >>> expj(1+j) + (0.1987661103464129406288032 + 0.3095598756531121984439128j) +""" + +expjpi = r""" +Convenience function for computing `e^{i \pi x}`. +Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`, +:func:`~mpmath.sinpi`):: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expjpi(0) + (1.0 + 0.0j) + >>> expjpi(1) + (-1.0 + 0.0j) + >>> expjpi(0.5) + (0.0 + 1.0j) + >>> expjpi(-1) + (-1.0 + 0.0j) + >>> expjpi(j) + (0.04321391826377224977441774 + 0.0j) + >>> expjpi(1+j) + (-0.04321391826377224977441774 + 0.0j) +""" + +floor = r""" +Computes the floor of `x`, `\lfloor x \rfloor`, defined as +the largest integer less than or equal to `x`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> floor(3.5) + mpf('3.0') + +.. note :: + + :func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a + floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is + too large to be represented exactly at the present working precision, + the result will be rounded, not necessarily in the direction + implied by the mathematical definition of the function. + +To avoid rounding, use *prec=0*:: + + >>> mp.dps = 15 + >>> print(int(floor(10**30+1))) + 1000000000000000019884624838656 + >>> print(int(floor(10**30+1, prec=0))) + 1000000000000000000000000000001 + +The floor function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> floor(3.25+4.75j) + mpc(real='3.0', imag='4.0') +""" + +ceil = r""" +Computes the ceiling of `x`, `\lceil x \rceil`, defined as +the smallest integer greater than or equal to `x`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> ceil(3.5) + mpf('4.0') + +The ceiling function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> ceil(3.25+4.75j) + mpc(real='4.0', imag='5.0') + +See notes about rounding for :func:`~mpmath.floor`. +""" + +nint = r""" +Evaluates the nearest integer function, `\mathrm{nint}(x)`. 
+This gives the nearest integer to `x`; on a tie, it +gives the nearest even integer:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> nint(3.2) + mpf('3.0') + >>> nint(3.8) + mpf('4.0') + >>> nint(3.5) + mpf('4.0') + >>> nint(4.5) + mpf('4.0') + +The nearest integer function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> nint(3.25+4.75j) + mpc(real='3.0', imag='5.0') + +See notes about rounding for :func:`~mpmath.floor`. +""" + +frac = r""" +Gives the fractional part of `x`, defined as +`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`). +In effect, this computes `x` modulo 1, or `x+n` where +`n \in \mathbb{Z}` is such that `x+n \in [0,1)`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> frac(1.25) + mpf('0.25') + >>> frac(3) + mpf('0.0') + >>> frac(-1.25) + mpf('0.75') + +For a complex number, the fractional part function applies to +the real and imaginary parts separately:: + + >>> frac(2.25+3.75j) + mpc(real='0.25', imag='0.75') + +Plotted, the fractional part function gives a sawtooth +wave. The Fourier series coefficients have a simple +form:: + + >>> mp.dps = 15 + >>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4)) + ([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775]) + >>> nprint([-1/(pi*k) for k in range(1,5)]) + [-0.31831, -0.159155, -0.106103, -0.0795775] + +.. note:: + + The fractional part is sometimes defined as a symmetric + function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`. + This convention is used, for instance, by Mathematica's + ``FractionalPart``. + +""" + +sign = r""" +Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|` +(with the special case `\mathrm{sign}(0) = 0`):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> sign(10) + mpf('1.0') + >>> sign(-10) + mpf('-1.0') + >>> sign(0) + mpf('0.0') + +Note that the sign function is also defined for complex numbers, +for which it gives the projection onto the unit circle:: + + >>> mp.dps = 15; mp.pretty = True + >>> sign(1+j) + (0.707106781186547 + 0.707106781186547j) + +""" + +arg = r""" +Computes the complex argument (phase) of `x`, defined as the +signed angle between the positive real axis and `x` in the +complex plane:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> arg(3) + 0.0 + >>> arg(3+3j) + 0.785398163397448 + >>> arg(3j) + 1.5707963267949 + >>> arg(-3) + 3.14159265358979 + >>> arg(-3j) + -1.5707963267949 + +The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and +with the sign convention that a nonnegative imaginary part +results in a nonnegative argument. + +The value returned by :func:`~mpmath.arg` is an ``mpf`` instance. +""" + +fabs = r""" +Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`, +:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``) +into mpmath numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fabs(3) + mpf('3.0') + >>> fabs(-3) + mpf('3.0') + >>> fabs(3+4j) + mpf('5.0') +""" + +re = r""" +Returns the real part of `x`, `\Re(x)`. :func:`~mpmath.re` +converts a non-mpmath number to an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> re(3) + mpf('3.0') + >>> re(-1+4j) + mpf('-1.0') +""" + +im = r""" +Returns the imaginary part of `x`, `\Im(x)`. 
:func:`~mpmath.im`
+converts a non-mpmath number to an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> im(3)
+ mpf('0.0')
+ >>> im(-1+4j)
+ mpf('4.0')
+"""
+
+conj = r"""
+Returns the complex conjugate of `x`, `\overline{x}`. Unlike
+``x.conjugate()``, :func:`~mpmath.conj` converts `x` to an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> conj(3)
+ mpf('3.0')
+ >>> conj(-1+4j)
+ mpc(real='-1.0', imag='-4.0')
+"""
+
+polar = r"""
+Returns the polar representation of the complex number `z`
+as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> polar(-2)
+ (2.0, 3.14159265358979)
+ >>> polar(3-4j)
+ (5.0, -0.927295218001612)
+"""
+
+rect = r"""
+Returns the complex number represented by polar
+coordinates `(r, \phi)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> chop(rect(2, pi))
+ -2.0
+ >>> rect(sqrt(2), -pi/4)
+ (1.0 - 1.0j)
+"""
+
+expm1 = r"""
+Computes `e^x - 1`, accurately for small `x`.
+
+Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
+potentially catastrophic cancellation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> exp(1e-10)-1; print(expm1(1e-10))
+ 1.00000008274037e-10
+ 1.00000000005e-10
+ >>> exp(1e-20)-1; print(expm1(1e-20))
+ 0.0
+ 1.0e-20
+ >>> 1/(exp(1e-20)-1)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError
+ >>> 1/expm1(1e-20)
+ 1.0e+20
+
+Evaluation works for extremely tiny values::
+
+ >>> expm1(0)
+ 0.0
+ >>> expm1('1e-10000000')
+ 1.0e-10000000
+
+"""
+
+log1p = r"""
+Computes `\log(1+x)`, accurately for small `x`.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> log(1+1e-10); print(mp.log1p(1e-10))
+ 1.00000008269037e-10
+ 9.9999999995e-11
+ >>> mp.log1p(1e-100j)
+ (5.0e-201 + 1.0e-100j)
+ >>> mp.log1p(0)
+ 0.0
+
+"""
+
+
+powm1 = r"""
+Computes `x^y - 1`, accurately when `x^y` is very close to 1.
+
+This avoids potentially catastrophic cancellation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> power(0.99999995, 1e-10) - 1
+ 0.0
+ >>> powm1(0.99999995, 1e-10)
+ -5.00000012791934e-18
+
+Powers exactly equal to 1, and only those powers, yield 0 exactly::
+
+ >>> powm1(-j, 4)
+ (0.0 + 0.0j)
+ >>> powm1(3, 0)
+ 0.0
+ >>> powm1(fadd(-1, 1e-100, exact=True), 4)
+ -4.0e-100
+
+Evaluation works for extremely tiny `y`::
+
+ >>> powm1(2, '1e-100000')
+ 6.93147180559945e-100001
+ >>> powm1(j, '1e-1000')
+ (-1.23370055013617e-2000 + 1.5707963267949e-1000j)
+
+"""
+
+root = r"""
+``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
+`r` that (up to possible approximation error) satisfies `r^n = z`.
+(``nthroot`` is available as an alias for ``root``.)
+
+Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
+equidistant points on a circle with radius `|z|^{1/n}`, centered around the
+origin. A specific root may be selected using the optional index
+`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
+closest to the positive real half-axis.
+
+The `k = 0` root is the so-called principal `n`-th root, often denoted by
+`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
+a positive real number, the principal root is just the unique positive
+`n`-th root of `z`. 
Under some circumstances, non-principal real roots exist: +for positive real `z`, `n` even, there is a negative root given by `k = n/2`; +for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`. + +To obtain all roots with a simple expression, use +``[root(z,n,k) for k in range(n)]``. + +An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of +unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots` +provides a slightly more convenient way to obtain the roots of unity, +including the option to compute only the primitive roots of unity. + +Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be +reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or +the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed. + +:func:`~mpmath.root` is implemented to use Newton's method for small +`n`. At high precision, this makes `x^{1/n}` not much more +expensive than the regular exponentiation, `x^n`. For very large +`n`, :func:`~mpmath.nthroot` falls back to use the exponential function. + +**Examples** + +:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a +floating-point fraction:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> 16807 ** (mpf(1)/5) + mpf('7.0000000000000009') + >>> root(16807, 5) + mpf('7.0') + >>> nthroot(16807, 5) # Alias + mpf('7.0') + +A high-precision root:: + + >>> mp.dps = 50; mp.pretty = True + >>> nthroot(10, 5) + 1.584893192461113485202101373391507013269442133825 + >>> nthroot(10, 5) ** 5 + 10.0 + +Computing principal and non-principal square and cube roots:: + + >>> mp.dps = 15 + >>> root(10, 2) + 3.16227766016838 + >>> root(10, 2, 1) + -3.16227766016838 + >>> root(-10, 3) + (1.07721734501594 + 1.86579517236206j) + >>> root(-10, 3, 1) + -2.15443469003188 + >>> root(-10, 3, 2) + (1.07721734501594 - 1.86579517236206j) + +All the 7th roots of a complex number:: + + >>> for r in [root(3+4j, 7, k) for k in range(7)]: + ... print("%s %s" % (r, r**7)) + ... + (1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j) + (0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j) + (-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j) + (-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j) + (-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j) + (-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j) + (0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j) + +Cube roots of unity:: + + >>> for k in range(3): print(root(1, 3, k)) + ... + 1.0 + (-0.5 + 0.866025403784439j) + (-0.5 - 0.866025403784439j) + +Some exact high order roots:: + + >>> root(75**210, 105) + 5625.0 + >>> root(1, 128, 96) + (0.0 - 1.0j) + >>> root(4**128, 128, 96) + (0.0 - 4.0j) + +""" + +unitroots = r""" +``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`, +all the distinct `n`-th roots of unity, as a list. If the option +*primitive=True* is passed, only the primitive roots are returned. + +Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct +roots for each `n` (`\zeta_k` and `\zeta_j` are the same when +`k = j \pmod n`), which form a regular polygon with vertices on the unit +circle. They are ordered counterclockwise with increasing `k`, starting +with `\zeta_0 = 1`. 
+
+**Examples**
+
+The roots of unity up to `n = 4`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint(unitroots(1))
+ [1.0]
+ >>> nprint(unitroots(2))
+ [1.0, -1.0]
+ >>> nprint(unitroots(3))
+ [1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
+ >>> nprint(unitroots(4))
+ [1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
+
+Roots of unity form a geometric series that sums to 0::
+
+ >>> mp.dps = 50
+ >>> chop(fsum(unitroots(25)))
+ 0.0
+
+Primitive roots up to `n = 4`::
+
+ >>> mp.dps = 15
+ >>> nprint(unitroots(1, primitive=True))
+ [1.0]
+ >>> nprint(unitroots(2, primitive=True))
+ [-1.0]
+ >>> nprint(unitroots(3, primitive=True))
+ [(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
+ >>> nprint(unitroots(4, primitive=True))
+ [(0.0 + 1.0j), (0.0 - 1.0j)]
+
+There are only four primitive 12th roots::
+
+ >>> nprint(unitroots(12, primitive=True))
+ [(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
+
+The `n`-th roots of unity form a group, the cyclic group of order `n`.
+Any primitive root `r` is a generator for this group, meaning that
+`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
+some permuted order)::
+
+ >>> for r in unitroots(6): print(r)
+ ...
+ 1.0
+ (0.5 + 0.866025403784439j)
+ (-0.5 + 0.866025403784439j)
+ -1.0
+ (-0.5 - 0.866025403784439j)
+ (0.5 - 0.866025403784439j)
+ >>> r = unitroots(6, primitive=True)[1]
+ >>> for k in range(6): print(chop(r**k))
+ ...
+ 1.0
+ (0.5 - 0.866025403784439j)
+ (-0.5 - 0.866025403784439j)
+ -1.0
+ (-0.5 + 0.866025403784438j)
+ (0.5 + 0.866025403784438j)
+
+The number of primitive roots equals the Euler totient function `\phi(n)`::
+
+ >>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
+ [1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
+
+"""
+
+
+log = r"""
+Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
+unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
+and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
+is defined in terms of the natural logarithm as
+`\log_b(x) = \ln(x)/\ln(b)`.
+
+By convention, we take `\log(0) = -\infty`.
+
+The natural logarithm is real if `x > 0` and complex if `x < 0` or if
+`x` is complex. The principal branch of the complex logarithm is
+used, meaning that `-\pi < \Im(\ln(x)) \le \pi`.
+
+**Examples**
+
+Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> log(1)
+ 0.0
+ >>> log(2)
+ 0.693147180559945
+ >>> log(1000,10)
+ 3.0
+ >>> log(4, 16)
+ 0.5
+ >>> log(j)
+ (0.0 + 1.5707963267949j)
+ >>> log(-1)
+ (0.0 + 3.14159265358979j)
+ >>> log(0)
+ -inf
+ >>> log(inf)
+ +inf
+
+The natural logarithm is the antiderivative of `1/x`::
+
+ >>> quad(lambda x: 1/x, [1, 5])
+ 1.6094379124341
+ >>> log(5)
+ 1.6094379124341
+ >>> diff(log, 10)
+ 0.1
+
+The Taylor series expansion of the natural logarithm around
+`x = 1` has coefficients `(-1)^{n+1}/n`::
+
+ >>> nprint(taylor(log, 1, 7))
+ [0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
+
+:func:`~mpmath.log` supports arbitrary precision evaluation::
+
+ >>> mp.dps = 50
+ >>> log(pi)
+ 1.1447298858494001741434273513530587116472948129153
+ >>> log(pi, pi**3)
+ 0.33333333333333333333333333333333333333333333333333
+ >>> mp.dps = 25
+ >>> log(3+4j)
+ (1.609437912434100374600759 + 0.9272952180016122324285125j)
+"""
+
+log10 = r"""
+Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
+is equivalent to ``log(x, 10)``.
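+
+For example, the change-of-base identity can be checked numerically
+(an illustrative sketch; values shown at 15-digit working precision)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> log10(1000)
+ 3.0
+ >>> log10(2), log(2)/log(10)
+ (0.301029995663981, 0.301029995663981)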
+""" + +fmod = r""" +Converts `x` and `y` to mpmath numbers and returns `x \mod y`. +For mpmath numbers, this is equivalent to ``x % y``. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> fmod(100, pi) + 2.61062773871641 + +You can use :func:`~mpmath.fmod` to compute fractional parts of numbers:: + + >>> fmod(10.25, 1) + 0.25 + +""" + +radians = r""" +Converts the degree angle `x` to radians:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> radians(60) + 1.0471975511966 +""" + +degrees = r""" +Converts the radian angle `x` to a degree angle:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> degrees(pi/3) + 60.0 +""" + +atan2 = r""" +Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`, +giving the signed angle between the positive `x`-axis and the +point `(x, y)` in the 2D plane. This function is defined for +real `x` and `y` only. + +The two-argument arctangent essentially computes +`\mathrm{atan}(y/x)`, but accounts for the signs of both +`x` and `y` to give the angle for the correct quadrant. The +following examples illustrate the difference:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> atan2(1,1), atan(1/1.) + (0.785398163397448, 0.785398163397448) + >>> atan2(1,-1), atan(1/-1.) + (2.35619449019234, -0.785398163397448) + >>> atan2(-1,1), atan(-1/1.) + (-0.785398163397448, -0.785398163397448) + >>> atan2(-1,-1), atan(-1/-1.) + (-2.35619449019234, 0.785398163397448) + +The angle convention is the same as that used for the complex +argument; see :func:`~mpmath.arg`. +""" + +fibonacci = r""" +``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The +Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)` +with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci` +extends this definition to arbitrary real and complex arguments +using the formula + +.. math :: + + F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5} + +where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this +continuous formula to compute `F(n)` for extremely large `n`, where +calculating the exact integer would be wasteful. + +For convenience, :func:`~mpmath.fib` is available as an alias for +:func:`~mpmath.fibonacci`. + +**Basic examples** + +Some small Fibonacci numbers are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for i in range(10): + ... print(fibonacci(i)) + ... + 0.0 + 1.0 + 1.0 + 2.0 + 3.0 + 5.0 + 8.0 + 13.0 + 21.0 + 34.0 + >>> fibonacci(50) + 12586269025.0 + +The recurrence for `F(n)` extends backwards to negative `n`:: + + >>> for i in range(10): + ... print(fibonacci(-i)) + ... + 0.0 + 1.0 + -1.0 + 2.0 + -3.0 + 5.0 + -8.0 + 13.0 + -21.0 + 34.0 + +Large Fibonacci numbers will be computed approximately unless +the precision is set high enough:: + + >>> fib(200) + 2.8057117299251e+41 + >>> mp.dps = 45 + >>> fib(200) + 280571172992510140037611932413038677189525.0 + +:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers +of stupendous size:: + + >>> mp.dps = 15 + >>> fibonacci(10**25) + 3.49052338550226e+2089876402499787337692720 + +**Real and complex arguments** + +The extended Fibonacci function is an analytic function. 
The
+property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
+
+ >>> mp.dps = 15
+ >>> fib(pi)
+ 2.1170270579161
+ >>> fib(pi-1) + fib(pi-2)
+ 2.1170270579161
+ >>> fib(3+4j)
+ (-5248.51130728372 - 14195.962288353j)
+ >>> fib(2+4j) + fib(1+4j)
+ (-5248.51130728372 - 14195.962288353j)
+
+The Fibonacci function has infinitely many roots on the
+negative half-real axis. The first root is at 0, the second is
+close to -0.18, and then there are infinitely many roots that
+asymptotically approach `-n+1/2`::
+
+ >>> findroot(fib, -0.2)
+ -0.183802359692956
+ >>> findroot(fib, -2)
+ -1.57077646820395
+ >>> findroot(fib, -17)
+ -16.4999999596115
+ >>> findroot(fib, -24)
+ -23.5000000000479
+
+**Mathematical relationships**
+
+For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
+
+ >>> mp.dps = 50
+ >>> fibonacci(101)/fibonacci(100)
+ 1.6180339887498948482045868343656381177203127439638
+ >>> +phi
+ 1.6180339887498948482045868343656381177203091798058
+
+The sum of reciprocal Fibonacci numbers converges to an irrational
+number for which no closed form expression is known::
+
+ >>> mp.dps = 15
+ >>> nsum(lambda n: 1/fib(n), [1, inf])
+ 3.35988566624318
+
+Amazingly, however, the sum of odd-index reciprocal Fibonacci
+numbers can be expressed in terms of a Jacobi theta function::
+
+ >>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
+ 1.82451515740692
+ >>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
+ 1.82451515740692
+
+Some related sums can be done in closed form::
+
+ >>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
+ 1.11803398874989
+ >>> phi - 0.5
+ 1.11803398874989
+ >>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
+ >>> nsum(f, [1, inf])
+ 0.618033988749895
+ >>> phi-1
+ 0.618033988749895
+
+**References**
+
+1. http://mathworld.wolfram.com/FibonacciNumber.html
+"""
+
+altzeta = r"""
+Gives the Dirichlet eta function, `\eta(s)`, also known as the
+alternating zeta function. This function is defined in analogy
+with the Riemann zeta function as providing the sum of the
+alternating series
+
+.. math ::
+
+ \eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{(k+1)^s}
+ = 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
+
+The eta function, unlike the Riemann zeta function, is an entire
+function, having a finite value for all complex `s`. The special case
+`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
+
+The alternating zeta function may be expressed using the Riemann zeta function
+as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
+in terms of the Hurwitz zeta function, for example using
+:func:`~mpmath.dirichlet` (see documentation for that function).
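+
+As an illustrative check of this identity (the difference of the two
+expressions is chopped to zero at 15-digit working precision)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> chop(altzeta(2.5) - (1 - 2**(1 - 2.5))*zeta(2.5))
+ 0.0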
+
+**Examples**
+
+Some special values are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> altzeta(1)
+ 0.693147180559945
+ >>> altzeta(0)
+ 0.5
+ >>> altzeta(-1)
+ 0.25
+ >>> altzeta(-2)
+ 0.0
+
+An example of a sum that can be computed more accurately and
+efficiently via :func:`~mpmath.altzeta` than via numerical summation::
+
+ >>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100))
+ 0.867204951503984
+ >>> altzeta(2.5)
+ 0.867199889012184
+
+At positive even integers, the Dirichlet eta function
+evaluates to a rational multiple of a power of `\pi`::
+
+ >>> altzeta(2)
+ 0.822467033424113
+ >>> pi**2/12
+ 0.822467033424113
+
+Like the Riemann zeta function, `\eta(s)` approaches 1
+as `s` approaches positive infinity, although it does
+so from below rather than from above::
+
+ >>> altzeta(30)
+ 0.999999999068682
+ >>> altzeta(inf)
+ 1.0
+ >>> mp.pretty = False
+ >>> altzeta(1000, rounding='d')
+ mpf('0.99999999999999989')
+ >>> altzeta(1000, rounding='u')
+ mpf('1.0')
+
+**References**
+
+1. http://mathworld.wolfram.com/DirichletEtaFunction.html
+
+2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
+"""
+
+factorial = r"""
+Computes the factorial, `x!`. For integers `n \ge 0`, we have
+`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
+is defined for real or complex `x` by `x! = \Gamma(x+1)`.
+
+**Examples**
+
+Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for k in range(6):
+ ... print("%s %s" % (k, fac(k)))
+ ...
+ 0 1.0
+ 1 1.0
+ 2 2.0
+ 3 6.0
+ 4 24.0
+ 5 120.0
+ >>> fac(inf)
+ +inf
+ >>> fac(0.5), sqrt(pi)/2
+ (0.886226925452758, 0.886226925452758)
+
+For large positive `x`, `x!` can be approximated by
+Stirling's formula::
+
+ >>> x = 10**10
+ >>> fac(x)
+ 2.32579620567308e+95657055186
+ >>> sqrt(2*pi*x)*(x/e)**x
+ 2.32579597597705e+95657055186
+
+:func:`~mpmath.fac` supports evaluation for astronomically large values::
+
+ >>> fac(10**30)
+ 6.22311232304258e+29565705518096748172348871081098
+
+Reciprocal factorials appear in the Taylor series of the
+exponential function (among many other contexts)::
+
+ >>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
+ (2.71828182845905, 2.71828182845905)
+ >>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
+ (23.1406926327793, 23.1406926327793)
+
+"""
+
+gamma = r"""
+Computes the gamma function, `\Gamma(x)`. The gamma function is a
+shifted version of the ordinary factorial, satisfying
+`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
+is defined by
+
+.. math ::
+
+ \Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
+
+for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
+by analytic continuation.
+
+**Examples**
+
+Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for k in range(1, 6):
+ ... print("%s %s" % (k, gamma(k)))
+ ...
+ 1 1.0
+ 2 1.0
+ 3 2.0
+ 4 6.0
+ 5 24.0
+ >>> gamma(inf)
+ +inf
+ >>> gamma(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: gamma function pole
+
+The gamma function of a half-integer is a rational multiple of
+`\sqrt{\pi}`::
+
+ >>> gamma(0.5), sqrt(pi)
+ (1.77245385090552, 1.77245385090552)
+ >>> gamma(1.5), sqrt(pi)/2
+ (0.886226925452758, 0.886226925452758)
+
+We can check the integral definition::
+
+ >>> gamma(3.5)
+ 3.32335097044784
+ >>> quad(lambda t: t**2.5*exp(-t), [0,inf])
+ 3.32335097044784
+
+:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
+complex arguments::
+
+ >>> mp.dps = 50
+ >>> gamma(sqrt(3))
+ 0.91510229697308632046045539308226554038315280564184
+ >>> mp.dps = 25
+ >>> gamma(2j)
+ (0.009902440080927490985955066 - 0.07595200133501806872408048j)
+
+Arguments can also be large. Note that the gamma function grows
+very quickly::
+
+ >>> mp.dps = 15
+ >>> gamma(10**20)
+ 1.9328495143101e+1956570551809674817225
+
+**References**
+
+* [Spouge]_
+
+"""
+
+psi = r"""
+Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
+Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
+the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
+functions are defined as the logarithmic derivatives of the gamma
+function:
+
+.. math ::
+
+ \psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
+
+In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
+present implementation of :func:`~mpmath.psi`, the order `m` must be a
+nonnegative integer, while the argument `z` may be an arbitrary
+complex number (with the exception of the polygamma function's poles
+at `z = 0, -1, -2, \ldots`).
+
+**Examples**
+
+For various rational arguments, the polygamma function reduces to
+a combination of standard mathematical constants::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> psi(0, 1), -euler
+ (-0.5772156649015328606065121, -0.5772156649015328606065121)
+ >>> psi(1, '1/4'), pi**2+8*catalan
+ (17.19732915450711073927132, 17.19732915450711073927132)
+ >>> psi(2, '1/2'), -14*apery
+ (-16.82879664423431999559633, -16.82879664423431999559633)
+
+The polygamma functions are derivatives of each other::
+
+ >>> diff(lambda x: psi(3, x), pi), psi(4, pi)
+ (-0.1105749312578862734526952, -0.1105749312578862734526952)
+ >>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
+ (-0.375, -0.375)
+
+The digamma function diverges logarithmically as `z \to \infty`,
+while higher orders tend to zero::
+
+ >>> psi(0,inf), psi(1,inf), psi(2,inf)
+ (+inf, 0.0, 0.0)
+
+Evaluation for a complex argument::
+
+ >>> psi(2, -1-2j)
+ (0.03902435405364952654838445 + 0.1574325240413029954685366j)
+
+Evaluation is supported for large orders `m` and/or large
+arguments `z`::
+
+ >>> psi(3, 10**100)
+ 2.0e-300
+ >>> psi(250, 10**30+10**20*j)
+ (-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
+
+**Application to infinite series**
+
+Any infinite series where the summand is a rational function of
+the index `k` can be evaluated in closed form in terms of polygamma
+functions of the roots and poles of the summand::
+
+ >>> a = sqrt(2)
+ >>> b = sqrt(3)
+ >>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
+ 0.4049668927517857061917531
+ >>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
+ 0.4049668927517857061917531
+
+This follows from the series representation (`m > 0`)
+
+.. math ::
+
+ \psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
+ \frac{1}{(z+k)^{m+1}}.
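+
+As an illustrative check of this representation for `m = 1`, the sum can
+be evaluated directly with :func:`~mpmath.nsum` and compared against
+:func:`~mpmath.psi` (the difference is chopped to zero at the working
+precision)::
+
+ >>> chop(psi(1, 3) - nsum(lambda k: 1/(k+3)**2, [0, inf]))
+ 0.0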
+ +Since the roots of a polynomial may be complex, it is sometimes +necessary to use the complex polygamma function to evaluate +an entirely real-valued sum:: + + >>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf]) + 1.694361433907061256154665 + >>> nprint(polyroots([1,-2,3])) + [(1.0 - 1.41421j), (1.0 + 1.41421j)] + >>> r1 = 1-sqrt(2)*j + >>> r2 = r1.conjugate() + >>> (psi(0,-r2)-psi(0,-r1))/(r1-r2) + (1.694361433907061256154665 + 0.0j) + +""" + +digamma = r""" +Shortcut for ``psi(0,z)``. +""" + +harmonic = r""" +If `n` is an integer, ``harmonic(n)`` gives a floating-point +approximation of the `n`-th harmonic number `H(n)`, defined as + +.. math :: + + H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n} + +The first few harmonic numbers are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(8): + ... print("%s %s" % (n, harmonic(n))) + ... + 0 0.0 + 1 1.0 + 2 1.5 + 3 1.83333333333333 + 4 2.08333333333333 + 5 2.28333333333333 + 6 2.45 + 7 2.59285714285714 + +The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges:: + + >>> harmonic(inf) + +inf + +:func:`~mpmath.harmonic` is evaluated using the digamma function rather +than by summing the harmonic series term by term. It can therefore +be computed quickly for arbitrarily large `n`, and even for +nonintegral arguments:: + + >>> harmonic(10**100) + 230.835724964306 + >>> harmonic(0.5) + 0.613705638880109 + >>> harmonic(3+4j) + (2.24757548223494 + 0.850502209186044j) + +:func:`~mpmath.harmonic` supports arbitrary precision evaluation:: + + >>> mp.dps = 50 + >>> harmonic(11) + 3.0198773448773448773448773448773448773448773448773 + >>> harmonic(pi) + 1.8727388590273302654363491032336134987519132374152 + +The harmonic series diverges, but at a glacial pace. It is possible +to calculate the exact number of terms required before the sum +exceeds a given amount, say 100:: + + >>> mp.dps = 50 + >>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10) + >>> v + 15092688622113788323693563264538101449859496.864101 + >>> v = int(ceil(v)) + >>> print(v) + 15092688622113788323693563264538101449859497 + >>> harmonic(v-1) + 99.999999999999999999999999999999999999999999942747 + >>> harmonic(v) + 100.000000000000000000000000000000000000000000009 + +""" + +bernoulli = r""" +Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`. + +The Bernoulli numbers are rational numbers, but this function +returns a floating-point approximation. To obtain an exact +fraction, use :func:`~mpmath.bernfrac` instead. + +**Examples** + +Numerical values of the first few Bernoulli numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(15): + ... print("%s %s" % (n, bernoulli(n))) + ... 
+ 0 1.0 + 1 -0.5 + 2 0.166666666666667 + 3 0.0 + 4 -0.0333333333333333 + 5 0.0 + 6 0.0238095238095238 + 7 0.0 + 8 -0.0333333333333333 + 9 0.0 + 10 0.0757575757575758 + 11 0.0 + 12 -0.253113553113553 + 13 0.0 + 14 1.16666666666667 + +Bernoulli numbers can be approximated with arbitrary precision:: + + >>> mp.dps = 50 + >>> bernoulli(100) + -2.8382249570693706959264156336481764738284680928013e+78 + +Arbitrarily large `n` are supported:: + + >>> mp.dps = 15 + >>> bernoulli(10**20 + 2) + 3.09136296657021e+1876752564973863312327 + +The Bernoulli numbers are related to the Riemann zeta function +at integer arguments:: + + >>> -bernoulli(8) * (2*pi)**8 / (2*fac(8)) + 1.00407735619794 + >>> zeta(8) + 1.00407735619794 + +**Algorithm** + +For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence +formula due to Ramanujan. All results in this range are cached, +so sequential computation of small Bernoulli numbers is +guaranteed to be fast. + +For larger `n`, `B_n` is evaluated in terms of the Riemann zeta +function. +""" + +stieltjes = r""" +For a nonnegative integer `n`, ``stieltjes(n)`` computes the +`n`-th Stieltjes constant `\gamma_n`, defined as the +`n`-th coefficient in the Laurent series expansion of the +Riemann zeta function around the pole at `s = 1`. That is, +we have: + +.. math :: + + \zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty} + \frac{(-1)^n}{n!} \gamma_n (s-1)^n + +More generally, ``stieltjes(n, a)`` gives the corresponding +coefficient `\gamma_n(a)` for the Hurwitz zeta function +`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`). + +**Examples** + +The zeroth Stieltjes constant is just Euler's constant `\gamma`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> stieltjes(0) + 0.577215664901533 + +Some more values are:: + + >>> stieltjes(1) + -0.0728158454836767 + >>> stieltjes(10) + 0.000205332814909065 + >>> stieltjes(30) + 0.00355772885557316 + >>> stieltjes(1000) + -1.57095384420474e+486 + >>> stieltjes(2000) + 2.680424678918e+1109 + >>> stieltjes(1, 2.5) + -0.23747539175716 + +An alternative way to compute `\gamma_1`:: + + >>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1) + -0.0728158454836767 + +:func:`~mpmath.stieltjes` supports arbitrary precision evaluation:: + + >>> mp.dps = 50 + >>> stieltjes(2) + -0.0096903631928723184845303860352125293590658061013408 + +**Algorithm** + +:func:`~mpmath.stieltjes` numerically evaluates the integral in +the following representation due to Ainsworth, Howell and +Coffey [1], [2]: + +.. math :: + + \gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} + + \frac{2}{a} \Re \int_0^{\infty} + \frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx. + +For some reference values with `a = 1`, see e.g. [4]. + +**References** + +1. O. R. Ainsworth & L. W. Howell, "An integral representation of + the generalized Euler-Mascheroni constants", NASA Technical + Paper 2456 (1985), + http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf + +2. M. W. Coffey, "The Stieltjes constants, their relation to the + `\eta_j` coefficients, and representation of the Hurwitz + zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343 + +3. http://mathworld.wolfram.com/StieltjesConstants.html + +4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt + +""" + +gammaprod = r""" +Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the +product / quotient of gamma functions: + +.. 
math ::
+
+ \frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
+ {\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
+
+Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
+the entire product as a limit and evaluates this limit properly if
+any of the numerator or denominator arguments are nonpositive
+integers such that poles of the gamma function are encountered.
+That is, :func:`~mpmath.gammaprod` evaluates
+
+.. math ::
+
+ \lim_{\epsilon \to 0}
+ \frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
+ \Gamma(a_p+\epsilon)}
+ {\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
+ \Gamma(b_q+\epsilon)}
+
+In particular:
+
+* If there are equally many poles in the numerator and the
+ denominator, the limit is a rational number times the remaining,
+ regular part of the product.
+
+* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
+ returns ``+inf``.
+
+* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
+ returns 0.
+
+**Examples**
+
+The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> gammaprod([], [0])
+ 0.0
+
+A limit::
+
+ >>> gammaprod([-4], [-3])
+ -0.25
+ >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
+ -0.25
+ >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
+ -0.25
+
+"""
+
+beta = r"""
+Computes the beta function,
+`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
+The beta function is also commonly defined by the integral
+representation
+
+.. math ::
+
+ B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
+
+**Examples**
+
+For integer and half-integer arguments where all three gamma
+functions are finite, the beta function becomes either a rational
+number or a rational multiple of `\pi`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> beta(5, 2)
+ 0.0333333333333333
+ >>> beta(1.5, 2)
+ 0.266666666666667
+ >>> 16*beta(2.5, 1.5)
+ 3.14159265358979
+
+Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
+of the beta function is taken to result in ``+inf``::
+
+ >>> beta(-0.5, 0.5)
+ 0.0
+ >>> beta(-3, 3)
+ -0.333333333333333
+ >>> beta(-2, 3)
+ +inf
+ >>> beta(inf, 1)
+ 0.0
+ >>> beta(inf, 0)
+ nan
+
+:func:`~mpmath.beta` supports complex numbers and arbitrary precision
+evaluation::
+
+ >>> beta(1, 2+j)
+ (0.4 - 0.2j)
+ >>> mp.dps = 25
+ >>> beta(j,0.5)
+ (1.079424249270925780135675 - 1.410032405664160838288752j)
+ >>> mp.dps = 50
+ >>> beta(pi, e)
+ 0.037890298781212201348153837138927165984170287886464
+
+Various integrals can be computed by means of the
+beta function::
+
+ >>> mp.dps = 15
+ >>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
+ 0.0230880230880231
+ >>> beta(3.5, 3)
+ 0.0230880230880231
+ >>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
+ 0.319504062596158
+ >>> beta(2.5, 0.75)/2
+ 0.319504062596158
+
+"""
+
+betainc = r"""
+``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
+incomplete beta function,
+
+.. math ::
+
+ I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
+
+When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
+beta function `B(a,b)`; see :func:`~mpmath.beta`.
+
+With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
+computes the regularized incomplete beta function
+`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
+beta distribution with parameters `a`, `b`.
+
+.. note ::
+
+ Implementations of the incomplete beta function in some other
+ software use a different argument order. 
For example, Mathematica uses the + reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's + three-argument incomplete beta integral (implicitly with `x1 = 0`), use + ``betainc(a,b,0,x2,regularized=True)``. + +**Examples** + +Verifying that :func:`~mpmath.betainc` computes the integral in the +definition:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> x,y,a,b = 3, 4, 0, 6 + >>> betainc(x, y, a, b) + -4010.4 + >>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b]) + -4010.4 + +The arguments may be arbitrary complex numbers:: + + >>> betainc(0.75, 1-4j, 0, 2+3j) + (0.2241657956955709603655887 + 0.3619619242700451992411724j) + +With regularization:: + + >>> betainc(1, 2, 0, 0.25, regularized=True) + 0.4375 + >>> betainc(pi, e, 0, 1, regularized=True) # Complete + 1.0 + +The beta integral satisfies some simple argument transformation +symmetries:: + + >>> mp.dps = 15 + >>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4) + (56.0833333333333, 56.0833333333333, 56.0833333333333) + +The beta integral can often be evaluated analytically. For integer and +rational arguments, the incomplete beta function typically reduces to a +simple algebraic-logarithmic expression:: + + >>> mp.dps = 25 + >>> identify(chop(betainc(0, 0, 3, 4))) + '-(log((9/8)))' + >>> identify(betainc(2, 3, 4, 5)) + '(673/12)' + >>> identify(betainc(1.5, 1, 1, 2)) + '((-12+sqrt(1152))/18)' + +""" + +binomial = r""" +Computes the binomial coefficient + +.. math :: + + {n \choose k} = \frac{n!}{k!(n-k)!}. + +The binomial coefficient gives the number of ways that `k` items +can be chosen from a set of `n` items. More generally, the binomial +coefficient is a well-defined function of arbitrary real or +complex `n` and `k`, via the gamma function. + +**Examples** + +Generate Pascal's triangle:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint([binomial(n,k) for k in range(n+1)]) + ... + [1.0] + [1.0, 1.0] + [1.0, 2.0, 1.0] + [1.0, 3.0, 3.0, 1.0] + [1.0, 4.0, 6.0, 4.0, 1.0] + +There is 1 way to select 0 items from the empty set, and 0 ways to +select 1 item from the empty set:: + + >>> binomial(0, 0) + 1.0 + >>> binomial(0, 1) + 0.0 + +:func:`~mpmath.binomial` supports large arguments:: + + >>> binomial(10**20, 10**20-5) + 8.33333333333333e+97 + >>> binomial(10**20, 10**10) + 2.60784095465201e+104342944813 + +Nonintegral binomial coefficients find use in series +expansions:: + + >>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4)) + [1.0, 0.25, -0.09375, 0.0546875, -0.0375977] + >>> nprint([binomial(0.25, k) for k in range(5)]) + [1.0, 0.25, -0.09375, 0.0546875, -0.0375977] + +An integral representation:: + + >>> n, k = 5, 3 + >>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n + >>> chop(quad(f, [-pi,pi])/(2*pi)) + 10.0 + >>> binomial(n,k) + 10.0 + +""" + +rf = r""" +Computes the rising factorial or Pochhammer symbol, + +.. math :: + + x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)} + +where the rightmost expression is valid for nonintegral `n`. + +**Examples** + +For integral `n`, the rising factorial is a polynomial:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint(taylor(lambda x: rf(x,n), 0, n)) + ... + [1.0] + [0.0, 1.0] + [0.0, 1.0, 1.0] + [0.0, 2.0, 3.0, 1.0] + [0.0, 6.0, 11.0, 6.0, 1.0] + +Evaluation is supported for arbitrary arguments:: + + >>> rf(2+3j, 5.5) + (-7202.03920483347 - 3777.58810701527j) +""" + +ff = r""" +Computes the falling factorial, + +.. 
math ::
+
+ (x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
+
+where the rightmost expression is valid for nonintegral `n`.
+
+**Examples**
+
+For integral `n`, the falling factorial is a polynomial::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint(taylor(lambda x: ff(x,n), 0, n))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [0.0, -1.0, 1.0]
+ [0.0, 2.0, -3.0, 1.0]
+ [0.0, -6.0, 11.0, -6.0, 1.0]
+
+Evaluation is supported for arbitrary arguments::
+
+ >>> ff(2+3j, 5.5)
+ (-720.41085888203 + 316.101124983878j)
+"""
+
+fac2 = r"""
+Computes the double factorial `x!!`, defined for integers
+`x > 0` by
+
+.. math ::
+
+ x!! = \begin{cases}
+ 1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
+ 2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
+ \end{cases}
+
+and more generally by [1]
+
+.. math ::
+
+ x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
+ \Gamma\left(\frac{x}{2}+1\right).
+
+**Examples**
+
+The integer sequence of double factorials begins::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint([fac2(n) for n in range(10)])
+ [1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
+
+For large `x`, double factorials follow a Stirling-like asymptotic
+approximation::
+
+ >>> x = mpf(10000)
+ >>> fac2(x)
+ 5.97272691416282e+17830
+ >>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
+ 5.97262736954392e+17830
+
+The recurrence formula `x!! = x (x-2)!!` can be reversed to
+define the double factorial of negative odd integers (but
+not negative even integers)::
+
+ >>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
+ (1.0, -1.0, 0.333333333333333, -0.0666666666666667)
+ >>> fac2(-2)
+ Traceback (most recent call last):
+ ...
+ ValueError: gamma function pole
+
+With the exception of the poles at negative even integers,
+:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
+The recurrence formula is valid generally::
+
+ >>> fac2(pi+2j)
+ (-1.3697207890154e-12 + 3.93665300979176e-12j)
+ >>> (pi+2j)*fac2(pi-2+2j)
+ (-1.3697207890154e-12 + 3.93665300979176e-12j)
+
+Double factorials should not be confused with nested factorials,
+which are immensely larger::
+
+ >>> fac(fac(20))
+ 5.13805976125208e+43675043585825292774
+ >>> fac2(20)
+ 3715891200.0
+
+Double factorials appear, among other things, in series expansions
+of Gaussian functions and the error function. Infinite series
+include::
+
+ >>> nsum(lambda k: 1/fac2(k), [0, inf])
+ 3.05940740534258
+ >>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
+ 3.05940740534258
+ >>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
+ 4.06015693855741
+ >>> e * erf(1) * sqrt(pi)
+ 4.06015693855741
+
+A beautiful Ramanujan sum::
+
+ >>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
+ 0.90917279454693
+ >>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
+ 0.90917279454693
+
+**References**
+
+1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
+
+2. http://mathworld.wolfram.com/DoubleFactorial.html
+
+"""
+
+hyper = r"""
+Evaluates the generalized hypergeometric function
+
+.. math ::
+
+ \,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
+ \sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
+ {(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
+
+where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
+
+The parameter lists ``a_s`` and ``b_s`` may contain integers,
+real numbers, complex numbers, as well as exact fractions given in
+the form of tuples `(p, q)`. 
:func:`~mpmath.hyper` is optimized to handle +integers and fractions more efficiently than arbitrary +floating-point parameters (since rational parameters are by +far the most common). + +**Examples** + +Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by +comparison with :func:`~mpmath.nsum`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a,b,c,d = 2,3,4,5 + >>> x = 0.25 + >>> hyper([a,b],[c,d],x) + 1.078903941164934876086237 + >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n) + >>> nsum(fn, [0, inf]) + 1.078903941164934876086237 + +The parameters can be any combination of integers, fractions, +floats and complex numbers:: + + >>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3) + >>> x = 0.2j + >>> hyper([a,b],[c,d,e],x) + (0.9923571616434024810831887 - 0.005753848733883879742993122j) + >>> b, e = -0.5, mpf(2)/3 + >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n) + >>> nsum(fn, [0, inf]) + (0.9923571616434024810831887 - 0.005753848733883879742993122j) + +The `\,_0F_0` and `\,_1F_0` series are just elementary functions:: + + >>> a, z = sqrt(2), +pi + >>> hyper([],[],z) + 23.14069263277926900572909 + >>> exp(z) + 23.14069263277926900572909 + >>> hyper([a],[],z) + (-0.09069132879922920160334114 + 0.3283224323946162083579656j) + >>> (1-z)**(-a) + (-0.09069132879922920160334114 + 0.3283224323946162083579656j) + +If any `a_k` coefficient is a nonpositive integer, the series terminates +into a finite polynomial:: + + >>> hyper([1,1,1,-3],[2,5],1) + 0.7904761904761904761904762 + >>> identify(_) + '(83/105)' + +If any `b_k` is a nonpositive integer, the function is undefined (unless the +series terminates before the division by zero occurs):: + + >>> hyper([1,1,1,-3],[-2,5],1) + Traceback (most recent call last): + ... + ZeroDivisionError: pole in hypergeometric series + >>> hyper([1,1,1,-1],[-2,5],1) + 1.1 + +Except for polynomial cases, the radius of convergence `R` of the hypergeometric +series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or +`R = 0` (if `p > q+1`). + +The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`, +`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions +can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2` +are available to handle the most common cases (see their documentation), +but functions of higher degree are also supported via :func:`~mpmath.hyper`:: + + >>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point + 1.141783505526870731311423 + >>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole + +inf + >>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4 + (1.543998916527972259717257 - 0.5876309929580408028816365j) + >>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5 + (0.9996565821853579063502466 + 0.0129721075905630604445669j) + +Near `z = 1` with noninteger parameters:: + + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1) + 2.219433352235586121250027 + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1) + +inf + >>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))() + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1) + 2923978034.412973409330956 + +Please note that, as currently implemented, evaluation of `\,_pF_{p-1}` +with `p \ge 3` may be slow or inaccurate when `|z-1|` is small, +for some parameter values. + +Evaluation may be aborted if convergence appears to be too slow. 
+
+The optional ``maxterms`` (limiting the number of series terms) and ``maxprec``
+(limiting the internal precision) keyword arguments can be used
+to control evaluation::
+
+ >>> hyper([1,2,3], [4,5,6], 10000) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([1,2,3], [4,5,6], 10000, maxterms=10**6)
+ 7.622806053177969474396918e+4310
+
+Additional options include ``force_series`` (which forces direct use of
+a hypergeometric series even if another evaluation method might work better)
+and ``asymp_tol`` which controls the target tolerance for using
+asymptotic series.
+
+When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
+series. For `\,_2F_0` the Borel sum has an analytic solution and can be
+computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
+is evaluated first by attempting to sum it directly as an asymptotic
+series (this only works for tiny `|z|`), and then by evaluating the Borel
+regularized sum using numerical integration. Except for
+special parameter combinations, this can be extremely slow.
+
+ >>> hyper([1,1], [], 0.5) # regularization of 2F0
+ (1.340965419580146562086448 + 0.8503366631752726568782447j)
+ >>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
+ (1.108287213689475145830699 + 0.5327107430640678181200491j)
+
+With the following magnitude of argument, the asymptotic series for `\,_3F_1`
+gives only a few digits. Using Borel summation, ``hyper`` can produce
+a value with full accuracy::
+
+ >>> mp.dps = 15
+ >>> hyper([2,0.5,4], [5.25], '0.08', force_series=True) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
+ 1.0725535790737
+ >>> hyper([2,0.5,4], [5.25], '0.08')
+ (1.07269542893559 + 5.54668863216891e-5j)
+ >>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
+ 0.946344925484879
+ >>> hyper([2,0.5,4], [5.25], '-0.08')
+ 0.946312503737771
+ >>> mp.dps = 25
+ >>> hyper([2,0.5,4], [5.25], '-0.08')
+ 0.9463125037377662296700858
+
+Note that with the positive `z` value, there is a complex part in the
+correct result, which falls below the tolerance of the asymptotic series.
+
+By default, a parameter that appears in both ``a_s`` and ``b_s`` will be removed
+unless it is a nonpositive integer. This generally speeds up evaluation
+by producing a hypergeometric function of lower order.
+This optimization can be disabled by passing ``eliminate=False``.
+
+ >>> hyper([1,2,3], [4,5,3], 10000)
+ 1.268943190440206905892212e+4321
+ >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False, maxterms=10**6)
+ 1.268943190440206905892212e+4321
+
+If a nonpositive integer `-n` appears in both ``a_s`` and ``b_s``, this parameter
+cannot be unambiguously removed since it creates a term 0 / 0.
+In this case the hypergeometric series is understood to terminate before
+the division by zero occurs. This convention is consistent with Mathematica.
+An alternative convention of eliminating the parameters can be toggled +with ``eliminate_all=True``: + + >>> hyper([2,-1], [-1], 3) + 7.0 + >>> hyper([2,-1], [-1], 3, eliminate_all=True) + 0.25 + >>> hyper([2], [], 3) + 0.25 + +""" + +hypercomb = r""" +Computes a weighted combination of hypergeometric functions + +.. math :: + + \sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}} + \frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r} + \Gamma(\beta_{r,k})} + \,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1}, + \ldots, b_{r,q}; z_r)\right]. + +Typically the parameters are linear combinations of a small set of base +parameters; :func:`~mpmath.hypercomb` permits computing a correct value in +the case that some of the `\alpha`, `\beta`, `b` turn out to be +nonpositive integers, or if division by zero occurs for some `w^c`, +assuming that there are opposing singularities that cancel out. +The limit is computed by evaluating the function with the base +parameters perturbed, at a higher working precision. + +The first argument should be a function that takes the perturbable +base parameters ``params`` as input and returns `N` tuples +``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``, +gamma factors ``alpha``, ``beta``, and hypergeometric coefficients +``a``, ``b`` each should be lists of numbers, and ``z`` should be a single +number. + +**Examples** + +The following evaluates + +.. math :: + + (a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1) + +with `a=1, z=3`. There is a zero factor, two gamma function poles, and +the 1F1 function is singular; all singularities cancel out to give a finite +value:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1]) + -180.769832308689 + >>> -9*exp(3) + -180.769832308689 + +""" + +hyp0f1 = r""" +Gives the hypergeometric function `\,_0F_1`, sometimes known as the +confluent limit function, defined as + +.. math :: + + \,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}. + +This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`, +and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`). + +``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for +:func:`~mpmath.hyper` for more information. + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp0f1(2, 0.25) + 1.130318207984970054415392 + >>> hyp0f1((1,2), 1234567) + 6.27287187546220705604627e+964 + >>> hyp0f1(3+4j, 1000000j) + (3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j) + +Evaluation is supported for arbitrarily large values of `z`, +using asymptotic expansions:: + + >>> hyp0f1(1, 10**50) + 2.131705322874965310390701e+8685889638065036553022565 + >>> hyp0f1(1, -10**50) + 1.115945364792025420300208e-13 + +Verifying the differential equation:: + + >>> a = 2.5 + >>> f = lambda z: hyp0f1(a,z) + >>> for z in [0, 10, 3+4j]: + ... chop(z*diff(f,z,2) + a*diff(f,z) - f(z)) + ... + 0.0 + 0.0 + 0.0 + +""" + +hyp1f1 = r""" +Gives the confluent hypergeometric function of the first kind, + +.. math :: + + \,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!}, + +also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This +function gives one solution to the confluent (Kummer's) differential equation + +.. math :: + + z f''(z) + (b-z) f'(z) - af(z) = 0. 
+
+A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
+Solutions are also given in an alternate form by the Whittaker
+functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
+
+``hyp1f1(a,b,z)`` is equivalent
+to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
+information.
+
+**Examples**
+
+Evaluation for real and complex values of the argument `z`, with
+fixed parameters `a = 2, b = -1/3`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp1f1(2, (-1,3), 3.25)
+ -2815.956856924817275640248
+ >>> hyp1f1(2, (-1,3), -3.25)
+ -1.145036502407444445553107
+ >>> hyp1f1(2, (-1,3), 1000)
+ -8.021799872770764149793693e+441
+ >>> hyp1f1(2, (-1,3), -1000)
+ 0.000003131987633006813594535331
+ >>> hyp1f1(2, (-1,3), 100+100j)
+ (-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
+
+Parameters may be complex::
+
+ >>> hyp1f1(2+3j, -1+j, 10j)
+ (261.8977905181045142673351 + 160.8930312845682213562172j)
+
+Arbitrarily large values of `z` are supported::
+
+ >>> hyp1f1(3, 4, 10**20)
+ 3.890569218254486878220752e+43429448190325182745
+ >>> hyp1f1(3, 4, -10**20)
+ 6.0e-60
+ >>> hyp1f1(3, 4, 10**20*j)
+ (-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
+
+Verifying the differential equation::
+
+ >>> a, b = 1.5, 2
+ >>> f = lambda z: hyp1f1(a,b,z)
+ >>> for z in [0, -10, 3, 3+4j]:
+ ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+An integral representation::
+
+ >>> a, b = 1.5, 3
+ >>> z = 1.5
+ >>> hyp1f1(a,b,z)
+ 2.269381460919952778587441
+ >>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
+ >>> gammaprod([b],[a,b-a])*quad(g, [0,1])
+ 2.269381460919952778587441
+
+
+"""
+
+hyp1f2 = r"""
+Gives the hypergeometric function `\,_1F_2(a_1; b_1, b_2; z)`.
+The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
+``hyper([a1],[b1,b2],z)``.
+
+Evaluation works for complex and arbitrarily large arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a, b, c = 1.5, (-1,3), 2.25
+ >>> hyp1f2(a, b, c, 10**20)
+ -1.159388148811981535941434e+8685889639
+ >>> hyp1f2(a, b, c, -10**20)
+ -12.60262607892655945795907
+ >>> hyp1f2(a, b, c, 10**20*j)
+ (4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
+ >>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
+ (135881.9905586966432662004 - 86681.95885418079535738828j)
+
+"""
+
+hyp2f2 = r"""
+Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
+The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
+``hyper([a1,a2],[b1,b2],z)``.
+
+Evaluation works for complex and arbitrarily large arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a, b, c, d = 1.5, (-1,3), 2.25, 4
+ >>> hyp2f2(a, b, c, d, 10**20)
+ -5.275758229007902299823821e+43429448190325182663
+ >>> hyp2f2(a, b, c, d, -10**20)
+ 2561445.079983207701073448
+ >>> hyp2f2(a, b, c, d, 10**20*j)
+ (2218276.509664121194836667 - 1280722.539991603850462856j)
+ >>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
+ (80500.68321405666957342788 - 20346.82752982813540993502j)
+
+"""
+
+hyp2f3 = r"""
+Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
+The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
+``hyper([a1,a2],[b1,b2,b3],z)``.
+ +Evaluation works for arbitrarily large arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5) + >>> hyp2f3(a1,a2,b1,b2,b3,10**20) + -4.169178177065714963568963e+8685889590 + >>> hyp2f3(a1,a2,b1,b2,b3,-10**20) + 7064472.587757755088178629 + >>> hyp2f3(a1,a2,b1,b2,b3,10**20*j) + (-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j) + >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j) + (-2280.938956687033150740228 + 13620.97336609573659199632j) + >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j) + (4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j) + +""" + +hyp2f1 = r""" +Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as +*the* hypergeometric function), defined for `|z| < 1` as + +.. math :: + + \,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty} + \frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}. + +and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)` +when necessary. + +Special cases of this function include many of the orthogonal polynomials as +well as the incomplete beta function and other functions. Properties of the +Gauss hypergeometric function are documented comprehensively in many references, +for example Abramowitz & Stegun, section 15. + +The implementation supports the analytic continuation as well as evaluation +close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)`` +is equivalent to ``hyper([a,b],[c],z)``. + +**Examples** + +Evaluation with `z` inside, outside and on the unit circle, for +fixed parameters:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp2f1(2, (1,2), 4, 0.75) + 1.303703703703703703703704 + >>> hyp2f1(2, (1,2), 4, -1.75) + 0.7431290566046919177853916 + >>> hyp2f1(2, (1,2), 4, 1.75) + (1.418075801749271137026239 - 1.114976146679907015775102j) + >>> hyp2f1(2, (1,2), 4, 1) + 1.6 + >>> hyp2f1(2, (1,2), 4, -1) + 0.8235498012182875315037882 + >>> hyp2f1(2, (1,2), 4, j) + (0.9144026291433065674259078 + 0.2050415770437884900574923j) + >>> hyp2f1(2, (1,2), 4, 2+j) + (0.9274013540258103029011549 + 0.7455257875808100868984496j) + >>> hyp2f1(2, (1,2), 4, 0.25j) + (0.9931169055799728251931672 + 0.06154836525312066938147793j) + +Evaluation with complex parameter values:: + + >>> hyp2f1(1+j, 0.75, 10j, 1+5j) + (0.8834833319713479923389638 + 0.7053886880648105068343509j) + +Evaluation with `z = 1`:: + + >>> hyp2f1(-2.5, 3.5, 1.5, 1) + 0.0 + >>> hyp2f1(-2.5, 3, 4, 1) + 0.06926406926406926406926407 + >>> hyp2f1(2, 3, 4, 1) + +inf + +Evaluation for huge arguments:: + + >>> hyp2f1((-1,3), 1.75, 4, '1e100') + (7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j) + >>> hyp2f1((-1,3), 1.75, 4, '1e1000000') + (7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j) + >>> hyp2f1((-1,3), 1.75, 4, '1e1000000j') + (1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j) + +An integral representation:: + + >>> a,b,c,z = -0.5, 1, 2.5, 0.25 + >>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a) + >>> gammaprod([c],[b,c-b]) * quad(g, [0,1]) + 0.9480458814362824478852618 + >>> hyp2f1(a,b,c,z) + 0.9480458814362824478852618 + +Verifying the hypergeometric differential equation:: + + >>> f = lambda z: hyp2f1(a,b,c,z) + >>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z)) + 0.0 + +""" + +hyp3f2 = r""" +Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1` +as + +.. 
math :: + + \,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty} + \frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}. + +and for `|z| \ge 1` by analytic continuation. The analytic structure of this +function is similar to that of `\,_2F_1`, generally with a singularity at +`z = 1` and a branch cut on `(1, \infty)`. + +Evaluation is supported inside, on, and outside +the circle of convergence `|z| = 1`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp3f2(1,2,3,4,5,0.25) + 1.083533123380934241548707 + >>> hyp3f2(1,2+2j,3,4,5,-10+10j) + (0.1574651066006004632914361 - 0.03194209021885226400892963j) + >>> hyp3f2(1,2,3,4,5,-10) + 0.3071141169208772603266489 + >>> hyp3f2(1,2,3,4,5,10) + (-0.4857045320523947050581423 - 0.5988311440454888436888028j) + >>> hyp3f2(0.25,1,1,2,1.5,1) + 1.157370995096772047567631 + >>> (8-pi-2*ln2)/3 + 1.157370995096772047567631 + >>> hyp3f2(1+j,0.5j,2,1,-2j,-1) + (1.74518490615029486475959 + 0.1454701525056682297614029j) + >>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j)) + (0.9829816481834277511138055 - 0.4059040020276937085081127j) + >>> hyp3f2(-3,2,1,-5,4,1) + 1.41 + >>> hyp3f2(-3,2,1,-5,4,2) + 2.12 + +Evaluation very close to the unit circle:: + + >>> hyp3f2(1,2,3,4,5,'1.0001') + (1.564877796743282766872279 - 3.76821518787438186031973e-11j) + >>> hyp3f2(1,2,3,4,5,'1+0.0001j') + (1.564747153061671573212831 + 0.0001305757570366084557648482j) + >>> hyp3f2(1,2,3,4,5,'0.9999') + 1.564616644881686134983664 + >>> hyp3f2(1,2,3,4,5,'-0.9999') + 0.7823896253461678060196207 + +.. note :: + + Evaluation for `|z-1|` small can currently be inaccurate or slow + for some parameter combinations. + +For various parameter combinations, `\,_3F_2` admits representation in terms +of hypergeometric functions of lower degree, or in terms of +simpler functions:: + + >>> for a, b, z in [(1,2,-1), (2,0.5,1)]: + ... hyp2f1(a,b,a+b+0.5,z)**2 + ... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z) + ... + 0.4246104461966439006086308 + 0.4246104461966439006086308 + 7.111111111111111111111111 + 7.111111111111111111111111 + + >>> z = 2+3j + >>> hyp3f2(0.5,1,1.5,2,2,z) + (0.7621440939243342419729144 + 0.4249117735058037649915723j) + >>> 4*(pi-2*ellipe(z))/(pi*z) + (0.7621440939243342419729144 + 0.4249117735058037649915723j) + +""" + +hyperu = r""" +Gives the Tricomi confluent hypergeometric function `U`, also known as +the Kummer or confluent hypergeometric function of the second kind. This +function gives a second linearly independent solution to the confluent +hypergeometric differential equation (the first is provided by `\,_1F_1` -- +see :func:`~mpmath.hyp1f1`). + +**Examples** + +Evaluation for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyperu(2,3,4) + 0.0625 + >>> hyperu(0.25, 5, 1000) + 0.1779949416140579573763523 + >>> hyperu(0.25, 5, -1000) + (0.1256256609322773150118907 - 0.1256256609322773150118907j) + +The `U` function may be singular at `z = 0`:: + + >>> hyperu(1.5, 2, 0) + +inf + >>> hyperu(1.5, -2, 0) + 0.1719434921288400112603671 + +Verifying the differential equation:: + + >>> a, b = 1.5, 2 + >>> f = lambda z: hyperu(a,b,z) + >>> for z in [-10, 3, 3+4j]: + ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z)) + ... 
+ 0.0 + 0.0 + 0.0 + +An integral representation:: + + >>> a,b,z = 2, 3.5, 4.25 + >>> hyperu(a,b,z) + 0.06674960718150520648014567 + >>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a) + 0.06674960718150520648014567 + + +[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm +""" + +hyp2f0 = r""" +Gives the hypergeometric function `\,_2F_0`, defined formally by the +series + +.. math :: + + \,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}. + +This series usually does not converge. For small enough `z`, it can be viewed +as an asymptotic series that may be summed directly with an appropriate +truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum, +or equivalently, it uses a representation in terms of the +hypergeometric U function [1]. The series also converges when either `a` or `b` +is a nonpositive integer, as it then terminates into a polynomial +after `-a` or `-b` terms. + +**Examples** + +Evaluation is supported for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp2f0((2,3), 1.25, -100) + 0.07095851870980052763312791 + >>> hyp2f0((2,3), 1.25, 100) + (-0.03254379032170590665041131 + 0.07269254613282301012735797j) + >>> hyp2f0(-0.75, 1-j, 4j) + (-0.3579987031082732264862155 - 3.052951783922142735255881j) + +Even with real arguments, the regularized value of 2F0 is often complex-valued, +but the imaginary part decreases exponentially as `z \to 0`. In the following +example, the first call uses complex evaluation while the second has a small +enough `z` to evaluate using the direct series and thus the returned value +is strictly real (this should be taken to indicate that the imaginary +part is less than ``eps``):: + + >>> mp.dps = 15 + >>> hyp2f0(1.5, 0.5, 0.05) + (1.04166637647907 + 8.34584913683906e-8j) + >>> hyp2f0(1.5, 0.5, 0.0005) + 1.00037535207621 + +The imaginary part can be retrieved by increasing the working precision:: + + >>> mp.dps = 80 + >>> nprint(hyp2f0(1.5, 0.5, 0.009).imag) + 1.23828e-46 + +In the polynomial case (the series terminating), 2F0 can evaluate exactly:: + + >>> mp.dps = 15 + >>> hyp2f0(-6,-6,2) + 291793.0 + >>> identify(hyp2f0(-2,1,0.25)) + '(5/8)' + +The coefficients of the polynomials can be recovered using Taylor expansion:: + + >>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10)) + [1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10)) + [1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + + +[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm +""" + + +gammainc = r""" +``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete +gamma function with integration limits `[a, b]`: + +.. math :: + + \Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt + +The generalized incomplete gamma function reduces to the +following special cases when one or both endpoints are fixed: + +* `\Gamma(z,0,\infty)` is the standard ("complete") + gamma function, `\Gamma(z)` (available directly + as the mpmath function :func:`~mpmath.gamma`) +* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma + function, `\Gamma(z,a)` +* `\Gamma(z,0,b)` is the "lower" incomplete gamma + function, `\gamma(z,b)`. + +Of course, we have +`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)` +for all `z` and `x`. + +Note however that some authors reverse the order of the +arguments when defining the lower and upper incomplete +gamma function, so one should be careful to get the correct +definition. 
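+
+As a quick consistency check of the special cases listed above (a
+small sketch, using only :func:`~mpmath.gamma`, which is documented
+separately), the two one-sided integrals recombine into the complete
+gamma function::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> z = mpf('3.5')
+    >>> abs(gammainc(z) - gamma(z)) < 1e-12
+    True
+    >>> abs(gammainc(z, 0, 2) + gammainc(z, 2) - gamma(z)) < 1e-12
+    True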
+ +If also given the keyword argument ``regularized=True``, +:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma +function + +.. math :: + + P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}. + +**Examples** + +We can compare with numerical quadrature to verify that +:func:`~mpmath.gammainc` computes the integral in the definition:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> gammainc(2+3j, 4, 10) + (0.00977212668627705160602312 - 0.0770637306312989892451977j) + >>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10]) + (0.00977212668627705160602312 - 0.0770637306312989892451977j) + +Argument symmetries follow directly from the integral definition:: + + >>> gammainc(3, 4, 5) + gammainc(3, 5, 4) + 0.0 + >>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4) + 1.523793388892911312363331 + 1.523793388892911312363331 + >>> findroot(lambda z: gammainc(2,z,3), 1) + 3.0 + +Evaluation for arbitrarily large arguments:: + + >>> gammainc(10, 100) + 4.083660630910611272288592e-26 + >>> gammainc(10, 10000000000000000) + 5.290402449901174752972486e-4342944819032375 + >>> gammainc(3+4j, 1000000+1000000j) + (-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j) + +Evaluation of a generalized incomplete gamma function automatically chooses +the representation that gives a more accurate result, depending on which +parameter is larger:: + + >>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad + 0.0 + >>> gammainc(10000000, 2, 3) # Good + 1.755146243738946045873491e+4771204 + >>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad + 0.0 + >>> gammainc(2, 100000000, 100000001) # Good + 4.078258353474186729184421e-43429441 + +The incomplete gamma functions satisfy simple recurrence +relations:: + + >>> mp.dps = 25 + >>> z, a = mpf(3.5), mpf(2) + >>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a) + 10.60130296933533459267329 + 10.60130296933533459267329 + >>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a) + 1.030425427232114336470932 + 1.030425427232114336470932 + +Evaluation at integers and poles:: + + >>> gammainc(-3, -4, -5) + (-0.2214577048967798566234192 + 0.0j) + >>> gammainc(-3, 0, 5) + +inf + +If `z` is an integer, the recurrence reduces the incomplete gamma +function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and +`Q` are polynomials:: + + >>> gammainc(1, 2); exp(-2) + 0.1353352832366126918939995 + 0.1353352832366126918939995 + >>> mp.dps = 50 + >>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)']) + '(326*exp(-1) + (-872)*exp(-2))' + +The incomplete gamma functions reduce to functions such as +the exponential integral Ei and the error function for special +arguments:: + + >>> mp.dps = 25 + >>> gammainc(0, 4); -ei(-4) + 0.00377935240984890647887486 + 0.00377935240984890647887486 + >>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2)) + 1.691806732945198336509541 + 1.691806732945198336509541 + +""" + +erf = r""" +Computes the error function, `\mathrm{erf}(x)`. The error +function is the normalized antiderivative of the Gaussian function +`\exp(-t^2)`. More precisely, + +.. 
math:: + + \mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt + +**Basic examples** + +Simple values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erf(0) + 0.0 + >>> erf(1) + 0.842700792949715 + >>> erf(-1) + -0.842700792949715 + >>> erf(inf) + 1.0 + >>> erf(-inf) + -1.0 + +For large real `x`, `\mathrm{erf}(x)` approaches 1 very +rapidly:: + + >>> erf(3) + 0.999977909503001 + >>> erf(5) + 0.999999999998463 + +The error function is an odd function:: + + >>> nprint(chop(taylor(erf, 0, 5))) + [0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838] + +:func:`~mpmath.erf` implements arbitrary-precision evaluation and +supports complex numbers:: + + >>> mp.dps = 50 + >>> erf(0.5) + 0.52049987781304653768274665389196452873645157575796 + >>> mp.dps = 25 + >>> erf(1+j) + (1.316151281697947644880271 + 0.1904534692378346862841089j) + +Evaluation is supported for large arguments:: + + >>> mp.dps = 25 + >>> erf('1e1000') + 1.0 + >>> erf('-1e1000') + -1.0 + >>> erf('1e-1000') + 1.128379167095512573896159e-1000 + >>> erf('1e7j') + (0.0 + 8.593897639029319267398803e+43429448190317j) + >>> erf('1e7+1e7j') + (0.9999999858172446172631323 + 3.728805278735270407053139e-8j) + +**Related functions** + +See also :func:`~mpmath.erfc`, which is more accurate for large `x`, +and :func:`~mpmath.erfi` which gives the antiderivative of +`\exp(t^2)`. + +The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc` +are also related to the error function. +""" + +erfc = r""" +Computes the complementary error function, +`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`. +This function avoids cancellation that occurs when naively +computing the complementary error function as ``1-erf(x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> 1 - erf(10) + 0.0 + >>> erfc(10) + 2.08848758376254e-45 + +:func:`~mpmath.erfc` works accurately even for ludicrously large +arguments:: + + >>> erfc(10**10) + 4.3504398860243e-43429448190325182776 + +Complex arguments are supported:: + + >>> erfc(500+50j) + (1.19739830969552e-107492 + 1.46072418957528e-107491j) + +""" + + +erfi = r""" +Computes the imaginary error function, `\mathrm{erfi}(x)`. +The imaginary error function is defined in analogy with the +error function, but with a positive sign in the integrand: + +.. math :: + + \mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt + +Whereas the error function rapidly converges to 1 as `x` grows, +the imaginary error function rapidly diverges to infinity. +The functions are related as +`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex +numbers `x`. 
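+
+The stated relation between the two functions is easy to verify
+numerically (a quick sketch; the identity is standard rather than
+specific to this implementation)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> z = mpf('0.75') + j/4
+    >>> abs(erfi(z) + j*erf(j*z)) < 1e-12
+    True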
+ +**Examples** + +Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erfi(0) + 0.0 + >>> erfi(1) + 1.65042575879754 + >>> erfi(-1) + -1.65042575879754 + >>> erfi(inf) + +inf + >>> erfi(-inf) + -inf + +Note the symmetry between erf and erfi:: + + >>> erfi(3j) + (0.0 + 0.999977909503001j) + >>> erf(3) + 0.999977909503001 + >>> erf(1+2j) + (-0.536643565778565 - 5.04914370344703j) + >>> erfi(2+1j) + (-5.04914370344703 - 0.536643565778565j) + +Large arguments are supported:: + + >>> erfi(1000) + 1.71130938718796e+434291 + >>> erfi(10**10) + 7.3167287567024e+43429448190325182754 + >>> erfi(-10**10) + -7.3167287567024e+43429448190325182754 + >>> erfi(1000-500j) + (2.49895233563961e+325717 + 2.6846779342253e+325717j) + >>> erfi(100000j) + (0.0 + 1.0j) + >>> erfi(-100000j) + (0.0 - 1.0j) + + +""" + +erfinv = r""" +Computes the inverse error function, satisfying + +.. math :: + + \mathrm{erf}(\mathrm{erfinv}(x)) = + \mathrm{erfinv}(\mathrm{erf}(x)) = x. + +This function is defined only for `-1 \le x \le 1`. + +**Examples** + +Special values include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erfinv(0) + 0.0 + >>> erfinv(1) + +inf + >>> erfinv(-1) + -inf + +The domain is limited to the standard interval:: + + >>> erfinv(2) + Traceback (most recent call last): + ... + ValueError: erfinv(x) is defined only for -1 <= x <= 1 + +It is simple to check that :func:`~mpmath.erfinv` computes inverse values of +:func:`~mpmath.erf` as promised:: + + >>> erf(erfinv(0.75)) + 0.75 + >>> erf(erfinv(-0.995)) + -0.995 + +:func:`~mpmath.erfinv` supports arbitrary-precision evaluation:: + + >>> mp.dps = 50 + >>> x = erf(2) + >>> x + 0.99532226501895273416206925636725292861089179704006 + >>> erfinv(x) + 2.0 + +A definite integral involving the inverse error function:: + + >>> mp.dps = 15 + >>> quad(erfinv, [0, 1]) + 0.564189583547756 + >>> 1/sqrt(pi) + 0.564189583547756 + +The inverse error function can be used to generate random numbers +with a Gaussian distribution (although this is a relatively +inefficient algorithm):: + + >>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP + [-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012] + +""" + +npdf = r""" +``npdf(x, mu=0, sigma=1)`` evaluates the probability density +function of a normal distribution with mean value `\mu` +and variance `\sigma^2`. + +Elementary properties of the probability distribution can +be verified using numerical integration:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quad(npdf, [-inf, inf]) + 1.0 + >>> quad(lambda x: npdf(x, 3), [3, inf]) + 0.5 + >>> quad(lambda x: npdf(x, 3, 2), [3, inf]) + 0.5 + +See also :func:`~mpmath.ncdf`, which gives the cumulative +distribution. +""" + +ncdf = r""" +``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution +function of a normal distribution with mean value `\mu` +and variance `\sigma^2`. + +See also :func:`~mpmath.npdf`, which gives the probability density. 
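+
+A useful cross-check (a quick sketch; the identity
+`\Phi(x) = \tfrac{1}{2}\left(1+\operatorname{erf}(x/\sqrt{2})\right)`
+is standard rather than specific to this implementation) relates
+``ncdf`` to the error function::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> x = mpf('0.8')
+    >>> abs(ncdf(x) - (1 + erf(x/sqrt(2)))/2) < 1e-12
+    True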
+ +Elementary properties include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> ncdf(pi, mu=pi) + 0.5 + >>> ncdf(-inf) + 0.0 + >>> ncdf(+inf) + 1.0 + +The cumulative distribution is the integral of the density +function having identical mu and sigma:: + + >>> mp.dps = 15 + >>> diff(ncdf, 2) + 0.053990966513188 + >>> npdf(2) + 0.053990966513188 + >>> diff(lambda x: ncdf(x, 1, 0.5), 0) + 0.107981933026376 + >>> npdf(0, 1, 0.5) + 0.107981933026376 +""" + +expint = r""" +:func:`~mpmath.expint(n,z)` gives the generalized exponential integral +or En-function, + +.. math :: + + \mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt, + +where `n` and `z` may both be complex numbers. The case with `n = 1` is +also given by :func:`~mpmath.e1`. + +**Examples** + +Evaluation at real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expint(1, 6.25) + 0.0002704758872637179088496194 + >>> expint(-3, 2+3j) + (0.00299658467335472929656159 + 0.06100816202125885450319632j) + >>> expint(2+3j, 4-5j) + (0.001803529474663565056945248 - 0.002235061547756185403349091j) + +At negative integer values of `n`, `E_n(z)` reduces to a +rational-exponential function:: + + >>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\ + ... exp(z)/z**(n+2) + >>> n = 3 + >>> z = 1/pi + >>> expint(-n,z) + 584.2604820613019908668219 + >>> f(n,z) + 584.2604820613019908668219 + >>> n = 5 + >>> expint(-n,z) + 115366.5762594725451811138 + >>> f(n,z) + 115366.5762594725451811138 +""" + +e1 = r""" +Computes the exponential integral `\mathrm{E}_1(z)`, given by + +.. math :: + + \mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt. + +This is equivalent to :func:`~mpmath.expint` with `n = 1`. + +**Examples** + +Two ways to evaluate this function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> e1(6.25) + 0.0002704758872637179088496194 + >>> expint(1,6.25) + 0.0002704758872637179088496194 + +The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`) +with negated argument, except for an imaginary branch cut term:: + + >>> e1(2.5) + 0.02491491787026973549562801 + >>> -ei(-2.5) + 0.02491491787026973549562801 + >>> e1(-2.5) + (-7.073765894578600711923552 - 3.141592653589793238462643j) + >>> -ei(2.5) + -7.073765894578600711923552 + +""" + +ei = r""" +Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`. +The exponential integral is defined as + +.. math :: + + \mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt. + +When the integration range includes `t = 0`, the exponential +integral is interpreted as providing the Cauchy principal value. + +For real `x`, the Ei-function behaves roughly like +`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`. + +The Ei-function is related to the more general family of exponential +integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`. 
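+
+Since the integrand of the defining integral is `e^t/t`, the
+derivative of the Ei-function can be cross-checked with
+:func:`~mpmath.diff` (a small sketch)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> x = mpf('2.5')
+    >>> abs(diff(ei, x) - exp(x)/x) < 1e-10
+    True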
+ +**Basic examples** + +Some basic values and limits are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> ei(0) + -inf + >>> ei(1) + 1.89511781635594 + >>> ei(inf) + +inf + >>> ei(-inf) + 0.0 + +For `x < 0`, the defining integral can be evaluated +numerically as a reference:: + + >>> ei(-4) + -0.00377935240984891 + >>> quad(lambda t: exp(t)/t, [-inf, -4]) + -0.00377935240984891 + +:func:`~mpmath.ei` supports complex arguments and arbitrary +precision evaluation:: + + >>> mp.dps = 50 + >>> ei(pi) + 10.928374389331410348638445906907535171566338835056 + >>> mp.dps = 25 + >>> ei(3+4j) + (-4.154091651642689822535359 + 4.294418620024357476985535j) + +**Related functions** + +The exponential integral is closely related to the logarithmic +integral. See :func:`~mpmath.li` for additional information. + +The exponential integral is related to the hyperbolic +and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`, +:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary +exponential function is related to the hyperbolic and +trigonometric functions:: + + >>> mp.dps = 15 + >>> ei(3) + 9.93383257062542 + >>> chi(3) + shi(3) + 9.93383257062542 + >>> chop(ci(3j) - j*si(3j) - pi*j/2) + 9.93383257062542 + +Beware that logarithmic corrections, as in the last example +above, are required to obtain the correct branch in general. +For details, see [1]. + +The exponential integral is also a special case of the +hypergeometric function `\,_2F_2`:: + + >>> z = 0.6 + >>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler + 0.769881289937359 + >>> ei(z) + 0.769881289937359 + +**References** + +1. Relations between Ei and other functions: + http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/ + +2. Abramowitz & Stegun, section 5: + http://people.math.sfu.ca/~cbm/aands/page_228.htm + +3. Asymptotic expansion for Ei: + http://mathworld.wolfram.com/En-Function.html +""" + +li = r""" +Computes the logarithmic integral or li-function +`\mathrm{li}(x)`, defined by + +.. math :: + + \mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt + +The logarithmic integral has a singularity at `x = 1`. + +Alternatively, ``li(x, offset=True)`` computes the offset +logarithmic integral (used in number theory) + +.. math :: + + \mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt. + +These two functions are related via the simple identity +`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`. + +The logarithmic integral should also not be confused with +the polylogarithm (also denoted by Li), which is implemented +as :func:`~mpmath.polylog`. 
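+
+The identity relating the plain and offset versions is easy to
+check numerically (a quick sketch)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> x = mpf(10)
+    >>> abs(li(x, offset=True) - (li(x) - li(2))) < 1e-12
+    True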
+
+**Examples**
+
+Some basic values and limits::
+
+    >>> from mpmath import *
+    >>> mp.dps = 30; mp.pretty = True
+    >>> li(0)
+    0.0
+    >>> li(1)
+    -inf
+    >>> li(2)
+    1.04516378011749278484458888919
+    >>> findroot(li, 2)
+    1.45136923488338105028396848589
+    >>> li(inf)
+    +inf
+    >>> li(2, offset=True)
+    0.0
+    >>> li(1, offset=True)
+    -inf
+    >>> li(0, offset=True)
+    -1.04516378011749278484458888919
+    >>> li(10, offset=True)
+    5.12043572466980515267839286347
+
+The logarithmic integral can be evaluated for arbitrary
+complex arguments::
+
+    >>> mp.dps = 20
+    >>> li(3+4j)
+    (3.1343755504645775265 + 2.6769247817778742392j)
+
+The logarithmic integral is related to the exponential integral::
+
+    >>> ei(log(3))
+    2.1635885946671919729
+    >>> li(3)
+    2.1635885946671919729
+
+The logarithmic integral grows like `O(x/\log(x))`::
+
+    >>> mp.dps = 15
+    >>> x = 10**100
+    >>> x/log(x)
+    4.34294481903252e+97
+    >>> li(x)
+    4.3619719871407e+97
+
+The prime number theorem states that the number of primes less
+than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
+`\mathrm{li}(x)`). For example, it is known that there are
+exactly 1,925,320,391,606,803,968,923 prime numbers less than
+`10^{23}` [1]. The logarithmic integral provides a very
+accurate estimate::
+
+    >>> li(10**23, offset=True)
+    1.92532039161405e+21
+
+A definite integral is::
+
+    >>> quad(li, [0, 1])
+    -0.693147180559945
+    >>> -ln(2)
+    -0.693147180559945
+
+**References**
+
+1. http://mathworld.wolfram.com/PrimeCountingFunction.html
+
+2. http://mathworld.wolfram.com/LogarithmicIntegral.html
+
+"""
+
+ci = r"""
+Computes the cosine integral,
+
+.. math ::
+
+    \mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
+    = \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
+
+**Examples**
+
+Some values and limits::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> ci(0)
+    -inf
+    >>> ci(1)
+    0.3374039229009681346626462
+    >>> ci(pi)
+    0.07366791204642548599010096
+    >>> ci(inf)
+    0.0
+    >>> ci(-inf)
+    (0.0 + 3.141592653589793238462643j)
+    >>> ci(2+3j)
+    (1.408292501520849518759125 - 2.983617742029605093121118j)
+
+The cosine integral behaves roughly like the sinc function
+(see :func:`~mpmath.sinc`) for large real `x`::
+
+    >>> ci(10**10)
+    -4.875060251748226537857298e-11
+    >>> sinc(10**10)
+    -4.875060250875106915277943e-11
+    >>> chop(limit(ci, inf))
+    0.0
+
+It has infinitely many roots on the positive real axis::
+
+    >>> findroot(ci, 1)
+    0.6165054856207162337971104
+    >>> findroot(ci, 2)
+    3.384180422551186426397851
+
+Evaluation is supported for `z` anywhere in the complex plane::
+
+    >>> ci(10**6*(1+j))
+    (4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
+
+We can evaluate the defining integral as a reference::
+
+    >>> mp.dps = 15
+    >>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
+    -0.190029749656644
+    >>> ci(5)
+    -0.190029749656644
+
+Some infinite series can be evaluated using the
+cosine integral::
+
+    >>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
+    -0.239811742000565
+    >>> ci(1) - euler
+    -0.239811742000565
+
+"""
+
+si = r"""
+Computes the sine integral,
+
+.. math ::
+
+    \mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
+
+The sine integral is thus the antiderivative of the sinc
+function (see :func:`~mpmath.sinc`). 
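+
+That antiderivative relationship can be verified directly with
+:func:`~mpmath.diff` (a small sketch)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> x = mpf(3)
+    >>> abs(diff(si, x) - sinc(x)) < 1e-10
+    True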
+ +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> si(0) + 0.0 + >>> si(1) + 0.9460830703671830149413533 + >>> si(-1) + -0.9460830703671830149413533 + >>> si(pi) + 1.851937051982466170361053 + >>> si(inf) + 1.570796326794896619231322 + >>> si(-inf) + -1.570796326794896619231322 + >>> si(2+3j) + (4.547513889562289219853204 + 1.399196580646054789459839j) + +The sine integral approaches `\pi/2` for large real `x`:: + + >>> si(10**10) + 1.570796326707584656968511 + >>> pi/2 + 1.570796326794896619231322 + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> si(10**6*(1+j)) + (-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j) + +We can evaluate the defining integral as a reference:: + + >>> mp.dps = 15 + >>> quad(sinc, [0, 5]) + 1.54993124494467 + >>> si(5) + 1.54993124494467 + +Some infinite series can be evaluated using the +sine integral:: + + >>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf]) + 0.946083070367183 + >>> si(1) + 0.946083070367183 + +""" + +chi = r""" +Computes the hyperbolic cosine integral, defined +in analogy with the cosine integral (see :func:`~mpmath.ci`) as + +.. math :: + + \mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt + = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> chi(0) + -inf + >>> chi(1) + 0.8378669409802082408946786 + >>> chi(inf) + +inf + >>> findroot(chi, 0.5) + 0.5238225713898644064509583 + >>> chi(2+3j) + (-0.1683628683277204662429321 + 2.625115880451325002151688j) + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> chi(10**6*(1+j)) + (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) + +""" + +shi = r""" +Computes the hyperbolic sine integral, defined +in analogy with the sine integral (see :func:`~mpmath.si`) as + +.. math :: + + \mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt. + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> shi(0) + 0.0 + >>> shi(1) + 1.057250875375728514571842 + >>> shi(-1) + -1.057250875375728514571842 + >>> shi(inf) + +inf + >>> shi(2+3j) + (-0.1931890762719198291678095 + 2.645432555362369624818525j) + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> shi(10**6*(1+j)) + (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) + +""" + +fresnels = r""" +Computes the Fresnel sine integral + +.. math :: + + S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt + +Note that some sources define this function +without the normalization factor `\pi/2`. + +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> fresnels(0) + 0.0 + >>> fresnels(inf) + 0.5 + >>> fresnels(-inf) + -0.5 + >>> fresnels(1) + 0.4382591473903547660767567 + >>> fresnels(1+2j) + (36.72546488399143842838788 + 15.58775110440458732748279j) + +Comparing with the definition:: + + >>> fresnels(3) + 0.4963129989673750360976123 + >>> quad(lambda t: sin(pi*t**2/2), [0,3]) + 0.4963129989673750360976123 +""" + +fresnelc = r""" +Computes the Fresnel cosine integral + +.. math :: + + C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt + +Note that some sources define this function +without the normalization factor `\pi/2`. 
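+
+As with the Fresnel sine integral, the definition can be
+sanity-checked through differentiation (a small sketch)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> x = mpf('1.25')
+    >>> abs(diff(fresnelc, x) - cos(pi*x**2/2)) < 1e-10
+    True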
+ +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> fresnelc(0) + 0.0 + >>> fresnelc(inf) + 0.5 + >>> fresnelc(-inf) + -0.5 + >>> fresnelc(1) + 0.7798934003768228294742064 + >>> fresnelc(1+2j) + (16.08787137412548041729489 - 36.22568799288165021578758j) + +Comparing with the definition:: + + >>> fresnelc(3) + 0.6057207892976856295561611 + >>> quad(lambda t: cos(pi*t**2/2), [0,3]) + 0.6057207892976856295561611 +""" + +airyai = r""" +Computes the Airy function `\operatorname{Ai}(z)`, which is +the solution of the Airy differential equation `f''(z) - z f(z) = 0` +with initial conditions + +.. math :: + + \operatorname{Ai}(0) = + \frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)} + + \operatorname{Ai}'(0) = + -\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}. + +Other common ways of defining the Ai-function include +integrals such as + +.. math :: + + \operatorname{Ai}(x) = \frac{1}{\pi} + \int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt + \qquad x \in \mathbb{R} + + \operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi} + \int_0^{\infty} + \exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt. + +The Ai-function is an entire function with a turning point, +behaving roughly like a slowly decaying sine wave for `z < 0` and +like a rapidly decreasing exponential for `z > 0`. +A second solution of the Airy differential equation +is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`). + +Optionally, with *derivative=alpha*, :func:`airyai` can compute the +`\alpha`-th order fractional derivative with respect to `z`. +For `\alpha = n = 1,2,3,\ldots` this gives the derivative +`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots` +this gives the `n`-fold iterated integral + +.. math :: + + f_0(z) = \operatorname{Ai}(z) + + f_n(z) = \int_0^z f_{n-1}(t) dt. + +The Ai-function has infinitely many zeros, all located along the +negative half of the real axis. They can be computed with +:func:`~mpmath.airyaizero`. + +**Plots** + +.. literalinclude :: /plots/ai.py +.. image :: /plots/ai.png +.. literalinclude :: /plots/ai_c.py +.. image :: /plots/ai_c.png + +**Basic examples** + +Limits and values include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airyai(0); 1/(power(3,'2/3')*gamma('2/3')) + 0.3550280538878172392600632 + 0.3550280538878172392600632 + >>> airyai(1) + 0.1352924163128814155241474 + >>> airyai(-1) + 0.5355608832923521187995166 + >>> airyai(inf); airyai(-inf) + 0.0 + 0.0 + +Evaluation is supported for large magnitudes of the argument:: + + >>> airyai(-100) + 0.1767533932395528780908311 + >>> airyai(100) + 2.634482152088184489550553e-291 + >>> airyai(50+50j) + (-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j) + >>> airyai(-50+50j) + (1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j) + +Huge arguments are also fine:: + + >>> airyai(10**10) + 1.162235978298741779953693e-289529654602171 + >>> airyai(-10**10) + 0.0001736206448152818510510181 + >>> w = airyai(10**10*(1+j)) + >>> w.real + 5.711508683721355528322567e-186339621747698 + >>> w.imag + 1.867245506962312577848166e-186339621747697 + +The first root of the Ai-function is:: + + >>> findroot(airyai, -2) + -2.338107410459767038489197 + >>> airyaizero(1) + -2.338107410459767038489197 + +**Properties and relations** + +Verifying the Airy differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(airyai(z,2) - z*airyai(z)) + ... 
+ 0.0 + 0.0 + 0.0 + 0.0 + +The first few terms of the Taylor series expansion around `z = 0` +(every third term is zero):: + + >>> nprint(taylor(airyai, 0, 5)) + [0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0] + +The Airy functions satisfy the Wronskian relation +`\operatorname{Ai}(z) \operatorname{Bi}'(z) - +\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`:: + + >>> z = -0.5 + >>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z) + 0.3183098861837906715377675 + >>> 1/pi + 0.3183098861837906715377675 + +The Airy functions can be expressed in terms of Bessel +functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have:: + + >>> z = -3 + >>> airyai(z) + -0.3788142936776580743472439 + >>> y = 2*power(-z,'3/2')/3 + >>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3 + -0.3788142936776580743472439 + +**Derivatives and integrals** + +Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`):: + + >>> airyai(-3,1); diff(airyai,-3) + 0.3145837692165988136507873 + 0.3145837692165988136507873 + >>> airyai(-3,2); diff(airyai,-3,2) + 1.136442881032974223041732 + 1.136442881032974223041732 + >>> airyai(1000,1); diff(airyai,1000) + -2.943133917910336090459748e-9156 + -2.943133917910336090459748e-9156 + +Several derivatives at `z = 0`:: + + >>> airyai(0,0); airyai(0,1); airyai(0,2) + 0.3550280538878172392600632 + -0.2588194037928067984051836 + 0.0 + >>> airyai(0,3); airyai(0,4); airyai(0,5) + 0.3550280538878172392600632 + -0.5176388075856135968103671 + 0.0 + >>> airyai(0,15); airyai(0,16); airyai(0,17) + 1292.30211615165475090663 + -3188.655054727379756351861 + 0.0 + +The integral of the Ai-function:: + + >>> airyai(3,-1); quad(airyai, [0,3]) + 0.3299203760070217725002701 + 0.3299203760070217725002701 + >>> airyai(-10,-1); quad(airyai, [0,-10]) + -0.765698403134212917425148 + -0.765698403134212917425148 + +Integrals of high or fractional order:: + + >>> airyai(-2,0.5); differint(airyai,-2,0.5,0) + (0.0 + 0.2453596101351438273844725j) + (0.0 + 0.2453596101351438273844725j) + >>> airyai(-2,-4); differint(airyai,-2,-4,0) + 0.2939176441636809580339365 + 0.2939176441636809580339365 + >>> airyai(0,-1); airyai(0,-2); airyai(0,-3) + 0.0 + 0.0 + 0.0 + +Integrals of the Ai-function can be evaluated at limit points:: + + >>> airyai(-1000000,-1); airyai(-inf,-1) + -0.6666843728311539978751512 + -0.6666666666666666666666667 + >>> airyai(10,-1); airyai(+inf,-1) + 0.3333333332991690159427932 + 0.3333333333333333333333333 + >>> airyai(+inf,-2); airyai(+inf,-3) + +inf + +inf + >>> airyai(-1000000,-2); airyai(-inf,-2) + 666666.4078472650651209742 + +inf + >>> airyai(-1000000,-3); airyai(-inf,-3) + -333333074513.7520264995733 + -inf + +**References** + +1. [DLMF]_ Chapter 9: Airy and Related Functions +2. [WolframFunctions]_ section: Bessel-Type Functions + +""" + +airybi = r""" +Computes the Airy function `\operatorname{Bi}(z)`, which is +the solution of the Airy differential equation `f''(z) - z f(z) = 0` +with initial conditions + +.. math :: + + \operatorname{Bi}(0) = + \frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)} + + \operatorname{Bi}'(0) = + \frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}. + +Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function +is oscillatory for `z < 0`, but it grows rather than decreases +for `z > 0`. + +Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals +and fractional derivatives can be computed with the *derivative* +parameter. 
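+
+Using the *derivative* parameter just described, the initial
+conditions quoted above are easy to confirm (a small sketch, built
+only from functions shown elsewhere on this page)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> abs(airybi(0, 1) - power(3,'1/6')/gamma('1/3')) < 1e-12
+    True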
+ +The Bi-function has infinitely many zeros along the negative +half-axis, as well as complex zeros, which can all be computed +with :func:`~mpmath.airybizero`. + +**Plots** + +.. literalinclude :: /plots/bi.py +.. image :: /plots/bi.png +.. literalinclude :: /plots/bi_c.py +.. image :: /plots/bi_c.png + +**Basic examples** + +Limits and values include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airybi(0); 1/(power(3,'1/6')*gamma('2/3')) + 0.6149266274460007351509224 + 0.6149266274460007351509224 + >>> airybi(1) + 1.207423594952871259436379 + >>> airybi(-1) + 0.10399738949694461188869 + >>> airybi(inf); airybi(-inf) + +inf + 0.0 + +Evaluation is supported for large magnitudes of the argument:: + + >>> airybi(-100) + 0.02427388768016013160566747 + >>> airybi(100) + 6.041223996670201399005265e+288 + >>> airybi(50+50j) + (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) + >>> airybi(-50+50j) + (-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j) + +Huge arguments:: + + >>> airybi(10**10) + 1.369385787943539818688433e+289529654602165 + >>> airybi(-10**10) + 0.001775656141692932747610973 + >>> w = airybi(10**10*(1+j)) + >>> w.real + -6.559955931096196875845858e+186339621747689 + >>> w.imag + -6.822462726981357180929024e+186339621747690 + +The first real root of the Bi-function is:: + + >>> findroot(airybi, -1); airybizero(1) + -1.17371322270912792491998 + -1.17371322270912792491998 + +**Properties and relations** + +Verifying the Airy differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(airybi(z,2) - z*airybi(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +The first few terms of the Taylor series expansion around `z = 0` +(every third term is zero):: + + >>> nprint(taylor(airybi, 0, 5)) + [0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0] + +The Airy functions can be expressed in terms of Bessel +functions of order `\pm 1/3`. 
For `\Re[z] \le 0`, we have:: + + >>> z = -3 + >>> airybi(z) + -0.1982896263749265432206449 + >>> p = 2*power(-z,'3/2')/3 + >>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p)) + -0.1982896263749265432206449 + +**Derivatives and integrals** + +Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`):: + + >>> airybi(-3,1); diff(airybi,-3) + -0.675611222685258537668032 + -0.675611222685258537668032 + >>> airybi(-3,2); diff(airybi,-3,2) + 0.5948688791247796296619346 + 0.5948688791247796296619346 + >>> airybi(1000,1); diff(airybi,1000) + 1.710055114624614989262335e+9156 + 1.710055114624614989262335e+9156 + +Several derivatives at `z = 0`:: + + >>> airybi(0,0); airybi(0,1); airybi(0,2) + 0.6149266274460007351509224 + 0.4482883573538263579148237 + 0.0 + >>> airybi(0,3); airybi(0,4); airybi(0,5) + 0.6149266274460007351509224 + 0.8965767147076527158296474 + 0.0 + >>> airybi(0,15); airybi(0,16); airybi(0,17) + 2238.332923903442675949357 + 5522.912562599140729510628 + 0.0 + +The integral of the Bi-function:: + + >>> airybi(3,-1); quad(airybi, [0,3]) + 10.06200303130620056316655 + 10.06200303130620056316655 + >>> airybi(-10,-1); quad(airybi, [0,-10]) + -0.01504042480614002045135483 + -0.01504042480614002045135483 + +Integrals of high or fractional order:: + + >>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0) + (0.0 + 0.5019859055341699223453257j) + (0.0 + 0.5019859055341699223453257j) + >>> airybi(-2,-4); differint(airybi,-2,-4,0) + 0.2809314599922447252139092 + 0.2809314599922447252139092 + >>> airybi(0,-1); airybi(0,-2); airybi(0,-3) + 0.0 + 0.0 + 0.0 + +Integrals of the Bi-function can be evaluated at limit points:: + + >>> airybi(-1000000,-1); airybi(-inf,-1) + 0.000002191261128063434047966873 + 0.0 + >>> airybi(10,-1); airybi(+inf,-1) + 147809803.1074067161675853 + +inf + >>> airybi(+inf,-2); airybi(+inf,-3) + +inf + +inf + >>> airybi(-1000000,-2); airybi(-inf,-2) + 0.4482883750599908479851085 + 0.4482883573538263579148237 + >>> gamma('2/3')*power(3,'2/3')/(2*pi) + 0.4482883573538263579148237 + >>> airybi(-100000,-3); airybi(-inf,-3) + -44828.52827206932872493133 + -inf + >>> airybi(-100000,-4); airybi(-inf,-4) + 2241411040.437759489540248 + +inf + +""" + +airyaizero = r""" +Gives the `k`-th zero of the Airy Ai-function, +i.e. the `k`-th number `a_k` ordered by magnitude for which +`\operatorname{Ai}(a_k) = 0`. + +Optionally, with *derivative=1*, the corresponding +zero `a'_k` of the derivative function, i.e. +`\operatorname{Ai}'(a'_k) = 0`, is computed. + +**Examples** + +Some values of `a_k`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airyaizero(1) + -2.338107410459767038489197 + >>> airyaizero(2) + -4.087949444130970616636989 + >>> airyaizero(3) + -5.520559828095551059129856 + >>> airyaizero(1000) + -281.0315196125215528353364 + +Some values of `a'_k`:: + + >>> airyaizero(1,1) + -1.018792971647471089017325 + >>> airyaizero(2,1) + -3.248197582179836537875424 + >>> airyaizero(3,1) + -4.820099211178735639400616 + >>> airyaizero(1000,1) + -280.9378080358935070607097 + +Verification:: + + >>> chop(airyai(airyaizero(1))) + 0.0 + >>> chop(airyai(airyaizero(1,1),1)) + 0.0 + +""" + +airybizero = r""" +With *complex=False*, gives the `k`-th real zero of the Airy Bi-function, +i.e. the `k`-th number `b_k` ordered by magnitude for which +`\operatorname{Bi}(b_k) = 0`. + +With *complex=True*, gives the `k`-th complex zero in the upper +half plane `\beta_k`. Also the conjugate `\overline{\beta_k}` +is a zero. 
+
+Optionally, with *derivative=1*, the corresponding
+zero `b'_k` or `\beta'_k` of the derivative function, i.e.
+`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
+is computed.
+
+**Examples**
+
+Some values of `b_k`::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> airybizero(1)
+    -1.17371322270912792491998
+    >>> airybizero(2)
+    -3.271093302836352715680228
+    >>> airybizero(3)
+    -4.830737841662015932667709
+    >>> airybizero(1000)
+    -280.9378112034152401578834
+
+Some values of `b'_k`::
+
+    >>> airybizero(1,1)
+    -2.294439682614123246622459
+    >>> airybizero(2,1)
+    -4.073155089071828215552369
+    >>> airybizero(3,1)
+    -5.512395729663599496259593
+    >>> airybizero(1000,1)
+    -281.0315164471118527161362
+
+Some values of `\beta_k`::
+
+    >>> airybizero(1,complex=True)
+    (0.9775448867316206859469927 + 2.141290706038744575749139j)
+    >>> airybizero(2,complex=True)
+    (1.896775013895336346627217 + 3.627291764358919410440499j)
+    >>> airybizero(3,complex=True)
+    (2.633157739354946595708019 + 4.855468179979844983174628j)
+    >>> airybizero(1000,complex=True)
+    (140.4978560578493018899793 + 243.3907724215792121244867j)
+
+Some values of `\beta'_k`::
+
+    >>> airybizero(1,1,complex=True)
+    (0.2149470745374305676088329 + 1.100600143302797880647194j)
+    >>> airybizero(2,1,complex=True)
+    (1.458168309223507392028211 + 2.912249367458445419235083j)
+    >>> airybizero(3,1,complex=True)
+    (2.273760763013482299792362 + 4.254528549217097862167015j)
+    >>> airybizero(1000,1,complex=True)
+    (140.4509972835270559730423 + 243.3096175398562811896208j)
+
+Verification::
+
+    >>> chop(airybi(airybizero(1)))
+    0.0
+    >>> chop(airybi(airybizero(1,1),1))
+    0.0
+    >>> u = airybizero(1,complex=True)
+    >>> chop(airybi(u))
+    0.0
+    >>> chop(airybi(conj(u)))
+    0.0
+
+The complex zeros (in the upper and lower half-planes respectively)
+asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
+
+    >>> arg(airybizero(1,complex=True))
+    1.142532510286334022305364
+    >>> arg(airybizero(1000,complex=True))
+    1.047271114786212061583917
+    >>> arg(airybizero(1000000,complex=True))
+    1.047197624741816183341355
+    >>> pi/3
+    1.047197551196597746154214
+
+"""
+
+
+ellipk = r"""
+Evaluates the complete elliptic integral of the first kind,
+`K(m)`, defined by
+
+.. math ::
+
+    K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
+    \frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
+
+Note that the argument is the parameter `m = k^2`,
+not the modulus `k` which is sometimes used.
+
+**Plots**
+
+.. literalinclude :: /plots/ellipk.py
+.. 
image :: /plots/ellipk.png + +**Examples** + +Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipk(0) + 1.570796326794896619231322 + >>> ellipk(inf) + (0.0 + 0.0j) + >>> ellipk(-inf) + 0.0 + >>> ellipk(1) + +inf + >>> ellipk(-1) + 1.31102877714605990523242 + >>> ellipk(2) + (1.31102877714605990523242 - 1.31102877714605990523242j) + +Verifying the defining integral and hypergeometric +representation:: + + >>> ellipk(0.5) + 1.85407467730137191843385 + >>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2]) + 1.85407467730137191843385 + >>> pi/2*hyp2f1(0.5,0.5,1,0.5) + 1.85407467730137191843385 + +Evaluation is supported for arbitrary complex `m`:: + + >>> ellipk(3+4j) + (0.9111955638049650086562171 + 0.6313342832413452438845091j) + +A definite integral:: + + >>> quad(ellipk, [0, 1]) + 2.0 +""" + +agm = r""" +``agm(a, b)`` computes the arithmetic-geometric mean of `a` and +`b`, defined as the limit of the following iteration: + +.. math :: + + a_0 = a + + b_0 = b + + a_{n+1} = \frac{a_n+b_n}{2} + + b_{n+1} = \sqrt{a_n b_n} + +This function can be called with a single argument, computing +`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`. + +**Examples** + +It is a well-known theorem that the geometric mean of +two distinct positive numbers is less than the arithmetic +mean. It follows that the arithmetic-geometric mean lies +between the two means:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> a = mpf(3) + >>> b = mpf(4) + >>> sqrt(a*b) + 3.46410161513775 + >>> agm(a,b) + 3.48202767635957 + >>> (a+b)/2 + 3.5 + +The arithmetic-geometric mean is scale-invariant:: + + >>> agm(10*e, 10*pi) + 29.261085515723 + >>> 10*agm(e, pi) + 29.261085515723 + +As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x` +for large `x`:: + + >>> agm(10**10) + 643448704.760133 + >>> agm(10**50) + 1.34814309345871e+48 + +For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`:: + + >>> agm('0.01') + 0.262166887202249 + >>> -pi/2/log('0.0025') + 0.262172347753122 + +The arithmetic-geometric mean can also be computed for complex +numbers:: + + >>> agm(3, 2+j) + (2.51055133276184 + 0.547394054060638j) + +The AGM iteration converges very quickly (each step doubles +the number of correct digits), so :func:`~mpmath.agm` supports efficient +high-precision evaluation:: + + >>> mp.dps = 10000 + >>> a = agm(1,2) + >>> str(a)[-10:] + '1679581912' + +**Mathematical relations** + +The arithmetic-geometric mean may be used to evaluate the +following two parametric definite integrals: + +.. math :: + + I_1 = \int_0^{\infty} + \frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx + + I_2 = \int_0^{\pi/2} + \frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx + +We have:: + + >>> mp.dps = 15 + >>> a = 3 + >>> b = 4 + >>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5 + >>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5 + >>> quad(f1, [0, inf]) + 0.451115405388492 + >>> quad(f2, [0, pi/2]) + 0.451115405388492 + >>> pi/(2*agm(a,b)) + 0.451115405388492 + +A formula for `\Gamma(1/4)`:: + + >>> gamma(0.25) + 3.62560990822191 + >>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))) + 3.62560990822191 + +**Possible issues** + +The branch cut chosen for complex `a` and `b` is somewhat +arbitrary. + +""" + +gegenbauer = r""" +Evaluates the Gegenbauer polynomial, or ultraspherical polynomial, + +.. math :: + + C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a; + a+\frac{1}{2}; \frac{1}{2}(1-z)\right). 
+
+When `n` is a nonnegative integer, this formula gives a polynomial
+in `z` of degree `n`, but all parameters are permitted to be
+complex numbers. With `a = 1/2`, the Gegenbauer polynomial
+reduces to a Legendre polynomial.
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> gegenbauer(3, 0.5, -10)
+    -2485.0
+    >>> gegenbauer(1000, 10, 100)
+    3.012757178975667428359374e+2322
+    >>> gegenbauer(2+3j, -0.75, -1000j)
+    (-5038991.358609026523401901 + 9414549.285447104177860806j)
+
+Evaluation at negative integer orders::
+
+    >>> gegenbauer(-4, 2, 1.75)
+    -1.0
+    >>> gegenbauer(-4, 3, 1.75)
+    0.0
+    >>> gegenbauer(-4, 2j, 1.75)
+    0.0
+    >>> gegenbauer(-7, 0.5, 3)
+    8989.0
+
+The Gegenbauer polynomials solve the differential equation::
+
+    >>> n, a = 4.5, 1+2j
+    >>> f = lambda z: gegenbauer(n, a, z)
+    >>> for z in [0, 0.75, -0.5j]:
+    ...     chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
+    ...
+    0.0
+    0.0
+    0.0
+
+The Gegenbauer polynomials have generating function
+`(1-2zt+t^2)^{-a}`::
+
+    >>> a, z = 2.5, 1
+    >>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
+    [1.0, 5.0, 15.0, 35.0]
+    >>> [gegenbauer(n,a,z) for n in range(4)]
+    [1.0, 5.0, 15.0, 35.0]
+
+The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
+to the weight `(1-z^2)^{a-\frac{1}{2}}`::
+
+    >>> a, n, m = 2.5, 4, 5
+    >>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
+    >>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
+    >>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
+    0.0
+"""
+
+laguerre = r"""
+Gives the generalized (associated) Laguerre polynomial, defined by
+
+.. math ::
+
+    L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
+        \,_1F_1(-n, a+1, z).
+
+With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
+Laguerre polynomial, the sequence of which begins
+`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = 1-2z+\frac{1}{2}z^2, \ldots`.
+
+The Laguerre polynomials are orthogonal with respect to the weight
+`z^a e^{-z}` on `[0, \infty)`.
+
+**Plots**
+
+.. literalinclude :: /plots/laguerre.py
+.. image :: /plots/laguerre.png
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> laguerre(5, 0, 0.25)
+    0.03726399739583333333333333
+    >>> laguerre(1+j, 0.5, 2+3j)
+    (4.474921610704496808379097 - 11.02058050372068958069241j)
+    >>> laguerre(2, 0, 10000)
+    49980001.0
+    >>> laguerre(2.5, 0, 10000)
+    -9.327764910194842158583189e+4328
+
+The first few Laguerre polynomials, normalized to have integer
+coefficients::
+
+    >>> for n in range(7):
+    ...     chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
+    ...
+    [1.0]
+    [1.0, -1.0]
+    [2.0, -4.0, 1.0]
+    [6.0, -18.0, 9.0, -1.0]
+    [24.0, -96.0, 72.0, -16.0, 1.0]
+    [120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
+    [720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
+
+Verifying orthogonality::
+
+    >>> Lm = lambda t: laguerre(m,a,t)
+    >>> Ln = lambda t: laguerre(n,a,t)
+    >>> a, n, m = 2.5, 2, 3
+    >>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
+    0.0
+
+
+"""
+
+hermite = r"""
+Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
+the recurrence
+
+.. math ::
+
+    H_0(z) = 1
+
+    H_1(z) = 2z
+
+    H_{n+1}(z) = 2z H_n(z) - 2n H_{n-1}(z).
+
+The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
+respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
+values of `n`, the Hermite function `H_n(z)` is defined as
+
+.. 
math :: + + H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2}, + -\frac{1}{z^2}\right) + +for `\Re{z} > 0`, or generally + +.. math :: + + H_n(z) = 2^n \sqrt{\pi} \left( + \frac{1}{\Gamma\left(\frac{1-n}{2}\right)} + \,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) - + \frac{2z}{\Gamma\left(-\frac{n}{2}\right)} + \,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right) + \right). + +**Plots** + +.. literalinclude :: /plots/hermite.py +.. image :: /plots/hermite.png + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hermite(0, 10) + 1.0 + >>> hermite(1, 10); hermite(2, 10) + 20.0 + 398.0 + >>> hermite(10000, 2) + 4.950440066552087387515653e+19334 + >>> hermite(3, -10**8) + -7999999999999998800000000.0 + >>> hermite(-3, -10**8) + 1.675159751729877682920301e+4342944819032534 + >>> hermite(2+3j, -1+2j) + (-0.07652130602993513389421901 - 0.1084662449961914580276007j) + +Coefficients of the first few Hermite polynomials are:: + + >>> for n in range(7): + ... chop(taylor(lambda z: hermite(n, z), 0, n)) + ... + [1.0] + [0.0, 2.0] + [-2.0, 0.0, 4.0] + [0.0, -12.0, 0.0, 8.0] + [12.0, 0.0, -48.0, 0.0, 16.0] + [0.0, 120.0, 0.0, -160.0, 0.0, 32.0] + [-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0] + +Values at `z = 0`:: + + >>> for n in range(-5, 9): + ... hermite(n, 0) + ... + 0.02769459142039868792653387 + 0.08333333333333333333333333 + 0.2215567313631895034122709 + 0.5 + 0.8862269254527580136490837 + 1.0 + 0.0 + -2.0 + 0.0 + 12.0 + 0.0 + -120.0 + 0.0 + 1680.0 + +Hermite functions satisfy the differential equation:: + + >>> n = 4 + >>> f = lambda z: hermite(n, z) + >>> z = 1.5 + >>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z)) + 0.0 + +Verifying orthogonality:: + + >>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf])) + 0.0 + +""" + +jacobi = r""" +``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial +`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special +case of the hypergeometric function `\,_2F_1` given by: + +.. math :: + + P_n^{(a,b)}(x) = {n+a \choose n} + \,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right). + +Note that this definition generalizes to nonintegral values +of `n`. When `n` is an integer, the hypergeometric series +terminates after a finite number of terms, giving +a polynomial in `x`. + +**Evaluation of Jacobi polynomials** + +A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> jacobi(4, 0.5, 0.25, 1) + 2.4609375 + >>> binomial(4+0.5, 4) + 2.4609375 + +A Jacobi polynomial of degree `n` is equal to its +Taylor polynomial of degree `n`. The explicit +coefficients of Jacobi polynomials can therefore +be recovered easily using :func:`~mpmath.taylor`:: + + >>> for n in range(5): + ... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n)) + ... + [1.0] + [-0.5, 2.5] + [-0.75, -1.5, 5.25] + [0.5, -3.5, -3.5, 10.5] + [0.625, 2.5, -11.25, -7.5, 20.625] + +For nonintegral `n`, the Jacobi "polynomial" is no longer +a polynomial:: + + >>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4)) + [0.309983, 1.84119, -1.26933, 1.26699, -1.34808] + +**Orthogonality** + +The Jacobi polynomials are orthogonal on the interval +`[-1, 1]` with respect to the weight function +`w(x) = (1-x)^a (1+x)^b`. That is, +`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to +zero if `m \ne n` and to a nonzero number if `m = n`. 
+
+The orthogonality is easy to verify using numerical
+quadrature::
+
+    >>> P = jacobi
+    >>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
+    >>> a = 2
+    >>> b = 3
+    >>> m, n = 3, 4
+    >>> chop(quad(f, [-1, 1]), 1)
+    0.0
+    >>> m, n = 4, 4
+    >>> quad(f, [-1, 1])
+    1.9047619047619
+
+**Differential equation**
+
+The Jacobi polynomials are solutions of the differential
+equation
+
+.. math ::
+
+    (1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
+
+We can verify that :func:`~mpmath.jacobi` approximately satisfies
+this equation::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> a = 2.5
+    >>> b = 4
+    >>> n = 3
+    >>> y = lambda x: jacobi(n,a,b,x)
+    >>> x = pi
+    >>> A0 = n*(n+a+b+1)*y(x)
+    >>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
+    >>> A2 = (1-x**2)*diff(y,x,2)
+    >>> nprint(A2 + A1 + A0, 1)
+    4.0e-12
+
+The difference of order `10^{-12}` is as close to zero as
+it could be at 15-digit working precision, since the terms
+are large::
+
+    >>> A0, A1, A2
+    (26560.2328981879, -21503.7641037294, -5056.46879445852)
+
+"""
+
+legendre = r"""
+``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
+The Legendre polynomials are given by the formula
+
+.. math ::
+
+    P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
+
+Alternatively, they can be computed recursively using
+
+.. math ::
+
+    P_0(x) = 1
+
+    P_1(x) = x
+
+    (n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
+
+A third definition is in terms of the hypergeometric function
+`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
+
+.. math ::
+
+    P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
+
+**Plots**
+
+.. literalinclude :: /plots/legendre.py
+.. image :: /plots/legendre.png
+
+**Basic evaluation**
+
+The Legendre polynomials assume fixed values at the points
+`x = -1` and `x = 1`::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> nprint([legendre(n, 1) for n in range(6)])
+    [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+    >>> nprint([legendre(n, -1) for n in range(6)])
+    [1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
+
+The coefficients of Legendre polynomials can be recovered
+using degree-`n` Taylor expansion::
+
+    >>> for n in range(5):
+    ...     nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
+    ...
+    [1.0]
+    [0.0, 1.0]
+    [-0.5, 0.0, 1.5]
+    [0.0, -1.5, 0.0, 2.5]
+    [0.375, 0.0, -3.75, 0.0, 4.375]
+
+The roots of Legendre polynomials are located symmetrically
+on the interval `[-1, 1]`::
+
+    >>> for n in range(5):
+    ...     nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
+    ...
+    []
+    [0.0]
+    [-0.57735, 0.57735]
+    [-0.774597, 0.0, 0.774597]
+    [-0.861136, -0.339981, 0.339981, 0.861136]
+
+An example of an evaluation for arbitrary `n`::
+
+    >>> legendre(0.75, 2+4j)
+    (1.94952805264875 + 2.1071073099422j)
+
+**Orthogonality**
+
+The Legendre polynomials are orthogonal on `[-1, 1]` with respect
+to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
+integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
+
+    >>> m, n = 3, 4
+    >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
+    0.0
+    >>> m, n = 4, 4
+    >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
+    0.222222222222222
+
+**Differential equation**
+
+The Legendre polynomials satisfy the differential equation
+
+.. math ::
+
+    ((1-x^2) y')' + n(n+1) y = 0. 
+ +We can verify this numerically:: + + >>> n = 3.6 + >>> x = 0.73 + >>> P = legendre + >>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x) + >>> B = n*(n+1)*P(n,x) + >>> nprint(A+B,1) + 9.0e-16 + +""" + + +legenp = r""" +Calculates the (associated) Legendre function of the first kind of +degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary +Legendre function of the first kind, `P_n(z)`. The parameters may be +complex numbers. + +In terms of the Gauss hypergeometric function, the (associated) Legendre +function is defined as + +.. math :: + + P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}} + \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). + +With *type=3* instead of *type=2*, the alternative +definition + +.. math :: + + \hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}} + \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). + +is used. These functions correspond respectively to ``LegendreP[n,m,2,z]`` +and ``LegendreP[n,m,3,z]`` in Mathematica. + +The general solution of the (associated) Legendre differential equation + +.. math :: + + (1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0 + +is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants +`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the +second kind as implemented by :func:`~mpmath.legenq`. + +**Examples** + +Evaluation for arbitrary parameters and arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> legenp(2, 0, 10); legendre(2, 10) + 149.5 + 149.5 + >>> legenp(-2, 0.5, 2.5) + (1.972260393822275434196053 - 1.972260393822275434196053j) + >>> legenp(2+3j, 1-j, -0.5+4j) + (-3.335677248386698208736542 - 5.663270217461022307645625j) + >>> chop(legenp(3, 2, -1.5, type=2)) + 28.125 + >>> chop(legenp(3, 2, -1.5, type=3)) + -28.125 + +Verifying the associated Legendre differential equation:: + + >>> n, m = 2, -0.5 + >>> C1, C2 = 1, -3 + >>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z) + >>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \ + ... (n*(n+1)-m**2/(1-z**2))*f(z) + >>> for z in [0, 2, -1.5, 0.5+2j]: + ... chop(deq(mpmathify(z))) + ... + 0.0 + 0.0 + 0.0 + 0.0 +""" + +legenq = r""" +Calculates the (associated) Legendre function of the second kind of +degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary +Legendre function of the second kind, `Q_n(z)`. The parameters may be +complex numbers. + +The Legendre functions of the second kind give a second set of +solutions to the (associated) Legendre differential equation. +(See :func:`~mpmath.legenp`.) +Unlike the Legendre functions of the first kind, they are not +polynomials of `z` for integer `n`, `m` but rational or logarithmic +functions with poles at `z = \pm 1`. + +There are various ways to define Legendre functions of +the second kind, giving rise to different complex structure. +A version can be selected using the *type* keyword argument. +The *type=2* and *type=3* functions are given respectively by + +.. math :: + + Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)} + \left( \cos(\pi m) P_n^m(z) - + \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right) + + \hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m} + \left( \hat{P}_n^m(z) - + \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right) + +where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions +of the first kind. The formulas above should be understood as limits +when `m` is an integer. 
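+
+For non-integer `m`, the *type=2* formula above can be checked by
+simply evaluating both of its sides (a small sketch, using only
+:func:`~mpmath.legenp` and :func:`~mpmath.legenq` as documented here)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15
+    >>> n, m, z = mpf('1.5'), mpf('0.25'), mpf('0.3')
+    >>> lhs = legenq(n, m, z)
+    >>> rhs = pi/(2*sin(pi*m))*(cos(pi*m)*legenp(n, m, z) -
+    ...     gamma(1+m+n)/gamma(1-m+n)*legenp(n, -m, z))
+    >>> abs(lhs - rhs) < 1e-10
+    True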
+
+These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
+and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
+is essentially the same as the function defined in
+Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
+of `(z^2-1)^{m/2}`, giving slightly different branches.
+
+**Examples**
+
+Evaluation for arbitrary parameters and arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> legenq(2, 0, 0.5)
+    -0.8186632680417568557122028
+    >>> legenq(-1.5, -2, 2.5)
+    (0.6655964618250228714288277 + 0.3937692045497259717762649j)
+    >>> legenq(2-j, 3+4j, -6+5j)
+    (-10001.95256487468541686564 - 6011.691337610097577791134j)
+
+Different versions of the function::
+
+    >>> legenq(2, 1, 0.5)
+    0.7298060598018049369381857
+    >>> legenq(2, 1, 1.5)
+    (-7.902916572420817192300921 + 0.1998650072605976600724502j)
+    >>> legenq(2, 1, 0.5, type=3)
+    (2.040524284763495081918338 - 0.7298060598018049369381857j)
+    >>> chop(legenq(2, 1, 1.5, type=3))
+    -0.1998650072605976600724502
+
+"""
+
+chebyt = r"""
+``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
+kind `T_n(x)`, defined by the identity
+
+.. math ::
+
+    T_n(\cos x) = \cos(n x).
+
+The Chebyshev polynomials of the first kind are a special
+case of the Jacobi polynomials, and by extension of the
+hypergeometric function `\,_2F_1`. They can thus also be
+evaluated for nonintegral `n`.
+
+**Plots**
+
+.. literalinclude :: /plots/chebyt.py
+.. image :: /plots/chebyt.png
+
+**Basic evaluation**
+
+The coefficients of the `n`-th polynomial can be recovered
+using degree-`n` Taylor expansion::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for n in range(5):
+    ...     nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
+    ...
+    [1.0]
+    [0.0, 1.0]
+    [-1.0, 0.0, 2.0]
+    [0.0, -3.0, 0.0, 4.0]
+    [1.0, 0.0, -8.0, 0.0, 8.0]
+
+**Orthogonality**
+
+The Chebyshev polynomials of the first kind are orthogonal
+on the interval `[-1, 1]` with respect to the weight
+function `w(x) = 1/\sqrt{1-x^2}`::
+
+    >>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
+    >>> m, n = 3, 4
+    >>> nprint(quad(f, [-1, 1]),1)
+    0.0
+    >>> m, n = 4, 4
+    >>> quad(f, [-1, 1])
+    1.57079632596448
+
+"""
+
+chebyu = r"""
+``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
+kind `U_n(x)`, defined by the identity
+
+.. math ::
+
+    U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
+
+The Chebyshev polynomials of the second kind are a special
+case of the Jacobi polynomials, and by extension of the
+hypergeometric function `\,_2F_1`. They can thus also be
+evaluated for nonintegral `n`.
+
+**Plots**
+
+.. literalinclude :: /plots/chebyu.py
+.. image :: /plots/chebyu.png
+
+**Basic evaluation**
+
+The coefficients of the `n`-th polynomial can be recovered
+using degree-`n` Taylor expansion::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for n in range(5):
+    ...     nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
+    ...
+    [1.0]
+    [0.0, 2.0]
+    [-1.0, 0.0, 4.0]
+    [0.0, -4.0, 0.0, 8.0]
+    [1.0, 0.0, -12.0, 0.0, 16.0]
+
+**Orthogonality**
+
+The Chebyshev polynomials of the second kind are orthogonal
+on the interval `[-1, 1]` with respect to the weight
+function `w(x) = \sqrt{1-x^2}`::
+
+    >>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
+    >>> m, n = 3, 4
+    >>> quad(f, [-1, 1])
+    0.0
+    >>> m, n = 4, 4
+    >>> quad(f, [-1, 1])
+    1.5707963267949
+"""
+
+besselj = r"""
+``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
+`J_n(x)`. 
Bessel functions of the first kind are defined as
+solutions of the differential equation
+
+.. math ::
+
+    x^2 y'' + x y' + (x^2 - n^2) y = 0
+
+which appears, among other things, when solving the radial
+part of Laplace's equation in cylindrical coordinates. This
+equation has two solutions for given `n`, where the
+`J_n`-function is the solution that is nonsingular at `x = 0`.
+For positive integer `n`, `J_n(x)` behaves roughly like a sine
+(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
+that decays slowly as `x \to \pm\infty`.
+
+Generally, `J_n` is a special case of the hypergeometric
+function `\,_0F_1`:
+
+.. math ::
+
+    J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
+             \,_0F_1\left(n+1,-\frac{x^2}{4}\right)
+
+With *derivative* = `m \ne 0`, the `m`-th derivative
+
+.. math ::
+
+    \frac{d^m}{dx^m} J_n(x)
+
+is computed.
+
+**Plots**
+
+.. literalinclude :: /plots/besselj.py
+.. image :: /plots/besselj.png
+.. literalinclude :: /plots/besselj_c.py
+.. image :: /plots/besselj_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary arguments, and at
+arbitrary precision::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> besselj(2, 1000)
+    -0.024777229528606
+    >>> besselj(4, 0.75)
+    0.000801070086542314
+    >>> besselj(2, 1000j)
+    (-2.48071721019185e+432 + 6.41567059811949e-437j)
+    >>> mp.dps = 25
+    >>> besselj(0.75j, 3+4j)
+    (-2.778118364828153309919653 - 1.5863603889018621585533j)
+    >>> mp.dps = 50
+    >>> besselj(1, pi)
+    0.28461534317975275734531059968613140570981118184947
+
+Arguments may be large::
+
+    >>> mp.dps = 25
+    >>> besselj(0, 10000)
+    -0.007096160353388801477265164
+    >>> besselj(0, 10**10)
+    0.000002175591750246891726859055
+    >>> besselj(2, 10**100)
+    7.337048736538615712436929e-51
+    >>> besselj(2, 10**5*j)
+    (-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
+
+The Bessel functions of the first kind satisfy simple
+symmetries around `x = 0`::
+
+    >>> mp.dps = 15
+    >>> nprint([besselj(n,0) for n in range(5)])
+    [1.0, 0.0, 0.0, 0.0, 0.0]
+    >>> nprint([besselj(n,pi) for n in range(5)])
+    [-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
+    >>> nprint([besselj(n,-pi) for n in range(5)])
+    [-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
+
+Roots of Bessel functions frequently appear in applications::
+
+    >>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
+    [2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
+    >>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
+    [3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
+
+The roots are not periodic, but the distance between successive
+roots asymptotically approaches `\pi`, half the asymptotic period
+`2 \pi` of the oscillation. 
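+
+The spacing can be examined numerically. A quick sketch (plain
+Python rather than one of the doctests above, using only functions
+already shown) lists the gaps between successive roots of `J_0`::
+
+    # Sketch: the gaps between successive roots of J_0 tend to pi.
+    from mpmath import mp, findroot, j0, pi
+
+    mp.dps = 15
+    roots = [findroot(j0, guess) for guess in (2, 5, 8, 11, 14)]
+    gaps = [roots[k+1] - roots[k] for k in range(len(roots) - 1)]
+    print(gaps)   # each gap is close to pi = 3.14159...
+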
Bessel functions of +the first kind have the following normalization:: + + >>> quadosc(j0, [0, inf], period=2*pi) + 1.0 + >>> quadosc(j1, [0, inf], period=2*pi) + 1.0 + +For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a +trigonometric function:: + + >>> x = 10 + >>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x) + (-0.13726373575505, -0.13726373575505) + >>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x) + (-0.211708866331398, -0.211708866331398) + +Derivatives of any order can be computed (negative orders +correspond to integration):: + + >>> mp.dps = 25 + >>> besselj(0, 7.5, 1) + -0.1352484275797055051822405 + >>> diff(lambda x: besselj(0,x), 7.5) + -0.1352484275797055051822405 + >>> besselj(0, 7.5, 10) + -0.1377811164763244890135677 + >>> diff(lambda x: besselj(0,x), 7.5, 10) + -0.1377811164763244890135677 + >>> besselj(0,7.5,-1) - besselj(0,3.5,-1) + -0.1241343240399987693521378 + >>> quad(j0, [3.5, 7.5]) + -0.1241343240399987693521378 + +Differentiation with a noninteger order gives the fractional derivative +in the sense of the Riemann-Liouville differintegral, as computed by +:func:`~mpmath.differint`:: + + >>> mp.dps = 15 + >>> besselj(1, 3.5, 0.75) + -0.385977722939384 + >>> differint(lambda x: besselj(1, x), 3.5, 0.75) + -0.385977722939384 + +""" + +besseli = r""" +``besseli(n, x, derivative=0)`` gives the modified Bessel function of the +first kind, + +.. math :: + + I_n(x) = i^{-n} J_n(ix). + +With *derivative* = `m \ne 0`, the `m`-th derivative + +.. math :: + + \frac{d^m}{dx^m} I_n(x) + +is computed. + +**Plots** + +.. literalinclude :: /plots/besseli.py +.. image :: /plots/besseli.png +.. literalinclude :: /plots/besseli_c.py +.. image :: /plots/besseli_c.png + +**Examples** + +Some values of `I_n(x)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besseli(0,0) + 1.0 + >>> besseli(1,0) + 0.0 + >>> besseli(0,1) + 1.266065877752008335598245 + >>> besseli(3.5, 2+3j) + (-0.2904369752642538144289025 - 0.4469098397654815837307006j) + +Arguments may be large:: + + >>> besseli(2, 1000) + 2.480717210191852440616782e+432 + >>> besseli(2, 10**10) + 4.299602851624027900335391e+4342944813 + >>> besseli(2, 6000+10000j) + (-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j) + +For integers `n`, the following integral representation holds:: + + >>> mp.dps = 15 + >>> n = 3 + >>> x = 2.3 + >>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi + 0.349223221159309 + >>> besseli(n,x) + 0.349223221159309 + +Derivatives and antiderivatives of any order can be computed:: + + >>> mp.dps = 25 + >>> besseli(2, 7.5, 1) + 195.8229038931399062565883 + >>> diff(lambda x: besseli(2,x), 7.5) + 195.8229038931399062565883 + >>> besseli(2, 7.5, 10) + 153.3296508971734525525176 + >>> diff(lambda x: besseli(2,x), 7.5, 10) + 153.3296508971734525525176 + >>> besseli(2,7.5,-1) - besseli(2,3.5,-1) + 202.5043900051930141956876 + >>> quad(lambda x: besseli(2,x), [3.5, 7.5]) + 202.5043900051930141956876 + +""" + +bessely = r""" +``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind, + +.. math :: + + Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}. + +For `n` an integer, this formula should be understood as a +limit. With *derivative* = `m \ne 0`, the `m`-th derivative + +.. math :: + + \frac{d^m}{dx^m} Y_n(x) + +is computed. + +**Plots** + +.. literalinclude :: /plots/bessely.py +.. image :: /plots/bessely.png +.. literalinclude :: /plots/bessely_c.py +.. 
image :: /plots/bessely_c.png + +**Examples** + +Some values of `Y_n(x)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> bessely(0,0), bessely(1,0), bessely(2,0) + (-inf, -inf, -inf) + >>> bessely(1, pi) + 0.3588729167767189594679827 + >>> bessely(0.5, 3+4j) + (9.242861436961450520325216 - 3.085042824915332562522402j) + +Arguments may be large:: + + >>> bessely(0, 10000) + 0.00364780555898660588668872 + >>> bessely(2.5, 10**50) + -4.8952500412050989295774e-26 + >>> bessely(2.5, -10**50) + (0.0 + 4.8952500412050989295774e-26j) + +Derivatives and antiderivatives of any order can be computed:: + + >>> bessely(2, 3.5, 1) + 0.3842618820422660066089231 + >>> diff(lambda x: bessely(2, x), 3.5) + 0.3842618820422660066089231 + >>> bessely(0.5, 3.5, 1) + -0.2066598304156764337900417 + >>> diff(lambda x: bessely(0.5, x), 3.5) + -0.2066598304156764337900417 + >>> diff(lambda x: bessely(2, x), 0.5, 10) + -208173867409.5547350101511 + >>> bessely(2, 0.5, 10) + -208173867409.5547350101511 + >>> bessely(2, 100.5, 100) + 0.02668487547301372334849043 + >>> quad(lambda x: bessely(2,x), [1,3]) + -1.377046859093181969213262 + >>> bessely(2,3,-1) - bessely(2,1,-1) + -1.377046859093181969213262 + +""" + +besselk = r""" +``besselk(n, x)`` gives the modified Bessel function of the +second kind, + +.. math :: + + K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)} + +For `n` an integer, this formula should be understood as a +limit. + +**Plots** + +.. literalinclude :: /plots/besselk.py +.. image :: /plots/besselk.png +.. literalinclude :: /plots/besselk_c.py +.. image :: /plots/besselk_c.png + +**Examples** + +Evaluation is supported for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besselk(0,1) + 0.4210244382407083333356274 + >>> besselk(0, -1) + (0.4210244382407083333356274 - 3.97746326050642263725661j) + >>> besselk(3.5, 2+3j) + (-0.02090732889633760668464128 + 0.2464022641351420167819697j) + >>> besselk(2+3j, 0.5) + (0.9615816021726349402626083 + 0.1918250181801757416908224j) + +Arguments may be large:: + + >>> besselk(0, 100) + 4.656628229175902018939005e-45 + >>> besselk(1, 10**6) + 4.131967049321725588398296e-434298 + >>> besselk(1, 10**6*j) + (0.001140348428252385844876706 - 0.0005200017201681152909000961j) + >>> besselk(4.5, fmul(10**50, j, exact=True)) + (1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j) + +The point `x = 0` is a singularity (logarithmic if `n = 0`):: + + >>> besselk(0,0) + +inf + >>> besselk(1,0) + +inf + >>> for n in range(-4, 5): + ... print(besselk(n, '1e-1000')) + ... + 4.8e+4001 + 8.0e+3000 + 2.0e+2000 + 1.0e+1000 + 2302.701024509704096466802 + 1.0e+1000 + 2.0e+2000 + 8.0e+3000 + 4.8e+4001 + +""" + +hankel1 = r""" +``hankel1(n,x)`` computes the Hankel function of the first kind, +which is the complex combination of Bessel functions given by + +.. math :: + + H_n^{(1)}(x) = J_n(x) + i Y_n(x). + +**Plots** + +.. literalinclude :: /plots/hankel1.py +.. image :: /plots/hankel1.png +.. literalinclude :: /plots/hankel1_c.py +.. 
image :: /plots/hankel1_c.png + +**Examples** + +The Hankel function is generally complex-valued:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hankel1(2, pi) + (0.4854339326315091097054957 - 0.0999007139290278787734903j) + >>> hankel1(3.5, pi) + (0.2340002029630507922628888 - 0.6419643823412927142424049j) +""" + +hankel2 = r""" +``hankel2(n,x)`` computes the Hankel function of the second kind, +which is the complex combination of Bessel functions given by + +.. math :: + + H_n^{(2)}(x) = J_n(x) - i Y_n(x). + +**Plots** + +.. literalinclude :: /plots/hankel2.py +.. image :: /plots/hankel2.png +.. literalinclude :: /plots/hankel2_c.py +.. image :: /plots/hankel2_c.png + +**Examples** + +The Hankel function is generally complex-valued:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hankel2(2, pi) + (0.4854339326315091097054957 + 0.0999007139290278787734903j) + >>> hankel2(3.5, pi) + (0.2340002029630507922628888 + 0.6419643823412927142424049j) +""" + +lambertw = r""" +The Lambert W function `W(z)` is defined as the inverse function +of `w \exp(w)`. In other words, the value of `W(z)` is such that +`z = W(z) \exp(W(z))` for any complex number `z`. + +The Lambert W function is a multivalued function with infinitely +many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch +gives a different solution `w` of the equation `z = w \exp(w)`. +All branches are supported by :func:`~mpmath.lambertw`: + +* ``lambertw(z)`` gives the principal solution (branch 0) + +* ``lambertw(z, k)`` gives the solution on branch `k` + +The Lambert W function has two partially real branches: the +principal branch (`k = 0`) is real for real `z > -1/e`, and the +`k = -1` branch is real for `-1/e < z < 0`. All branches except +`k = 0` have a logarithmic singularity at `z = 0`. + +The definition, implementation and choice of branches +is based on [Corless]_. + +**Plots** + +.. literalinclude :: /plots/lambertw.py +.. image :: /plots/lambertw.png +.. literalinclude :: /plots/lambertw_c.py +.. image :: /plots/lambertw_c.png + +**Basic examples** + +The Lambert W function is the inverse of `w \exp(w)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> w = lambertw(1) + >>> w + 0.5671432904097838729999687 + >>> w*exp(w) + 1.0 + +Any branch gives a valid inverse:: + + >>> w = lambertw(1, k=3) + >>> w + (-2.853581755409037807206819 + 17.11353553941214591260783j) + >>> w = lambertw(1, k=25) + >>> w + (-5.047020464221569709378686 + 155.4763860949415867162066j) + >>> chop(w*exp(w)) + 1.0 + +**Applications to equation-solving** + +The Lambert W function may be used to solve various kinds of +equations, such as finding the value of the infinite power +tower `z^{z^{z^{\ldots}}}`:: + + >>> def tower(z, n): + ... if n == 0: + ... return z + ... return z ** tower(z, n-1) + ... 
+
+    >>> tower(mpf(0.5), 100)
+    0.6411857445049859844862005
+    >>> -lambertw(-log(0.5))/log(0.5)
+    0.6411857445049859844862005
+
+**Properties**
+
+The Lambert W function grows roughly like the natural logarithm
+for large arguments::
+
+    >>> lambertw(1000); log(1000)
+    5.249602852401596227126056
+    6.907755278982137052053974
+    >>> lambertw(10**100); log(10**100)
+    224.8431064451185015393731
+    230.2585092994045684017991
+
+The principal branch of the Lambert W function has a rational
+Taylor series expansion around `z = 0`::
+
+    >>> nprint(taylor(lambertw, 0, 6), 10)
+    [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
+
+Some special values and limits are::
+
+    >>> lambertw(0)
+    0.0
+    >>> lambertw(1)
+    0.5671432904097838729999687
+    >>> lambertw(e)
+    1.0
+    >>> lambertw(inf)
+    +inf
+    >>> lambertw(0, k=-1)
+    -inf
+    >>> lambertw(0, k=3)
+    -inf
+    >>> lambertw(inf, k=2)
+    (+inf + 12.56637061435917295385057j)
+    >>> lambertw(inf, k=3)
+    (+inf + 18.84955592153875943077586j)
+    >>> lambertw(-inf, k=3)
+    (+inf + 21.9911485751285526692385j)
+
+The `k = 0` and `k = -1` branches join at `z = -1/e` where
+`W(z) = -1` for both branches. Since `-1/e` can only be represented
+approximately with binary floating-point numbers, evaluating the
+Lambert W function at this point only gives `-1` approximately::
+
+    >>> lambertw(-1/e, 0)
+    -0.9999999999998371330228251
+    >>> lambertw(-1/e, -1)
+    -1.000000000000162866977175
+
+If `-1/e` happens to round in the negative direction, there might be
+a small imaginary part::
+
+    >>> mp.dps = 15
+    >>> lambertw(-1/e)
+    (-1.0 + 8.22007971483662e-9j)
+    >>> lambertw(-1/e+eps)
+    -0.999999966242188
+
+**References**
+
+1. [Corless]_
+"""
+
+barnesg = r"""
+Evaluates the Barnes G-function, which generalizes the
+superfactorial (:func:`~mpmath.superfac`) and by extension also the
+hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
+in an analogous way to how the gamma function generalizes
+the ordinary factorial.
+
+The Barnes G-function may be defined in terms of a Weierstrass
+product:
+
+.. math ::
+
+    G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
+    \prod_{n=1}^\infty
+    \left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
+
+For positive integers `n`, we have the relation to superfactorials
+`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
+
+**Examples**
+
+Some elementary values and limits of the Barnes G-function::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> barnesg(1), barnesg(2), barnesg(3)
+    (1.0, 1.0, 1.0)
+    >>> barnesg(4)
+    2.0
+    >>> barnesg(5)
+    12.0
+    >>> barnesg(6)
+    288.0
+    >>> barnesg(7)
+    34560.0
+    >>> barnesg(8)
+    24883200.0
+    >>> barnesg(inf)
+    +inf
+    >>> barnesg(0), barnesg(-1), barnesg(-2)
+    (0.0, 0.0, 0.0)
+
+Closed-form values are known for some rational arguments::
+
+    >>> barnesg('1/2')
+    0.603244281209446
+    >>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
+    0.603244281209446
+    >>> barnesg('1/4')
+    0.29375596533861
+    >>> nthroot(exp('3/8')/exp(catalan/pi)/
+    ...      gamma(0.25)**3/sqrt(glaisher)**9, 4)
+    0.29375596533861
+
+The Barnes G-function satisfies the functional equation
+`G(z+1) = \Gamma(z) G(z)`::
+
+    >>> z = pi
+    >>> barnesg(z+1)
+    2.39292119327948
+    >>> gamma(z)*barnesg(z)
+    2.39292119327948
+
+The asymptotic growth rate of the Barnes G-function is related to
+the Glaisher-Kinkelin constant::
+
+    >>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
+    ...     
(2*pi)**(n/2)*exp(-3*n**2/4)), inf) + 0.847536694177301 + >>> exp('1/12')/glaisher + 0.847536694177301 + +The Barnes G-function can be differentiated in closed form:: + + >>> z = 3 + >>> diff(barnesg, z) + 0.264507203401607 + >>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2) + 0.264507203401607 + +Evaluation is supported for arbitrary arguments and at arbitrary +precision:: + + >>> barnesg(6.5) + 2548.7457695685 + >>> barnesg(-pi) + 0.00535976768353037 + >>> barnesg(3+4j) + (-0.000676375932234244 - 4.42236140124728e-5j) + >>> mp.dps = 50 + >>> barnesg(1/sqrt(2)) + 0.81305501090451340843586085064413533788206204124732 + >>> q = barnesg(10j) + >>> q.real + 0.000000000021852360840356557241543036724799812371995850552234 + >>> q.imag + -0.00000000000070035335320062304849020654215545839053210041457588 + >>> mp.dps = 15 + >>> barnesg(100) + 3.10361006263698e+6626 + >>> barnesg(-101) + 0.0 + >>> barnesg(-10.5) + 5.94463017605008e+25 + >>> barnesg(-10000.5) + -6.14322868174828e+167480422 + >>> barnesg(1000j) + (5.21133054865546e-1173597 + 4.27461836811016e-1173597j) + >>> barnesg(-1000+1000j) + (2.43114569750291e+1026623 + 2.24851410674842e+1026623j) + + +**References** + +1. Whittaker & Watson, *A Course of Modern Analysis*, + Cambridge University Press, 4th edition (1927), p.264 +2. http://en.wikipedia.org/wiki/Barnes_G-function +3. http://mathworld.wolfram.com/BarnesG-Function.html + +""" + +superfac = r""" +Computes the superfactorial, defined as the product of +consecutive factorials + +.. math :: + + \mathrm{sf}(n) = \prod_{k=1}^n k! + +For general complex `z`, `\mathrm{sf}(z)` is defined +in terms of the Barnes G-function (see :func:`~mpmath.barnesg`). + +**Examples** + +The first few superfactorials are (OEIS A000178):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(10): + ... print("%s %s" % (n, superfac(n))) + ... + 0 1.0 + 1 1.0 + 2 2.0 + 3 12.0 + 4 288.0 + 5 34560.0 + 6 24883200.0 + 7 125411328000.0 + 8 5.05658474496e+15 + 9 1.83493347225108e+21 + +Superfactorials grow very rapidly:: + + >>> superfac(1000) + 3.24570818422368e+1177245 + >>> superfac(10**10) + 2.61398543581249e+467427913956904067453 + +Evaluation is supported for arbitrary arguments:: + + >>> mp.dps = 25 + >>> superfac(pi) + 17.20051550121297985285333 + >>> superfac(2+3j) + (-0.005915485633199789627466468 + 0.008156449464604044948738263j) + >>> diff(superfac, 1) + 0.2645072034016070205673056 + +**References** + +1. http://oeis.org/A000178 + +""" + + +hyperfac = r""" +Computes the hyperfactorial, defined for integers as the product + +.. math :: + + H(n) = \prod_{k=1}^n k^k. + + +The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`. +It can be defined more generally in terms of the Barnes G-function (see +:func:`~mpmath.barnesg`) and the gamma function by the formula + +.. math :: + + H(z) = \frac{\Gamma(z+1)^z}{G(z)}. + +The extension to complex numbers can also be done via +the integral representation + +.. math :: + + H(z) = (2\pi)^{-z/2} \exp \left[ + {z+1 \choose 2} + \int_0^z \log(t!)\,dt + \right]. + +**Examples** + +The rapidly-growing sequence of hyperfactorials begins +(OEIS A002109):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(10): + ... print("%s %s" % (n, hyperfac(n))) + ... 
+ 0 1.0 + 1 1.0 + 2 4.0 + 3 108.0 + 4 27648.0 + 5 86400000.0 + 6 4031078400000.0 + 7 3.3197663987712e+18 + 8 5.56964379417266e+25 + 9 2.15779412229419e+34 + +Some even larger hyperfactorials are:: + + >>> hyperfac(1000) + 5.46458120882585e+1392926 + >>> hyperfac(10**10) + 4.60408207642219e+489142638002418704309 + +The hyperfactorial can be evaluated for arbitrary arguments:: + + >>> hyperfac(0.5) + 0.880449235173423 + >>> diff(hyperfac, 1) + 0.581061466795327 + >>> hyperfac(pi) + 205.211134637462 + >>> hyperfac(-10+1j) + (3.01144471378225e+46 - 2.45285242480185e+46j) + +The recurrence property of the hyperfactorial holds +generally:: + + >>> z = 3-4*j + >>> hyperfac(z) + (-4.49795891462086e-7 - 6.33262283196162e-7j) + >>> z**z * hyperfac(z-1) + (-4.49795891462086e-7 - 6.33262283196162e-7j) + >>> z = mpf(-0.6) + >>> chop(z**z * hyperfac(z-1)) + 1.28170142849352 + >>> hyperfac(z) + 1.28170142849352 + +The hyperfactorial may also be computed using the integral +definition:: + + >>> z = 2.5 + >>> hyperfac(z) + 15.9842119922237 + >>> (2*pi)**(-z/2)*exp(binomial(z+1,2) + + ... quad(lambda t: loggamma(t+1), [0, z])) + 15.9842119922237 + +:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation:: + + >>> mp.dps = 50 + >>> hyperfac(10) + 215779412229418562091680268288000000000000000.0 + >>> hyperfac(1/sqrt(2)) + 0.89404818005227001975423476035729076375705084390942 + +**References** + +1. http://oeis.org/A002109 +2. http://mathworld.wolfram.com/Hyperfactorial.html + +""" + +rgamma = r""" +Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This +function evaluates to zero at the poles +of the gamma function, `z = 0, -1, -2, \ldots`. + +**Examples** + +Basic examples:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> rgamma(1) + 1.0 + >>> rgamma(4) + 0.1666666666666666666666667 + >>> rgamma(0); rgamma(-1) + 0.0 + 0.0 + >>> rgamma(1000) + 2.485168143266784862783596e-2565 + >>> rgamma(inf) + 0.0 + +A definite integral that can be evaluated in terms of elementary +integrals:: + + >>> quad(rgamma, [0,inf]) + 2.807770242028519365221501 + >>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf]) + 2.807770242028519365221501 +""" + +loggamma = r""" +Computes the principal branch of the log-gamma function, +`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many +complex branch cuts, the principal log-gamma function only has a single +branch cut along the negative half-axis. The principal branch +continuously matches the asymptotic Stirling expansion + +.. math :: + + \ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} + + \left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}). + +The real parts of both functions agree, but their imaginary +parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`. +They coincide for `z \in \mathbb{R}, z > 0`. + +Computationally, it is advantageous to use :func:`~mpmath.loggamma` +instead of :func:`~mpmath.gamma` for extremely large arguments. 
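+
+The branch structure is easy to check numerically: for fixed `z`, the
+imaginary parts of `\ln \Gamma(z)` and `\ln(\Gamma(z))` differ by an
+exact integer multiple of `2 \pi`. A quick sketch (plain Python, not
+one of the doctests below)::
+
+    # Sketch: the imaginary parts differ by an integer times 2*pi.
+    from mpmath import mp, mpc, loggamma, gamma, log, pi
+
+    mp.dps = 25
+    z = mpc(3, 4)
+    delta = loggamma(z).imag - log(gamma(z)).imag
+    print(delta / (2*pi))   # 1.0 for this z: exactly one winding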
+ +**Examples** + +Comparing with `\ln(\Gamma(z))`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> loggamma('13.2'); log(gamma('13.2')) + 20.49400419456603678498394 + 20.49400419456603678498394 + >>> loggamma(3+4j) + (-1.756626784603784110530604 + 4.742664438034657928194889j) + >>> log(gamma(3+4j)) + (-1.756626784603784110530604 - 1.540520869144928548730397j) + >>> log(gamma(3+4j)) + 2*pi*j + (-1.756626784603784110530604 + 4.742664438034657928194889j) + +Note the imaginary parts for negative arguments:: + + >>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5) + (1.265512123484645396488946 - 3.141592653589793238462643j) + (0.8600470153764810145109327 - 6.283185307179586476925287j) + (-0.05624371649767405067259453 - 9.42477796076937971538793j) + +Some special values:: + + >>> loggamma(1); loggamma(2) + 0.0 + 0.0 + >>> loggamma(3); +ln2 + 0.6931471805599453094172321 + 0.6931471805599453094172321 + >>> loggamma(3.5); log(15*sqrt(pi)/8) + 1.200973602347074224816022 + 1.200973602347074224816022 + >>> loggamma(inf) + +inf + +Huge arguments are permitted:: + + >>> loggamma('1e30') + 6.807755278982137052053974e+31 + >>> loggamma('1e300') + 6.897755278982137052053974e+302 + >>> loggamma('1e3000') + 6.906755278982137052053974e+3003 + >>> loggamma('1e100000000000000000000') + 2.302585092994045684007991e+100000000000000000020 + >>> loggamma('1e30j') + (-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j) + >>> loggamma('1e300j') + (-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j) + >>> loggamma('1e3000j') + (-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j) + +The log-gamma function can be integrated analytically +on any interval of unit length:: + + >>> z = 0 + >>> quad(loggamma, [z,z+1]); log(2*pi)/2 + 0.9189385332046727417803297 + 0.9189385332046727417803297 + >>> z = 3+4j + >>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2 + (-0.9619286014994750641314421 + 5.219637303741238195688575j) + (-0.9619286014994750641314421 + 5.219637303741238195688575j) + +The derivatives of the log-gamma function are given by the +polygamma function (:func:`~mpmath.psi`):: + + >>> diff(loggamma, -4+3j); psi(0, -4+3j) + (1.688493531222971393607153 + 2.554898911356806978892748j) + (1.688493531222971393607153 + 2.554898911356806978892748j) + >>> diff(loggamma, -4+3j, 2); psi(1, -4+3j) + (-0.1539414829219882371561038 - 0.1020485197430267719746479j) + (-0.1539414829219882371561038 - 0.1020485197430267719746479j) + +The log-gamma function satisfies an additive form of the +recurrence relation for the ordinary gamma function:: + + >>> z = 2+3j + >>> loggamma(z); loggamma(z+1) - log(z) + (-2.092851753092733349564189 + 2.302396543466867626153708j) + (-2.092851753092733349564189 + 2.302396543466867626153708j) + +""" + +siegeltheta = r""" +Computes the Riemann-Siegel theta function, + +.. math :: + + \theta(t) = \frac{ + \log\Gamma\left(\frac{1+2it}{4}\right) - + \log\Gamma\left(\frac{1-2it}{4}\right) + }{2i} - \frac{\log \pi}{2} t. + +The Riemann-Siegel theta function is important in +providing the phase factor for the Z-function +(see :func:`~mpmath.siegelz`). 
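+
+The definition can be checked directly against :func:`~mpmath.loggamma`;
+the following quick sketch (plain Python, not one of the doctests below)
+evaluates both sides of the formula above::
+
+    # Sketch: compare siegeltheta(t) with its loggamma-based definition.
+    from mpmath import mp, mpf, siegeltheta, loggamma, log, pi, chop
+
+    mp.dps = 25
+    t = mpf(10)
+    direct = siegeltheta(t)
+    defn = (loggamma((1 + 2j*t)/4) - loggamma((1 - 2j*t)/4))/(2j) - log(pi)/2*t
+    print(direct, chop(defn))   # the two values agree
+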
Evaluation is supported for real and
+complex arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> siegeltheta(0)
+    0.0
+    >>> siegeltheta(inf)
+    +inf
+    >>> siegeltheta(-inf)
+    -inf
+    >>> siegeltheta(1)
+    -1.767547952812290388302216
+    >>> siegeltheta(10+0.25j)
+    (-3.068638039426838572528867 + 0.05804937947429712998395177j)
+
+Arbitrary derivatives may be computed with the optional *derivative*
+keyword argument::
+
+    >>> siegeltheta(1234, derivative=2)
+    0.0004051864079114053109473741
+    >>> diff(siegeltheta, 1234, n=2)
+    0.0004051864079114053109473741
+
+The Riemann-Siegel theta function has odd symmetry around `t = 0`,
+two local extrema and three real roots including 0 (located
+symmetrically)::
+
+    >>> nprint(chop(taylor(siegeltheta, 0, 5)))
+    [0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
+    >>> findroot(diffun(siegeltheta), 7)
+    6.28983598883690277966509
+    >>> findroot(siegeltheta, 20)
+    17.84559954041086081682634
+
+For large `t`, there is a famous asymptotic formula
+for `\theta(t)`, to first order given by::
+
+    >>> t = mpf(10**6)
+    >>> siegeltheta(t)
+    5488816.353078403444882823
+    >>> -t*log(2*pi/t)/2-t/2
+    5488816.745777464310273645
+"""
+
+grampoint = r"""
+Gives the `n`-th Gram point `g_n`, defined as the solution
+to the equation `\theta(g_n) = \pi n` where `\theta(t)`
+is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
+
+The first few Gram points are::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> grampoint(0)
+    17.84559954041086081682634
+    >>> grampoint(1)
+    23.17028270124630927899664
+    >>> grampoint(2)
+    27.67018221781633796093849
+    >>> grampoint(3)
+    31.71797995476405317955149
+
+Checking the definition::
+
+    >>> siegeltheta(grampoint(3))
+    9.42477796076937971538793
+    >>> 3*pi
+    9.42477796076937971538793
+
+A large Gram point::
+
+    >>> grampoint(10**10)
+    3293531632.728335454561153
+
+Gram points are useful when studying the Z-function
+(:func:`~mpmath.siegelz`). See the documentation of that function
+for additional examples.
+
+:func:`~mpmath.grampoint` can solve the defining equation for
+nonintegral `n`. There is a fixed point where `g(x) = x`::
+
+    >>> findroot(lambda x: grampoint(x) - x, 10000)
+    9146.698193171459265866198
+
+**References**
+
+1. http://mathworld.wolfram.com/GramPoint.html
+
+"""
+
+siegelz = r"""
+Computes the Z-function, also known as the Riemann-Siegel Z function,
+
+.. math ::
+
+    Z(t) = e^{i \theta(t)} \zeta(1/2+it)
+
+where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
+and where `\theta(t)` denotes the Riemann-Siegel theta function
+(see :func:`~mpmath.siegeltheta`).
+
+Evaluation is supported for real and complex arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> siegelz(1)
+    -0.7363054628673177346778998
+    >>> siegelz(3+4j)
+    (-0.1852895764366314976003936 - 0.2773099198055652246992479j)
+
+The first four derivatives are supported, using the
+optional *derivative* keyword argument::
+
+    >>> siegelz(1234567, derivative=3)
+    56.89689348495089294249178
+    >>> diff(siegelz, 1234567, n=3)
+    56.89689348495089294249178
+
+The Z-function has a Maclaurin expansion::
+
+    >>> nprint(chop(taylor(siegelz, 0, 4)))
+    [-1.46035, 0.0, 2.73588, 0.0, -8.39357]
+
+The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
+critical line `s = 1/2+it` (i.e. for real arguments `t`
+to `Z`). 
Its zeros coincide with those of the Riemann zeta +function:: + + >>> findroot(siegelz, 14) + 14.13472514173469379045725 + >>> findroot(siegelz, 20) + 21.02203963877155499262848 + >>> findroot(zeta, 0.5+14j) + (0.5 + 14.13472514173469379045725j) + >>> findroot(zeta, 0.5+20j) + (0.5 + 21.02203963877155499262848j) + +Since the Z-function is real-valued on the critical line +(and unlike `|\zeta(s)|` analytic), it is useful for +investigating the zeros of the Riemann zeta function. +For example, one can use a root-finding algorithm based +on sign changes:: + + >>> findroot(siegelz, [100, 200], solver='bisect') + 176.4414342977104188888926 + +To locate roots, Gram points `g_n` which can be computed +by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is +positive for two consecutive `n`, then `Z(t)` must have +a zero between those points:: + + >>> g10 = grampoint(10) + >>> g11 = grampoint(11) + >>> (-1)**10 * siegelz(g10) > 0 + True + >>> (-1)**11 * siegelz(g11) > 0 + True + >>> findroot(siegelz, [g10, g11], solver='bisect') + 56.44624769706339480436776 + >>> g10, g11 + (54.67523744685325626632663, 57.54516517954725443703014) + +""" + +riemannr = r""" +Evaluates the Riemann R function, a smooth approximation of the +prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann +R function gives a fast numerical approximation useful e.g. to +roughly estimate the number of primes in a given interval. + +The Riemann R function is computed using the rapidly convergent Gram +series, + +.. math :: + + R(x) = 1 + \sum_{k=1}^{\infty} + \frac{\log^k x}{k k! \zeta(k+1)}. + +From the Gram series, one sees that the Riemann R function is a +well-defined analytic function (except for a branch cut along +the negative real half-axis); it can be evaluated for arbitrary +real or complex arguments. + +The Riemann R function gives a very accurate approximation +of the prime counting function. For example, it is wrong by at +most 2 for `x < 1000`, and for `x = 10^9` differs from the exact +value of `\pi(x)` by 79, or less than two parts in a million. +It is about 10 times more accurate than the logarithmic integral +estimate (see :func:`~mpmath.li`), which however is even faster to evaluate. +It is orders of magnitude more accurate than the extremely +fast `x/\log x` estimate. + +**Examples** + +For small arguments, the Riemann R function almost exactly +gives the prime counting function if rounded to the nearest +integer:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> primepi(50), riemannr(50) + (15, 14.9757023241462) + >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100)) + 1 + >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300)) + 2 + +The Riemann R function can be evaluated for arguments far too large +for exact determination of `\pi(x)` to be computationally +feasible with any presently known algorithm:: + + >>> riemannr(10**30) + 1.46923988977204e+28 + >>> riemannr(10**100) + 4.3619719871407e+97 + >>> riemannr(10**1000) + 4.3448325764012e+996 + +A comparison of the Riemann R function and logarithmic integral estimates +for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`. +The fractional error is shown in parentheses:: + + >>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534] + >>> for n, p in enumerate(exact): + ... n += 1 + ... r, l = riemannr(10**n), li(10**n) + ... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3) + ... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr)) + ... 
+ 1 4 4.56458314100509(0.141) 6.1655995047873(0.541) + 2 25 25.6616332669242(0.0265) 30.1261415840796(0.205) + 3 168 168.359446281167(0.00214) 177.609657990152(0.0572) + 4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139) + 5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394) + 6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165) + 7 664579 664667.447564748(0.000133) 664918.405048569(0.000511) + 8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131) + 9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5) + +The derivative of the Riemann R function gives the approximate +probability for a number of magnitude `x` to be prime:: + + >>> diff(riemannr, 1000) + 0.141903028110784 + >>> mpf(primepi(1050) - primepi(950)) / 100 + 0.15 + +Evaluation is supported for arbitrary arguments and at arbitrary +precision:: + + >>> mp.dps = 30 + >>> riemannr(7.5) + 3.72934743264966261918857135136 + >>> riemannr(-4+2j) + (-0.551002208155486427591793957644 + 2.16966398138119450043195899746j) + +""" + +primepi = r""" +Evaluates the prime counting function, `\pi(x)`, which gives +the number of primes less than or equal to `x`. The argument +`x` may be fractional. + +The prime counting function is very expensive to evaluate +precisely for large `x`, and the present implementation is +not optimized in any way. For numerical approximation of the +prime counting function, it is better to use :func:`~mpmath.primepi2` +or :func:`~mpmath.riemannr`. + +Some values of the prime counting function:: + + >>> from mpmath import * + >>> [primepi(k) for k in range(20)] + [0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8] + >>> primepi(3.5) + 2 + >>> primepi(100000) + 9592 + +""" + +primepi2 = r""" +Returns an interval (as an ``mpi`` instance) providing bounds +for the value of the prime counting function `\pi(x)`. For small +`x`, :func:`~mpmath.primepi2` returns an exact interval based on +the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval +based on Schoenfeld's inequality + +.. math :: + + |\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi} + +is returned. This estimate is rigorous assuming the truth of +the Riemann hypothesis, and can be computed very quickly. + +**Examples** + +Exact values of the prime counting function for small `x`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> iv.dps = 15; iv.pretty = True + >>> primepi2(10) + [4.0, 4.0] + >>> primepi2(100) + [25.0, 25.0] + >>> primepi2(1000) + [168.0, 168.0] + +Loose intervals are generated for moderately large `x`: + + >>> primepi2(10000), primepi(10000) + ([1209.0, 1283.0], 1229) + >>> primepi2(50000), primepi(50000) + ([5070.0, 5263.0], 5133) + +As `x` increases, the absolute error gets worse while the relative +error improves. The exact value of `\pi(10^{23})` is +1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant +digits:: + + >>> p = primepi2(10**23) + >>> p + [1.9253203909477020467e+21, 1.925320392280406229e+21] + >>> mpf(p.delta) / mpf(p.a) + 6.9219865355293e-10 + +A more precise, nonrigorous estimate for `\pi(x)` can be +obtained using the Riemann R function (:func:`~mpmath.riemannr`). 
+For large enough `x`, the value returned by :func:`~mpmath.primepi2` +essentially amounts to a small perturbation of the value returned by +:func:`~mpmath.riemannr`:: + + >>> primepi2(10**100) + [4.3619719871407024816e+97, 4.3619719871407032404e+97] + >>> riemannr(10**100) + 4.3619719871407e+97 +""" + +primezeta = r""" +Computes the prime zeta function, which is defined +in analogy with the Riemann zeta function (:func:`~mpmath.zeta`) +as + +.. math :: + + P(s) = \sum_p \frac{1}{p^s} + +where the sum is taken over all prime numbers `p`. Although +this sum only converges for `\mathrm{Re}(s) > 1`, the +function is defined by analytic continuation in the +half-plane `\mathrm{Re}(s) > 0`. + +**Examples** + +Arbitrary-precision evaluation for real and complex arguments is +supported:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> primezeta(2) + 0.452247420041065498506543364832 + >>> primezeta(pi) + 0.15483752698840284272036497397 + >>> mp.dps = 50 + >>> primezeta(3) + 0.17476263929944353642311331466570670097541212192615 + >>> mp.dps = 20 + >>> primezeta(3+4j) + (-0.12085382601645763295 - 0.013370403397787023602j) + +The prime zeta function has a logarithmic pole at `s = 1`, +with residue equal to the difference of the Mertens and +Euler constants:: + + >>> primezeta(1) + +inf + >>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps) + -0.31571845205389007685 + >>> mertens-euler + -0.31571845205389007685 + +The analytic continuation to `0 < \mathrm{Re}(s) \le 1` +is implemented. In this strip the function exhibits +very complex behavior; on the unit interval, it has poles at +`1/n` for every squarefree integer `n`:: + + >>> primezeta(0.5) # Pole at s = 1/2 + (-inf + 3.1415926535897932385j) + >>> primezeta(0.25) + (-1.0416106801757269036 + 0.52359877559829887308j) + >>> primezeta(0.5+10j) + (0.54892423556409790529 + 0.45626803423487934264j) + +Although evaluation works in principle for any `\mathrm{Re}(s) > 0`, +it should be noted that the evaluation time increases exponentially +as `s` approaches the imaginary axis. + +For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`:: + + >>> primezeta(inf) + 0.0 + >>> primezeta(10), mpf(2)**-10 + (0.00099360357443698021786, 0.0009765625) + >>> primezeta(1000) + 9.3326361850321887899e-302 + >>> primezeta(1000+1000j) + (-3.8565440833654995949e-302 - 8.4985390447553234305e-302j) + +**References** + +Carl-Erik Froberg, "On the prime zeta function", +BIT 8 (1968), pp. 187-202. + +""" + +bernpoly = r""" +Evaluates the Bernoulli polynomial `B_n(z)`. + +The first few Bernoulli polynomials are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(6): + ... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n))) + ... + [1.0] + [-0.5, 1.0] + [0.166667, -1.0, 1.0] + [0.0, 0.5, -1.5, 1.0] + [-0.0333333, 0.0, 1.0, -2.0, 1.0] + [0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0] + +At `z = 0`, the Bernoulli polynomial evaluates to a +Bernoulli number (see :func:`~mpmath.bernoulli`):: + + >>> bernpoly(12, 0), bernoulli(12) + (-0.253113553113553, -0.253113553113553) + >>> bernpoly(13, 0), bernoulli(13) + (0.0, 0.0) + +Evaluation is accurate for large `n` and small `z`:: + + >>> mp.dps = 25 + >>> bernpoly(100, 0.5) + 2.838224957069370695926416e+78 + >>> bernpoly(1000, 10.5) + 5.318704469415522036482914e+1769 + +""" + +polylog = r""" +Computes the polylogarithm, defined by the sum + +.. math :: + + \mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}. 
+ +This series is convergent only for `|z| < 1`, so elsewhere +the analytic continuation is implied. + +The polylogarithm should not be confused with the logarithmic +integral (also denoted by Li or li), which is implemented +as :func:`~mpmath.li`. + +**Examples** + +The polylogarithm satisfies a huge number of functional identities. +A sample of polylogarithm evaluations is shown below:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> polylog(1,0.5), log(2) + (0.693147180559945, 0.693147180559945) + >>> polylog(2,0.5), (pi**2-6*log(2)**2)/12 + (0.582240526465012, 0.582240526465012) + >>> polylog(2,-phi), -log(phi)**2-pi**2/10 + (-1.21852526068613, -1.21852526068613) + >>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6 + (0.53721319360804, 0.53721319360804) + +:func:`~mpmath.polylog` can evaluate the analytic continuation of the +polylogarithm when `s` is an integer:: + + >>> polylog(2, 10) + (0.536301287357863 - 7.23378441241546j) + >>> polylog(2, -10) + -4.1982778868581 + >>> polylog(2, 10j) + (-3.05968879432873 + 3.71678149306807j) + >>> polylog(-2, 10) + -0.150891632373114 + >>> polylog(-2, -10) + 0.067618332081142 + >>> polylog(-2, 10j) + (0.0384353698579347 + 0.0912451798066779j) + +Some more examples, with arguments on the unit circle (note that +the series definition cannot be used for computation here):: + + >>> polylog(2,j) + (-0.205616758356028 + 0.915965594177219j) + >>> j*catalan-pi**2/48 + (-0.205616758356028 + 0.915965594177219j) + >>> polylog(3,exp(2*pi*j/3)) + (-0.534247512515375 + 0.765587078525922j) + >>> -4*zeta(3)/9 + 2*j*pi**3/81 + (-0.534247512515375 + 0.765587078525921j) + +Polylogarithms of different order are related by integration +and differentiation:: + + >>> s, z = 3, 0.5 + >>> polylog(s+1, z) + 0.517479061673899 + >>> quad(lambda t: polylog(s,t)/t, [0, z]) + 0.517479061673899 + >>> z*diff(lambda t: polylog(s+2,t), z) + 0.517479061673899 + +Taylor series expansions around `z = 0` are:: + + >>> for n in range(-3, 4): + ... nprint(taylor(lambda x: polylog(n,x), 0, 5)) + ... + [0.0, 1.0, 8.0, 27.0, 64.0, 125.0] + [0.0, 1.0, 4.0, 9.0, 16.0, 25.0] + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0] + [0.0, 1.0, 0.5, 0.333333, 0.25, 0.2] + [0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04] + [0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008] + +The series defining the polylogarithm is simultaneously +a Taylor series and an L-series. For certain values of `z`, the +polylogarithm reduces to a pure zeta function:: + + >>> polylog(pi, 1), zeta(pi) + (1.17624173838258, 1.17624173838258) + >>> polylog(pi, -1), -altzeta(pi) + (-0.909670702980385, -0.909670702980385) + +Evaluation for arbitrary, nonintegral `s` is supported +for `z` within the unit circle: + + >>> polylog(3+4j, 0.25) + (0.24258605789446 - 0.00222938275488344j) + >>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf]) + (0.24258605789446 - 0.00222938275488344j) + +It is also supported outside of the unit circle:: + + >>> polylog(1+j, 20+40j) + (-7.1421172179728 - 3.92726697721369j) + >>> polylog(1+j, 200+400j) + (-5.41934747194626 - 9.94037752563927j) + +**References** + +1. Richard Crandall, "Note on fast polylogarithm computation" + http://www.reed.edu/physics/faculty/crandall/papers/Polylog.pdf +2. http://en.wikipedia.org/wiki/Polylogarithm +3. http://mathworld.wolfram.com/Polylogarithm.html + +""" + +bell = r""" +For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell +polynomial `B_n(x)`, the first few of which are + +.. 
math ::
+
+    B_0(x) = 1
+
+    B_1(x) = x
+
+    B_2(x) = x^2+x
+
+    B_3(x) = x^3+3x^2+x
+
+If `x = 1`, or if :func:`~mpmath.bell` is called with only one argument, it
+gives the `n`-th Bell number `B_n`, which is the number of
+partitions of a set with `n` elements. By setting the precision to
+at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
+calculation of exact Bell numbers.
+
+In general, :func:`~mpmath.bell` computes
+
+.. math ::
+
+    B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
+
+where `E_n(x)` is the generalized exponential function implemented
+by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
+where the modification is the sinc term ensuring that `B_n(x)` is
+continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
+differentiated, etc., for arbitrary complex arguments.
+
+**Examples**
+
+Simple evaluations::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> bell(0, 2.5)
+    1.0
+    >>> bell(1, 2.5)
+    2.5
+    >>> bell(2, 2.5)
+    8.75
+
+Evaluation for arbitrary complex arguments::
+
+    >>> bell(5.75+1j, 2-3j)
+    (-10767.71345136587098445143 - 15449.55065599872579097221j)
+
+The first few Bell polynomials::
+
+    >>> for k in range(7):
+    ...     nprint(taylor(lambda x: bell(k,x), 0, k))
+    ...
+    [1.0]
+    [0.0, 1.0]
+    [0.0, 1.0, 1.0]
+    [0.0, 1.0, 3.0, 1.0]
+    [0.0, 1.0, 7.0, 6.0, 1.0]
+    [0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
+    [0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
+
+The first few Bell numbers and complementary Bell numbers::
+
+    >>> [int(bell(k)) for k in range(10)]
+    [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
+    >>> [int(bell(k,-1)) for k in range(10)]
+    [1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
+
+Large Bell numbers::
+
+    >>> mp.dps = 50
+    >>> bell(50)
+    185724268771078270438257767181908917499221852770.0
+    >>> bell(50,-1)
+    -29113173035759403920216141265491160286912.0
+
+Some even larger values::
+
+    >>> mp.dps = 25
+    >>> bell(1000,-1)
+    -1.237132026969293954162816e+1869
+    >>> bell(1000)
+    2.989901335682408421480422e+1927
+    >>> bell(1000,2)
+    6.591553486811969380442171e+1987
+    >>> bell(1000,100.5)
+    9.101014101401543575679639e+2529
+
+A determinant identity satisfied by Bell numbers::
+
+    >>> mp.dps = 15
+    >>> N = 8
+    >>> det([[bell(k+j) for j in range(N)] for k in range(N)])
+    125411328000.0
+    >>> superfac(N-1)
+    125411328000.0
+
+**References**
+
+1. http://mathworld.wolfram.com/DobinskisFormula.html
+
+"""
+
+polyexp = r"""
+Evaluates the polyexponential function, defined for arbitrary
+complex `s`, `z` by the series
+
+.. math ::
+
+    E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
+
+`E_s(z)` is constructed from the exponential function analogously
+to how the polylogarithm is constructed from the ordinary
+logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
+It is an entire function of both `s` and `z`.
+
+The polyexponential function provides a generalization of the
+Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
+In terms of the Bell polynomials,
+
+.. math ::
+
+    E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
+
+Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
+is a nonzero integer, but not otherwise. In particular, they differ
+at `n = 0`. 
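+
+This relation is easy to test numerically; in the quick sketch below
+(plain Python, not one of the doctests that follow), the sinc term
+only contributes at `n = 0`::
+
+    # Sketch: E_n(z) = exp(z)*B_n(z) - sinc(pi*n), and sinc(0) = 1.
+    from mpmath import mp, mpf, bell, polyexp, exp, sinc, pi
+
+    mp.dps = 25
+    z = mpf(2.5)
+    for n in [0, 1, 3]:
+        lhs = polyexp(n, z)
+        rhs = exp(z)*bell(n, z) - sinc(pi*n)
+        print(lhs, rhs)   # equal to working precision for each n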
+
+**Examples**
+
+Evaluating a series::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
+    2.101755547733791780315904
+    >>> polyexp(0.5,1)
+    2.101755547733791780315904
+
+Evaluation for arbitrary arguments::
+
+    >>> polyexp(-3-4j, 2.5+2j)
+    (2.351660261190434618268706 + 1.202966666673054671364215j)
+
+Evaluation is accurate for tiny function values::
+
+    >>> polyexp(4, -100)
+    3.499471750566824369520223e-36
+
+If `n` is a nonpositive integer, `E_n` reduces to a special
+instance of the hypergeometric function `\,_pF_q`::
+
+    >>> n = 3
+    >>> x = pi
+    >>> polyexp(-n,x)
+    4.042192318847986561771779
+    >>> x*hyper([1]*(n+1), [2]*(n+1), x)
+    4.042192318847986561771779
+
+"""
+
+cyclotomic = r"""
+Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
+
+.. math ::
+
+    \Phi_n(x) = \prod_{\zeta} (x - \zeta)
+
+where `\zeta` ranges over all primitive `n`-th roots of unity
+(see :func:`~mpmath.unitroots`). An equivalent representation, used
+for computation, is
+
+.. math ::
+
+    \Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
+
+where `\mu(m)` denotes the Moebius function. The cyclotomic
+polynomials are integer polynomials, the first few of which can be
+written explicitly as
+
+.. math ::
+
+    \Phi_0(x) = 1
+
+    \Phi_1(x) = x - 1
+
+    \Phi_2(x) = x + 1
+
+    \Phi_3(x) = x^2 + x + 1
+
+    \Phi_4(x) = x^2 + 1
+
+    \Phi_5(x) = x^4 + x^3 + x^2 + x + 1
+
+    \Phi_6(x) = x^2 - x + 1
+
+**Examples**
+
+The coefficients of low-order cyclotomic polynomials can be recovered
+using Taylor expansion::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for n in range(9):
+    ...     p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
+    ...     print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
+    ...
+    0 [1.0]
+    1 [-1.0, 1.0]
+    2 [1.0, 1.0]
+    3 [1.0, 1.0, 1.0]
+    4 [1.0, 0.0, 1.0]
+    5 [1.0, 1.0, 1.0, 1.0, 1.0]
+    6 [1.0, -1.0, 1.0]
+    7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+    8 [1.0, 0.0, 0.0, 0.0, 1.0]
+
+The definition as a product over primitive roots may be checked
+by computing the product explicitly (for a real argument, this
+method will generally introduce numerical noise in the imaginary
+part)::
+
+    >>> mp.dps = 25
+    >>> z = 3+4j
+    >>> cyclotomic(10, z)
+    (-419.0 - 360.0j)
+    >>> fprod(z-r for r in unitroots(10, primitive=True))
+    (-419.0 - 360.0j)
+    >>> z = 3
+    >>> cyclotomic(10, z)
+    61.0
+    >>> fprod(z-r for r in unitroots(10, primitive=True))
+    (61.0 - 3.146045605088568607055454e-25j)
+
+Up to permutation, the roots of a given cyclotomic polynomial
+can be checked to agree with the list of primitive roots::
+
+    >>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
+    >>> for r in polyroots(p[::-1]):
+    ...     print(r)
+    ...
+    (0.5 - 0.8660254037844386467637232j)
+    (0.5 + 0.8660254037844386467637232j)
+    >>>
+    >>> for r in unitroots(6, primitive=True):
+    ...     print(r)
+    ...
+    (0.5 + 0.8660254037844386467637232j)
+    (0.5 - 0.8660254037844386467637232j)
+
+"""
+
+meijerg = r"""
+Evaluates the Meijer G-function, defined as
+
+.. math ::
+
+    G^{m,n}_{p,q} \left( \left. \begin{matrix}
+         a_1, \dots, a_n ; a_{n+1} \dots a_p \\
+         b_1, \dots, b_m ; b_{m+1} \dots b_q
+    \end{matrix}\; \right| \; z ; r \right) =
+    \frac{1}{2 \pi i} \int_L
+    \frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
+         {\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
+         z^{-s/r} ds
+
+for an appropriate choice of the contour `L` (see references).
+
+There are `p` elements `a_j`. 
+The argument *a_s* should be a pair of lists, the first containing the +`n` elements `a_1, \ldots, a_n` and the second containing +the `p-n` elements `a_{n+1}, \ldots a_p`. + +There are `q` elements `b_j`. +The argument *b_s* should be a pair of lists, the first containing the +`m` elements `b_1, \ldots, b_m` and the second containing +the `q-m` elements `b_{m+1}, \ldots b_q`. + +The implicit tuple `(m, n, p, q)` constitutes the order or degree of the +Meijer G-function, and is determined by the lengths of the coefficient +vectors. Confusingly, the indices in this tuple appear in a different order +from the coefficients, but this notation is standard. The many examples +given below should hopefully clear up any potential confusion. + +**Algorithm** + +The Meijer G-function is evaluated as a combination of hypergeometric series. +There are two versions of the function, which can be selected with +the optional *series* argument. + +*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z` + +*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z` + +The default series is chosen based on the degree and `|z|` in order +to be consistent with Mathematica's. This definition of the Meijer G-function +has a discontinuity at `|z| = 1` for some orders, which can +be avoided by explicitly specifying a series. + +Keyword arguments are forwarded to :func:`~mpmath.hypercomb`. + +**Examples** + +Many standard functions are special cases of the Meijer G-function +(possibly rescaled and/or with branch cut corrections). We define +some test parameters:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a = mpf(0.75) + >>> b = mpf(1.5) + >>> z = mpf(2.25) + +The exponential function: +`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \; +\right| \; -z \right)` + + >>> meijerg([[],[]], [[0],[]], -z) + 9.487735836358525720550369 + >>> exp(z) + 9.487735836358525720550369 + +The natural logarithm: +`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0 +\end{matrix} \; \right| \; -z \right)` + + >>> meijerg([[1,1],[]], [[1],[0]], z) + 1.178654996341646117219023 + >>> log(1+z) + 1.178654996341646117219023 + +A rational function: +`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1 +\end{matrix} \; \right| \; z \right)` + + >>> meijerg([[1,1],[]], [[1],[1]], z) + 0.6923076923076923076923077 + >>> z/(z+1) + 0.6923076923076923076923077 + +The sine and cosine functions: + +`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} +- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)` + +`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} +- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)` + + >>> meijerg([[],[]], [[0.5],[0]], (z/2)**2) + 0.4389807929218676682296453 + >>> sin(z)/sqrt(pi) + 0.4389807929218676682296453 + >>> meijerg([[],[]], [[0],[0.5]], (z/2)**2) + -0.3544090145996275423331762 + >>> cos(z)/sqrt(pi) + -0.3544090145996275423331762 + +Bessel functions: + +`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. +\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; z \right)` + +`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left. +\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2} +\end{matrix} \; \right| \; z \right)` + +`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. +\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; -z \right)` + +`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left. 
+\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; z \right)` + +As the example with the Bessel *I* function shows, a branch +factor is required for some arguments when inverting the square root. + + >>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2) + 0.5059425789597154858527264 + >>> besselj(a,z) + 0.5059425789597154858527264 + >>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2) + 0.1853868950066556941442559 + >>> bessely(a, z) + 0.1853868950066556941442559 + >>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2) + (0.8685913322427653875717476 + 2.096964974460199200551738j) + >>> (-z)**(a/2) / z**(a/2) * besseli(a, z) + (0.8685913322427653875717476 + 2.096964974460199200551738j) + >>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2) + 0.09334163695597828403796071 + >>> besselk(a,z) + 0.09334163695597828403796071 + +Error functions: + +`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left. +\begin{matrix} a \\ a-1, a-\frac{1}{2} +\end{matrix} \; \right| \; z, \frac{1}{2} \right)` + + >>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5) + 0.00172839843123091957468712 + >>> sqrt(pi) * z**(2*a-2) * erfc(z) + 0.00172839843123091957468712 + +A Meijer G-function of higher degree, (1,1,2,3): + + >>> meijerg([[a],[b]], [[a],[b,a-1]], z) + 1.55984467443050210115617 + >>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1) + 1.55984467443050210115617 + +A Meijer G-function of still higher degree, (4,1,2,4), that can +be expanded as a messy combination of exponential integrals: + + >>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z) + 0.3323667133658557271898061 + >>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\ + ... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z))) + 0.3323667133658557271898061 + +In the following case, different series give different values:: + + >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2)) + -0.06417628097442437076207337 + >>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1) + 0.1428699426155117511873047 + >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2)) + -0.06417628097442437076207337 + +**References** + +1. http://en.wikipedia.org/wiki/Meijer_G-function + +2. http://mathworld.wolfram.com/MeijerG-Function.html + +3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/ + +4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/ + +""" + +clsin = r""" +Computes the Clausen sine function, defined formally by the series + +.. math :: + + \mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}. + +The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical +"Clausen function". More generally, the Clausen function is defined for +complex `s` and `z`, even when the series does not converge. The +Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as + +.. math :: + + \mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) - + \mathrm{Li}_s\left(e^{-iz}\right)\right) + + = \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}), + +and this representation can be taken to provide the analytic continuation of the +series. The complementary function :func:`~mpmath.clcos` gives the corresponding +cosine sum. 
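+
+The polylogarithm representation is straightforward to check
+numerically. A quick sketch (plain Python, not one of the doctests
+below)::
+
+    # Sketch: Cl_s(z) = (Li_s(exp(i*z)) - Li_s(exp(-i*z)))/(2*i).
+    from mpmath import mp, mpf, clsin, polylog, exp, chop
+
+    mp.dps = 25
+    s, z = 3, mpf(4)
+    print(clsin(s, z))   # -0.6533010136329338746275795
+    print(chop((polylog(s, exp(1j*z)) - polylog(s, exp(-1j*z)))/(2j)))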
+ +**Examples** + +Evaluation for arbitrarily chosen `s` and `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> s, z = 3, 4 + >>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf]) + -0.6533010136329338746275795 + -0.6533010136329338746275795 + +Using `z + \pi` instead of `z` gives an alternating series:: + + >>> clsin(s, z+pi) + 0.8860032351260589402871624 + >>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf]) + 0.8860032351260589402871624 + +With `s = 1`, the sum can be expressed in closed form +using elementary functions:: + + >>> z = 1 + sqrt(3) + >>> clsin(1, z) + 0.2047709230104579724675985 + >>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j)) + 0.2047709230104579724675985 + >>> nsum(lambda k: sin(k*z)/k, [1,inf]) + 0.2047709230104579724675985 + +The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the +value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for +`0 < \theta < 2 \pi`:: + + >>> cl2 = lambda t: clsin(2, t) + >>> cl2(3.5) + -0.2465045302347694216534255 + >>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5]) + -0.2465045302347694216534255 + +This function is symmetric about `\theta = \pi` with zeros and extreme +points:: + + >>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi)) + 0.0 + 1.014941606409653625021203 + 0.0 + -1.014941606409653625021203 + 0.0 + +Catalan's constant is a special value:: + + >>> cl2(pi/2) + 0.9159655941772190150546035 + >>> +catalan + 0.9159655941772190150546035 + +The Clausen sine function can be expressed in closed form when +`s` is an odd integer (becoming zero when `s` < 0):: + + >>> z = 1 + sqrt(2) + >>> clsin(1, z); (pi-z)/2 + 0.3636895456083490948304773 + 0.3636895456083490948304773 + >>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12 + 0.5661751584451144991707161 + 0.5661751584451144991707161 + >>> clsin(-1, z) + 0.0 + >>> clsin(-3, z) + 0.0 + +It can also be expressed in closed form for even integer `s \le 0`, +providing a finite sum for series such as +`\sin(z) + \sin(2z) + \sin(3z) + \ldots`:: + + >>> z = 1 + sqrt(2) + >>> clsin(0, z) + 0.1903105029507513881275865 + >>> cot(z/2)/2 + 0.1903105029507513881275865 + >>> clsin(-2, z) + -0.1089406163841548817581392 + >>> -cot(z/2)*csc(z/2)**2/4 + -0.1089406163841548817581392 + +Call with ``pi=True`` to multiply `z` by `\pi` exactly:: + + >>> clsin(3, 3*pi) + -8.892316224968072424732898e-26 + >>> clsin(3, 3, pi=True) + 0.0 + +Evaluation for complex `s`, `z` in a nonconvergent case:: + + >>> s, z = -1-j, 1+2j + >>> clsin(s, z) + (-0.593079480117379002516034 + 0.9038644233367868273362446j) + >>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf]) + (-0.593079480117379002516034 + 0.9038644233367868273362446j) + +""" + +clcos = r""" +Computes the Clausen cosine function, defined formally by the series + +.. math :: + + \mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}. + +This function is complementary to the Clausen sine function +:func:`~mpmath.clsin`. In terms of the polylogarithm, + +.. math :: + + \mathrm{\widetilde{Cl}}_s(z) = + \frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) + + \mathrm{Li}_s\left(e^{-iz}\right)\right) + + = \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}). 
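+
+For real `s` and `z`, the real-part form gives a quick numerical
+check. A short sketch (plain Python, not one of the doctests below)::
+
+    # Sketch: for real s, z, clcos(s, z) equals Re[Li_s(exp(i*z))].
+    from mpmath import mp, mpf, clcos, polylog, exp
+
+    mp.dps = 25
+    s, z = 3, mpf(4)
+    print(clcos(s, z))                  # -0.6518926267198991308332759
+    print(polylog(s, exp(1j*z)).real)   # same value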
+ +**Examples** + +Evaluation for arbitrarily chosen `s` and `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> s, z = 3, 4 + >>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf]) + -0.6518926267198991308332759 + -0.6518926267198991308332759 + +Using `z + \pi` instead of `z` gives an alternating series:: + + >>> s, z = 3, 0.5 + >>> clcos(s, z+pi) + -0.8155530586502260817855618 + >>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf]) + -0.8155530586502260817855618 + +With `s = 1`, the sum can be expressed in closed form +using elementary functions:: + + >>> z = 1 + sqrt(3) + >>> clcos(1, z) + -0.6720334373369714849797918 + >>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z)))) + -0.6720334373369714849797918 + >>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real + -0.6720334373369714849797918 + >>> nsum(lambda k: cos(k*z)/k, [1,inf]) + -0.6720334373369714849797918 + +It can also be expressed in closed form when `s` is an even integer. +For example, + + >>> clcos(2,z) + -0.7805359025135583118863007 + >>> pi**2/6 - pi*z/2 + z**2/4 + -0.7805359025135583118863007 + +The case `s = 0` gives the renormalized sum of +`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for +any value of `z`):: + + >>> clcos(0, z) + -0.5 + >>> nsum(lambda k: cos(k*z), [1,inf]) + -0.5 + +Also the sums + +.. math :: + + \cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots + +and + +.. math :: + + \cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots + +for higher integer powers `n = -s` can be done in closed form. They are zero +when `n` is positive and even (`s` negative and even):: + + >>> clcos(-1, z); 1/(2*cos(z)-2) + -0.2607829375240542480694126 + -0.2607829375240542480694126 + >>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8 + 0.1472635054979944390848006 + 0.1472635054979944390848006 + >>> clcos(-2, z); clcos(-4, z); clcos(-6, z) + 0.0 + 0.0 + 0.0 + +With `z = \pi`, the series reduces to that of the Riemann zeta function +(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta +function values):: + + >>> clcos(2.5, 0); zeta(2.5) + 1.34148725725091717975677 + 1.34148725725091717975677 + >>> clcos(2.5, pi); -altzeta(2.5) + -0.8671998890121841381913472 + -0.8671998890121841381913472 + +Call with ``pi=True`` to multiply `z` by `\pi` exactly:: + + >>> clcos(-3, 2*pi) + 2.997921055881167659267063e+102 + >>> clcos(-3, 2, pi=True) + 0.008333333333333333333333333 + +Evaluation for complex `s`, `z` in a nonconvergent case:: + + >>> s, z = -1-j, 1+2j + >>> clcos(s, z) + (0.9407430121562251476136807 + 0.715826296033590204557054j) + >>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf]) + (0.9407430121562251476136807 + 0.715826296033590204557054j) + +""" + +whitm = r""" +Evaluates the Whittaker function `M(k,m,z)`, which gives a solution +to the Whittaker differential equation + +.. math :: + + \frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+ + \frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0. + +A second solution is given by :func:`~mpmath.whitw`. + +The Whittaker functions are defined in Abramowitz & Stegun, section 13.1. +They are alternate forms of the confluent hypergeometric functions +`\,_1F_1` and `U`: + +.. math :: + + M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} + \,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z) + + W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} + U(\tfrac{1}{2}+m-k, 1+2m, z). 
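+
+Since `M(k,m,z)` is defined above directly in terms of `\,_1F_1`, that
+relation is easy to check numerically; a minimal sketch (plain Python,
+not a doctest)::
+
+    from mpmath import *
+
+    mp.dps = 25
+    k, m, z = mpf(0.25), mpf(1.5), mpf(2)
+    lhs = whitm(k, m, z)
+    rhs = exp(-z/2) * z**(mpf(0.5)+m) * hyp1f1(mpf(0.5)+m-k, 1+2*m, z)
+    assert abs(lhs - rhs) < 1e-20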
+ +**Examples** + +Evaluation for arbitrary real and complex arguments is supported:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> whitm(1, 1, 1) + 0.7302596799460411820509668 + >>> whitm(1, 1, -1) + (0.0 - 1.417977827655098025684246j) + >>> whitm(j, j/2, 2+3j) + (3.245477713363581112736478 - 0.822879187542699127327782j) + >>> whitm(2, 3, 100000) + 4.303985255686378497193063e+21707 + +Evaluation at zero:: + + >>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0) + +inf + nan + 0.0 + +We can verify that :func:`~mpmath.whitm` numerically satisfies the +differential equation for arbitrarily chosen values:: + + >>> k = mpf(0.25) + >>> m = mpf(1.5) + >>> f = lambda z: whitm(k,m,z) + >>> for z in [-1, 2.5, 3, 1+2j]: + ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`, +verifying evaluation along the real axis:: + + >>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf]) + 3.438869842576800225207341 + >>> 128/(21*sqrt(pi)) + 3.438869842576800225207341 + +""" + +whitw = r""" +Evaluates the Whittaker function `W(k,m,z)`, which gives a second +solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.) + +**Examples** + +Evaluation for arbitrary real and complex arguments is supported:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> whitw(1, 1, 1) + 1.19532063107581155661012 + >>> whitw(1, 1, -1) + (-0.9424875979222187313924639 - 0.2607738054097702293308689j) + >>> whitw(j, j/2, 2+3j) + (0.1782899315111033879430369 - 0.01609578360403649340169406j) + >>> whitw(2, 3, 100000) + 1.887705114889527446891274e-21705 + >>> whitw(-1, -1, 100) + 1.905250692824046162462058e-24 + +Evaluation at zero:: + + >>> for m in [-1, -0.5, 0, 0.5, 1]: + ... whitw(1, m, 0) + ... + +inf + nan + 0.0 + nan + +inf + +We can verify that :func:`~mpmath.whitw` numerically satisfies the +differential equation for arbitrarily chosen values:: + + >>> k = mpf(0.25) + >>> m = mpf(1.5) + >>> f = lambda z: whitw(k,m,z) + >>> for z in [-1, 2.5, 3, 1+2j]: + ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +""" + +ber = r""" +Computes the Kelvin function ber, which for real arguments gives the real part +of the Bessel J function of a rotated argument + +.. math :: + + J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x). + +The imaginary part is given by :func:`~mpmath.bei`. + +**Plots** + +.. literalinclude :: /plots/ber.py +.. image :: /plots/ber.png + +**Examples** + +Verifying the defining relation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> n, x = 2, 3.5 + >>> ber(n,x) + 1.442338852571888752631129 + >>> bei(n,x) + -0.948359035324558320217678 + >>> besselj(n, x*root(1,8,3)) + (1.442338852571888752631129 - 0.948359035324558320217678j) + +The ber and bei functions are also defined by analytic continuation +for complex arguments:: + + >>> ber(1+j, 2+3j) + (4.675445984756614424069563 - 15.84901771719130765656316j) + >>> bei(1+j, 2+3j) + (15.83886679193707699364398 + 4.684053288183046528703611j) + +""" + +bei = r""" +Computes the Kelvin function bei, which for real arguments gives the +imaginary part of the Bessel J function of a rotated argument. +See :func:`~mpmath.ber`. +""" + +ker = r""" +Computes the Kelvin function ker, which for real arguments gives the real part +of the (rescaled) Bessel K function of a rotated argument + +.. 
math ::
+
+    e^{-\pi i/2} K_n\left(x e^{3\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
+
+The imaginary part is given by :func:`~mpmath.kei`.
+
+**Plots**
+
+.. literalinclude :: /plots/ker.py
+.. image :: /plots/ker.png
+
+**Examples**
+
+Verifying the defining relation::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> n, x = 2, 4.5
+    >>> ker(n,x)
+    0.02542895201906369640249801
+    >>> kei(n,x)
+    -0.02074960467222823237055351
+    >>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
+    (0.02542895201906369640249801 - 0.02074960467222823237055351j)
+
+The ker and kei functions are also defined by analytic continuation
+for complex arguments::
+
+    >>> ker(1+j, 3+4j)
+    (1.586084268115490421090533 - 2.939717517906339193598719j)
+    >>> kei(1+j, 3+4j)
+    (-2.940403256319453402690132 - 1.585621643835618941044855j)
+
+"""
+
+kei = r"""
+Computes the Kelvin function kei, which for real arguments gives the
+imaginary part of the (rescaled) Bessel K function of a rotated argument.
+See :func:`~mpmath.ker`.
+"""
+
+struveh = r"""
+Gives the Struve function
+
+.. math ::
+
+    \,\mathbf{H}_n(z) =
+    \sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
+    \Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
+
+which is a solution to the Struve differential equation
+
+.. math ::
+
+    z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> struveh(0, 3.5)
+    0.3608207733778295024977797
+    >>> struveh(-1, 10)
+    -0.255212719726956768034732
+    >>> struveh(1, -100.5)
+    0.5819566816797362287502246
+    >>> struveh(2.5, 10000000000000)
+    3153915652525200060.308937
+    >>> struveh(2.5, -10000000000000)
+    (0.0 - 3153915652525200060.308937j)
+    >>> struveh(1+j, 1000000+4000000j)
+    (-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
+
+A Struve function of half-integer order is elementary; for example::
+
+    >>> z = 3
+    >>> struveh(0.5, 3)
+    0.9167076867564138178671595
+    >>> sqrt(2/(pi*z))*(1-cos(z))
+    0.9167076867564138178671595
+
+Numerically verifying the differential equation::
+
+    >>> z = mpf(4.5)
+    >>> n = 3
+    >>> f = lambda z: struveh(n,z)
+    >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
+    >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
+    >>> lhs
+    17.40359302709875496632744
+    >>> rhs
+    17.40359302709875496632744
+
+"""
+
+struvel = r"""
+Gives the modified Struve function
+
+.. math ::
+
+    \,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
+
+which solves the modified Struve differential equation
+
+.. math ::
+
+    z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
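+
+The rotation formula above can be checked directly against
+:func:`~mpmath.struveh`; a minimal sketch (plain Python rather than a
+doctest, so no reference output is claimed)::
+
+    from mpmath import *
+
+    mp.dps = 25
+    n, z = mpf(2.5), mpf(1.25)
+    # L_n(z) = -i * exp(-n*pi*i/2) * H_n(i*z)
+    assert abs(struvel(n, z) + j*exp(-n*pi*j/2)*struveh(n, j*z)) < 1e-18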
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> struvel(0, 3.5)
+    7.180846515103737996249972
+    >>> struvel(-1, 10)
+    2670.994904980850550721511
+    >>> struvel(1, -100.5)
+    1.757089288053346261497686e+42
+    >>> struvel(2.5, 10000000000000)
+    4.160893281017115450519948e+4342944819025
+    >>> struvel(2.5, -10000000000000)
+    (0.0 - 4.160893281017115450519948e+4342944819025j)
+    >>> struvel(1+j, 700j)
+    (-0.1721150049480079451246076 + 0.1240770953126831093464055j)
+    >>> struvel(1+j, 1000000+4000000j)
+    (-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
+
+Numerically verifying the differential equation::
+
+    >>> z = mpf(3.5)
+    >>> n = 3
+    >>> f = lambda z: struvel(n,z)
+    >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
+    >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
+    >>> lhs
+    6.368850306060678353018165
+    >>> rhs
+    6.368850306060678353018165
+"""
+
+appellf1 = r"""
+Gives the Appell F1 hypergeometric function of two variables,
+
+.. math ::
+
+    F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
+        \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
+        \frac{x^m y^n}{m! n!}.
+
+This series is generally convergent only when `|x| < 1` and `|y| < 1`,
+although :func:`~mpmath.appellf1` can evaluate an analytic continuation
+with respect to either variable, and sometimes both.
+
+**Examples**
+
+Evaluation is supported for real and complex parameters::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> appellf1(1,0,0.5,1,0.5,0.25)
+    1.154700538379251529018298
+    >>> appellf1(1,1+j,0.5,1,0.5,0.5j)
+    (1.138403860350148085179415 + 1.510544741058517621110615j)
+
+For some integer parameters, the F1 series reduces to a polynomial::
+
+    >>> appellf1(2,-4,-3,1,2,5)
+    -816.0
+    >>> appellf1(-5,1,2,1,4,5)
+    -20528.0
+
+The analytic continuation with respect to either `x` or `y`,
+and sometimes with respect to both, can be evaluated::
+
+    >>> appellf1(2,3,4,5,100,0.5)
+    (0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
+    >>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
+    (-0.1782604566893954897128702 + 0.002472407104546216117161499j)
+    >>> appellf1(1,2,3,4,10,12)
+    -0.07122993830066776374929313
+
+For certain arguments, F1 reduces to an ordinary hypergeometric function::
+
+    >>> appellf1(1,2,3,5,0.5,0.25)
+    1.547902270302684019335555
+    >>> 4*hyp2f1(1,2,5,'1/3')/3
+    1.547902270302684019335555
+    >>> appellf1(1,2,3,4,0,1.5)
+    (-1.717202506168937502740238 - 2.792526803190927323077905j)
+    >>> hyp2f1(1,3,4,1.5)
+    (-1.717202506168937502740238 - 2.792526803190927323077905j)
+
+The F1 function satisfies a system of partial differential equations::
+
+    >>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
+    >>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
+    >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
+    ...     y*(1-x)*diff(F,(x,y),(1,1)) +
+    ...     (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
+    ...     b1*y*diff(F,(x,y),(0,1)) -
+    ...     a*b1*F(x,y))
+    0.0
+    >>>
+    >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
+    ...     x*(1-y)*diff(F,(x,y),(1,1)) +
+    ...     (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
+    ...     b2*x*diff(F,(x,y),(1,0)) -
+    ...     a*b2*F(x,y))
+    0.0
+
+The Appell F1 function allows for closed-form evaluation of various
+integrals, such as any integral of the form
+`\int x^r (x+a)^p (x+b)^q dx`::
+
+    >>> def integral(a,b,p,q,r,x1,x2):
+    ...     a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
+    ...     f = lambda x: x**r * (x+a)**p * (x+b)**q
+    ...     def F(x):
+    ...         v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
+    ...         v *= (1+x/a)**(-p)
+    ...         v *= (1+x/b)**(-q)
+    ...         v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
+    ...         return v
+    ...     print("Num. quad: %s" % quad(f, [x1,x2]))
+    ...     print("Appell F1: %s" % (F(x2)-F(x1)))
+    ...
+    >>> integral('1/5','4/3','-2','3','1/2',0,1)
+    Num. quad: 9.073335358785776206576981
+    Appell F1: 9.073335358785776206576981
+    >>> integral('3/2','4/3','-2','3','1/2',0,1)
+    Num. quad: 1.092829171999626454344678
+    Appell F1: 1.092829171999626454344678
+    >>> integral('3/2','4/3','-2','3','1/2',12,25)
+    Num. quad: 1106.323225040235116498927
+    Appell F1: 1106.323225040235116498927
+
+Incomplete elliptic integrals also fall into this category [1]::
+
+    >>> def E(z, m):
+    ...     if (pi/2).ae(z):
+    ...         return ellipe(m)
+    ...     return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
+    ...         sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
+    ...
+    >>> z, m = 1, 0.5
+    >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
+    0.9273298836244400669659042
+    0.9273298836244400669659042
+    >>> z, m = 3, 2
+    >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
+    (1.057495752337234229715836 + 1.198140234735592207439922j)
+    (1.057495752337234229715836 + 1.198140234735592207439922j)
+
+**References**
+
+1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
+2. [SrivastavaKarlsson]_
+3. [CabralRosetti]_
+4. [Vidunas]_
+5. [Slater]_
+
+"""
+
+angerj = r"""
+Gives the Anger function
+
+.. math ::
+
+    \mathbf{J}_{\nu}(z) = \frac{1}{\pi}
+        \int_0^{\pi} \cos(\nu t - z \sin t) dt
+
+which is an entire function of both the parameter `\nu` and
+the argument `z`. It solves the inhomogeneous Bessel differential
+equation
+
+.. math ::
+
+    f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
+        = \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
+
+**Examples**
+
+Evaluation for real and complex parameter and argument::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> angerj(2,3)
+    0.4860912605858910769078311
+    >>> angerj(-3+4j, 2+5j)
+    (-5033.358320403384472395612 + 585.8011892476145118551756j)
+    >>> angerj(3.25, 1e6j)
+    (4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
+    >>> angerj(-1.5, 1e6)
+    0.0002795719747073879393087011
+
+The Anger function coincides with the Bessel J-function when `\nu`
+is an integer, but not otherwise::
+
+    >>> angerj(1,3); besselj(1,3)
+    0.3390589585259364589255146
+    0.3390589585259364589255146
+    >>> angerj(1.5,3); besselj(1.5,3)
+    0.4088969848691080859328847
+    0.4777182150870917715515015
+
+Verifying the differential equation::
+
+    >>> v,z = mpf(2.25), 0.75
+    >>> f = lambda z: angerj(v,z)
+    >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
+    -0.6002108774380707130367995
+    >>> (z-v)/(pi*z**2) * sinpi(v)
+    -0.6002108774380707130367995
+
+Verifying the integral representation::
+
+    >>> angerj(v,z)
+    0.1145380759919333180900501
+    >>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
+    0.1145380759919333180900501
+
+**References**
+
+1. [DLMF]_ section 11.10: Anger-Weber Functions
+"""
+
+webere = r"""
+Gives the Weber function
+
+.. math ::
+
+    \mathbf{E}_{\nu}(z) = \frac{1}{\pi}
+        \int_0^{\pi} \sin(\nu t - z \sin t) dt
+
+which is an entire function of both the parameter `\nu` and
+the argument `z`. It solves the inhomogeneous Bessel differential
+equation
+
+.. math ::
+
+    f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
+        = -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
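+
+The Anger and Weber functions are linear combinations of one another;
+a sketch (plain Python, not a doctest) of the classical connection
+formula `\sin(\pi\nu)\,\mathbf{E}_{\nu}(z) = \mathbf{J}_{-\nu}(z) -
+\cos(\pi\nu)\,\mathbf{J}_{\nu}(z)`::
+
+    from mpmath import *
+
+    mp.dps = 25
+    v, z = mpf(2.25), mpf(0.75)
+    lhs = sinpi(v) * webere(v, z)
+    rhs = angerj(-v, z) - cospi(v) * angerj(v, z)
+    assert abs(lhs - rhs) < 1e-20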
+ +**Examples** + +Evaluation for real and complex parameter and argument:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> webere(2,3) + -0.1057668973099018425662646 + >>> webere(-3+4j, 2+5j) + (-585.8081418209852019290498 - 5033.314488899926921597203j) + >>> webere(3.25, 1e6j) + (-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j) + >>> webere(3.25, 1e6) + -0.00002812518265894315604914453 + +Up to addition of a rational function of `z`, the Weber function coincides +with the Struve H-function when `\nu` is an integer:: + + >>> webere(1,3); 2/pi-struveh(1,3) + -0.3834897968188690177372881 + -0.3834897968188690177372881 + >>> webere(5,3); 26/(35*pi)-struveh(5,3) + 0.2009680659308154011878075 + 0.2009680659308154011878075 + +Verifying the differential equation:: + + >>> v,z = mpf(2.25), 0.75 + >>> f = lambda z: webere(v,z) + >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z) + -1.097441848875479535164627 + >>> -(z+v+(z-v)*cospi(v))/(pi*z**2) + -1.097441848875479535164627 + +Verifying the integral representation:: + + >>> webere(v,z) + 0.1486507351534283744485421 + >>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi]) + 0.1486507351534283744485421 + +**References** + +1. [DLMF]_ section 11.10: Anger-Weber Functions +""" + +lommels1 = r""" +Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}` + +.. math :: + + s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)} + \,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2}; + -\frac{z^2}{4} \right) + +which solves the inhomogeneous Bessel equation + +.. math :: + + z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}. + +A second solution is given by :func:`~mpmath.lommels2`. + +**Plots** + +.. literalinclude :: /plots/lommels1.py +.. image :: /plots/lommels1.png + +**Examples** + +An integral representation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> u,v,z = 0.25, 0.125, mpf(0.75) + >>> lommels1(u,v,z) + 0.4276243877565150372999126 + >>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \ + ... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2) + 0.4276243877565150372999126 + +A special value:: + + >>> lommels1(v,v,z) + 0.5461221367746048054932553 + >>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z) + 0.5461221367746048054932553 + +Verifying the differential equation:: + + >>> f = lambda z: lommels1(u,v,z) + >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) + 0.6979536443265746992059141 + >>> z**(u+1) + 0.6979536443265746992059141 + +**References** + +1. [GradshteynRyzhik]_ +2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html +""" + +lommels2 = r""" +Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}` + +.. math :: + + S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1} + \Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right) + \Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times + + \left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) - + \cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z) + \right] + +which solves the same differential equation as +:func:`~mpmath.lommels1`. + +**Plots** + +.. literalinclude :: /plots/lommels2.py +.. 
image :: /plots/lommels2.png + +**Examples** + +For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> lommels2(10,2,30000) + 1.968299831601008419949804e+40 + >>> power(30000,9) + 1.9683e+40 + +A special value:: + + >>> u,v,z = 0.5, 0.125, mpf(0.75) + >>> lommels2(v,v,z) + 0.9589683199624672099969765 + >>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5) + 0.9589683199624672099969765 + +Verifying the differential equation:: + + >>> f = lambda z: lommels2(u,v,z) + >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) + 0.6495190528383289850727924 + >>> z**(u+1) + 0.6495190528383289850727924 + +**References** + +1. [GradshteynRyzhik]_ +2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html +""" + +appellf2 = r""" +Gives the Appell F2 hypergeometric function of two variables + +.. math :: + + F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for `|x| + |y| < 1`. + +**Examples** + +Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf2(1,2,3,4,5,0.25,0.125) + 1.257417193533135344785602 + >>> appellf2(1,-3,-4,2,3,2,3) + -42.8 + >>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25) + (0.9880539519421899867041719 + 0.01497616165031102661476978j) + >>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25)) + 1.201311219287411337955192 + >>> appellf2(1,1,1,4,6,0.125,16) + (-0.09455532250274744282125152 - 0.7647282253046207836769297j) + +A transformation formula:: + + >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125]) + >>> appellf2(a,b1,b2,c1,c2,x,y) + 0.2299211717841180783309688 + >>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x)) + 0.2299211717841180783309688 + +A system of partial differential equations satisfied by F2:: + + >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625]) + >>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - + ... x*y*diff(F,(x,y),(1,1)) + + ... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) - + ... b1*y*diff(F,(x,y),(0,1)) - + ... a*b1*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - + ... x*y*diff(F,(x,y),(1,1)) + + ... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) - + ... b2*x*diff(F,(x,y),(1,0)) - + ... a*b2*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +appellf3 = r""" +Gives the Appell F3 hypergeometric function of two variables + +.. math :: + + F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for `|x| < 1, |y| < 1`. 
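+
+Because the double series converges rapidly well inside the unit
+bidisk, a brute-force partial sum gives an easy sanity check; a
+sketch (plain Python, not a doctest)::
+
+    from mpmath import *
+
+    mp.dps = 25
+    a1, a2, b1, b2, c = 1, 2, 3, 4, 5
+    x, y = mpf('0.1'), mpf('0.05')
+    total = mpf(0)
+    for m in range(40):
+        for n in range(40):
+            total += (rf(a1,m)*rf(a2,n)*rf(b1,m)*rf(b2,n)/rf(c,m+n)
+                      * x**m * y**n / (fac(m)*fac(n)))
+    assert abs(appellf3(a1, a2, b1, b2, c, x, y) - total) < 1e-20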
+ +**Examples** + +Evaluation for various parameters and variables:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf3(1,2,3,4,5,0.5,0.25) + 2.221557778107438938158705 + >>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6) + (-0.5189554589089861284537389 - 0.1454441043328607980769742j) + (-0.5189554589089861284537389 - 0.1454441043328607980769742j) + >>> appellf3(1,-2,-3,1,1,4,6) + -17.4 + >>> appellf3(1,2,-3,1,1,4,6) + (17.7876136773677356641825 + 19.54768762233649126154534j) + >>> appellf3(1,2,-3,1,1,6,4) + (85.02054175067929402953645 + 148.4402528821177305173599j) + >>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25)) + 1.719992169545200286696007 + +Many transformations and evaluations for special combinations +of the parameters are possible, e.g.: + + >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) + >>> appellf3(a,c-a,b,c-b,c,x,y) + 1.093432340896087107444363 + >>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y) + 1.093432340896087107444363 + >>> x**2*appellf3(1,1,1,1,3,x,-x) + 0.01568646277445385390945083 + >>> polylog(2,x**2) + 0.01568646277445385390945083 + >>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125]) + >>> appellf3(a1,a2,b1,b2,c,x,1) + 1.03947361709111140096947 + >>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x) + 1.03947361709111140096947 + +The Appell F3 function satisfies a pair of partial +differential equations:: + + >>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625]) + >>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) + + ... y*diff(F,(x,y),(1,1)) + + ... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) - + ... a1*b1*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) + + ... x*diff(F,(x,y),(1,1)) + + ... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) - + ... a2*b2*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +appellf4 = r""" +Gives the Appell F4 hypergeometric function of two variables + +.. math :: + + F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for +`\sqrt{|x|} + \sqrt{|y|} < 1`. + +**Examples** + +Evaluation for various parameters and arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf4(1,1,2,2,0.25,0.125) + 1.286182069079718313546608 + >>> appellf4(-2,-3,4,5,4,5) + 34.8 + >>> appellf4(5,4,2,3,0.25j,-0.125j) + (-0.2585967215437846642163352 + 2.436102233553582711818743j) + +Reduction to `\,_2F_1` in a special case:: + + >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) + >>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x)) + 1.129143488466850868248364 + >>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y) + 1.129143488466850868248364 + +A system of partial differential equations satisfied by F4:: + + >>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625]) + >>> F = lambda x,y: appellf4(a,b,c1,c2,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - + ... y**2*diff(F,(x,y),(0,2)) - + ... 2*x*y*diff(F,(x,y),(1,1)) + + ... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) - + ... ((a+b+1)*y)*diff(F,(x,y),(0,1)) - + ... a*b*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - + ... x**2*diff(F,(x,y),(2,0)) - + ... 2*x*y*diff(F,(x,y),(1,1)) + + ... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) - + ... ((a+b+1)*x)*diff(F,(x,y),(1,0)) - + ... a*b*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +zeta = r""" +Computes the Riemann zeta function + +.. 
math :: + + \zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots + +or, with `a \ne 1`, the more general Hurwitz zeta function + +.. math :: + + \zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}. + +Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with +respect to `s`, + +.. math :: + + \zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}. + +Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz +zeta functions are defined through analytic continuation for arbitrary +complex `s \ne 1` (`s = 1` is a pole). + +The implementation uses three algorithms: the Borwein algorithm for +the Riemann zeta function when `s` is close to the real line; +the Riemann-Siegel formula for the Riemann zeta function when `s` is +large imaginary, and Euler-Maclaurin summation in all other cases. +The reflection formula for `\Re(s) < 0` is implemented in some cases. +The algorithm can be chosen with ``method = 'borwein'``, +``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``. + +The parameter `a` is usually a rational number `a = p/q`, and may be specified +as such by passing an integer tuple `(p, q)`. Evaluation is supported for +arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for +nonrational `a` or when computing derivatives. + +**Examples** + +Some values of the Riemann zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> zeta(2); pi**2 / 6 + 1.644934066848226436472415 + 1.644934066848226436472415 + >>> zeta(0) + -0.5 + >>> zeta(-1) + -0.08333333333333333333333333 + >>> zeta(-2) + 0.0 + +For large positive `s`, `\zeta(s)` rapidly approaches 1:: + + >>> zeta(50) + 1.000000000000000888178421 + >>> zeta(100) + 1.0 + >>> zeta(inf) + 1.0 + >>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler + 0.5772156649015328606065121 + 0.5772156649015328606065121 + >>> nsum(lambda k: zeta(k)-1, [2, inf]) + 1.0 + +Evaluation is supported for complex `s` and `a`: + + >>> zeta(-3+4j) + (-0.03373057338827757067584698 + 0.2774499251557093745297677j) + >>> zeta(2+3j, -1+j) + (389.6841230140842816370741 + 295.2674610150305334025962j) + +The Riemann zeta function has so-called nontrivial zeros on +the critical line `s = 1/2 + it`:: + + >>> findroot(zeta, 0.5+14j); zetazero(1) + (0.5 + 14.13472514173469379045725j) + (0.5 + 14.13472514173469379045725j) + >>> findroot(zeta, 0.5+21j); zetazero(2) + (0.5 + 21.02203963877155499262848j) + (0.5 + 21.02203963877155499262848j) + >>> findroot(zeta, 0.5+25j); zetazero(3) + (0.5 + 25.01085758014568876321379j) + (0.5 + 25.01085758014568876321379j) + >>> chop(zeta(zetazero(10))) + 0.0 + +Evaluation on and near the critical line is supported for large +heights `t` by means of the Riemann-Siegel formula (currently +for `a = 1`, `n \le 4`):: + + >>> zeta(0.5+100000j) + (1.073032014857753132114076 + 5.780848544363503984261041j) + >>> zeta(0.75+1000000j) + (0.9535316058375145020351559 + 0.9525945894834273060175651j) + >>> zeta(0.5+10000000j) + (11.45804061057709254500227 - 8.643437226836021723818215j) + >>> zeta(0.5+100000000j, derivative=1) + (51.12433106710194942681869 + 43.87221167872304520599418j) + >>> zeta(0.5+100000000j, derivative=2) + (-444.2760822795430400549229 - 896.3789978119185981665403j) + >>> zeta(0.5+100000000j, derivative=3) + (3230.72682687670422215339 + 14374.36950073615897616781j) + >>> zeta(0.5+100000000j, derivative=4) + (-11967.35573095046402130602 - 218945.7817789262839266148j) + >>> zeta(1+10000000j) # off the line + (2.859846483332530337008882 + 
0.491808047480981808903986j) + >>> zeta(1+10000000j, derivative=1) + (-4.333835494679647915673205 - 0.08405337962602933636096103j) + >>> zeta(1+10000000j, derivative=4) + (453.2764822702057701894278 - 581.963625832768189140995j) + +For investigation of the zeta function zeros, the Riemann-Siegel +Z-function is often more convenient than working with the Riemann +zeta function directly (see :func:`~mpmath.siegelz`). + +Some values of the Hurwitz zeta function:: + + >>> zeta(2, 3); -5./4 + pi**2/6 + 0.3949340668482264364724152 + 0.3949340668482264364724152 + >>> zeta(2, (3,4)); pi**2 - 8*catalan + 2.541879647671606498397663 + 2.541879647671606498397663 + +For positive integer values of `s`, the Hurwitz zeta function is +equivalent to a polygamma function (except for a normalizing factor):: + + >>> zeta(4, (1,5)); psi(3, '1/5')/6 + 625.5408324774542966919938 + 625.5408324774542966919938 + +Evaluation of derivatives:: + + >>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2 + (-2.675565317808456852310934 + 4.742664438034657928194889j) + (-2.675565317808456852310934 + 4.742664438034657928194889j) + >>> zeta(2, 1, 20) + 2432902008176640000.000242 + >>> zeta(3+4j, 5.5+2j, 4) + (-0.140075548947797130681075 - 0.3109263360275413251313634j) + >>> zeta(0.5+100000j, 1, 4) + (-10407.16081931495861539236 + 13777.78669862804508537384j) + >>> zeta(-100+0.5j, (1,3), derivative=4) + (4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j) + +Generating a Taylor series at `s = 2` using derivatives:: + + >>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k)) + ... + 1.644934066848226436472415 * (s-2)^0 + -0.9375482543158437537025741 * (s-2)^1 + 0.9946401171494505117104293 * (s-2)^2 + -1.000024300473840810940657 * (s-2)^3 + 1.000061933072352565457512 * (s-2)^4 + -1.000006869443931806408941 * (s-2)^5 + 1.000000173233769531820592 * (s-2)^6 + -0.9999999569989868493432399 * (s-2)^7 + 0.9999999937218844508684206 * (s-2)^8 + -0.9999999996355013916608284 * (s-2)^9 + 1.000000000004610645020747 * (s-2)^10 + +Evaluation at zero and for negative integer `s`:: + + >>> zeta(0, 10) + -9.5 + >>> zeta(-2, (2,3)); mpf(1)/81 + 0.01234567901234567901234568 + 0.01234567901234567901234568 + >>> zeta(-3+4j, (5,4)) + (0.2899236037682695182085988 + 0.06561206166091757973112783j) + >>> zeta(-3.25, 1/pi) + -0.0005117269627574430494396877 + >>> zeta(-3.5, pi, 1) + 11.156360390440003294709 + >>> zeta(-100.5, (8,3)) + -4.68162300487989766727122e+77 + >>> zeta(-10.5, (-8,3)) + (-0.01521913704446246609237979 + 29907.72510874248161608216j) + >>> zeta(-1000.5, (-8,3)) + (1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j) + >>> zeta(-1+j, 3+4j) + (-16.32988355630802510888631 - 22.17706465801374033261383j) + >>> zeta(-1+j, 3+4j, 2) + (32.48985276392056641594055 - 51.11604466157397267043655j) + >>> diff(lambda s: zeta(s, 3+4j), -1+j, 2) + (32.48985276392056641594055 - 51.11604466157397267043655j) + +**References** + +1. http://mathworld.wolfram.com/RiemannZetaFunction.html + +2. http://mathworld.wolfram.com/HurwitzZetaFunction.html + +3. [BorweinZeta]_ + +""" + +dirichlet = r""" +Evaluates the Dirichlet L-function + +.. math :: + + L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}. + +where `\chi` is a periodic sequence of length `q` which should be supplied +in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`. +Strictly, `\chi` should be a Dirichlet character, but any periodic +sequence will work. 
+ +For example, ``dirichlet(s, [1])`` gives the ordinary +Riemann zeta function and ``dirichlet(s, [-1,1])`` gives +the alternating zeta function (Dirichlet eta function). + +Also the derivative with respect to `s` (currently only a first +derivative) can be evaluated. + +**Examples** + +The ordinary Riemann zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> dirichlet(3, [1]); zeta(3) + 1.202056903159594285399738 + 1.202056903159594285399738 + >>> dirichlet(1, [1]) + +inf + +The alternating zeta function:: + + >>> dirichlet(1, [-1,1]); ln(2) + 0.6931471805599453094172321 + 0.6931471805599453094172321 + +The following defines the Dirichlet beta function +`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies +several values of this function:: + + >>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d) + >>> B(0); 1./2 + 0.5 + 0.5 + >>> B(1); pi/4 + 0.7853981633974483096156609 + 0.7853981633974483096156609 + >>> B(2); +catalan + 0.9159655941772190150546035 + 0.9159655941772190150546035 + >>> B(2,1); diff(B, 2) + 0.08158073611659279510291217 + 0.08158073611659279510291217 + >>> B(-1,1); 2*catalan/pi + 0.5831218080616375602767689 + 0.5831218080616375602767689 + >>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2))) + 0.3915943927068367764719453 + 0.3915943927068367764719454 + >>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25))) + 0.1929013167969124293631898 + 0.1929013167969124293631898 + +A custom L-series of period 3:: + + >>> dirichlet(2, [2,0,1]) + 0.7059715047839078092146831 + >>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \ + ... nsum(lambda k: (3*k+2)**-2, [0,inf]) + 0.7059715047839078092146831 + +""" + +coulombf = r""" +Calculates the regular Coulomb wave function + +.. math :: + + F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz) + +where the normalization constant `C_l(\eta)` is as calculated by +:func:`~mpmath.coulombc`. This function solves the differential equation + +.. math :: + + f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0. + +A second linearly independent solution is given by the irregular +Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`) +and thus the general solution is +`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary +constants `C_1`, `C_2`. +Physically, the Coulomb wave functions give the radial solution +to the Schrodinger equation for a point particle in a `1/z` potential; `z` is +then the radius and `l`, `\eta` are quantum numbers. + +The Coulomb wave functions with real parameters are defined +in Abramowitz & Stegun, section 14. However, all parameters are permitted +to be complex in this implementation (see references). + +**Plots** + +.. literalinclude :: /plots/coulombf.py +.. image :: /plots/coulombf.png +.. literalinclude :: /plots/coulombf_c.py +.. 
image :: /plots/coulombf_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary magnitudes of `z`::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> coulombf(2, 1.5, 3.5)
+    0.4080998961088761187426445
+    >>> coulombf(-2, 1.5, 3.5)
+    0.7103040849492536747533465
+    >>> coulombf(2, 1.5, '1e-10')
+    4.143324917492256448770769e-33
+    >>> coulombf(2, 1.5, 1000)
+    0.4482623140325567050716179
+    >>> coulombf(2, 1.5, 10**10)
+    -0.066804196437694360046619
+
+Verifying the differential equation::
+
+    >>> l, eta, z = 2, 3, mpf(2.75)
+    >>> A, B = 1, 2
+    >>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
+    >>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
+    0.0
+
+A Wronskian relation satisfied by the Coulomb wave functions::
+
+    >>> l = 2
+    >>> eta = 1.5
+    >>> F = lambda z: coulombf(l,eta,z)
+    >>> G = lambda z: coulombg(l,eta,z)
+    >>> for z in [3.5, -1, 2+3j]:
+    ...     chop(diff(F,z)*G(z) - F(z)*diff(G,z))
+    ...
+    1.0
+    1.0
+    1.0
+
+Another Wronskian relation::
+
+    >>> F = coulombf
+    >>> G = coulombg
+    >>> for z in [3.5, -1, 2+3j]:
+    ...     chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
+    ...
+    0.0
+    0.0
+    0.0
+
+An integral identity connecting the regular and irregular wave functions::
+
+    >>> l, eta, z = 4+j, 2-j, 5+2j
+    >>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
+    (0.7997977752284033239714479 + 0.9294486669502295512503127j)
+    >>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
+    >>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
+    (0.7997977752284033239714479 + 0.9294486669502295512503127j)
+
+Some test cases with complex parameters, taken from Michel [2]::
+
+    >>> mp.dps = 15
+    >>> coulombf(1+0.1j, 50+50j, 100.156)
+    (-1.02107292320897e+15 - 2.83675545731519e+15j)
+    >>> coulombg(1+0.1j, 50+50j, 100.156)
+    (2.83675545731519e+15 - 1.02107292320897e+15j)
+    >>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
+    (4.30566371247811e-14 - 9.03347835361657e-19j)
+    >>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
+    (778709182061.134 + 18418936.2660553j)
+
+The following reproduces a table in Abramowitz & Stegun, at twice
+the precision::
+
+    >>> mp.dps = 10
+    >>> eta = 2; z = 5
+    >>> for l in [5, 4, 3, 2, 1, 0]:
+    ...     print("%s %s %s" % (l, coulombf(l,eta,z),
+    ...         diff(lambda z: coulombf(l,eta,z), z)))
+    ...
+    5 0.09079533488 0.1042553261
+    4 0.2148205331 0.2029591779
+    3 0.4313159311 0.320534053
+    2 0.7212774133 0.3952408216
+    1 0.9935056752 0.3708676452
+    0 1.143337392 0.2937960375
+
+**References**
+
+1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
+   Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
+
+2. N. Michel, "Precise Coulomb wave functions for a wide range of
+   complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
+
+"""
+
+coulombg = r"""
+Calculates the irregular Coulomb wave function
+
+.. math ::
+
+    G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
+
+where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
+and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
+
+See :func:`~mpmath.coulombf` for additional information.
+
+**Plots**
+
+.. literalinclude :: /plots/coulombg.py
+.. image :: /plots/coulombg.png
+.. literalinclude :: /plots/coulombg_c.py
+.. image :: /plots/coulombg_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary magnitudes of `z`::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> coulombg(-2, 1.5, 3.5)
+    1.380011900612186346255524
+    >>> coulombg(2, 1.5, 3.5)
+    1.919153700722748795245926
+    >>> coulombg(-2, 1.5, '1e-10')
+    201126715824.7329115106793
+    >>> coulombg(-2, 1.5, 1000)
+    0.1802071520691149410425512
+    >>> coulombg(-2, 1.5, 10**10)
+    0.652103020061678070929794
+
+The following reproduces a table in Abramowitz & Stegun,
+at twice the precision::
+
+    >>> mp.dps = 10
+    >>> eta = 2; z = 5
+    >>> for l in [1, 2, 3, 4, 5]:
+    ...     print("%s %s %s" % (l, coulombg(l,eta,z),
+    ...         -diff(lambda z: coulombg(l,eta,z), z)))
+    ...
+    1 1.08148276 0.6028279961
+    2 1.496877075 0.5661803178
+    3 2.048694714 0.7959909551
+    4 3.09408669 1.731802374
+    5 5.629840456 4.549343289
+
+Evaluation close to the singularity at `z = 0`::
+
+    >>> mp.dps = 15
+    >>> coulombg(0,10,1)
+    3088184933.67358
+    >>> coulombg(0,10,'1e-10')
+    5554866000719.8
+    >>> coulombg(0,10,'1e-100')
+    5554866221524.1
+
+Evaluation with a half-integer value for `l`::
+
+    >>> coulombg(1.5, 1, 10)
+    0.852320038297334
+"""
+
+coulombc = r"""
+Gives the normalizing Gamow constant for Coulomb wave functions,
+
+.. math ::
+
+    C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
+        \ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
+
+where the log gamma function with continuous imaginary part
+away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
+
+This function is used internally for the calculation of
+Coulomb wave functions, and automatically cached to make multiple
+evaluations with fixed `l`, `\eta` fast.
+"""
+
+ellipfun = r"""
+Computes any of the Jacobi elliptic functions, defined
+in terms of Jacobi theta functions as
+
+.. math ::
+
+    \mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
+        \frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
+
+    \mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
+        \frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
+
+    \mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
+        \frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
+
+or more generally computes a ratio of two such functions. Here
+`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
+:func:`~mpmath.nome`). Optionally, you can specify the nome directly
+instead of `m` by passing ``q=``, or you can directly
+specify the elliptic modulus `k` (where `m = k^2`) with ``k=``.
+
+The first argument should be a two-character string specifying the
+function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
+letters respectively denote the basic functions
+`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
+The identifier specifies the ratio of two such functions.
+For example, ``'ns'`` identifies the function
+
+.. math ::
+
+    \mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
+
+and ``'cd'`` identifies the function
+
+.. math ::
+
+    \mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
+
+If called with only the first argument, a function object
+evaluating the chosen function for given arguments is returned.
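+
+A short sketch (plain Python, not a doctest) of the function-object
+form, together with the standard identity
+`\mathrm{sn}^2(u,m) + \mathrm{cn}^2(u,m) = 1`::
+
+    from mpmath import *
+
+    mp.dps = 25
+    sn = ellipfun('sn')      # function object: call as sn(u, m)
+    cn = ellipfun('cn')
+    u, m = mpf(1.5), mpf(0.3)
+    assert abs(sn(u, m)**2 + cn(u, m)**2 - 1) < 1e-20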
+ +**Examples** + +Basic evaluation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipfun('cd', 3.5, 0.5) + -0.9891101840595543931308394 + >>> ellipfun('cd', 3.5, q=0.25) + 0.07111979240214668158441418 + +The sn-function is doubly periodic in the complex plane with periods +`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`):: + + >>> sn = ellipfun('sn') + >>> sn(2, 0.25) + 0.9628981775982774425751399 + >>> sn(2+4*ellipk(0.25), 0.25) + 0.9628981775982774425751399 + >>> chop(sn(2+2*j*ellipk(1-0.25), 0.25)) + 0.9628981775982774425751399 + +The cn-function is doubly periodic with periods `4 K(m)` and `2 K(m) + 2 i K(1-m)`:: + + >>> cn = ellipfun('cn') + >>> cn(2, 0.25) + -0.2698649654510865792581416 + >>> cn(2+4*ellipk(0.25), 0.25) + -0.2698649654510865792581416 + >>> chop(cn(2+2*ellipk(0.25)+2*j*ellipk(1-0.25), 0.25)) + -0.2698649654510865792581416 + +The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`:: + + >>> dn = ellipfun('dn') + >>> dn(2, 0.25) + 0.8764740583123262286931578 + >>> dn(2+2*ellipk(0.25), 0.25) + 0.8764740583123262286931578 + >>> chop(dn(2+4*j*ellipk(1-0.25), 0.25)) + 0.8764740583123262286931578 + +""" + + +jtheta = r""" +Computes the Jacobi theta function `\vartheta_n(z, q)`, where +`n = 1, 2, 3, 4`, defined by the infinite series: + +.. math :: + + \vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} + (-1)^n q^{n^2+n\,} \sin((2n+1)z) + + \vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} + q^{n^{2\,} + n} \cos((2n+1)z) + + \vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty} + q^{n^2\,} \cos(2 n z) + + \vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty} + (-q)^{n^2\,} \cos(2 n z) + +The theta functions are functions of two variables: + +* `z` is the *argument*, an arbitrary real or complex number + +* `q` is the *nome*, which must be a real or complex number + in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the + series converge very quickly, so the Jacobi theta functions + can efficiently be evaluated to high precision. + +The compact notations `\vartheta_n(q) = \vartheta_n(0,q)` +and `\vartheta_n = \vartheta_n(0,q)` are also frequently +encountered. Finally, Jacobi theta functions are frequently +considered as functions of the half-period ratio `\tau` +and then usually denoted by `\vartheta_n(z|\tau)`. + +Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes +a `d`-th derivative with respect to `z`. + +**Examples and basic properties** + +Considered as functions of `z`, the Jacobi theta functions may be +viewed as generalizations of the ordinary trigonometric functions +cos and sin. They are periodic functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> jtheta(1, 0.25, '0.2') + 0.2945120798627300045053104 + >>> jtheta(1, 0.25 + 2*pi, '0.2') + 0.2945120798627300045053104 + +Indeed, the series defining the theta functions are essentially +trigonometric Fourier series. 
The coefficients can be retrieved +using :func:`~mpmath.fourier`:: + + >>> mp.dps = 10 + >>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4)) + ([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]) + +The Jacobi theta functions are also so-called quasiperiodic +functions of `z` and `\tau`, meaning that for fixed `\tau`, +`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same +except for an exponential factor:: + + >>> mp.dps = 25 + >>> tau = 3*j/10 + >>> q = exp(pi*j*tau) + >>> z = 10 + >>> jtheta(4, z+tau*pi, q) + (-0.682420280786034687520568 + 1.526683999721399103332021j) + >>> -exp(-2*j*z)/q * jtheta(4, z, q) + (-0.682420280786034687520568 + 1.526683999721399103332021j) + +The Jacobi theta functions satisfy a huge number of other +functional equations, such as the following identity (valid for +any `q`):: + + >>> q = mpf(3)/10 + >>> jtheta(3,0,q)**4 + 6.823744089352763305137427 + >>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4 + 6.823744089352763305137427 + +Extensive listings of identities satisfied by the Jacobi theta +functions can be found in standard reference works. + +The Jacobi theta functions are related to the gamma function +for special arguments:: + + >>> jtheta(3, 0, exp(-pi)) + 1.086434811213308014575316 + >>> pi**(1/4.) / gamma(3/4.) + 1.086434811213308014575316 + +:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex +arguments:: + + >>> mp.dps = 50 + >>> jtheta(4, sqrt(2), 0.5) + 2.0549510717571539127004115835148878097035750653737 + >>> mp.dps = 25 + >>> jtheta(4, 1+2j, (1+j)/5) + (7.180331760146805926356634 - 1.634292858119162417301683j) + +Evaluation of derivatives:: + + >>> mp.dps = 25 + >>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7) + 1.209857192844475388637236 + 1.209857192844475388637236 + >>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2) + -0.2598718791650217206533052 + -0.2598718791650217206533052 + >>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7) + -1.150231437070259644461474 + -1.150231437070259644461474 + >>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2) + -0.6226636990043777445898114 + -0.6226636990043777445898114 + >>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7) + -0.9990312046096634316587882 + -0.9990312046096634316587882 + >>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2) + -0.1530388693066334936151174 + -0.1530388693066334936151174 + >>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7) + 0.9820995967262793943571139 + 0.9820995967262793943571139 + >>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2) + 0.3936902850291437081667755 + 0.3936902850291437081667755 + +**Possible issues** + +For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises +``ValueError``. This exception is also raised for `|q|` extremely +close to 1 (or equivalently `\tau` very close to 0), since the +series would converge too slowly:: + + >>> jtheta(1, 10, 0.99999999 * exp(0.5*j)) + Traceback (most recent call last): + ... + ValueError: abs(q) > THETA_Q_LIM = 1.000000 + +""" + +eulernum = r""" +Gives the `n`-th Euler number, defined as the `n`-th derivative of +`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the +Euler numbers give the coefficients of the Taylor series + +.. math :: + + \mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n. + +The Euler numbers are closely related to Bernoulli numbers +and Bernoulli polynomials. 
They can also be evaluated in terms of +Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`. + +**Examples** + +Computing the first few Euler numbers and verifying that they +agree with the Taylor series:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> [eulernum(n) for n in range(11)] + [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] + >>> chop(diffs(sech, 0, 10)) + [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] + +Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently +computes numerical approximations for large indices:: + + >>> eulernum(50) + -6.053285248188621896314384e+54 + >>> eulernum(1000) + 3.887561841253070615257336e+2371 + >>> eulernum(10**20) + 4.346791453661149089338186e+1936958564106659551331 + +Comparing with an asymptotic formula for the Euler numbers:: + + >>> n = 10**5 + >>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n + 3.69919063017432362805663e+436961 + >>> eulernum(n) + 3.699193712834466537941283e+436961 + +Pass ``exact=True`` to obtain exact values of Euler numbers as integers:: + + >>> print(eulernum(50, exact=True)) + -6053285248188621896314383785111649088103498225146815121 + >>> print(eulernum(200, exact=True) % 10**10) + 1925859625 + >>> eulernum(1001, exact=True) + 0 +""" + +eulerpoly = r""" +Evaluates the Euler polynomial `E_n(z)`, defined by the generating function +representation + +.. math :: + + \frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}. + +The Euler polynomials may also be represented in terms of +Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for +example + +.. math :: + + E_n(z) = \frac{2}{n+1} \left( + B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right) + \right). + +Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see +:func:`~mpmath.eulernum`). + +**Examples** + +Computing the coefficients of the first few Euler polynomials:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> for n in range(6): + ... chop(taylor(lambda z: eulerpoly(n,z), 0, n)) + ... + [1.0] + [-0.5, 1.0] + [0.0, -1.0, 1.0] + [0.25, 0.0, -1.5, 1.0] + [0.0, 1.0, 0.0, -2.0, 1.0] + [-0.5, 0.0, 2.5, 0.0, -2.5, 1.0] + +Evaluation for arbitrary `z`:: + + >>> eulerpoly(2,3) + 6.0 + >>> eulerpoly(5,4) + 423.5 + >>> eulerpoly(35, 11111111112) + 3.994957561486776072734601e+351 + >>> eulerpoly(4, 10+20j) + (-47990.0 - 235980.0j) + >>> eulerpoly(2, '-3.5e-5') + 0.000035001225 + >>> eulerpoly(3, 0.5) + 0.0 + >>> eulerpoly(55, -10**80) + -1.0e+4400 + >>> eulerpoly(5, -inf) + -inf + >>> eulerpoly(6, -inf) + +inf + +Computing Euler numbers:: + + >>> 2**26 * eulerpoly(26,0.5) + -4087072509293123892361.0 + >>> eulernum(26) + -4087072509293123892361.0 + +Evaluation is accurate for large `n` and small `z`:: + + >>> eulerpoly(100, 0.5) + 2.29047999988194114177943e+108 + >>> eulerpoly(1000, 10.5) + 3.628120031122876847764566e+2070 + >>> eulerpoly(10000, 10.5) + 1.149364285543783412210773e+30688 +""" + +spherharm = r""" +Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`, + +.. math :: + + Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}} + P_l^m(\cos \theta) e^{i m \phi} + +where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`). + +Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging +from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the +azimuthal coordinate on a sphere. 
Care should be used since many different +conventions for spherical coordinate variables are used. + +Usually spherical harmonics are considered for `l \in \mathbb{N}`, +`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi` +are permitted to be complex numbers. + +.. note :: + + :func:`~mpmath.spherharm` returns a complex number, even if the value is + purely real. + +**Plots** + +.. literalinclude :: /plots/spherharm40.py + +`Y_{4,0}`: + +.. image :: /plots/spherharm40.png + +`Y_{4,1}`: + +.. image :: /plots/spherharm41.png + +`Y_{4,2}`: + +.. image :: /plots/spherharm42.png + +`Y_{4,3}`: + +.. image :: /plots/spherharm43.png + +`Y_{4,4}`: + +.. image :: /plots/spherharm44.png + +**Examples** + +Some low-order spherical harmonics with reference values:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> theta = pi/4 + >>> phi = pi/3 + >>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0) + (0.2820947917738781434740397 + 0.0j) + (0.2820947917738781434740397 + 0.0j) + >>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta) + (0.1221506279757299803965962 - 0.2115710938304086076055298j) + (0.1221506279757299803965962 - 0.2115710938304086076055298j) + >>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0) + (0.3454941494713354792652446 + 0.0j) + (0.3454941494713354792652446 + 0.0j) + >>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta) + (-0.1221506279757299803965962 - 0.2115710938304086076055298j) + (-0.1221506279757299803965962 - 0.2115710938304086076055298j) + +With the normalization convention used, the spherical harmonics are orthonormal +on the unit sphere:: + + >>> sphere = [0,pi], [0,2*pi] + >>> dS = lambda t,p: fp.sin(t) # differential element + >>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p) + >>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p)) + >>> l1 = l2 = 3; m1 = m2 = 2 + >>> fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)) + 1.0000000000000007 + >>> m2 = 1 # m1 != m2 + >>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))) + 0.0 + +Evaluation is accurate for large orders:: + + >>> spherharm(1000,750,0.5,0.25) + (3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j) + +Evaluation works with complex parameter values:: + + >>> spherharm(1+j, 2j, 2+3j, -0.5j) + (64.44922331113759992154992 + 1981.693919841408089681743j) +""" + +scorergi = r""" +Evaluates the Scorer function + +.. math :: + + \operatorname{Gi}(z) = + \operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt + + \operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt + +which gives a particular solution to the inhomogeneous Airy +differential equation `f''(z) - z f(z) = 1/\pi`. Another +particular solution is given by the Scorer Hi-function +(:func:`~mpmath.scorerhi`). The two functions are related as +`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`. + +**Plots** + +.. literalinclude :: /plots/gi.py +.. image :: /plots/gi.png +.. literalinclude :: /plots/gi_c.py +.. 
image :: /plots/gi_c.png + +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3')) + 0.2049755424820002450503075 + 0.2049755424820002450503075 + >>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3')) + 0.1494294524512754526382746 + 0.1494294524512754526382746 + >>> scorergi(+inf); scorergi(-inf) + 0.0 + 0.0 + >>> scorergi(1) + 0.2352184398104379375986902 + >>> scorergi(-1) + -0.1166722172960152826494198 + +Evaluation for large arguments:: + + >>> scorergi(10) + 0.03189600510067958798062034 + >>> scorergi(100) + 0.003183105228162961476590531 + >>> scorergi(1000000) + 0.0000003183098861837906721743873 + >>> 1/(pi*1000000) + 0.0000003183098861837906715377675 + >>> scorergi(-1000) + -0.08358288400262780392338014 + >>> scorergi(-100000) + 0.02886866118619660226809581 + >>> scorergi(50+10j) + (0.0061214102799778578790984 - 0.001224335676457532180747917j) + >>> scorergi(-50-10j) + (5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j) + >>> scorergi(100000j) + (-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j) + +Verifying the connection between Gi and Hi:: + + >>> z = 0.25 + >>> scorergi(z) + scorerhi(z) + 0.7287469039362150078694543 + >>> airybi(z) + 0.7287469039362150078694543 + +Verifying the differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(diff(scorergi,z,2) - z*scorergi(z)) + ... + -0.3183098861837906715377675 + -0.3183098861837906715377675 + -0.3183098861837906715377675 + -0.3183098861837906715377675 + +Verifying the integral representation:: + + >>> z = 0.5 + >>> scorergi(z) + 0.2447210432765581976910539 + >>> Ai,Bi = airyai,airybi + >>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1)) + 0.2447210432765581976910539 + +**References** + +1. [DLMF]_ section 9.12: Scorer Functions + +""" + +scorerhi = r""" +Evaluates the second Scorer function + +.. math :: + + \operatorname{Hi}(z) = + \operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt - + \operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt + +which gives a particular solution to the inhomogeneous Airy +differential equation `f''(z) - z f(z) = 1/\pi`. See also +:func:`~mpmath.scorergi`. + +**Plots** + +.. literalinclude :: /plots/hi.py +.. image :: /plots/hi.png +.. literalinclude :: /plots/hi_c.py +.. 
image :: /plots/hi_c.png + +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3')) + 0.4099510849640004901006149 + 0.4099510849640004901006149 + >>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3')) + 0.2988589049025509052765491 + 0.2988589049025509052765491 + >>> scorerhi(+inf); scorerhi(-inf) + +inf + 0.0 + >>> scorerhi(1) + 0.9722051551424333218376886 + >>> scorerhi(-1) + 0.2206696067929598945381098 + +Evaluation for large arguments:: + + >>> scorerhi(10) + 455641153.5163291358991077 + >>> scorerhi(100) + 6.041223996670201399005265e+288 + >>> scorerhi(1000000) + 7.138269638197858094311122e+289529652 + >>> scorerhi(-10) + 0.0317685352825022727415011 + >>> scorerhi(-100) + 0.003183092495767499864680483 + >>> scorerhi(100j) + (-6.366197716545672122983857e-9 + 0.003183098861710582761688475j) + >>> scorerhi(50+50j) + (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) + >>> scorerhi(-1000-1000j) + (0.0001591549432510502796565538 - 0.000159154943091895334973109j) + +Verifying the differential equation:: + + >>> for z in [-3.4, 0, 2, 1+2j]: + ... chop(diff(scorerhi,z,2) - z*scorerhi(z)) + ... + 0.3183098861837906715377675 + 0.3183098861837906715377675 + 0.3183098861837906715377675 + 0.3183098861837906715377675 + +Verifying the integral representation:: + + >>> z = 0.5 + >>> scorerhi(z) + 0.6095559998265972956089949 + >>> Ai,Bi = airyai,airybi + >>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1)) + 0.6095559998265972956089949 + +""" + + +stirling1 = r""" +Gives the Stirling number of the first kind `s(n,k)`, defined by + +.. math :: + + x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k. + +The value is computed using an integer recurrence. The implementation +is not optimized for approximating large values quickly. + +**Examples** + +Comparing with the generating function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> taylor(lambda x: ff(x, 5), 0, 5) + [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] + >>> [stirling1(5, k) for k in range(6)] + [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] + +Recurrence relation:: + + >>> n, k = 5, 3 + >>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1) + 0.0 + +The matrices of Stirling numbers of first and second kind are inverses +of each other:: + + >>> A = matrix(5, 5); B = matrix(5, 5) + >>> for n in range(5): + ... for k in range(5): + ... A[n,k] = stirling1(n,k) + ... B[n,k] = stirling2(n,k) + ... + >>> A * B + [1.0 0.0 0.0 0.0 0.0] + [0.0 1.0 0.0 0.0 0.0] + [0.0 0.0 1.0 0.0 0.0] + [0.0 0.0 0.0 1.0 0.0] + [0.0 0.0 0.0 0.0 1.0] + +Pass ``exact=True`` to obtain exact values of Stirling numbers as integers:: + + >>> stirling1(42, 5) + -2.864498971768501633736628e+50 + >>> print(stirling1(42, 5, exact=True)) + -286449897176850163373662803014001546235808317440000 + +""" + +stirling2 = r""" +Gives the Stirling number of the second kind `S(n,k)`, defined by + +.. math :: + + x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1) + +The value is computed using integer arithmetic to evaluate a power sum. +The implementation is not optimized for approximating large values quickly. 
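+
+`S(n,k)` also has a direct combinatorial meaning: it counts the ways to
+partition a set of `n` elements into `k` nonempty blocks. A quick sanity
+check of this interpretation (a sketch; values are returned as ``mpf``
+unless ``exact=True`` is passed)::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> stirling2(4, 2)   # the 7 two-block partitions of a 4-element set
+    7.0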
+
+**Examples**
+
+Comparing with the generating function::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5)
+    [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
+
+Recurrence relation::
+
+    >>> n, k = 5, 3
+    >>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1)
+    0.0
+
+Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
+
+    >>> stirling2(52, 10)
+    2.641822121003543906807485e+45
+    >>> print(stirling2(52, 10, exact=True))
+    2641822121003543906807485307053638921722527655
+
+
+"""
+
+squarew = r"""
+Computes the square wave function using the definition:
+
+.. math::
+    x(t) = A(-1)^{\left\lfloor{2t / P}\right\rfloor}
+
+where `P` is the period of the wave and `A` is the amplitude.
+
+**Examples**
+
+Square wave with period = 2, amplitude = 1 ::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> squarew(0,1,2)
+    1.0
+    >>> squarew(0.5,1,2)
+    1.0
+    >>> squarew(1,1,2)
+    -1.0
+    >>> squarew(1.5,1,2)
+    -1.0
+    >>> squarew(2,1,2)
+    1.0
+"""
+
+trianglew = r"""
+Computes the triangle wave function using the definition:
+
+.. math::
+    x(t) = 2A\left(\frac{1}{2}-\left|1-2 \operatorname{frac}\left(\frac{t}{P}+\frac{1}{4}\right)\right|\right)
+
+where :math:`\operatorname{frac}\left(\frac{t}{P}\right) = \frac{t}{P}-\left\lfloor{\frac{t}{P}}\right\rfloor`,
+`P` is the period of the wave, and `A` is the amplitude.
+
+**Examples**
+
+Triangle wave with period = 2, amplitude = 1 ::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> trianglew(0,1,2)
+    0.0
+    >>> trianglew(0.25,1,2)
+    0.5
+    >>> trianglew(0.5,1,2)
+    1.0
+    >>> trianglew(1,1,2)
+    0.0
+    >>> trianglew(1.5,1,2)
+    -1.0
+    >>> trianglew(2,1,2)
+    0.0
+"""
+
+sawtoothw = r"""
+Computes the sawtooth wave function using the definition:
+
+.. math::
+    x(t) = A\operatorname{frac}\left(\frac{t}{P}\right)
+
+where :math:`\operatorname{frac}\left(\frac{t}{P}\right) = \frac{t}{P}-\left\lfloor{\frac{t}{P}}\right\rfloor`,
+`P` is the period of the wave, and `A` is the amplitude.
+
+**Examples**
+
+Sawtooth wave with period = 2, amplitude = 1 ::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> sawtoothw(0,1,2)
+    0.0
+    >>> sawtoothw(0.5,1,2)
+    0.25
+    >>> sawtoothw(1,1,2)
+    0.5
+    >>> sawtoothw(1.5,1,2)
+    0.75
+    >>> sawtoothw(2,1,2)
+    0.0
+"""
+
+unit_triangle = r"""
+Computes the unit triangle using the definition:
+
+.. math::
+    x(t) = A(-\left| t \right| + 1)
+
+where `A` is the amplitude.
+
+**Examples**
+
+Unit triangle with amplitude = 1 ::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> unit_triangle(-1,1)
+    0.0
+    >>> unit_triangle(-0.5,1)
+    0.5
+    >>> unit_triangle(0,1)
+    1.0
+    >>> unit_triangle(0.5,1)
+    0.5
+    >>> unit_triangle(1,1)
+    0.0
+"""
+
+sigmoid = r"""
+Computes the sigmoid function using the definition:
+
+.. math::
+    x(t) = \frac{A}{1 + e^{-t}}
+
+where `A` is the amplitude.
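+
+Since `1/(1 + e^{-t}) = \tfrac{1}{2}(1 + \tanh(t/2))`, the sigmoid can be
+cross-checked against :func:`~mpmath.tanh`; a minimal sketch::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> chop(sigmoid(0.7, 2) - (1 + tanh(0.35)))
+    0.0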
+ +**Examples** + +Sigmoid function with amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sigmoid(-1,1) + 0.2689414213699951207488408 + >>> sigmoid(-0.5,1) + 0.3775406687981454353610994 + >>> sigmoid(0,1) + 0.5 + >>> sigmoid(0.5,1) + 0.6224593312018545646389006 + >>> sigmoid(1,1) + 0.7310585786300048792511592 + +""" diff --git a/MLPY/Lib/site-packages/mpmath/functions/__init__.py b/MLPY/Lib/site-packages/mpmath/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5896ed0579eceab086dc5c67eaa649b6061a53dc --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/__init__.py @@ -0,0 +1,14 @@ +from . import functions +# Hack to update methods +from . import factorials +from . import hypergeometric +from . import expintegrals +from . import bessel +from . import orthogonal +from . import theta +from . import elliptic +from . import signals +from . import zeta +from . import rszeta +from . import zetazeros +from . import qfunctions diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68bce4d25325f35c524eb1fbd8c504534d5e675e Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/bessel.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/bessel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d9ebead55114370c40f9384a05f00114236c787 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/bessel.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/elliptic.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/elliptic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5f76239e384cb733cffd07dc28ee62a3a075d77 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/elliptic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a6e1d6f7b3d9514cf4176e432a2510f31849417 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/factorials.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/factorials.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48ca408870389cb8bc8a275b146c1bd12afe0a62 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/factorials.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/functions.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/functions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7711666f6b0072ebd68fa6b8563dd5a022530b8 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/functions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..da081c411c7e9bc792a88cc2bbd0348262825ed6 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a65d9ef6993526e65df1102eca09bf9c199515c3 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..687ce0e216717afdf296b6ebaf57e0f107488812 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/rszeta.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/rszeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4beeb3b57e90e255f4b48f96ae7c2166d4972765 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/rszeta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/signals.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/signals.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..385d21e7a915a26babf135672ec24284d92a70f4 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/signals.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/theta.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/theta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbd3069126eca5ac32729b0d712f2277212097c6 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/theta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zeta.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee22c9757564fc017ec8fcfb3309b1d987756d49 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zeta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a7d29206bb1c924ad525d6780644698220db247 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/functions/bessel.py b/MLPY/Lib/site-packages/mpmath/functions/bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..8b41d87bb0118de61d5561433dabcb181f872f84 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/bessel.py @@ -0,0 +1,1108 @@ +from .functions import defun, defun_wrapped + +@defun +def j0(ctx, x): + """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`.""" + return ctx.besselj(0, x) + +@defun +def j1(ctx, x): + """Computes the Bessel function `J_1(x)`. 
See :func:`~mpmath.besselj`.""" + return ctx.besselj(1, x) + +@defun +def besselj(ctx, n, z, derivative=0, **kwargs): + if type(n) is int: + n_isint = True + else: + n = ctx.convert(n) + n_isint = ctx.isint(n) + if n_isint: + n = int(ctx._re(n)) + if n_isint and n < 0: + return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs) + z = ctx.convert(z) + M = ctx.mag(z) + if derivative: + d = ctx.convert(derivative) + # TODO: the integer special-casing shouldn't be necessary. + # However, the hypergeometric series gets inaccurate for large d + # because of inaccurate pole cancellation at a pole far from + # zero (needs to be fixed in hypercomb or hypsum) + if ctx.isint(d) and d >= 0: + d = int(d) + orig = ctx.prec + try: + ctx.prec += 15 + v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z) + for k in range(d+1)) + finally: + ctx.prec = orig + v *= ctx.mpf(2)**(-d) + else: + def h(n,d): + r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True) + B = [0.5*(n-d+1), 0.5*(n-d+2)] + T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)] + return T + v = ctx.hypercomb(h, [n,d], **kwargs) + else: + # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation + if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20: + try: + return ctx._besselj(n, z) + except NotImplementedError: + pass + if not z: + if not n: + v = ctx.one + n+z + elif ctx.re(n) > 0: + v = n*z + else: + v = ctx.inf + z + n + else: + #v = 0 + orig = ctx.prec + try: + # XXX: workaround for accuracy in low level hypergeometric series + # when alternating, large arguments + ctx.prec += min(3*abs(M), ctx.prec) + w = ctx.fmul(z, 0.5, exact=True) + def h(n): + r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True) + return [([w], [n], [], [n+1], [], [n+1], r)] + v = ctx.hypercomb(h, [n], **kwargs) + finally: + ctx.prec = orig + v = +v + return v + +@defun +def besseli(ctx, n, z, derivative=0, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + if not z: + if derivative: + raise ValueError + if not n: + # I(0,0) = 1 + return 1+n+z + if ctx.isint(n): + return 0*(n+z) + r = ctx.re(n) + if r == 0: + return ctx.nan*(n+z) + elif r > 0: + return 0*(n+z) + else: + return ctx.inf+(n+z) + M = ctx.mag(z) + if derivative: + d = ctx.convert(derivative) + def h(n,d): + r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True) + B = [0.5*(n-d+1), 0.5*(n-d+2), n+1] + T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)] + return T + v = ctx.hypercomb(h, [n,d], **kwargs) + else: + def h(n): + w = ctx.fmul(z, 0.5, exact=True) + r = ctx.fmul(w, w, prec=max(0,ctx.prec+M)) + return [([w], [n], [], [n+1], [], [n+1], r)] + v = ctx.hypercomb(h, [n], **kwargs) + return v + +@defun_wrapped +def bessely(ctx, n, z, derivative=0, **kwargs): + if not z: + if derivative: + # Not implemented + raise ValueError + if not n: + # ~ log(z/2) + return -ctx.inf + (n+z) + if ctx.im(n): + return ctx.nan * (n+z) + r = ctx.re(n) + q = n+0.5 + if ctx.isint(q): + if n > 0: + return -ctx.inf + (n+z) + else: + return 0 * (n+z) + if r < 0 and int(ctx.floor(q)) % 2: + return ctx.inf + (n+z) + else: + return ctx.ninf + (n+z) + # XXX: use hypercomb + ctx.prec += 10 + m, d = ctx.nint_distance(n) + if d < -ctx.prec: + h = +ctx.eps + ctx.prec *= 2 + n += h + elif d < 0: + ctx.prec -= d + # TODO: avoid cancellation for imaginary arguments + cos, sin = ctx.cospi_sinpi(n) + return (ctx.besselj(n,z,derivative,**kwargs)*cos - \ + ctx.besselj(-n,z,derivative,**kwargs))/sin + +@defun_wrapped +def 
besselk(ctx, n, z, **kwargs): + if not z: + return ctx.inf + M = ctx.mag(z) + if M < 1: + # Represent as limit definition + def h(n): + r = (z/2)**2 + T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r + T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r + return T1, T2 + # We could use the limit definition always, but it leads + # to very bad cancellation (of exponentially large terms) + # for large real z + # Instead represent in terms of 2F0 + else: + ctx.prec += M + def h(n): + return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \ + [n+0.5, 0.5-n], [], -1/(2*z))] + return ctx.hypercomb(h, [n], **kwargs) + +@defun_wrapped +def hankel1(ctx,n,x,**kwargs): + return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs) + +@defun_wrapped +def hankel2(ctx,n,x,**kwargs): + return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs) + +@defun_wrapped +def whitm(ctx,k,m,z,**kwargs): + if z == 0: + # M(k,m,z) = 0^(1/2+m) + if ctx.re(m) > -0.5: + return z + elif ctx.re(m) < -0.5: + return ctx.inf + z + else: + return ctx.nan * z + x = ctx.fmul(-0.5, z, exact=True) + y = 0.5+m + return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs) + +@defun_wrapped +def whitw(ctx,k,m,z,**kwargs): + if z == 0: + g = abs(ctx.re(m)) + if g < 0.5: + return z + elif g > 0.5: + return ctx.inf + z + else: + return ctx.nan * z + x = ctx.fmul(-0.5, z, exact=True) + y = 0.5+m + return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs) + +@defun +def hyperu(ctx, a, b, z, **kwargs): + a, atype = ctx._convert_param(a) + b, btype = ctx._convert_param(b) + z = ctx.convert(z) + if not z: + if ctx.re(b) <= 1: + return ctx.gammaprod([1-b],[a-b+1]) + else: + return ctx.inf + z + bb = 1+a-b + bb, bbtype = ctx._convert_param(bb) + try: + orig = ctx.prec + try: + ctx.prec += 10 + v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec) + return v / z**a + finally: + ctx.prec = orig + except ctx.NoConvergence: + pass + def h(a,b): + w = ctx.sinpi(b) + T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z) + T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z) + return T1, T2 + return ctx.hypercomb(h, [a,b], **kwargs) + +@defun +def struveh(ctx,n,z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/ + def h(n): + return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)] + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def struvel(ctx,n,z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/ + def h(n): + return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)] + return ctx.hypercomb(h, [n], **kwargs) + +def _anger(ctx,which,v,z,**kwargs): + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + def h(v): + b = ctx.mpq_1_2 + u = v*b + m = b*3 + a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u + c, s = ctx.cospi_sinpi(u) + if which == 0: + A, B = [b*z, s], [c] + if which == 1: + A, B = [b*z, -c], [s] + w = ctx.square_exp_arg(z, mult=-0.25) + T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w + T2 = B, [1], [], [b1,b2], [1], [b1,b2], w + return T1, T2 + return ctx.hypercomb(h, [v], **kwargs) + +@defun +def angerj(ctx, v, z, **kwargs): + return _anger(ctx, 0, v, z, **kwargs) + +@defun +def webere(ctx, v, z, **kwargs): + return _anger(ctx, 1, v, z, **kwargs) + +@defun +def lommels1(ctx, u, v, z, **kwargs): + u = ctx._convert_param(u)[0] + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + def h(u,v): + b = 
ctx.mpq_1_2 + w = ctx.square_exp_arg(z, mult=-0.25) + return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \ + [b*(u-v+3),b*(u+v+3)], w), + return ctx.hypercomb(h, [u,v], **kwargs) + +@defun +def lommels2(ctx, u, v, z, **kwargs): + u = ctx._convert_param(u)[0] + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + # Asymptotic expansion (GR p. 947) -- need to be careful + # not to use for small arguments + # def h(u,v): + # b = ctx.mpq_1_2 + # w = -(z/2)**(-2) + # return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w), + def h(u,v): + b = ctx.mpq_1_2 + w = ctx.square_exp_arg(z, mult=-0.25) + T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w + T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w + T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w + #c1 = ctx.cospi((u-v)*b) + #c2 = ctx.cospi((u+v)*b) + #s = ctx.sinpi(v) + #r1 = (u-v+1)*b + #r2 = (u+v+1)*b + #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w + #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w + #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w + #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w + return T1, T2, T3 + return ctx.hypercomb(h, [u,v], **kwargs) + +@defun +def ber(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos, sin = ctx.cospi_sinpi(-0.75*n) + T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r + T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r + return T1, T2 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def bei(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos, sin = ctx.cospi_sinpi(0.75*n) + T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r + T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r + return T1, T2 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def ker(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos1, sin1 = ctx.cospi_sinpi(0.25*n) + cos2, sin2 = ctx.cospi_sinpi(0.75*n) + T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r + T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r + T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r + T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r + return T1, T2, T3, T4 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def kei(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos1, sin1 = ctx.cospi_sinpi(0.75*n) + cos2, sin2 = ctx.cospi_sinpi(0.25*n) + T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r + T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r + T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r + T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r + return T1, T2, T3, T4 + return ctx.hypercomb(h, [n], **kwargs) + +# TODO: do this more generically? 
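+# (c_memo below caches one value per constant and per context: a cached
+# result is reused only if it was computed at the current working precision
+# or higher; otherwise the constant is recomputed and re-cached.)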
+def c_memo(f): + name = f.__name__ + def f_wrapped(ctx): + cache = ctx._misc_const_cache + prec = ctx.prec + p,v = cache.get(name, (-1,0)) + if p >= prec: + return +v + else: + cache[name] = (prec, f(ctx)) + return cache[name][1] + return f_wrapped + +@c_memo +def _airyai_C1(ctx): + return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3)) + +@c_memo +def _airyai_C2(ctx): + return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3)) + +@c_memo +def _airybi_C1(ctx): + return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3)) + +@c_memo +def _airybi_C2(ctx): + return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3) + +def _airybi_n2_inf(ctx): + prec = ctx.prec + try: + v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi) + finally: + ctx.prec = prec + return +v + +# Derivatives at z = 0 +# TODO: could be expressed more elegantly using triple factorials +def _airyderiv_0(ctx, z, n, ntype, which): + if ntype == 'Z': + if n < 0: + return z + r = ctx.mpq_1_3 + prec = ctx.prec + try: + ctx.prec += 10 + v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi + if which == 0: + v *= ctx.sinpi(2*(n+1)*r) + v /= ctx.power(3,'2/3') + else: + v *= abs(ctx.sinpi(2*(n+1)*r)) + v /= ctx.power(3,'1/6') + finally: + ctx.prec = prec + return +v + z + else: + # singular (does the limit exist?) + raise NotImplementedError + +@defun +def airyai(ctx, z, derivative=0, **kwargs): + z = ctx.convert(z) + if derivative: + n, ntype = ctx._convert_param(derivative) + else: + n = 0 + # Values at infinities + if not ctx.isnormal(z) and z: + if n and ntype == 'Z': + if n == -1: + if z == ctx.inf: + return ctx.mpf(1)/3 + 1/z + if z == ctx.ninf: + return ctx.mpf(-2)/3 + 1/z + if n < -1: + if z == ctx.inf: + return z + if z == ctx.ninf: + return (-1)**n * (-z) + if (not n) and z == ctx.inf or z == ctx.ninf: + return 1/z + # TODO: limits + raise ValueError("essential singularity of Ai(z)") + # Account for exponential scaling + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if n: + if n == 1: + def h(): + # http://functions.wolfram.com/03.07.06.0005.01 + if ctx._re(z) > 4: + ctx.prec += extraprec + w = z**1.5; r = -0.75/w; u = -2*w/3 + ctx.prec -= extraprec + C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4) + return ([C],[1],[],[],[(-1,6),(7,6)],[],r), + # http://functions.wolfram.com/03.07.26.0001.01 + else: + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airyai_C1(ctx) * 0.5 + C2 = _airyai_C2(ctx) + T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w + T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + else: + if z == 0: + return _airyderiv_0(ctx, z, n, ntype, 0) + # http://functions.wolfram.com/03.05.20.0004.01 + def h(n): + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 + a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 + T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 + T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + return T1, T2 + v = ctx.hypercomb(h, [n], **kwargs) + if ctx._is_real_type(z) and ctx.isint(n): + v = ctx._re(v) + return v + else: + def h(): + if ctx._re(z) > 4: + # We could use 1F1, but it results in huge cancellation; + # the following expansion is better. 
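+                # (Each term of the direct series is exponentially large for
+                # large positive z, while Ai(z) ~ exp(-2/3*z^(3/2)) is
+                # exponentially small, so nearly all digits would cancel;
+                # the asymptotic series below, in powers of
+                # r = -3/(4*z^(3/2)), avoids this.)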
+ # TODO: asymptotic series for derivatives + ctx.prec += extraprec + w = z**1.5; r = -0.75/w; u = -2*w/3 + ctx.prec -= extraprec + C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4)) + return ([C],[1],[],[],[(1,6),(5,6)],[],r), + else: + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airyai_C1(ctx) + C2 = _airyai_C2(ctx) + T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w + T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def airybi(ctx, z, derivative=0, **kwargs): + z = ctx.convert(z) + if derivative: + n, ntype = ctx._convert_param(derivative) + else: + n = 0 + # Values at infinities + if not ctx.isnormal(z) and z: + if n and ntype == 'Z': + if z == ctx.inf: + return z + if z == ctx.ninf: + if n == -1: + return 1/z + if n == -2: + return _airybi_n2_inf(ctx) + if n < -2: + return (-1)**n * (-z) + if not n: + if z == ctx.inf: + return z + if z == ctx.ninf: + return 1/z + # TODO: limits + raise ValueError("essential singularity of Bi(z)") + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if n: + if n == 1: + # http://functions.wolfram.com/03.08.26.0001.01 + def h(): + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airybi_C1(ctx)*0.5 + C2 = _airybi_C2(ctx) + T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w + T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + else: + if z == 0: + return _airyderiv_0(ctx, z, n, ntype, 1) + def h(n): + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 + q16 = ctx.mpq_1_6 + q56 = ctx.mpq_5_6 + a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 + T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 + T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + return T1, T2 + v = ctx.hypercomb(h, [n], **kwargs) + if ctx._is_real_type(z) and ctx.isint(n): + v = ctx._re(v) + return v + else: + def h(): + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airybi_C1(ctx) + C2 = _airybi_C2(ctx) + T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w + T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +def _airy_zero(ctx, which, k, derivative, complex=False): + # Asymptotic formulas are given in DLMF section 9.9 + def U(t): return t**(2/3.)*(1-7/(t**2*48)) + def T(t): return t**(2/3.)*(1+5/(t**2*48)) + k = int(k) + if k < 1: + raise ValueError("k cannot be less than 1") + if not derivative in (0,1): + raise ValueError("Derivative should lie between 0 and 1") + if which == 0: + if derivative: + return ctx.findroot(lambda z: ctx.airyai(z,1), + -U(3*ctx.pi*(4*k-3)/8)) + return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8)) + if which == 1 and complex == False: + if derivative: + return ctx.findroot(lambda z: ctx.airybi(z,1), + -U(3*ctx.pi*(4*k-1)/8)) + return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8)) + if which == 1 and complex == True: + if derivative: + t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2 + s = ctx.expjpi(ctx.mpf(1)/3) * T(t) + return ctx.findroot(lambda z: ctx.airybi(z,1), s) + t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2 + s = ctx.expjpi(ctx.mpf(1)/3) * U(t) + return ctx.findroot(ctx.airybi, s) + +@defun +def airyaizero(ctx, k, derivative=0): + return _airy_zero(ctx, 0, k, derivative, False) + +@defun +def airybizero(ctx, k, derivative=0, complex=False): + return _airy_zero(ctx, 1, k, derivative, complex) + +def 
_scorer(ctx, z, which, kwargs): + z = ctx.convert(z) + if ctx.isinf(z): + if z == ctx.inf: + if which == 0: return 1/z + if which == 1: return z + if z == ctx.ninf: + return 1/z + raise ValueError("essential singularity") + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if kwargs.get('derivative'): + raise NotImplementedError + # Direct asymptotic expansions, to avoid + # exponentially large cancellation + try: + if ctx.mag(z) > 3: + if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999: + def h(): + return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) + return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) + if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999: + def h(): + return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) + return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) + except ctx.NoConvergence: + pass + def h(): + A = ctx.airybi(z, **kwargs)/3 + B = -2*ctx.pi + if which == 1: + A *= 2 + B *= -1 + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + T1 = [A], [1], [], [], [], [], 0 + T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def scorergi(ctx, z, **kwargs): + return _scorer(ctx, z, 0, kwargs) + +@defun +def scorerhi(ctx, z, **kwargs): + return _scorer(ctx, z, 1, kwargs) + +@defun_wrapped +def coulombc(ctx, l, eta, _cache={}): + if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: + return +_cache[l,eta][1] + G3 = ctx.loggamma(2*l+2) + G1 = ctx.loggamma(1+l+ctx.j*eta) + G2 = ctx.loggamma(1+l-ctx.j*eta) + v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3) + if not (ctx.im(l) or ctx.im(eta)): + v = ctx.re(v) + _cache[l,eta] = (ctx.prec, v) + return v + +@defun_wrapped +def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs): + # Regular Coulomb wave function + # Note: w can be either 1 or -1; the other may be better in some cases + # TODO: check that chop=True chops when and only when it should + #ctx.prec += 10 + def h(l, eta): + try: + jw = ctx.j*w + jwz = ctx.fmul(jw, z, exact=True) + jwz2 = ctx.fmul(jwz, -2, exact=True) + C = ctx.coulombc(l, eta) + T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \ + [2*l+2], jwz2 + except ValueError: + T1 = [0], [-1], [], [], [], [], 0 + return (T1,) + v = ctx.hypercomb(h, [l,eta], **kwargs) + if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \ + (ctx.re(z) >= 0): + v = ctx.re(v) + return v + +@defun_wrapped +def _coulomb_chi(ctx, l, eta, _cache={}): + if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: + return _cache[l,eta][1] + def terms(): + l2 = -l-1 + jeta = ctx.j*eta + return [ctx.loggamma(1+l+jeta) * (-0.5j), + ctx.loggamma(1+l-jeta) * (0.5j), + ctx.loggamma(1+l2+jeta) * (0.5j), + ctx.loggamma(1+l2-jeta) * (-0.5j), + -(l+0.5)*ctx.pi] + v = ctx.sum_accurately(terms, 1) + _cache[l,eta] = (ctx.prec, v) + return v + +@defun_wrapped +def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs): + # Irregular Coulomb wave function + # Note: w can be either 1 or -1; the other may be better in some cases + # TODO: check that chop=True chops when and only when it should + if not ctx._im(l): + l = ctx._re(l) # XXX: for isint + def h(l, eta): + # Force perturbation for integers and half-integers + if ctx.isint(l*2): + T1 = [0], [-1], [], [], [], [], 0 + return (T1,) + l2 = -l-1 + try: + chi = ctx._coulomb_chi(l, eta) + jw = ctx.j*w + s = ctx.sin(chi); c = ctx.cos(chi) + C1 = ctx.coulombc(l,eta) + C2 = ctx.coulombc(l2,eta) + u = ctx.exp(jw*z) + x 
= -2*jw*z
+            T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
+                [1+l+jw*eta], [2*l+2], x
+            T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
+                [1+l2+jw*eta], [2*l2+2], x
+            return T1, T2
+        except ValueError:
+            T1 = [0], [-1], [], [], [], [], 0
+            return (T1,)
+    v = ctx.hypercomb(h, [l,eta], **kwargs)
+    if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
+        (ctx._re(z) >= 0):
+        v = ctx._re(v)
+    return v
+
+def mcmahon(ctx,kind,prime,v,m):
+    """
+    Computes an estimate for the location of the Bessel function zero
+    j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
+    expansion (Abramowitz & Stegun 9.5.12-13, DLMF 10.21(vi)).
+
+    Returns (r,err) where r is the estimated location of the root
+    and err is a positive number estimating the error of the
+    asymptotic expansion.
+    """
+    u = 4*v**2
+    if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4
+    if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4
+    if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4
+    if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4
+    if not prime:
+        s1 = b
+        s2 = -(u-1)/(8*b)
+        s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3)
+        s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5)
+        s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7)
+    if prime:
+        s1 = b
+        s2 = -(u+3)/(8*b)
+        s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3)
+        s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5)
+        s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7)
+    terms = [s1,s2,s3,s4,s5]
+    s = s1
+    err = 0.0
+    for i in range(1,len(terms)):
+        if abs(terms[i]) < abs(terms[i-1]):
+            s += terms[i]
+        else:
+            err = abs(terms[i])
+        if i == len(terms)-1:
+            err = abs(terms[-1])
+    return s, err
+
+def generalized_bisection(ctx,f,a,b,n):
+    """
+    Given f known to have exactly n simple roots within [a,b],
+    return a list of n intervals isolating the roots
+    and having opposite signs at the endpoints.
+
+    TODO: this can be optimized, e.g. by reusing evaluation points.
+    """
+    if n < 1:
+        raise ValueError("n cannot be less than 1")
+    N = n+1
+    points = []
+    signs = []
+    while 1:
+        points = ctx.linspace(a,b,N)
+        signs = [ctx.sign(f(x)) for x in points]
+        ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \
+            if signs[i]*signs[i+1] == -1]
+        if len(ok_intervals) == n:
+            return ok_intervals
+        N = N*2
+
+def find_in_interval(ctx, f, ab):
+    return ctx.findroot(f, ab, solver='illinois', verify=False)
+
+def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
+    prec = ctx.prec
+    workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
+    try:
+        ctx.prec = workprec
+        v = ctx.mpf(v)
+        m = int(m)
+        prime = int(prime)
+        if v < 0:
+            raise ValueError("v cannot be negative")
+        if m < 1:
+            raise ValueError("m cannot be less than 1")
+        if prime not in (0,1):
+            raise ValueError("prime must be 0 or 1")
+        if kind == 1:
+            if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
+            else: f = lambda x: ctx.besselj(v,x)
+        if kind == 2:
+            if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
+            else: f = lambda x: ctx.bessely(v,x)
+        # The first root of J' is very close to 0 for small
+        # orders, and this needs to be special-cased
+        if kind == 1 and prime and m == 1:
+            if v == 0:
+                return ctx.zero
+            if v <= 1:
+                # TODO: use v <= j'_{v,1} < y_{v,1}?
+                r = 2*ctx.sqrt(v*(1+v)/(v+2))
+                return find_in_interval(ctx, f, (r/10, 2*r))
+        if (kind,prime,v,m) in _interval_cache:
+            return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
+        r, err = mcmahon(ctx, kind, prime, v, m)
+        if err < isoltol:
+            return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
+        # An x such that 0 < x < r_{v,1}
+        if kind == 1 and not prime: low = 2.4
+        if kind == 1 and prime: low = 1.8
+        if kind == 2 and not prime: low = 0.8
+        if kind == 2 and prime: low = 2.0
+        n = m+1
+        while 1:
+            r1, err = mcmahon(ctx, kind, prime, v, n)
+            if err < isoltol:
+                r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
+                intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
+                for k, ab in enumerate(intervals):
+                    _interval_cache[kind,prime,v,k+1] = ab
+                return find_in_interval(ctx, f, intervals[m-1])
+            else:
+                n = n*2
+    finally:
+        ctx.prec = prec
+
+@defun
+def besseljzero(ctx, v, m, derivative=0):
+    r"""
+    For a real order `\nu \ge 0` and a positive integer `m`, returns
+    `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
+    first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
+    with *derivative=1*, gives `j'_{\nu,m}`, the `m`-th nonnegative
+    simple zero of `J'_{\nu}(z)`.
+
+    The indexing convention is that used by Abramowitz & Stegun
+    and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
+    zeros are positive. In effect, only simple zeros are counted
+    (all zeros of Bessel functions are simple except possibly `z = 0`)
+    and `j_{\nu,m}` becomes a monotonic function of both `\nu`
+    and `m`.
+
+    The zeros are interlaced according to the inequalities
+
+    .. math ::
+
+        j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}
+
+        j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots
+
+    **Examples**
+
+    Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::
+
+        >>> from mpmath import *
+        >>> mp.dps = 25; mp.pretty = True
+        >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
+        2.404825557695772768621632
+        5.520078110286310649596604
+        8.653727912911012216954199
+        >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
+        3.831705970207512315614436
+        7.01558666981561875353705
+        10.17346813506272207718571
+        >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
+        5.135622301840682556301402
+        8.417244140399864857783614
+        11.61984117214905942709415
+
+    Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::
+
+        >>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
+        0.0
+        3.831705970207512315614436
+        7.01558666981561875353705
+        >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
+        1.84118378134065930264363
+        5.331442773525032636884016
+        8.536316366346285834358961
+        >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
+        3.054236928227140322755932
+        6.706133194158459146634394
+        9.969467823087595793179143
+
+    Zeros with large index::
+
+        >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
+        313.3742660775278447196902
+        3140.807295225078628895545
+        31415.14114171350798533666
+        >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
+        321.1893195676003157339222
+        3148.657306813047523500494
+        31422.9947255486291798943
+        >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
+        311.8018681873704508125112
+        3139.236339643802482833973
+        31413.57032947022399485808
+
+    Zeros of functions with large order::
+
+        >>> besseljzero(50,1)
+        57.11689916011917411936228
+        >>> besseljzero(50,2)
+        62.80769876483536093435393
+        >>> besseljzero(50,100)
+        388.6936600656058834640981
+        >>> besseljzero(50,1,1)
+        52.99764038731665010944037
+        >>> besseljzero(50,2,1)
+        60.02631933279942589882363
+        >>> besseljzero(50,100,1)
+        387.1083151608726181086283
+
+    Zeros of functions with fractional order::
+
+        >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
+        3.141592653589793238462643
+        4.493409457909064175307881
+        15.15657692957458622921634
+
+    Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
+    products over their zeros::
+
+        >>> v,z = 2, mpf(1)
+        >>> (z/2)**v/gamma(v+1) * \
+        ...     nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
+        ...
+        0.1149034849319004804696469
+        >>> besselj(v,z)
+        0.1149034849319004804696469
+        >>> (z/2)**(v-1)/2/gamma(v) * \
+        ...     nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
+        ...
+        0.2102436158811325550203884
+        >>> besselj(v,z,1)
+        0.2102436158811325550203884
+
+    """
+    return +bessel_zero(ctx, 1, derivative, v, m)
+
+@defun
+def besselyzero(ctx, v, m, derivative=0):
+    r"""
+    For a real order `\nu \ge 0` and a positive integer `m`, returns
+    `y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
+    second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
+    with *derivative=1*, gives `y'_{\nu,m}`, the `m`-th positive zero of
+    `Y'_{\nu}(z)`.
+
+    The zeros are interlaced according to the inequalities
+
+    .. math ::
+
+        y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}
+
+        y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots
+
+    **Examples**
+
+    Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::
+
+        >>> from mpmath import *
+        >>> mp.dps = 25; mp.pretty = True
+        >>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
+        0.8935769662791675215848871
+        3.957678419314857868375677
+        7.086051060301772697623625
+        >>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
+        2.197141326031017035149034
+        5.429681040794135132772005
+        8.596005868331168926429606
+        >>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
+        3.384241767149593472701426
+        6.793807513268267538291167
+        10.02347797936003797850539
+
+    Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::
+
+        >>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
+        2.197141326031017035149034
+        5.429681040794135132772005
+        8.596005868331168926429606
+        >>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
+        3.683022856585177699898967
+        6.941499953654175655751944
+        10.12340465543661307978775
+        >>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
+        5.002582931446063945200176
+        8.350724701413079526349714
+        11.57419546521764654624265
+
+    Zeros with large index::
+
+        >>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
+        311.8034717601871549333419
+        3139.236498918198006794026
+        31413.57034538691205229188
+        >>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
+        319.6183338562782156235062
+        3147.086508524556404473186
+        31421.42392920214673402828
+        >>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
+        313.3726705426359345050449
+        3140.807136030340213610065
+        31415.14112579761578220175
+
+    Zeros of functions with large order::
+
+        >>> besselyzero(50,1)
+        53.50285882040036394680237
+        >>> besselyzero(50,2)
+        60.11244442774058114686022
+        >>> besselyzero(50,100)
+        387.1096509824943957706835
+        >>> besselyzero(50,1,1)
+        56.96290427516751320063605
+        >>> besselyzero(50,2,1)
+        62.74888166945933944036623
+        >>> besselyzero(50,100,1)
+        388.6923300548309258355475
+
+    Zeros of functions with fractional order::
+
+        >>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
+        1.570796326794896619231322
+        2.798386045783887136720249
+
13.56721208770735123376018 + + """ + return +bessel_zero(ctx, 2, derivative, v, m) diff --git a/MLPY/Lib/site-packages/mpmath/functions/elliptic.py b/MLPY/Lib/site-packages/mpmath/functions/elliptic.py new file mode 100644 index 0000000000000000000000000000000000000000..1e198697fa042b7cc8bcba9e9e770f5c8106dad6 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/elliptic.py @@ -0,0 +1,1431 @@ +r""" +Elliptic functions historically comprise the elliptic integrals +and their inverses, and originate from the problem of computing the +arc length of an ellipse. From a more modern point of view, +an elliptic function is defined as a doubly periodic function, i.e. +a function which satisfies + +.. math :: + + f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z) + +for some half-periods `\omega_1, \omega_2` with +`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic +functions are the Jacobi elliptic functions. More broadly, this section +includes quasi-doubly periodic functions (such as the Jacobi theta +functions) and other functions useful in the study of elliptic functions. + +Many different conventions for the arguments of +elliptic functions are in use. It is even standard to use +different parameterizations for different functions in the same +text or software (and mpmath is no exception). +The usual parameters are the elliptic nome `q`, which usually +must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary +complex number); the elliptic modulus `k` (an arbitrary complex +number); and the half-period ratio `\tau`, which usually must +satisfy `\mathrm{Im}[\tau] > 0`. +These quantities can be expressed in terms of each other +using the following relations: + +.. math :: + + m = k^2 + +.. math :: + + \tau = i \frac{K(1-m)}{K(m)} + +.. math :: + + q = e^{i \pi \tau} + +.. math :: + + k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)} + +In addition, an alternative definition is used for the nome in +number theory, which we here denote by q-bar: + +.. math :: + + \bar{q} = q^2 = e^{2 i \pi \tau} + +For convenience, mpmath provides functions to convert +between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`, +:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`). + +**References** + +1. [AbramowitzStegun]_ + +2. [WhittakerWatson]_ + +""" + +from .functions import defun, defun_wrapped + +@defun_wrapped +def eta(ctx, tau): + r""" + Returns the Dedekind eta function of tau in the upper half-plane. 
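+
+    It is given by the infinite product
+
+    .. math ::
+
+        \eta(\tau) = e^{\pi i \tau/12}
+            \prod_{n=1}^{\infty} \left(1 - e^{2 \pi i n \tau}\right),
+
+    which the implementation evaluates via the q-Pochhammer symbol
+    :func:`~mpmath.qp`.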
+ + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> eta(1j); gamma(0.25) / (2*pi**0.75) + (0.7682254223260566590025942 + 0.0j) + 0.7682254223260566590025942 + >>> tau = sqrt(2) + sqrt(5)*1j + >>> eta(-1/tau); sqrt(-1j*tau) * eta(tau) + (0.9022859908439376463573294 + 0.07985093673948098408048575j) + (0.9022859908439376463573295 + 0.07985093673948098408048575j) + >>> eta(tau+1); exp(pi*1j/12) * eta(tau) + (0.4493066139717553786223114 + 0.3290014793877986663915939j) + (0.4493066139717553786223114 + 0.3290014793877986663915939j) + >>> f = lambda z: diff(eta, z) / eta(z) + >>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3)) + 0.0 + + """ + if ctx.im(tau) <= 0.0: + raise ValueError("eta is only defined in the upper half-plane") + q = ctx.expjpi(tau/12) + return q * ctx.qp(q**24) + +def nome(ctx, m): + m = ctx.convert(m) + if not m: + return m + if m == ctx.one: + return m + if ctx.isnan(m): + return m + if ctx.isinf(m): + if m == ctx.ninf: + return type(m)(-1) + else: + return ctx.mpc(-1) + a = ctx.ellipk(ctx.one-m) + b = ctx.ellipk(m) + v = ctx.exp(-ctx.pi*a/b) + if not ctx._im(m) and ctx._re(m) < 1: + if ctx._is_real_type(m): + return v.real + else: + return v.real + 0j + elif m == 2: + v = ctx.mpc(0, v.imag) + return v + +@defun_wrapped +def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qfrom(q=0.25) + 0.25 + >>> qfrom(m=mfrom(q=0.25)) + 0.25 + >>> qfrom(k=kfrom(q=0.25)) + 0.25 + >>> qfrom(tau=taufrom(q=0.25)) + (0.25 + 0.0j) + >>> qfrom(qbar=qbarfrom(q=0.25)) + 0.25 + + """ + if q is not None: + return ctx.convert(q) + if m is not None: + return nome(ctx, m) + if k is not None: + return nome(ctx, ctx.convert(k)**2) + if tau is not None: + return ctx.expjpi(tau) + if qbar is not None: + return ctx.sqrt(qbar) + +@defun_wrapped +def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the number-theoretic nome `\bar q`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qbarfrom(qbar=0.25) + 0.25 + >>> qbarfrom(q=qfrom(qbar=0.25)) + 0.25 + >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned + 0.25 + >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned + 0.25 + >>> qbarfrom(tau=taufrom(qbar=0.25)) + (0.25 + 0.0j) + + """ + if qbar is not None: + return ctx.convert(qbar) + if q is not None: + return ctx.convert(q) ** 2 + if m is not None: + return nome(ctx, m) ** 2 + if k is not None: + return nome(ctx, ctx.convert(k)**2) ** 2 + if tau is not None: + return ctx.expjpi(2*tau) + +@defun_wrapped +def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic half-period ratio `\tau`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> taufrom(tau=0.5j) + (0.0 + 0.5j) + >>> taufrom(q=qfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> taufrom(m=mfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> taufrom(k=kfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> taufrom(qbar=qbarfrom(tau=0.5j)) + (0.0 + 0.5j) + + """ + if tau is not None: + return ctx.convert(tau) + if m is not None: + m = ctx.convert(m) + return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m) + if k is not None: + k = ctx.convert(k) + return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2) + if q is not None: + return ctx.log(q) / (ctx.pi*ctx.j) + if qbar is not None: + qbar = ctx.convert(qbar) + return 
ctx.log(qbar) / (2*ctx.pi*ctx.j) + +@defun_wrapped +def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic modulus `k`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> kfrom(k=0.25) + 0.25 + >>> kfrom(m=mfrom(k=0.25)) + 0.25 + >>> kfrom(q=qfrom(k=0.25)) + 0.25 + >>> kfrom(tau=taufrom(k=0.25)) + (0.25 + 0.0j) + >>> kfrom(qbar=qbarfrom(k=0.25)) + 0.25 + + As `q \to 1` and `q \to -1`, `k` rapidly approaches + `1` and `i \infty` respectively:: + + >>> kfrom(q=0.75) + 0.9999999999999899166471767 + >>> kfrom(q=-0.75) + (0.0 + 7041781.096692038332790615j) + >>> kfrom(q=1) + 1 + >>> kfrom(q=-1) + (0.0 + +infj) + """ + if k is not None: + return ctx.convert(k) + if m is not None: + return ctx.sqrt(m) + if tau is not None: + q = ctx.expjpi(tau) + if qbar is not None: + q = ctx.sqrt(qbar) + if q == 1: + return q + if q == -1: + return ctx.mpc(0,'inf') + return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2 + +@defun_wrapped +def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic parameter `m`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> mfrom(m=0.25) + 0.25 + >>> mfrom(q=qfrom(m=0.25)) + 0.25 + >>> mfrom(k=kfrom(m=0.25)) + 0.25 + >>> mfrom(tau=taufrom(m=0.25)) + (0.25 + 0.0j) + >>> mfrom(qbar=qbarfrom(m=0.25)) + 0.25 + + As `q \to 1` and `q \to -1`, `m` rapidly approaches + `1` and `-\infty` respectively:: + + >>> mfrom(q=0.75) + 0.9999999999999798332943533 + >>> mfrom(q=-0.75) + -49586681013729.32611558353 + >>> mfrom(q=1) + 1.0 + >>> mfrom(q=-1) + -inf + + The inverse nome as a function of `q` has an integer + Taylor series expansion:: + + >>> taylor(lambda q: mfrom(q), 0, 7) + [0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0] + + """ + if m is not None: + return m + if k is not None: + return k**2 + if tau is not None: + q = ctx.expjpi(tau) + if qbar is not None: + q = ctx.sqrt(qbar) + if q == 1: + return ctx.convert(q) + if q == -1: + return q*ctx.inf + v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4 + if ctx._is_real_type(q) and q < 0: + v = v.real + return v + +jacobi_spec = { + 'sn' : ([3],[2],[1],[4], 'sin', 'tanh'), + 'cn' : ([4],[2],[2],[4], 'cos', 'sech'), + 'dn' : ([4],[3],[3],[4], '1', 'sech'), + 'ns' : ([2],[3],[4],[1], 'csc', 'coth'), + 'nc' : ([2],[4],[4],[2], 'sec', 'cosh'), + 'nd' : ([3],[4],[4],[3], '1', 'cosh'), + 'sc' : ([3],[4],[1],[2], 'tan', 'sinh'), + 'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'), + 'cd' : ([3],[2],[2],[3], 'cos', '1'), + 'cs' : ([4],[3],[2],[1], 'cot', 'csch'), + 'dc' : ([2],[3],[3],[2], 'sec', '1'), + 'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'), + 'cc' : None, + 'ss' : None, + 'nn' : None, + 'dd' : None +} + +@defun +def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None): + try: + S = jacobi_spec[kind] + except KeyError: + raise ValueError("First argument must be a two-character string " + "containing 's', 'c', 'd' or 'n', e.g.: 'sn'") + if u is None: + def f(*args, **kwargs): + return ctx.ellipfun(kind, *args, **kwargs) + f.__name__ = kind + return f + prec = ctx.prec + try: + ctx.prec += 10 + u = ctx.convert(u) + q = ctx.qfrom(m=m, q=q, k=k, tau=tau) + if S is None: + v = ctx.one + 0*q*u + elif q == ctx.zero: + if S[4] == '1': v = ctx.one + else: v = getattr(ctx, S[4])(u) + v += 0*q*u + elif q == ctx.one: + if S[5] == '1': v = ctx.one + else: v = getattr(ctx, S[5])(u) + v += 0*q*u + else: + t = u / ctx.jtheta(3, 0, q)**2 + v = ctx.one + 
for a in S[0]: v *= ctx.jtheta(a, 0, q)
+            for b in S[1]: v /= ctx.jtheta(b, 0, q)
+            for c in S[2]: v *= ctx.jtheta(c, t, q)
+            for d in S[3]: v /= ctx.jtheta(d, t, q)
+    finally:
+        ctx.prec = prec
+    return +v
+
+@defun_wrapped
+def kleinj(ctx, tau=None, **kwargs):
+    r"""
+    Evaluates the Klein j-invariant, which is a modular function defined for
+    `\tau` in the upper half-plane as
+
+    .. math ::
+
+        J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
+
+    where `g_2` and `g_3` are the modular invariants of the Weierstrass
+    elliptic function,
+
+    .. math ::
+
+        g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
+
+        g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
+
+    An alternative, common notation is that of the j-function
+    `j(\tau) = 1728 J(\tau)`.
+
+    **Plots**
+
+    .. literalinclude :: /plots/kleinj.py
+    .. image :: /plots/kleinj.png
+    .. literalinclude :: /plots/kleinj2.py
+    .. image :: /plots/kleinj2.png
+
+    **Examples**
+
+    Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
+
+        >>> from mpmath import *
+        >>> mp.dps = 25; mp.pretty = True
+        >>> tau = 0.625+0.75*j
+        >>> kleinj(tau)
+        (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
+        >>> kleinj(tau+1)
+        (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
+        >>> kleinj(-1/tau)
+        (-0.1507492166511182267125242 + 0.07595948379084571927228946j)
+
+    The j-function has a famous Laurent series expansion in terms of the nome
+    `\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
+
+        >>> mp.dps = 15
+        >>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
+        [1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
+
+    The j-function admits exact evaluation at special algebraic points
+    related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
+
+        >>> @extraprec(10)
+        ... def h(n):
+        ...     v = (1+sqrt(n)*j)
+        ...     if n > 2:
+        ...         v *= 0.5
+        ...     return v
+        ...
+        >>> mp.dps = 25
+        >>> for n in [1,2,3,7,11,19,43,67,163]:
+        ...     n, chop(1728*kleinj(h(n)))
+        ...
+ (1, 1728.0) + (2, 8000.0) + (3, 0.0) + (7, -3375.0) + (11, -32768.0) + (19, -884736.0) + (43, -884736000.0) + (67, -147197952000.0) + (163, -262537412640768000.0) + + Also at other special points, the j-function assumes explicit + algebraic values, e.g.:: + + >>> chop(1728*kleinj(j*sqrt(5))) + 1264538.909475140509320227 + >>> identify(cbrt(_)) # note: not simplified + '((100+sqrt(13520))/2)' + >>> (50+26*sqrt(5))**3 + 1264538.909475140509320227 + + """ + q = ctx.qfrom(tau=tau, **kwargs) + t2 = ctx.jtheta(2,0,q) + t3 = ctx.jtheta(3,0,q) + t4 = ctx.jtheta(4,0,q) + P = (t2**8 + t3**8 + t4**8)**3 + Q = 54*(t2*t3*t4)**8 + return P/Q + + +def RF_calc(ctx, x, y, z, r): + if y == z: return RC_calc(ctx, x, y, r) + if x == z: return RC_calc(ctx, y, x, r) + if x == y: return RC_calc(ctx, z, x, r) + if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)): + if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z): + return x*y*z + if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z): + return ctx.zero + xm,ym,zm = x,y,z + A0 = Am = (x+y+z)/3 + Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z)) + g = ctx.mpf(0.25) + pow4 = ctx.one + while 1: + xs = ctx.sqrt(xm) + ys = ctx.sqrt(ym) + zs = ctx.sqrt(zm) + lm = xs*ys + xs*zs + ys*zs + Am1 = (Am+lm)*g + xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g + if pow4 * Q < abs(Am): + break + Am = Am1 + pow4 *= g + t = pow4/Am + X = (A0-x)*t + Y = (A0-y)*t + Z = -X-Y + E2 = X*Y-Z**2 + E3 = X*Y*Z + return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240 + +def RC_calc(ctx, x, y, r, pv=True): + if not (ctx.isnormal(x) and ctx.isnormal(y)): + if ctx.isinf(x) or ctx.isinf(y): + return 1/(x*y) + if y == 0: + return ctx.inf + if x == 0: + return ctx.pi / ctx.sqrt(y) / 2 + raise ValueError + # Cauchy principal value + if pv and ctx._im(y) == 0 and ctx._re(y) < 0: + return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r) + if x == y: + return 1/ctx.sqrt(x) + extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x)) + ctx.prec += extraprec + if ctx._is_real_type(x) and ctx._is_real_type(y): + x = ctx._re(x) + y = ctx._re(y) + a = ctx.sqrt(x/y) + if x < y: + b = ctx.sqrt(y-x) + v = ctx.acos(a)/b + else: + b = ctx.sqrt(x-y) + v = ctx.acosh(a)/b + else: + sx = ctx.sqrt(x) + sy = ctx.sqrt(y) + v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy) + ctx.prec -= extraprec + return v + +def RJ_calc(ctx, x, y, z, p, r, integration): + """ + With integration == 0, computes RJ only using Carlson's algorithm + (may be wrong for some values). + With integration == 1, uses an initial integration to make sure + Carlson's algorithm is correct. + With integration == 2, uses only integration. + """ + if not (ctx.isnormal(x) and ctx.isnormal(y) and \ + ctx.isnormal(z) and ctx.isnormal(p)): + if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p): + return x*y*z + if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p): + return ctx.zero + if not p: + return ctx.inf + if (not x) + (not y) + (not z) > 1: + return ctx.inf + # Check conditions and fall back on integration for argument + # reduction if needed. The following conditions might be needlessly + # restrictive. 
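+    # (Specifically, Carlson's algorithm is known to give the principal
+    # value when Re(x), Re(y), Re(z) >= 0 with Re(p) > 0, when p equals
+    # one of x, y, z, or when one of x, y, z is a nonnegative real number
+    # and the other two are complex conjugates; the checks below mirror
+    # these cases, and any other input is handled by an initial numerical
+    # integration that shifts the arguments into the safe region.)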
+ initial_integral = ctx.zero + if integration >= 1: + ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0) + if not ok: + if x == p or y == p or z == p: + ok = True + if not ok: + if p.imag != 0 or p.real >= 0: + if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z): + ok = True + if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z): + ok = True + if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y): + ok = True + if not ok or (integration == 2): + N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1 + # Integrate around any singularities + if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]): + margin = ctx.j + elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]): + margin = -ctx.j + else: + margin = 1 + # Go through the upper half-plane, but low enough that any + # parameter starting in the lower plane doesn't cross the + # branch cut + for t in [x, y, z, p]: + if t.imag >= 0 or t.real > 0: + continue + margin = min(margin, abs(t.imag) * 0.5) + margin *= ctx.j + N += margin + F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p)) + if integration == 2: + return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf]) + initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N]) + x += N; y += N; z += N; p += N + xm,ym,zm,pm = x,y,z,p + A0 = Am = (x + y + z + 2*p)/5 + delta = (p-x)*(p-y)*(p-z) + Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p)) + g = ctx.mpf(0.25) + pow4 = ctx.one + S = 0 + while 1: + sx = ctx.sqrt(xm) + sy = ctx.sqrt(ym) + sz = ctx.sqrt(zm) + sp = ctx.sqrt(pm) + lm = sx*sy + sx*sz + sy*sz + Am1 = (Am+lm)*g + xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g + dm = (sp+sx) * (sp+sy) * (sp+sz) + em = delta * pow4**3 / dm**2 + if pow4 * Q < abs(Am): + break + T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm + S += T + pow4 *= g + Am = Am1 + t = pow4 / Am + X = (A0-x)*t + Y = (A0-y)*t + Z = (A0-z)*t + P = (-X-Y-Z)/2 + E2 = X*Y + X*Z + Y*Z - 3*P**2 + E3 = X*Y*Z + 2*E2*P + 4*P**3 + E4 = (2*X*Y*Z + E2*P + 3*P**3)*P + E5 = X*Y*Z*P**2 + P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5 + Q = 24024 + v1 = pow4 * ctx.power(Am, -1.5) * P/Q + v2 = 6*S + return initial_integral + v1 + v2 + +@defun +def elliprf(ctx, x, y, z): + r""" + Evaluates the Carlson symmetric elliptic integral of the first kind + + .. math :: + + R_F(x,y,z) = \frac{1}{2} + \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}} + + which is defined for `x,y,z \notin (-\infty,0)`, and with + at most one of `x,y,z` being zero. + + For real `x,y,z \ge 0`, the principal square root is taken in the integrand. + For complex `x,y,z`, the principal square root is taken as `t \to \infty` + and as `t \to 0` non-principal branches are chosen as necessary so as to + make the integrand continuous. 
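When its admissibility checks fail (or with integration == 2), RJ_calc above shifts all four arguments by N and adds 3/2 times a quadrature of the defining integrand over [0, N]. The same integrand gives a quick end-to-end sanity check through the public mpmath interface (a sketch assuming the standard top-level namespace):

```python
from mpmath import mp, quad, sqrt, inf, elliprj

mp.dps = 25
x, y, z, p = 1, 2, 3, 4
# the integrand F(t) that RJ_calc's fallback integrates
F = lambda t: 1/(sqrt(t+x)*sqrt(t+y)*sqrt(t+z)*(t+p))
print(1.5*quad(F, [0, inf]))  # defining integral
print(elliprj(x, y, z, p))    # duplication algorithm
```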
+ + **Examples** + + Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprf(0,1,1); pi/2 + 1.570796326794896619231322 + 1.570796326794896619231322 + >>> elliprf(0,1,inf) + 0.0 + >>> elliprf(1,1,1) + 1.0 + >>> elliprf(2,2,2)**2 + 0.5 + >>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0) + +inf + +inf + +inf + +inf + + Representing complete elliptic integrals in terms of `R_F`:: + + >>> m = mpf(0.75) + >>> ellipk(m); elliprf(0,1-m,1) + 2.156515647499643235438675 + 2.156515647499643235438675 + >>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3 + 1.211056027568459524803563 + 1.211056027568459524803563 + + Some symmetries and argument transformations:: + + >>> x,y,z = 2,3,4 + >>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x) + 0.5840828416771517066928492 + 0.5840828416771517066928492 + 0.5840828416771517066928492 + >>> k = mpf(100000) + >>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z) + 0.001847032121923321253219284 + 0.001847032121923321253219284 + >>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x) + >>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l) + 0.5840828416771517066928492 + 0.5840828416771517066928492 + >>> elliprf((x+l)/4,(y+l)/4,(z+l)/4) + 0.5840828416771517066928492 + + Comparing with numerical integration:: + + >>> x,y,z = 2,3,4 + >>> elliprf(x,y,z) + 0.5840828416771517066928492 + >>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5) + >>> q = extradps(25)(quad) + >>> q(f, [0,inf]) + 0.5840828416771517066928492 + + With the following arguments, the square root in the integrand becomes + discontinuous at `t = 1/2` if the principal branch is used. To obtain + the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}` + on `t \in (0, 1/2)`:: + + >>> x,y,z = j-1,j,0 + >>> elliprf(x,y,z) + (0.7961258658423391329305694 - 1.213856669836495986430094j) + >>> -q(f, [0,0.5]) + q(f, [0.5,inf]) + (0.7961258658423391329305694 - 1.213856669836495986430094j) + + The so-called *first lemniscate constant*, a transcendental number:: + + >>> elliprf(0,1,2) + 1.31102877714605990523242 + >>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1]) + 1.31102877714605990523242 + >>> gamma('1/4')**2/(4*sqrt(2*pi)) + 1.31102877714605990523242 + + **References** + + 1. [Carlson]_ + 2. [DLMF]_ Chapter 19. Elliptic Integrals + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RF_calc(ctx, x, y, z, tol) + finally: + ctx.prec = prec + return +v + +@defun +def elliprc(ctx, x, y, pv=True): + r""" + Evaluates the degenerate Carlson symmetric elliptic integral + of the first kind + + .. math :: + + R_C(x,y) = R_F(x,y,y) = + \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}. + + If `y \in (-\infty,0)`, either a value defined by continuity, + or with *pv=True* the Cauchy principal value, can be computed. + + If `x \ge 0, y > 0`, the value can be expressed in terms of + elementary functions as + + .. math :: + + R_C(x,y) = + \begin{cases} + \dfrac{1}{\sqrt{y-x}} + \cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\ + \dfrac{1}{\sqrt{y}}, & x = y \\ + \dfrac{1}{\sqrt{x-y}} + \cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\ + \end{cases}. 
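For real `x \ge 0, y > 0` this case analysis is exactly the elementary branch that RC_calc takes above; as a standalone double-precision sketch (`rc_float` is a hypothetical helper, not mpmath API):

```python
import math

def rc_float(x, y):
    # elementary closed form for R_C(x, y), x >= 0, y > 0
    if x == y:
        return 1.0/math.sqrt(x)
    a = math.sqrt(x/y)
    if x < y:
        return math.acos(a)/math.sqrt(y - x)
    return math.acosh(a)/math.sqrt(x - y)

print(2*rc_float(0.0, 1.0), math.pi)  # R_C(0,1) = pi/2
```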
+ + **Examples** + + Some special values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi + 3.141592653589793238462643 + 3.141592653589793238462643 + 3.141592653589793238462643 + >>> elliprc(1,0) + +inf + >>> elliprc(5,5)**2 + 0.2 + >>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf) + 0.0 + 0.0 + 0.0 + + Comparing with the elementary closed-form solution:: + + >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3')) + 2.041630778983498390751238 + 2.041630778983498390751238 + >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5')) + 1.875180765206547065111085 + 1.875180765206547065111085 + + Comparing with numerical integration:: + + >>> q = extradps(25)(quad) + >>> elliprc(2, -3, pv=True) + 0.3333969101113672670749334 + >>> elliprc(2, -3, pv=False) + (0.3333969101113672670749334 + 0.7024814731040726393156375j) + >>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf]) + (0.3333969101113672670749334 + 0.7024814731040726393156375j) + + """ + x = ctx.convert(x) + y = ctx.convert(y) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RC_calc(ctx, x, y, tol, pv) + finally: + ctx.prec = prec + return +v + +@defun +def elliprj(ctx, x, y, z, p, integration=1): + r""" + Evaluates the Carlson symmetric elliptic integral of the third kind + + .. math :: + + R_J(x,y,z,p) = \frac{3}{2} + \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}. + + Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand + is defined so as to be continuous along the path of integration for + complex values of the arguments. + + **Examples** + + Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprj(1,1,1,1) + 1.0 + >>> elliprj(2,2,2,2); 1/(2*sqrt(2)) + 0.3535533905932737622004222 + 0.3535533905932737622004222 + >>> elliprj(0,1,2,2) + 1.067937989667395702268688 + >>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi)) + 1.067937989667395702268688 + >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4 + 1.380226776765915172432054 + 1.380226776765915172432054 + >>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0) + +inf + +inf + +inf + >>> elliprj(1,inf,1,0); elliprj(1,1,1,inf) + 0.0 + 0.0 + >>> chop(elliprj(1+j, 1-j, 1, 1)) + 0.8505007163686739432927844 + + Scale transformation:: + + >>> x,y,z,p = 2,3,4,5 + >>> k = mpf(100000) + >>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p) + 4.521291677592745527851168e-9 + 4.521291677592745527851168e-9 + + Comparing with numerical integration:: + + >>> elliprj(1,2,3,4) + 0.2398480997495677621758617 + >>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3))) + >>> 1.5*quad(f, [0,inf]) + 0.2398480997495677621758617 + >>> elliprj(1,2+1j,3,4-2j) + (0.216888906014633498739952 + 0.04081912627366673332369512j) + >>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3))) + >>> 1.5*quad(f, [0,inf]) + (0.216888906014633498739952 + 0.04081912627366673332369511j) + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + p = ctx.convert(p) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RJ_calc(ctx, x, y, z, p, tol, integration) + finally: + ctx.prec = prec + return +v + +@defun +def elliprd(ctx, x, y, z): + r""" + Evaluates the degenerate Carlson symmetric elliptic integral + of the third kind or Carlson elliptic integral of the + second kind `R_D(x,y,z) = R_J(x,y,z,z)`. + + See :func:`~mpmath.elliprj` for additional information. 
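Since `R_D` merges the last two arguments of `R_J`, the complete Legendre integrals reduce to `R_F` and `R_D`, as already used in the elliprf examples above; a short check through the public API (both differences should vanish to working precision):

```python
from mpmath import mp, mpf, ellipk, ellipe, elliprf, elliprd

mp.dps = 25
m = mpf('0.75')
# K(m) = R_F(0, 1-m, 1);  E(m) = R_F(0, 1-m, 1) - m*R_D(0, 1-m, 1)/3
print(ellipk(m) - elliprf(0, 1-m, 1))
print(ellipe(m) - (elliprf(0, 1-m, 1) - m*elliprd(0, 1-m, 1)/3))
```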
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprd(1,2,3) + 0.2904602810289906442326534 + >>> elliprj(1,2,3,3) + 0.2904602810289906442326534 + + The so-called *second lemniscate constant*, a transcendental number:: + + >>> elliprd(0,2,1)/3 + 0.5990701173677961037199612 + >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1]) + 0.5990701173677961037199612 + >>> gamma('3/4')**2/sqrt(2*pi) + 0.5990701173677961037199612 + + """ + return ctx.elliprj(x,y,z,z) + +@defun +def elliprg(ctx, x, y, z): + r""" + Evaluates the Carlson completely symmetric elliptic integral + of the second kind + + .. math :: + + R_G(x,y,z) = \frac{1}{4} \int_0^{\infty} + \frac{t}{\sqrt{(t+x)(t+y)(t+z)}} + \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt. + + **Examples** + + Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprg(0,1,1)*4; +pi + 3.141592653589793238462643 + 3.141592653589793238462643 + >>> elliprg(0,0.5,1) + 0.6753219405238377512600874 + >>> chop(elliprg(1+j, 1-j, 2)) + 1.172431327676416604532822 + + A double integral that can be evaluated in terms of `R_G`:: + + >>> x,y,z = 2,3,4 + >>> def f(t,u): + ... st = fp.sin(t); ct = fp.cos(t) + ... su = fp.sin(u); cu = fp.cos(u) + ... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st + ... + >>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13) + 1.725503028069 + >>> nprint(elliprg(x,y,z), 13) + 1.725503028069 + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + zeros = (not x) + (not y) + (not z) + if zeros == 3: + return (x+y+z)*0 + if zeros == 2: + if x: return 0.5*ctx.sqrt(x) + if y: return 0.5*ctx.sqrt(y) + return 0.5*ctx.sqrt(z) + if zeros == 1: + if not z: + x, z = z, x + def terms(): + T1 = 0.5*z*ctx.elliprf(x,y,z) + T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3 + T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z) + return T1,T2,T3 + return ctx.sum_accurately(terms) + + +@defun_wrapped +def ellipf(ctx, phi, m): + r""" + Evaluates the Legendre incomplete elliptic integral of the first kind + + .. math :: + + F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}} + + or equivalently + + .. math :: + + F(\phi,m) = \int_0^{\sin \phi} + \frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}. + + The function reduces to a complete elliptic integral of the first kind + (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is, + + .. math :: + + F\left(\frac{\pi}{2}, m\right) = K(m). + + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellipf.py + .. 
image :: /plots/ellipf.png + + **Examples** + + Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipf(0,1) + 0.0 + >>> ellipf(0,0) + 0.0 + >>> ellipf(1,0); ellipf(2+3j,0) + 1.0 + (2.0 + 3.0j) + >>> ellipf(1,1); log(sec(1)+tan(1)) + 1.226191170883517070813061 + 1.226191170883517070813061 + >>> ellipf(pi/2, -0.5); ellipk(-0.5) + 1.415737208425956198892166 + 1.415737208425956198892166 + >>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1) + +inf + +inf + >>> ellipf(1.5, 1) + 3.340677542798311003320813 + + Comparing with numerical integration:: + + >>> z,m = 0.5, 1.25 + >>> ellipf(z,m) + 0.5287219202206327872978255 + >>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z]) + 0.5287219202206327872978255 + + The arguments may be complex numbers:: + + >>> ellipf(3j, 0.5) + (0.0 + 1.713602407841590234804143j) + >>> ellipf(3+4j, 5-6j) + (1.269131241950351323305741 - 0.3561052815014558335412538j) + >>> z,m = 2+3j, 1.25 + >>> k = 1011 + >>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m) + (4086.184383622179764082821 - 3003.003538923749396546871j) + (4086.184383622179764082821 - 3003.003538923749396546871j) + + For `|\Re(z)| < \pi/2`, the function can be expressed as a + hypergeometric series of two variables + (see :func:`~mpmath.appellf1`):: + + >>> z,m = 0.5, 0.25 + >>> ellipf(z,m) + 0.5050887275786480788831083 + >>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2) + 0.5050887275786480788831083 + + """ + z = phi + if not (ctx.isnormal(z) and ctx.isnormal(m)): + if m == 0: + return z + m + if z == 0: + return z * m + if m == ctx.inf or m == ctx.ninf: return z/m + raise ValueError + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if m == 1: + if away: + return ctx.inf + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellipk(m) + else: + P = 0 + c, s = ctx.cos_sin(z) + return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P + +@defun_wrapped +def ellipe(ctx, *args): + r""" + Called with a single argument `m`, evaluates the Legendre complete + elliptic integral of the second kind, `E(m)`, defined by + + .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\, + \frac{\pi}{2} + \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right). + + Called with two arguments `\phi, m`, evaluates the incomplete elliptic + integral of the second kind + + .. math :: + + E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt = + \int_0^{\sin z} + \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt. + + The incomplete integral reduces to a complete integral when + `\phi = \frac{\pi}{2}`; that is, + + .. math :: + + E\left(\frac{\pi}{2}, m\right) = E(m). + + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellipe.py + .. 
image :: /plots/ellipe.png + + **Examples for the complete integral** + + Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipe(0) + 1.570796326794896619231322 + >>> ellipe(1) + 1.0 + >>> ellipe(-1) + 1.910098894513856008952381 + >>> ellipe(2) + (0.5990701173677961037199612 + 0.5990701173677961037199612j) + >>> ellipe(inf) + (0.0 + +infj) + >>> ellipe(-inf) + +inf + + Verifying the defining integral and hypergeometric + representation:: + + >>> ellipe(0.5) + 1.350643881047675502520175 + >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2]) + 1.350643881047675502520175 + >>> pi/2*hyp2f1(0.5,-0.5,1,0.5) + 1.350643881047675502520175 + + Evaluation is supported for arbitrary complex `m`:: + + >>> ellipe(0.5+0.25j) + (1.360868682163129682716687 - 0.1238733442561786843557315j) + >>> ellipe(3+4j) + (1.499553520933346954333612 - 1.577879007912758274533309j) + + A definite integral:: + + >>> quad(ellipe, [0,1]) + 1.333333333333333333333333 + + **Examples for the incomplete integral** + + Basic values and limits:: + + >>> ellipe(0,1) + 0.0 + >>> ellipe(0,0) + 0.0 + >>> ellipe(1,0) + 1.0 + >>> ellipe(2+3j,0) + (2.0 + 3.0j) + >>> ellipe(1,1); sin(1) + 0.8414709848078965066525023 + 0.8414709848078965066525023 + >>> ellipe(pi/2, -0.5); ellipe(-0.5) + 1.751771275694817862026502 + 1.751771275694817862026502 + >>> ellipe(pi/2, 1); ellipe(-pi/2, 1) + 1.0 + -1.0 + >>> ellipe(1.5, 1) + 0.9974949866040544309417234 + + Comparing with numerical integration:: + + >>> z,m = 0.5, 1.25 + >>> ellipe(z,m) + 0.4740152182652628394264449 + >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z]) + 0.4740152182652628394264449 + + The arguments may be complex numbers:: + + >>> ellipe(3j, 0.5) + (0.0 + 7.551991234890371873502105j) + >>> ellipe(3+4j, 5-6j) + (24.15299022574220502424466 + 75.2503670480325997418156j) + >>> k = 35 + >>> z,m = 2+3j, 1.25 + >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m) + (48.30138799412005235090766 + 17.47255216721987688224357j) + (48.30138799412005235090766 + 17.47255216721987688224357j) + + For `|\Re(z)| < \pi/2`, the function can be expressed as a + hypergeometric series of two variables + (see :func:`~mpmath.appellf1`):: + + >>> z,m = 0.5, 0.25 + >>> ellipe(z,m) + 0.4950017030164151928870375 + >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2) + 0.4950017030164151928870376 + + """ + if len(args) == 1: + return ctx._ellipe(args[0]) + else: + phi, m = args + z = phi + if not (ctx.isnormal(z) and ctx.isnormal(m)): + if m == 0: + return z + m + if z == 0: + return z * m + if m == ctx.inf or m == ctx.ninf: + return ctx.inf + raise ValueError + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellipe(m) + else: + P = 0 + def terms(): + c, s = ctx.cos_sin(z) + x = c**2 + y = 1-m*s**2 + RF = ctx.elliprf(x, y, 1) + RD = ctx.elliprd(x, y, 1) + return s*RF, -m*s**3*RD/3 + return ctx.sum_accurately(terms) + P + +@defun_wrapped +def ellippi(ctx, *args): + r""" + Called with three arguments `n, \phi, m`, evaluates the Legendre + incomplete elliptic integral of the third kind + + .. math :: + + \Pi(n; \phi, m) = \int_0^{\phi} + \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} = + \int_0^{\sin \phi} + \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}. + + Called with two arguments `n, m`, evaluates the complete + elliptic integral of the third kind + `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`. 
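The `terms()` body in ellipe above is the standard reduction `E(\phi,m) = \sin\phi \, R_F(\cos^2\phi, 1-m\sin^2\phi, 1) - \frac{m}{3}\sin^3\phi \, R_D(\cos^2\phi, 1-m\sin^2\phi, 1)` for `|\Re(\phi)| \le \pi/2`, with `P` supplying the quasi-periodic extension. A direct check, reusing `z, m = 0.5, 1.25` from the examples:

```python
from mpmath import mp, mpf, sin, cos, ellipe, elliprf, elliprd

mp.dps = 25
phi, m = mpf('0.5'), mpf('1.25')
s, c = sin(phi), cos(phi)
x, y = c**2, 1 - m*s**2
print(ellipe(phi, m))                                  # built-in
print(s*elliprf(x, y, 1) - m*s**3*elliprd(x, y, 1)/3)  # Carlson form
```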
+ + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellippi.py + .. image :: /plots/ellippi.png + + **Examples for the complete integral** + + Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellippi(0,-5); ellipk(-5) + 0.9555039270640439337379334 + 0.9555039270640439337379334 + >>> ellippi(inf,2) + 0.0 + >>> ellippi(2,inf) + 0.0 + >>> abs(ellippi(1,5)) + +inf + >>> abs(ellippi(0.25,1)) + +inf + + Evaluation in terms of simpler functions:: + + >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25) + 1.956616279119236207279727 + 1.956616279119236207279727 + >>> ellippi(3,0); pi/(2*sqrt(-2)) + (0.0 - 1.11072073453959156175397j) + (0.0 - 1.11072073453959156175397j) + >>> ellippi(-3,0); pi/(2*sqrt(4)) + 0.7853981633974483096156609 + 0.7853981633974483096156609 + + **Examples for the incomplete integral** + + Basic values and limits:: + + >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5) + 1.622944760954741603710555 + 1.622944760954741603710555 + >>> ellippi(1,0,1) + 0.0 + >>> ellippi(inf,0,1) + 0.0 + >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5) + 0.2513040086544925794134591 + 0.2513040086544925794134591 + >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2 + 2.054332933256248668692452 + 2.054332933256248668692452 + >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75) + 135.240868757890840755058 + 135.240868757890840755058 + >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3) + 0.9190227391656969903987269 + 0.9190227391656969903987269 + + Complex arguments are supported:: + + >>> ellippi(0.5, 5+6j-2*pi, -7-8j) + (-0.3612856620076747660410167 + 0.5217735339984807829755815j) + + Some degenerate cases:: + + >>> ellippi(1,1) + +inf + >>> ellippi(1,0) + +inf + >>> ellippi(1,2,0) + +inf + >>> ellippi(1,2,1) + +inf + >>> ellippi(1,0,1) + 0.0 + + """ + if len(args) == 2: + n, m = args + complete = True + z = phi = ctx.pi/2 + else: + n, phi, m = args + complete = False + z = phi + if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)): + if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m): + raise ValueError + if complete: + if m == 0: + if n == 1: + return ctx.inf + return ctx.pi/(2*ctx.sqrt(1-n)) + if n == 0: return ctx.ellipk(m) + if ctx.isinf(n) or ctx.isinf(m): return ctx.zero + else: + if z == 0: return z + if ctx.isinf(n): return ctx.zero + if ctx.isinf(m): return ctx.zero + if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m): + raise ValueError + if complete: + if m == 1: + if n == 1: + return ctx.inf + return -ctx.inf/ctx.sign(n-1) + away = False + else: + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellippi(n,m) + if ctx.isinf(P): + return ctx.inf + else: + P = 0 + def terms(): + if complete: + c, s = ctx.zero, ctx.one + else: + c, s = ctx.cos_sin(z) + x = c**2 + y = 1-m*s**2 + RF = ctx.elliprf(x, y, 1) + RJ = ctx.elliprj(x, y, 1, 1-n*s**2) + return s*RF, n*s**3*RJ/3 + return ctx.sum_accurately(terms) + P diff --git a/MLPY/Lib/site-packages/mpmath/functions/expintegrals.py b/MLPY/Lib/site-packages/mpmath/functions/expintegrals.py new file mode 100644 index 
0000000000000000000000000000000000000000..0dee8356c0386819d8f0421fded476ee77229359 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/expintegrals.py @@ -0,0 +1,425 @@ +from .functions import defun, defun_wrapped + +@defun_wrapped +def _erf_complex(ctx, z): + z2 = ctx.square_exp_arg(z, -1) + #z2 = -z**2 + v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2) + if not ctx._re(z): + v = ctx._im(v)*ctx.j + return v + +@defun_wrapped +def _erfc_complex(ctx, z): + if ctx.re(z) > 2: + z2 = ctx.square_exp_arg(z) + nz2 = ctx.fneg(z2, exact=True) + v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2) + else: + v = 1 - ctx._erf_complex(z) + if not ctx._re(z): + v = 1+ctx._im(v)*ctx.j + return v + +@defun +def erf(ctx, z): + z = ctx.convert(z) + if ctx._is_real_type(z): + try: + return ctx._erf(z) + except NotImplementedError: + pass + if ctx._is_complex_type(z) and not z.imag: + try: + return type(z)(ctx._erf(z.real)) + except NotImplementedError: + pass + return ctx._erf_complex(z) + +@defun +def erfc(ctx, z): + z = ctx.convert(z) + if ctx._is_real_type(z): + try: + return ctx._erfc(z) + except NotImplementedError: + pass + if ctx._is_complex_type(z) and not z.imag: + try: + return type(z)(ctx._erfc(z.real)) + except NotImplementedError: + pass + return ctx._erfc_complex(z) + +@defun +def square_exp_arg(ctx, z, mult=1, reciprocal=False): + prec = ctx.prec*4+20 + if reciprocal: + z2 = ctx.fmul(z, z, prec=prec) + z2 = ctx.fdiv(ctx.one, z2, prec=prec) + else: + z2 = ctx.fmul(z, z, prec=prec) + if mult != 1: + z2 = ctx.fmul(z2, mult, exact=True) + return z2 + +@defun_wrapped +def erfi(ctx, z): + if not z: + return z + z2 = ctx.square_exp_arg(z) + v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2) + if not ctx._re(z): + v = ctx._im(v)*ctx.j + return v + +@defun_wrapped +def erfinv(ctx, x): + xre = ctx._re(x) + if (xre != x) or (xre < -1) or (xre > 1): + return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1") + x = xre + #if ctx.isnan(x): return x + if not x: return x + if x == 1: return ctx.inf + if x == -1: return ctx.ninf + if abs(x) < 0.9: + a = 0.53728*x**3 + 0.813198*x + else: + # An asymptotic formula + u = ctx.ln(2/ctx.pi/(abs(x)-1)**2) + a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2) + ctx.prec += 10 + return ctx.findroot(lambda t: ctx.erf(t)-x, a) + +@defun_wrapped +def npdf(ctx, x, mu=0, sigma=1): + sigma = ctx.convert(sigma) + return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi)) + +@defun_wrapped +def ncdf(ctx, x, mu=0, sigma=1): + a = (x-mu)/(sigma*ctx.sqrt(2)) + if a < 0: + return ctx.erfc(-a)/2 + else: + return (1+ctx.erf(a))/2 + +@defun_wrapped +def betainc(ctx, a, b, x1=0, x2=1, regularized=False): + if x1 == x2: + v = 0 + elif not x1: + if x1 == 0 and x2 == 1: + v = ctx.beta(a, b) + else: + v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a + else: + m, d = ctx.nint_distance(a) + if m <= 0: + if d < -ctx.prec: + h = +ctx.eps + ctx.prec *= 2 + a += h + elif d < -4: + ctx.prec -= d + s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2) + s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1) + v = (s1 - s2) / a + if regularized: + v /= ctx.beta(a,b) + return v + +@defun +def gammainc(ctx, z, a=0, b=None, regularized=False): + regularized = bool(regularized) + z = ctx.convert(z) + if a is None: + a = ctx.zero + lower_modified = False + else: + a = ctx.convert(a) + lower_modified = a != ctx.zero + if b is None: + b = ctx.inf + upper_modified = False + else: + b = ctx.convert(b) + upper_modified = b != ctx.inf + # Complete gamma function + if not (upper_modified or 
lower_modified): + if regularized: + if ctx.re(z) < 0: + return ctx.inf + elif ctx.re(z) > 0: + return ctx.one + else: + return ctx.nan + return ctx.gamma(z) + if a == b: + return ctx.zero + # Standardize + if ctx.re(a) > ctx.re(b): + return -ctx.gammainc(z, b, a, regularized) + # Generalized gamma + if upper_modified and lower_modified: + return +ctx._gamma3(z, a, b, regularized) + # Upper gamma + elif lower_modified: + return ctx._upper_gamma(z, a, regularized) + # Lower gamma + elif upper_modified: + return ctx._lower_gamma(z, b, regularized) + +@defun +def _lower_gamma(ctx, z, b, regularized=False): + # Pole + if ctx.isnpint(z): + return type(z)(ctx.inf) + G = [z] * regularized + negb = ctx.fneg(b, exact=True) + def h(z): + T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b + return (T1,) + return ctx.hypercomb(h, [z]) + +@defun +def _upper_gamma(ctx, z, a, regularized=False): + # Fast integer case, when available + if ctx.isint(z): + try: + if regularized: + # Gamma pole + if ctx.isnpint(z): + return type(z)(ctx.zero) + orig = ctx.prec + try: + ctx.prec += 10 + return ctx._gamma_upper_int(z, a) / ctx.gamma(z) + finally: + ctx.prec = orig + else: + return ctx._gamma_upper_int(z, a) + except NotImplementedError: + pass + # hypercomb is unable to detect the exact zeros, so handle them here + if z == 2 and a == -1: + return (z+a)*0 + if z == 3 and (a == -1-1j or a == -1+1j): + return (z+a)*0 + nega = ctx.fneg(a, exact=True) + G = [z] * regularized + # Use 2F0 series when possible; fall back to lower gamma representation + try: + def h(z): + r = z-1 + return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)] + return ctx.hypercomb(h, [z], force_series=True) + except ctx.NoConvergence: + def h(z): + T1 = [], [1, z-1], [z], G, [], [], 0 + T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a + return T1, T2 + return ctx.hypercomb(h, [z]) + +@defun +def _gamma3(ctx, z, a, b, regularized=False): + pole = ctx.isnpint(z) + if regularized and pole: + return ctx.zero + try: + ctx.prec += 15 + # We don't know in advance whether it's better to write as a difference + # of lower or upper gamma functions, so try both + T1 = ctx.gammainc(z, a, regularized=regularized) + T2 = ctx.gammainc(z, b, regularized=regularized) + R = T1 - T2 + if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: + return R + if not pole: + T1 = ctx.gammainc(z, 0, b, regularized=regularized) + T2 = ctx.gammainc(z, 0, a, regularized=regularized) + R = T1 - T2 + # May be ok, but should probably at least print a warning + # about possible cancellation + if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: + return R + finally: + ctx.prec -= 15 + raise NotImplementedError + +@defun_wrapped +def expint(ctx, n, z): + if ctx.isint(n) and ctx._is_real_type(z): + try: + return ctx._expint_int(n, z) + except NotImplementedError: + pass + if ctx.isnan(n) or ctx.isnan(z): + return z*n + if z == ctx.inf: + return 1/z + if z == 0: + # integral from 1 to infinity of t^n + if ctx.re(n) <= 1: + # TODO: reasonable sign of infinity + return type(z)(ctx.inf) + else: + return ctx.one/(n-1) + if n == 0: + return ctx.exp(-z)/z + if n == -1: + return ctx.exp(-z)*(z+1)/z**2 + return z**(n-1) * ctx.gammainc(1-n, z) + +@defun_wrapped +def li(ctx, z, offset=False): + if offset: + if z == 2: + return ctx.zero + return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2) + if not z: + return z + if z == 1: + return ctx.ninf + return ctx.ei(ctx.ln(z)) + +@defun +def ei(ctx, z): + try: + return ctx._ei(z) + except NotImplementedError: + return 
ctx._ei_generic(z) + +@defun_wrapped +def _ei_generic(ctx, z): + # Note: the following is currently untested because mp and fp + # both use special-case ei code + if z == ctx.inf: + return z + if z == ctx.ninf: + return ctx.zero + if ctx.mag(z) > 1: + try: + r = ctx.one/z + v = ctx.exp(z)*ctx.hyper([1,1],[],r, + maxterms=ctx.prec, force_series=True)/z + im = ctx._im(z) + if im > 0: + v += ctx.pi*ctx.j + if im < 0: + v -= ctx.pi*ctx.j + return v + except ctx.NoConvergence: + pass + v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler + if ctx._im(z): + v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z)) + else: + v += ctx.log(abs(z)) + return v + +@defun +def e1(ctx, z): + try: + return ctx._e1(z) + except NotImplementedError: + return ctx.expint(1, z) + +@defun +def ci(ctx, z): + try: + return ctx._ci(z) + except NotImplementedError: + return ctx._ci_generic(z) + +@defun_wrapped +def _ci_generic(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: return ctx.zero + if z == ctx.ninf: return ctx.pi*1j + jz = ctx.fmul(ctx.j,z,exact=True) + njz = ctx.fneg(jz,exact=True) + v = 0.5*(ctx.ei(jz) + ctx.ei(njz)) + zreal = ctx._re(z) + zimag = ctx._im(z) + if zreal == 0: + if zimag > 0: v += ctx.pi*0.5j + if zimag < 0: v -= ctx.pi*0.5j + if zreal < 0: + if zimag >= 0: v += ctx.pi*1j + if zimag < 0: v -= ctx.pi*1j + if ctx._is_real_type(z) and zreal > 0: + v = ctx._re(v) + return v + +@defun +def si(ctx, z): + try: + return ctx._si(z) + except NotImplementedError: + return ctx._si_generic(z) + +@defun_wrapped +def _si_generic(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: return 0.5*ctx.pi + if z == ctx.ninf: return -0.5*ctx.pi + # Suffers from cancellation near 0 + if ctx.mag(z) >= -1: + jz = ctx.fmul(ctx.j,z,exact=True) + njz = ctx.fneg(jz,exact=True) + v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz)) + zreal = ctx._re(z) + if zreal > 0: + v -= 0.5*ctx.pi + if zreal < 0: + v += 0.5*ctx.pi + if ctx._is_real_type(z): + v = ctx._re(v) + return v + else: + return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z) + +@defun_wrapped +def chi(ctx, z): + nz = ctx.fneg(z, exact=True) + v = 0.5*(ctx.ei(z) + ctx.ei(nz)) + zreal = ctx._re(z) + zimag = ctx._im(z) + if zimag > 0: + v += ctx.pi*0.5j + elif zimag < 0: + v -= ctx.pi*0.5j + elif zreal < 0: + v += ctx.pi*1j + return v + +@defun_wrapped +def shi(ctx, z): + # Suffers from cancellation near 0 + if ctx.mag(z) >= -1: + nz = ctx.fneg(z, exact=True) + v = 0.5*(ctx.ei(z) - ctx.ei(nz)) + zimag = ctx._im(z) + if zimag > 0: v -= 0.5j*ctx.pi + if zimag < 0: v += 0.5j*ctx.pi + return v + else: + return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z) + +@defun_wrapped +def fresnels(ctx, z): + if z == ctx.inf: + return ctx.mpf(0.5) + if z == ctx.ninf: + return ctx.mpf(-0.5) + return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16) + +@defun_wrapped +def fresnelc(ctx, z): + if z == ctx.inf: + return ctx.mpf(0.5) + if z == ctx.ninf: + return ctx.mpf(-0.5) + return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16) diff --git a/MLPY/Lib/site-packages/mpmath/functions/factorials.py b/MLPY/Lib/site-packages/mpmath/functions/factorials.py new file mode 100644 index 0000000000000000000000000000000000000000..9259e40b95bf1c908a7ad98b59bbb33528606b07 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/factorials.py @@ -0,0 +1,187 @@ +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped + +@defun +def gammaprod(ctx, a, b, _infsign=False): + a = [ctx.convert(x) for x in a] + b = [ctx.convert(x) for x in b] + poles_num = [] + poles_den = [] + regular_num = [] + regular_den = [] + 
for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x) + for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x) + # One more pole in numerator or denominator gives 0 or inf + if len(poles_num) < len(poles_den): return ctx.zero + if len(poles_num) > len(poles_den): + # Get correct sign of infinity for x+h, h -> 0 from above + # XXX: hack, this should be done properly + if _infsign: + a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num] + b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den] + return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf + else: + return ctx.inf + # All poles cancel + # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i) + p = ctx.one + orig = ctx.prec + try: + ctx.prec = orig + 15 + while poles_num: + i = poles_num.pop() + j = poles_den.pop() + p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i) + for x in regular_num: p *= ctx.gamma(x) + for x in regular_den: p /= ctx.gamma(x) + finally: + ctx.prec = orig + return +p + +@defun +def beta(ctx, x, y): + x = ctx.convert(x) + y = ctx.convert(y) + if ctx.isinf(y): + x, y = y, x + if ctx.isinf(x): + if x == ctx.inf and not ctx._im(y): + if y == ctx.ninf: + return ctx.nan + if y > 0: + return ctx.zero + if ctx.isint(y): + return ctx.nan + if y < 0: + return ctx.sign(ctx.gamma(y)) * ctx.inf + return ctx.nan + xy = ctx.fadd(x, y, prec=2*ctx.prec) + return ctx.gammaprod([x, y], [xy]) + +@defun +def binomial(ctx, n, k): + n1 = ctx.fadd(n, 1, prec=2*ctx.prec) + k1 = ctx.fadd(k, 1, prec=2*ctx.prec) + nk1 = ctx.fsub(n1, k, prec=2*ctx.prec) + return ctx.gammaprod([n1], [k1, nk1]) + +@defun +def rf(ctx, x, n): + xn = ctx.fadd(x, n, prec=2*ctx.prec) + return ctx.gammaprod([xn], [x]) + +@defun +def ff(ctx, x, n): + x1 = ctx.fadd(x, 1, prec=2*ctx.prec) + xn1 = ctx.fadd(ctx.fsub(x, n, prec=2*ctx.prec), 1, prec=2*ctx.prec) + return ctx.gammaprod([x1], [xn1]) + +@defun_wrapped +def fac2(ctx, x): + if ctx.isinf(x): + if x == ctx.inf: + return x + return ctx.nan + return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1) + +@defun_wrapped +def barnesg(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: + return z + return ctx.nan + if ctx.isnan(z): + return z + if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)): + return z*0 + # Account for size (would not be needed if computing log(G)) + if abs(z) > 5: + ctx.dps += 2*ctx.log(abs(z),2) + # Reflection formula + if ctx.re(z) < -ctx.dps: + w = 1-z + pi2 = 2*ctx.pi + u = ctx.expjpi(2*w) + v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \ + ctx.j*ctx.polylog(2, u)/pi2 + v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w + if ctx._is_real_type(z): + v = ctx._re(v) + return v + # Estimate terms for asymptotic expansion + # TODO: fixme, obviously + N = ctx.dps // 2 + 5 + G = 1 + while abs(z) < N or ctx.re(z) < 1: + G /= ctx.gamma(z) + z += 1 + z -= 1 + s = ctx.mpf(1)/12 + s -= ctx.log(ctx.glaisher) + s += z*ctx.log(2*ctx.pi)/2 + s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z) + s -= 3*z**2/4 + z2k = z2 = z**2 + for k in xrange(1, N+1): + t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k) + if abs(t) < ctx.eps: + #print k, N # check how many terms were needed + break + z2k *= z2 + s += t + #if k == N: + # print "warning: series for barnesg failed to converge", ctx.dps + return G*ctx.exp(s) + +@defun +def superfac(ctx, z): + return ctx.barnesg(z+2) + +@defun_wrapped +def hyperfac(ctx, z): + # XXX: estimate needed extra bits accurately + if z == ctx.inf: + return z + if abs(z) > 5: + extra = 4*int(ctx.log(abs(z),2)) + else: + extra = 0 + ctx.prec += extra + 
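gammaprod above evaluates `\prod \Gamma(a_i) / \prod \Gamma(b_j)` in the limit sense, cancelling numerator and denominator poles pairwise via `\lim_{h \to 0} \Gamma(i+h)/\Gamma(j+h) = (-1)^{i+j} \Gamma(1-j)/\Gamma(1-i)` for nonpositive integers `i, j`; this is what lets beta, binomial, rf and ff return finite values at negative integer arguments. A quick check through the public interface:

```python
from mpmath import mp, binomial, gammaprod

mp.dps = 15
# lim Gamma(-3+h)/Gamma(-4+h) = -4, by Gamma(x+1) = x*Gamma(x)
print(gammaprod([-3], [-4]))
# binomial(-4, 3) = Gamma(-3)/(Gamma(4)*Gamma(-6)); the poles cancel,
# giving the generalized binomial coefficient (-4)(-5)(-6)/3! = -20
print(binomial(-4, 3))
```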
if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)): + n = int(ctx.re(z)) + h = ctx.hyperfac(-n-1) + if ((n+1)//2) & 1: + h = -h + if ctx._is_complex_type(z): + return h + 0j + return h + zp1 = z+1 + # Wrong branch cut + #v = ctx.gamma(zp1)**z + #ctx.prec -= extra + #return v / ctx.barnesg(zp1) + v = ctx.exp(z*ctx.loggamma(zp1)) + ctx.prec -= extra + return v / ctx.barnesg(zp1) + +''' +@defun +def psi0(ctx, z): + """Shortcut for psi(0,z) (the digamma function)""" + return ctx.psi(0, z) + +@defun +def psi1(ctx, z): + """Shortcut for psi(1,z) (the trigamma function)""" + return ctx.psi(1, z) + +@defun +def psi2(ctx, z): + """Shortcut for psi(2,z) (the tetragamma function)""" + return ctx.psi(2, z) + +@defun +def psi3(ctx, z): + """Shortcut for psi(3,z) (the pentagamma function)""" + return ctx.psi(3, z) +''' diff --git a/MLPY/Lib/site-packages/mpmath/functions/functions.py b/MLPY/Lib/site-packages/mpmath/functions/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..4cdf5dd921418db10847ea75b32f8e6dfacdba64 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/functions.py @@ -0,0 +1,645 @@ +from ..libmp.backend import xrange + +class SpecialFunctions(object): + """ + This class implements special functions using high-level code. + + Elementary and some other functions (e.g. gamma function, basecase + hypergeometric series) are assumed to be predefined by the context as + "builtins" or "low-level" functions. + """ + defined_functions = {} + + # The series for the Jacobi theta functions converge for |q| < 1; + # in the current implementation they throw a ValueError for + # abs(q) > THETA_Q_LIM + THETA_Q_LIM = 1 - 10**-7 + + def __init__(self): + cls = self.__class__ + for name in cls.defined_functions: + f, wrap = cls.defined_functions[name] + cls._wrap_specfun(name, f, wrap) + + self.mpq_1 = self._mpq((1,1)) + self.mpq_0 = self._mpq((0,1)) + self.mpq_1_2 = self._mpq((1,2)) + self.mpq_3_2 = self._mpq((3,2)) + self.mpq_1_4 = self._mpq((1,4)) + self.mpq_1_16 = self._mpq((1,16)) + self.mpq_3_16 = self._mpq((3,16)) + self.mpq_5_2 = self._mpq((5,2)) + self.mpq_3_4 = self._mpq((3,4)) + self.mpq_7_4 = self._mpq((7,4)) + self.mpq_5_4 = self._mpq((5,4)) + self.mpq_1_3 = self._mpq((1,3)) + self.mpq_2_3 = self._mpq((2,3)) + self.mpq_4_3 = self._mpq((4,3)) + self.mpq_1_6 = self._mpq((1,6)) + self.mpq_5_6 = self._mpq((5,6)) + self.mpq_5_3 = self._mpq((5,3)) + + self._misc_const_cache = {} + + self._aliases.update({ + 'phase' : 'arg', + 'conjugate' : 'conj', + 'nthroot' : 'root', + 'polygamma' : 'psi', + 'hurwitz' : 'zeta', + #'digamma' : 'psi0', + #'trigamma' : 'psi1', + #'tetragamma' : 'psi2', + #'pentagamma' : 'psi3', + 'fibonacci' : 'fib', + 'factorial' : 'fac', + }) + + self.zetazero_memoized = self.memoize(self.zetazero) + + # Default -- do nothing + @classmethod + def _wrap_specfun(cls, name, f, wrap): + setattr(cls, name, f) + + # Optional fast versions of common functions in common cases. 
+ # If not overridden, default (generic hypergeometric series) + # implementations will be used + def _besselj(ctx, n, z): raise NotImplementedError + def _erf(ctx, z): raise NotImplementedError + def _erfc(ctx, z): raise NotImplementedError + def _gamma_upper_int(ctx, z, a): raise NotImplementedError + def _expint_int(ctx, n, z): raise NotImplementedError + def _zeta(ctx, s): raise NotImplementedError + def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError + def _ei(ctx, z): raise NotImplementedError + def _e1(ctx, z): raise NotImplementedError + def _ci(ctx, z): raise NotImplementedError + def _si(ctx, z): raise NotImplementedError + def _altzeta(ctx, s): raise NotImplementedError + +def defun_wrapped(f): + SpecialFunctions.defined_functions[f.__name__] = f, True + return f + +def defun(f): + SpecialFunctions.defined_functions[f.__name__] = f, False + return f + +def defun_static(f): + setattr(SpecialFunctions, f.__name__, f) + return f + +@defun_wrapped +def cot(ctx, z): return ctx.one / ctx.tan(z) + +@defun_wrapped +def sec(ctx, z): return ctx.one / ctx.cos(z) + +@defun_wrapped +def csc(ctx, z): return ctx.one / ctx.sin(z) + +@defun_wrapped +def coth(ctx, z): return ctx.one / ctx.tanh(z) + +@defun_wrapped +def sech(ctx, z): return ctx.one / ctx.cosh(z) + +@defun_wrapped +def csch(ctx, z): return ctx.one / ctx.sinh(z) + +@defun_wrapped +def acot(ctx, z): + if not z: + return ctx.pi * 0.5 + else: + return ctx.atan(ctx.one / z) + +@defun_wrapped +def asec(ctx, z): return ctx.acos(ctx.one / z) + +@defun_wrapped +def acsc(ctx, z): return ctx.asin(ctx.one / z) + +@defun_wrapped +def acoth(ctx, z): + if not z: + return ctx.pi * 0.5j + else: + return ctx.atanh(ctx.one / z) + + +@defun_wrapped +def asech(ctx, z): return ctx.acosh(ctx.one / z) + +@defun_wrapped +def acsch(ctx, z): return ctx.asinh(ctx.one / z) + +@defun +def sign(ctx, x): + x = ctx.convert(x) + if not x or ctx.isnan(x): + return x + if ctx._is_real_type(x): + if x > 0: + return ctx.one + else: + return -ctx.one + return x / abs(x) + +@defun +def agm(ctx, a, b=1): + if b == 1: + return ctx.agm1(a) + a = ctx.convert(a) + b = ctx.convert(b) + return ctx._agm(a, b) + +@defun_wrapped +def sinc(ctx, x): + if ctx.isinf(x): + return 1/x + if not x: + return x+1 + return ctx.sin(x)/x + +@defun_wrapped +def sincpi(ctx, x): + if ctx.isinf(x): + return 1/x + if not x: + return x+1 + return ctx.sinpi(x)/(ctx.pi*x) + +# TODO: tests; improve implementation +@defun_wrapped +def expm1(ctx, x): + if not x: + return ctx.zero + # exp(x) - 1 ~ x + if ctx.mag(x) < -ctx.prec: + return x + 0.5*x**2 + # TODO: accurately eval the smaller of the real/imag parts + return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1) + +@defun_wrapped +def log1p(ctx, x): + if not x: + return ctx.zero + if ctx.mag(x) < -ctx.prec: + return x - 0.5*x**2 + return ctx.log(ctx.fadd(1, x, prec=2*ctx.prec)) + +@defun_wrapped +def powm1(ctx, x, y): + mag = ctx.mag + one = ctx.one + w = x**y - one + M = mag(w) + # Only moderate cancellation + if M > -8: + return w + # Check for the only possible exact cases + if not w: + if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)): + return w + x1 = x - one + magy = mag(y) + lnx = ctx.ln(x) + # Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2) + if magy + mag(lnx) < -ctx.prec: + return lnx*y + (lnx*y)**2/2 + # TODO: accurately eval the smaller of the real/imag part + return ctx.sum_accurately(lambda: iter([x**y, -1]), 1) + +@defun +def _rootof1(ctx, k, n): + k = int(k) + n = int(n) + k %= n + if not k: + 
return ctx.one + elif 2*k == n: + return -ctx.one + elif 4*k == n: + return ctx.j + elif 4*k == 3*n: + return -ctx.j + return ctx.expjpi(2*ctx.mpf(k)/n) + +@defun +def root(ctx, x, n, k=0): + n = int(n) + x = ctx.convert(x) + if k: + # Special case: there is an exact real root + if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0): + return -ctx.root(-x, n) + # Multiply by root of unity + prec = ctx.prec + try: + ctx.prec += 10 + v = ctx.root(x, n, 0) * ctx._rootof1(k, n) + finally: + ctx.prec = prec + return +v + return ctx._nthroot(x, n) + +@defun +def unitroots(ctx, n, primitive=False): + gcd = ctx._gcd + prec = ctx.prec + try: + ctx.prec += 10 + if primitive: + v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1] + else: + # TODO: this can be done *much* faster + v = [ctx._rootof1(k,n) for k in range(n)] + finally: + ctx.prec = prec + return [+x for x in v] + +@defun +def arg(ctx, x): + x = ctx.convert(x) + re = ctx._re(x) + im = ctx._im(x) + return ctx.atan2(im, re) + +@defun +def fabs(ctx, x): + return abs(ctx.convert(x)) + +@defun +def re(ctx, x): + x = ctx.convert(x) + if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers + return x.real + return x + +@defun +def im(ctx, x): + x = ctx.convert(x) + if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers + return x.imag + return ctx.zero + +@defun +def conj(ctx, x): + x = ctx.convert(x) + try: + return x.conjugate() + except AttributeError: + return x + +@defun +def polar(ctx, z): + return (ctx.fabs(z), ctx.arg(z)) + +@defun_wrapped +def rect(ctx, r, phi): + return r * ctx.mpc(*ctx.cos_sin(phi)) + +@defun +def log(ctx, x, b=None): + if b is None: + return ctx.ln(x) + wp = ctx.prec + 20 + return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp) + +@defun +def log10(ctx, x): + return ctx.log(x, 10) + +@defun +def fmod(ctx, x, y): + return ctx.convert(x) % ctx.convert(y) + +@defun +def degrees(ctx, x): + return x / ctx.degree + +@defun +def radians(ctx, x): + return x * ctx.degree + +def _lambertw_special(ctx, z, k): + # W(0,0) = 0; all other branches are singular + if not z: + if not k: + return z + return ctx.ninf + z + if z == ctx.inf: + if k == 0: + return z + else: + return z + 2*k*ctx.pi*ctx.j + if z == ctx.ninf: + return (-z) + (2*k+1)*ctx.pi*ctx.j + # Some kind of nan or complex inf/nan? 
+ return ctx.ln(z) + +import math +import cmath + +def _lambertw_approx_hybrid(z, k): + imag_sign = 0 + if hasattr(z, "imag"): + x = float(z.real) + y = z.imag + if y: + imag_sign = (-1) ** (y < 0) + y = float(y) + else: + x = float(z) + y = 0.0 + imag_sign = 0 + # hack to work regardless of whether Python supports -0.0 + if not y: + y = 0.0 + z = complex(x,y) + if k == 0: + if -4.0 < y < 4.0 and -1.0 < x < 2.5: + if imag_sign: + # Taylor series in upper/lower half-plane + if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j)) + if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j)) + if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j)) + if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j)) + # Taylor series near -1 + if x < -0.5: + if imag_sign >= 0: + return (-0.318+1.34j) + (-0.697-0.593j)*(z+1) + else: + return (-0.318-1.34j) + (-0.697+0.593j)*(z+1) + # return real type + r = -0.367879441171442 + if (not imag_sign) and x > r: + z = x + # Singularity near -1/e + if x < -0.2: + return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) + # Taylor series near 0 + if x < 0.5: return z + # Simple linear approximation + return 0.2 + 0.3*z + if (not imag_sign) and x > 0.0: + L1 = math.log(x); L2 = math.log(L1) + else: + L1 = cmath.log(z); L2 = cmath.log(L1) + elif k == -1: + # return real type + r = -0.367879441171442 + if (not imag_sign) and r < x < 0.0: + z = x + if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2: + return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) + if (not imag_sign) and -0.2 <= x < 0.0: + L1 = math.log(-x) + return L1 - math.log(-L1) + else: + if imag_sign == -1 and (not y) and x < 0.0: + L1 = cmath.log(z) - 3.1415926535897932j + else: + L1 = cmath.log(z) - 6.2831853071795865j + L2 = cmath.log(L1) + return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2) + +def _lambertw_series(ctx, z, k, tol): + """ + Return rough approximation for W_k(z) from an asymptotic series, + sufficiently accurate for the Halley iteration to converge to + the correct value. + """ + magz = ctx.mag(z) + if (-10 < magz < 900) and (-1000 < k < 1000): + # Near the branch point at -1/e + if magz < 1 and abs(z+0.36787944117144) < 0.05: + if k == 0 or (k == -1 and ctx._im(z) >= 0) or \ + (k == 1 and ctx._im(z) < 0): + delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)]) + cancellation = -ctx.mag(delta) + ctx.prec += cancellation + # Use series given in Corless et al. + p = ctx.sqrt(2*(ctx.e*z+1)) + ctx.prec -= cancellation + u = {0:ctx.mpf(-1), 1:ctx.mpf(1)} + a = {0:ctx.mpf(2), 1:ctx.mpf(-1)} + if k != 0: + p = -p + s = ctx.zero + # The series converges, so we could use it directly, but unless + # *extremely* close, it is better to just use the first few + # terms to get a good approximation for the iteration + for l in xrange(max(2,cancellation)): + if l not in u: + a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l)) + u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1) + term = u[l] * p**l + s += term + if ctx.mag(term) < -tol: + return s, True + l += 1 + ctx.prec += cancellation//2 + return s, False + if k == 0 or k == -1: + return _lambertw_approx_hybrid(z, k), False + if k == 0: + if magz < -1: + return z*(1-z), False + L1 = ctx.ln(z) + L2 = ctx.ln(L1) + elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0): + L1 = ctx.ln(-z) + return L1 - ctx.ln(-L1), False + else: + # This holds both as z -> 0 and z -> inf. + # Relative error is O(1/log(z)). 
+ L1 = ctx.ln(z) + 2j*ctx.pi*k + L2 = ctx.ln(L1) + return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False + +@defun +def lambertw(ctx, z, k=0): + z = ctx.convert(z) + k = int(k) + if not ctx.isnormal(z): + return _lambertw_special(ctx, z, k) + prec = ctx.prec + ctx.prec += 20 + ctx.mag(k or 1) + wp = ctx.prec + tol = wp - 5 + w, done = _lambertw_series(ctx, z, k, tol) + if not done: + # Use Halley iteration to solve w*exp(w) = z + two = ctx.mpf(2) + for i in xrange(100): + ew = ctx.exp(w) + wew = w*ew + wewz = wew-z + wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two)) + if ctx.mag(wn-w) <= ctx.mag(wn) - tol: + w = wn + break + else: + w = wn + if i == 100: + ctx.warn("Lambert W iteration failed to converge for z = %s" % z) + ctx.prec = prec + return +w + +@defun_wrapped +def bell(ctx, n, x=1): + x = ctx.convert(x) + if not n: + if ctx.isnan(x): + return x + return type(x)(1) + if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n): + return x**n + if n == 1: return x + if n == 2: return x*(x+1) + if x == 0: return ctx.sincpi(n) + return _polyexp(ctx, n, x, True) / ctx.exp(x) + +def _polyexp(ctx, n, x, extra=False): + def _terms(): + if extra: + yield ctx.sincpi(n) + t = x + k = 1 + while 1: + yield k**n * t + k += 1 + t = t*x/k + return ctx.sum_accurately(_terms, check_step=4) + +@defun_wrapped +def polyexp(ctx, s, z): + if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s): + return z**s + if z == 0: return z*s + if s == 0: return ctx.expm1(z) + if s == 1: return ctx.exp(z)*z + if s == 2: return ctx.exp(z)*z*(z+1) + return _polyexp(ctx, s, z) + +@defun_wrapped +def cyclotomic(ctx, n, z): + n = int(n) + if n < 0: + raise ValueError("n cannot be negative") + p = ctx.one + if n == 0: + return p + if n == 1: + return z - p + if n == 2: + return z + p + # Use divisor product representation. Unfortunately, this sometimes + # includes singularities for roots of unity, which we have to cancel out. + # Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1). + a_prod = 1 + b_prod = 1 + num_zeros = 0 + num_poles = 0 + for d in range(1,n+1): + if not n % d: + w = ctx.moebius(n//d) + # Use powm1 because it is important that we get 0 only + # if it really is exactly 0 + b = -ctx.powm1(z, d) + if b: + p *= b**w + else: + if w == 1: + a_prod *= d + num_zeros += 1 + elif w == -1: + b_prod *= d + num_poles += 1 + #print n, num_zeros, num_poles + if num_zeros: + if num_zeros > num_poles: + p *= 0 + else: + p *= a_prod + p /= b_prod + return p + +@defun +def mangoldt(ctx, n): + r""" + Evaluates the von Mangoldt function `\Lambda(n) = \log p` + if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise. 
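The implementation below strips the smallest prime factor and then checks whether the remainder is a pure power of it; the same logic in plain integer Python (`mangoldt_sketch` is a hypothetical helper, adequate only for small `n`):

```python
import math

def mangoldt_sketch(n):
    # Lambda(n) = log(p) when n = p**k for a prime p, else 0
    if n < 2:
        return 0.0
    for p in range(2, math.isqrt(n) + 1):
        if n % p == 0:
            q = n
            while q % p == 0:
                q //= p           # divide out the smallest prime factor p
            return math.log(p) if q == 1 else 0.0
    return math.log(n)            # no factor <= sqrt(n): n itself is prime

print([round(mangoldt_sketch(n), 4) for n in range(2, 13)])
```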
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> [mangoldt(n) for n in range(-2,3)] + [0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321] + >>> mangoldt(6) + 0.0 + >>> mangoldt(7) + 1.945910149055313305105353 + >>> mangoldt(8) + 0.6931471805599453094172321 + >>> fsum(mangoldt(n) for n in range(101)) + 94.04531122935739224600493 + >>> fsum(mangoldt(n) for n in range(10001)) + 10013.39669326311478372032 + + """ + n = int(n) + if n < 2: + return ctx.zero + if n % 2 == 0: + # Must be a power of two + if n & (n-1) == 0: + return +ctx.ln2 + else: + return ctx.zero + # TODO: the following could be generalized into a perfect + # power testing function + # --- + # Look for a small factor + for p in (3,5,7,11,13,17,19,23,29,31): + if not n % p: + q, r = n // p, 0 + while q > 1: + q, r = divmod(q, p) + if r: + return ctx.zero + return ctx.ln(p) + if ctx.isprime(n): + return ctx.ln(n) + # Obviously, we could use arbitrary-precision arithmetic for this... + if n > 10**30: + raise NotImplementedError + k = 2 + while 1: + p = int(n**(1./k) + 0.5) + if p < 2: + return ctx.zero + if p ** k == n: + if ctx.isprime(p): + return ctx.ln(p) + k += 1 + +@defun +def stirling1(ctx, n, k, exact=False): + v = ctx._stirling1(int(n), int(k)) + if exact: + return int(v) + else: + return ctx.mpf(v) + +@defun +def stirling2(ctx, n, k, exact=False): + v = ctx._stirling2(int(n), int(k)) + if exact: + return int(v) + else: + return ctx.mpf(v) diff --git a/MLPY/Lib/site-packages/mpmath/functions/hypergeometric.py b/MLPY/Lib/site-packages/mpmath/functions/hypergeometric.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb50cbf3ea6daa5982678d3c26157a67a7d7945 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/hypergeometric.py @@ -0,0 +1,1413 @@ +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped + +def _check_need_perturb(ctx, terms, prec, discard_known_zeros): + perturb = recompute = False + extraprec = 0 + discard = [] + for term_index, term in enumerate(terms): + w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term + have_singular_nongamma_weight = False + # Avoid division by zero in leading factors (TODO: + # also check for near division by zero?) + for k, w in enumerate(w_s): + if not w: + if ctx.re(c_s[k]) <= 0 and c_s[k]: + perturb = recompute = True + have_singular_nongamma_weight = True + pole_count = [0, 0, 0] + # Check for gamma and series poles and near-poles + for data_index, data in enumerate([alpha_s, beta_s, b_s]): + for i, x in enumerate(data): + n, d = ctx.nint_distance(x) + # Poles + if n > 0: + continue + if d == ctx.ninf: + # OK if we have a polynomial + # ------------------------------ + ok = False + if data_index == 2: + for u in a_s: + if ctx.isnpint(u) and u >= int(n): + ok = True + break + if ok: + continue + pole_count[data_index] += 1 + # ------------------------------ + #perturb = recompute = True + #return perturb, recompute, extraprec + elif d < -4: + extraprec += -d + recompute = True + if discard_known_zeros and pole_count[1] > pole_count[0] + pole_count[2] \ + and not have_singular_nongamma_weight: + discard.append(term_index) + elif sum(pole_count): + perturb = recompute = True + return perturb, recompute, extraprec, discard + +_hypercomb_msg = """ +hypercomb() failed to converge to the requested %i bits of accuracy +using a working precision of %i bits. The function value may be zero or +infinite; try passing zeroprec=N or infprec=M to bound finite values between +2^(-N) and 2^M. 
Otherwise try a higher maxprec or maxterms. +""" + +@defun +def hypercomb(ctx, function, params=[], discard_known_zeros=True, **kwargs): + orig = ctx.prec + sumvalue = ctx.zero + dist = ctx.nint_distance + ninf = ctx.ninf + orig_params = params[:] + verbose = kwargs.get('verbose', False) + maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(orig)) + kwargs['maxprec'] = maxprec # For calls to hypsum + zeroprec = kwargs.get('zeroprec') + infprec = kwargs.get('infprec') + perturbed_reference_value = None + hextra = 0 + try: + while 1: + ctx.prec += 10 + if ctx.prec > maxprec: + raise ValueError(_hypercomb_msg % (orig, ctx.prec)) + orig2 = ctx.prec + params = orig_params[:] + terms = function(*params) + if verbose: + print() + print("ENTERING hypercomb main loop") + print("prec =", ctx.prec) + print("hextra", hextra) + perturb, recompute, extraprec, discard = \ + _check_need_perturb(ctx, terms, orig, discard_known_zeros) + ctx.prec += extraprec + if perturb: + if "hmag" in kwargs: + hmag = kwargs["hmag"] + elif ctx._fixed_precision: + hmag = int(ctx.prec*0.3) + else: + hmag = orig + 10 + hextra + h = ctx.ldexp(ctx.one, -hmag) + ctx.prec = orig2 + 10 + hmag + 10 + for k in range(len(params)): + params[k] += h + # Heuristically ensure that the perturbations + # are "independent" so that two perturbations + # don't accidentally cancel each other out + # in a subtraction. + h += h/(k+1) + if recompute: + terms = function(*params) + if discard_known_zeros: + terms = [term for (i, term) in enumerate(terms) if i not in discard] + if not terms: + return ctx.zero + evaluated_terms = [] + for term_index, term_data in enumerate(terms): + w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term_data + if verbose: + print() + print(" Evaluating term %i/%i : %iF%i" % \ + (term_index+1, len(terms), len(a_s), len(b_s))) + print(" powers", ctx.nstr(w_s), ctx.nstr(c_s)) + print(" gamma", ctx.nstr(alpha_s), ctx.nstr(beta_s)) + print(" hyper", ctx.nstr(a_s), ctx.nstr(b_s)) + print(" z", ctx.nstr(z)) + #v = ctx.hyper(a_s, b_s, z, **kwargs) + #for a in alpha_s: v *= ctx.gamma(a) + #for b in beta_s: v *= ctx.rgamma(b) + #for w, c in zip(w_s, c_s): v *= ctx.power(w, c) + v = ctx.fprod([ctx.hyper(a_s, b_s, z, **kwargs)] + \ + [ctx.gamma(a) for a in alpha_s] + \ + [ctx.rgamma(b) for b in beta_s] + \ + [ctx.power(w,c) for (w,c) in zip(w_s,c_s)]) + if verbose: + print(" Value:", v) + evaluated_terms.append(v) + + if len(terms) == 1 and (not perturb): + sumvalue = evaluated_terms[0] + break + + if ctx._fixed_precision: + sumvalue = ctx.fsum(evaluated_terms) + break + + sumvalue = ctx.fsum(evaluated_terms) + term_magnitudes = [ctx.mag(x) for x in evaluated_terms] + max_magnitude = max(term_magnitudes) + sum_magnitude = ctx.mag(sumvalue) + cancellation = max_magnitude - sum_magnitude + if verbose: + print() + print(" Cancellation:", cancellation, "bits") + print(" Increased precision:", ctx.prec - orig, "bits") + + precision_ok = cancellation < ctx.prec - orig + + if zeroprec is None: + zero_ok = False + else: + zero_ok = max_magnitude - ctx.prec < -zeroprec + if infprec is None: + inf_ok = False + else: + inf_ok = max_magnitude > infprec + + if precision_ok and (not perturb) or ctx.isnan(cancellation): + break + elif precision_ok: + if perturbed_reference_value is None: + hextra += 20 + perturbed_reference_value = sumvalue + continue + elif ctx.mag(sumvalue - perturbed_reference_value) <= \ + ctx.mag(sumvalue) - orig: + break + elif zero_ok: + sumvalue = ctx.zero + break + elif inf_ok: + sumvalue = ctx.inf + break + elif 
'hmag' in kwargs: + break + else: + hextra *= 2 + perturbed_reference_value = sumvalue + # Increase precision + else: + increment = min(max(cancellation, orig//2), max(extraprec,orig)) + ctx.prec += increment + if verbose: + print(" Must start over with increased precision") + continue + finally: + ctx.prec = orig + return +sumvalue + +@defun +def hyper(ctx, a_s, b_s, z, **kwargs): + """ + Hypergeometric function, general case. + """ + z = ctx.convert(z) + p = len(a_s) + q = len(b_s) + a_s = [ctx._convert_param(a) for a in a_s] + b_s = [ctx._convert_param(b) for b in b_s] + # Reduce degree by eliminating common parameters + if kwargs.get('eliminate', True): + elim_nonpositive = kwargs.get('eliminate_all', False) + i = 0 + while i < q and a_s: + b = b_s[i] + if b in a_s and (elim_nonpositive or not ctx.isnpint(b[0])): + a_s.remove(b) + b_s.remove(b) + p -= 1 + q -= 1 + else: + i += 1 + # Handle special cases + if p == 0: + if q == 1: return ctx._hyp0f1(b_s, z, **kwargs) + elif q == 0: return ctx.exp(z) + elif p == 1: + if q == 1: return ctx._hyp1f1(a_s, b_s, z, **kwargs) + elif q == 2: return ctx._hyp1f2(a_s, b_s, z, **kwargs) + elif q == 0: return ctx._hyp1f0(a_s[0][0], z) + elif p == 2: + if q == 1: return ctx._hyp2f1(a_s, b_s, z, **kwargs) + elif q == 2: return ctx._hyp2f2(a_s, b_s, z, **kwargs) + elif q == 3: return ctx._hyp2f3(a_s, b_s, z, **kwargs) + elif q == 0: return ctx._hyp2f0(a_s, b_s, z, **kwargs) + elif p == q+1: + return ctx._hypq1fq(p, q, a_s, b_s, z, **kwargs) + elif p > q+1 and not kwargs.get('force_series'): + return ctx._hyp_borel(p, q, a_s, b_s, z, **kwargs) + coeffs, types = zip(*(a_s+b_s)) + return ctx.hypsum(p, q, types, coeffs, z, **kwargs) + +@defun +def hyp0f1(ctx,b,z,**kwargs): + return ctx.hyper([],[b],z,**kwargs) + +@defun +def hyp1f1(ctx,a,b,z,**kwargs): + return ctx.hyper([a],[b],z,**kwargs) + +@defun +def hyp1f2(ctx,a1,b1,b2,z,**kwargs): + return ctx.hyper([a1],[b1,b2],z,**kwargs) + +@defun +def hyp2f1(ctx,a,b,c,z,**kwargs): + return ctx.hyper([a,b],[c],z,**kwargs) + +@defun +def hyp2f2(ctx,a1,a2,b1,b2,z,**kwargs): + return ctx.hyper([a1,a2],[b1,b2],z,**kwargs) + +@defun +def hyp2f3(ctx,a1,a2,b1,b2,b3,z,**kwargs): + return ctx.hyper([a1,a2],[b1,b2,b3],z,**kwargs) + +@defun +def hyp2f0(ctx,a,b,z,**kwargs): + return ctx.hyper([a,b],[],z,**kwargs) + +@defun +def hyp3f2(ctx,a1,a2,a3,b1,b2,z,**kwargs): + return ctx.hyper([a1,a2,a3],[b1,b2],z,**kwargs) + +@defun_wrapped +def _hyp1f0(ctx, a, z): + return (1-z) ** (-a) + +@defun +def _hyp0f1(ctx, b_s, z, **kwargs): + (b, btype), = b_s + if z: + magz = ctx.mag(z) + else: + magz = 0 + if magz >= 8 and not kwargs.get('force_series'): + try: + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric0F1/06/02/03/0004/ + # TODO: handle the all-real case more efficiently! 
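+            # For large |z| the leading behavior follows from the Bessel-function
+            # representation 0F1(;b;z) = gamma(b) * z**((1-b)/2) * I_{b-1}(2*sqrt(z)),
+            # so the value grows roughly like exp(2*sqrt(z)). The two hypercomb
+            # terms constructed below are the exponentially large and the
+            # exponentially small halves of that expansion.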
+ # TODO: figure out how much precision is needed (exponential growth) + orig = ctx.prec + try: + ctx.prec += 12 + magz//2 + def h(): + w = ctx.sqrt(-z) + jw = ctx.j*w + u = 1/(4*jw) + c = ctx.mpq_1_2 - b + E = ctx.exp(2*jw) + T1 = ([-jw,E], [c,-1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], -u) + T2 = ([jw,E], [c,1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], u) + return T1, T2 + v = ctx.hypercomb(h, [], force_series=True) + v = ctx.gamma(b)/(2*ctx.sqrt(ctx.pi))*v + finally: + ctx.prec = orig + if ctx._is_real_type(b) and ctx._is_real_type(z): + v = ctx._re(v) + return +v + except ctx.NoConvergence: + pass + return ctx.hypsum(0, 1, (btype,), [b], z, **kwargs) + +@defun +def _hyp1f1(ctx, a_s, b_s, z, **kwargs): + (a, atype), = a_s + (b, btype), = b_s + if not z: + return ctx.one+z + magz = ctx.mag(z) + if magz >= 7 and not (ctx.isint(a) and ctx.re(a) <= 0): + if ctx.isinf(z): + if ctx.sign(a) == ctx.sign(b) == ctx.sign(z) == 1: + return ctx.inf + return ctx.nan * z + try: + try: + ctx.prec += magz + sector = ctx._im(z) < 0 + def h(a,b): + if sector: + E = ctx.expjpi(ctx.fneg(a, exact=True)) + else: + E = ctx.expjpi(a) + rz = 1/z + T1 = ([E,z], [1,-a], [b], [b-a], [a, 1+a-b], [], -rz) + T2 = ([ctx.exp(z),z], [1,a-b], [b], [a], [b-a, 1-a], [], rz) + return T1, T2 + v = ctx.hypercomb(h, [a,b], force_series=True) + if ctx._is_real_type(a) and ctx._is_real_type(b) and ctx._is_real_type(z): + v = ctx._re(v) + return +v + except ctx.NoConvergence: + pass + finally: + ctx.prec -= magz + v = ctx.hypsum(1, 1, (atype, btype), [a, b], z, **kwargs) + return v + +def _hyp2f1_gosper(ctx,a,b,c,z,**kwargs): + # Use Gosper's recurrence + # See http://www.math.utexas.edu/pipermail/maxima/2006/000126.html + _a,_b,_c,_z = a, b, c, z + orig = ctx.prec + maxprec = kwargs.get('maxprec', 100*orig) + extra = 10 + while 1: + ctx.prec = orig + extra + #a = ctx.convert(_a) + #b = ctx.convert(_b) + #c = ctx.convert(_c) + z = ctx.convert(_z) + d = ctx.mpf(0) + e = ctx.mpf(1) + f = ctx.mpf(0) + k = 0 + # Common subexpression elimination, unfortunately making + # things a bit unreadable. The formula is quite messy to begin + # with, though... 
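+        # The loop below maintains a triple (d, e, f): f accumulates the value
+        # of 2F1(a,b,c,z), while d and e carry auxiliary state for Gosper's
+        # recurrence. Iteration stops when successive values of f agree to the
+        # working tolerance; the cancellation check afterwards restarts the
+        # computation at higher precision if intermediate terms grew much
+        # larger than the result.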
+ abz = a*b*z + ch = c * ctx.mpq_1_2 + c1h = (c+1) * ctx.mpq_1_2 + nz = 1-z + g = z/nz + abg = a*b*g + cba = c-b-a + z2 = z-2 + tol = -ctx.prec - 10 + nstr = ctx.nstr + nprint = ctx.nprint + mag = ctx.mag + maxmag = ctx.ninf + while 1: + kch = k+ch + kakbz = (k+a)*(k+b)*z / (4*(k+1)*kch*(k+c1h)) + d1 = kakbz*(e-(k+cba)*d*g) + e1 = kakbz*(d*abg+(k+c)*e) + ft = d*(k*(cba*z+k*z2-c)-abz)/(2*kch*nz) + f1 = f + e - ft + maxmag = max(maxmag, mag(f1)) + if mag(f1-f) < tol: + break + d, e, f = d1, e1, f1 + k += 1 + cancellation = maxmag - mag(f1) + if cancellation < extra: + break + else: + extra += cancellation + if extra > maxprec: + raise ctx.NoConvergence + return f1 + +@defun +def _hyp2f1(ctx, a_s, b_s, z, **kwargs): + (a, atype), (b, btype) = a_s + (c, ctype), = b_s + if z == 1: + # TODO: the following logic can be simplified + convergent = ctx.re(c-a-b) > 0 + finite = (ctx.isint(a) and a <= 0) or (ctx.isint(b) and b <= 0) + zerodiv = ctx.isint(c) and c <= 0 and not \ + ((ctx.isint(a) and c <= a <= 0) or (ctx.isint(b) and c <= b <= 0)) + #print "bz", a, b, c, z, convergent, finite, zerodiv + # Gauss's theorem gives the value if convergent + if (convergent or finite) and not zerodiv: + return ctx.gammaprod([c, c-a-b], [c-a, c-b], _infsign=True) + # Otherwise, there is a pole and we take the + # sign to be that when approaching from below + # XXX: this evaluation is not necessarily correct in all cases + return ctx.hyp2f1(a,b,c,1-ctx.eps*2) * ctx.inf + + # Equal to 1 (first term), unless there is a subsequent + # division by zero + if not z: + # Division by zero but power of z is higher than + # first order so cancels + if c or a == 0 or b == 0: + return 1+z + # Indeterminate + return ctx.nan + + # Hit zero denominator unless numerator goes to 0 first + if ctx.isint(c) and c <= 0: + if (ctx.isint(a) and c <= a <= 0) or \ + (ctx.isint(b) and c <= b <= 0): + pass + else: + # Pole in series + return ctx.inf + + absz = abs(z) + + # Fast case: standard series converges rapidly, + # possibly in finitely many terms + if absz <= 0.8 or (ctx.isint(a) and a <= 0 and a >= -1000) or \ + (ctx.isint(b) and b <= 0 and b >= -1000): + return ctx.hypsum(2, 1, (atype, btype, ctype), [a, b, c], z, **kwargs) + + orig = ctx.prec + try: + ctx.prec += 10 + + # Use 1/z transformation + if absz >= 1.3: + def h(a,b): + t = ctx.mpq_1-c; ab = a-b; rz = 1/z + T1 = ([-z],[-a], [c,-ab],[b,c-a], [a,t+a],[ctx.mpq_1+ab], rz) + T2 = ([-z],[-b], [c,ab],[a,c-b], [b,t+b],[ctx.mpq_1-ab], rz) + return T1, T2 + v = ctx.hypercomb(h, [a,b], **kwargs) + + # Use 1-z transformation + elif abs(1-z) <= 0.75: + def h(a,b): + t = c-a-b; ca = c-a; cb = c-b; rz = 1-z + T1 = [], [], [c,t], [ca,cb], [a,b], [1-t], rz + T2 = [rz], [t], [c,a+b-c], [a,b], [ca,cb], [1+t], rz + return T1, T2 + v = ctx.hypercomb(h, [a,b], **kwargs) + + # Use z/(z-1) transformation + elif abs(z/(z-1)) <= 0.75: + v = ctx.hyp2f1(a, c-b, c, z/(z-1)) / (1-z)**a + + # Remaining part of unit circle + else: + v = _hyp2f1_gosper(ctx,a,b,c,z,**kwargs) + + finally: + ctx.prec = orig + return +v + +@defun +def _hypq1fq(ctx, p, q, a_s, b_s, z, **kwargs): + r""" + Evaluates 3F2, 4F3, 5F4, ... 
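+    that is, the case p = q+1. Direct summation is used for |z| < 1 or when
+    the series terminates; otherwise the implementation falls back on an
+    expansion around z = 1, convergence acceleration on the unit circle,
+    or the 1/z transformation, as outlined in the comments below.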
+ """ + a_s, a_types = zip(*a_s) + b_s, b_types = zip(*b_s) + a_s = list(a_s) + b_s = list(b_s) + absz = abs(z) + ispoly = False + for a in a_s: + if ctx.isint(a) and a <= 0: + ispoly = True + break + # Direct summation + if absz < 1 or ispoly: + try: + return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) + except ctx.NoConvergence: + if absz > 1.1 or ispoly: + raise + # Use expansion at |z-1| -> 0. + # Reference: Wolfgang Buhring, "Generalized Hypergeometric Functions at + # Unit Argument", Proc. Amer. Math. Soc., Vol. 114, No. 1 (Jan. 1992), + # pp.145-153 + # The current implementation has several problems: + # 1. We only implement it for 3F2. The expansion coefficients are + # given by extremely messy nested sums in the higher degree cases + # (see reference). Is efficient sequential generation of the coefficients + # possible in the > 3F2 case? + # 2. Although the series converges, it may do so slowly, so we need + # convergence acceleration. The acceleration implemented by + # nsum does not always help, so results returned are sometimes + # inaccurate! Can we do better? + # 3. We should check conditions for convergence, and possibly + # do a better job of cancelling out gamma poles if possible. + if z == 1: + # XXX: should also check for division by zero in the + # denominator of the series (cf. hyp2f1) + S = ctx.re(sum(b_s)-sum(a_s)) + if S <= 0: + #return ctx.hyper(a_s, b_s, 1-ctx.eps*2, **kwargs) * ctx.inf + return ctx.hyper(a_s, b_s, 0.9, **kwargs) * ctx.inf + if (p,q) == (3,2) and abs(z-1) < 0.05: # and kwargs.get('sum1') + #print "Using alternate summation (experimental)" + a1,a2,a3 = a_s + b1,b2 = b_s + u = b1+b2-a3 + initial = ctx.gammaprod([b2-a3,b1-a3,a1,a2],[b2-a3,b1-a3,1,u]) + def term(k, _cache={0:initial}): + u = b1+b2-a3+k + if k in _cache: + t = _cache[k] + else: + t = _cache[k-1] + t *= (b1+k-a3-1)*(b2+k-a3-1) + t /= k*(u-1) + _cache[k] = t + return t * ctx.hyp2f1(a1,a2,u,z) + try: + S = ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), + strict=kwargs.get('strict', True)) + return S * ctx.gammaprod([b1,b2],[a1,a2,a3]) + except ctx.NoConvergence: + pass + # Try to use convergence acceleration on and close to the unit circle. + # Problem: the convergence acceleration degenerates as |z-1| -> 0, + # except for special cases. Everywhere else, the Shanks transformation + # is very efficient. + if absz < 1.1 and ctx._re(z) <= 1: + + def term(kk, _cache={0:ctx.one}): + k = int(kk) + if k != kk: + t = z ** ctx.mpf(kk) / ctx.fac(kk) + for a in a_s: t *= ctx.rf(a,kk) + for b in b_s: t /= ctx.rf(b,kk) + return t + if k in _cache: + return _cache[k] + t = term(k-1) + m = k-1 + for j in xrange(p): t *= (a_s[j]+m) + for j in xrange(q): t /= (b_s[j]+m) + t *= z + t /= k + _cache[k] = t + return t + + sum_method = kwargs.get('sum_method', 'r+s+e') + + try: + return ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), + strict=kwargs.get('strict', True), + method=sum_method.replace('e','')) + except ctx.NoConvergence: + if 'e' not in sum_method: + raise + pass + + if kwargs.get('verbose'): + print("Attempting Euler-Maclaurin summation") + + + """ + Somewhat slower version (one diffs_exp for each factor). + However, this would be faster with fast direct derivatives + of the gamma function. 
+ + def power_diffs(k0): + r = 0 + l = ctx.log(z) + while 1: + yield z**ctx.mpf(k0) * l**r + r += 1 + + def loggamma_diffs(x, reciprocal=False): + sign = (-1) ** reciprocal + yield sign * ctx.loggamma(x) + i = 0 + while 1: + yield sign * ctx.psi(i,x) + i += 1 + + def hyper_diffs(k0): + b2 = b_s + [1] + A = [ctx.diffs_exp(loggamma_diffs(a+k0)) for a in a_s] + B = [ctx.diffs_exp(loggamma_diffs(b+k0,True)) for b in b2] + Z = [power_diffs(k0)] + C = ctx.gammaprod([b for b in b2], [a for a in a_s]) + for d in ctx.diffs_prod(A + B + Z): + v = C * d + yield v + """ + + def log_diffs(k0): + b2 = b_s + [1] + yield sum(ctx.loggamma(a+k0) for a in a_s) - \ + sum(ctx.loggamma(b+k0) for b in b2) + k0*ctx.log(z) + i = 0 + while 1: + v = sum(ctx.psi(i,a+k0) for a in a_s) - \ + sum(ctx.psi(i,b+k0) for b in b2) + if i == 0: + v += ctx.log(z) + yield v + i += 1 + + def hyper_diffs(k0): + C = ctx.gammaprod([b for b in b_s], [a for a in a_s]) + for d in ctx.diffs_exp(log_diffs(k0)): + v = C * d + yield v + + tol = ctx.eps / 1024 + prec = ctx.prec + try: + trunc = 50 * ctx.dps + ctx.prec += 20 + for i in xrange(5): + head = ctx.fsum(term(k) for k in xrange(trunc)) + tail, err = ctx.sumem(term, [trunc, ctx.inf], tol=tol, + adiffs=hyper_diffs(trunc), + verbose=kwargs.get('verbose'), + error=True, + _fast_abort=True) + if err < tol: + v = head + tail + break + trunc *= 2 + # Need to increase precision because calculation of + # derivatives may be inaccurate + ctx.prec += ctx.prec//2 + if i == 4: + raise ctx.NoConvergence(\ + "Euler-Maclaurin summation did not converge") + finally: + ctx.prec = prec + return +v + + # Use 1/z transformation + # http://functions.wolfram.com/HypergeometricFunctions/ + # HypergeometricPFQ/06/01/05/02/0004/ + def h(*args): + a_s = list(args[:p]) + b_s = list(args[p:]) + Ts = [] + recz = ctx.one/z + negz = ctx.fneg(z, exact=True) + for k in range(q+1): + ak = a_s[k] + C = [negz] + Cp = [-ak] + Gn = b_s + [ak] + [a_s[j]-ak for j in range(q+1) if j != k] + Gd = a_s + [b_s[j]-ak for j in range(q)] + Fn = [ak] + [ak-b_s[j]+1 for j in range(q)] + Fd = [1-a_s[j]+ak for j in range(q+1) if j != k] + Ts.append((C, Cp, Gn, Gd, Fn, Fd, recz)) + return Ts + return ctx.hypercomb(h, a_s+b_s, **kwargs) + +@defun +def _hyp_borel(ctx, p, q, a_s, b_s, z, **kwargs): + if a_s: + a_s, a_types = zip(*a_s) + a_s = list(a_s) + else: + a_s, a_types = [], () + if b_s: + b_s, b_types = zip(*b_s) + b_s = list(b_s) + else: + b_s, b_types = [], () + kwargs['maxterms'] = kwargs.get('maxterms', ctx.prec) + try: + return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) + except ctx.NoConvergence: + pass + prec = ctx.prec + try: + tol = kwargs.get('asymp_tol', ctx.eps/4) + ctx.prec += 10 + # hypsum is has a conservative tolerance. 
So we try again: + def term(k, cache={0:ctx.one}): + if k in cache: + return cache[k] + t = term(k-1) + for a in a_s: t *= (a+(k-1)) + for b in b_s: t /= (b+(k-1)) + t *= z + t /= k + cache[k] = t + return t + s = ctx.one + for k in xrange(1, ctx.prec): + t = term(k) + s += t + if abs(t) <= tol: + return s + finally: + ctx.prec = prec + if p <= q+3: + contour = kwargs.get('contour') + if not contour: + if ctx.arg(z) < 0.25: + u = z / max(1, abs(z)) + if ctx.arg(z) >= 0: + contour = [0, 2j, (2j+2)/u, 2/u, ctx.inf] + else: + contour = [0, -2j, (-2j+2)/u, 2/u, ctx.inf] + #contour = [0, 2j/z, 2/z, ctx.inf] + #contour = [0, 2j, 2/z, ctx.inf] + #contour = [0, 2j, ctx.inf] + else: + contour = [0, ctx.inf] + quad_kwargs = kwargs.get('quad_kwargs', {}) + def g(t): + return ctx.exp(-t)*ctx.hyper(a_s, b_s+[1], t*z) + I, err = ctx.quad(g, contour, error=True, **quad_kwargs) + if err <= abs(I)*ctx.eps*8: + return I + raise ctx.NoConvergence + + +@defun +def _hyp2f2(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), (a2, a2type) = a_s + (b1, b1type), (b2, b2type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + orig = ctx.prec + + # Asymptotic expansion is ~ exp(z) + asymp_extraprec = magz + + # Asymptotic series is in terms of 3F1 + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 3) + + # TODO: much of the following could be shared with 2F3 instead of + # copypasted + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric2F2/06/02/02/0002/ + def h(a1,a2,b1,b2): + X = a1+a2-b1-b2 + A2 = a1+a2 + B2 = b1+b2 + c = {} + c[0] = ctx.one + c[1] = (A2-1)*X+b1*b2-a1*a2 + s1 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = 1-B2+2*a1+a1**2+2*a2+a2**2-A2*B2+a1*a2+b1*b2+(2*B2-3*(A2+1))*k+2*k**2 + uu2 = (k-A2+b1-1)*(k-A2+b2-1)*(k-X-2) + c[k] = ctx.one/k * (uu1*c[k-1]-uu2*c[k-2]) + t1 = c[k] * z**(-k) + if abs(t1) < 0.1*ctx.eps: + #print "Convergence :)" + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + #print "No convergence :(" + raise ctx.NoConvergence + s1 += t1 + tprev = t1 + k += 1 + S = ctx.exp(z)*s1 + T1 = [z,S], [X,1], [b1,b2],[a1,a2],[],[],0 + T2 = [-z],[-a1],[b1,b2,a2-a1],[a2,b1-a1,b2-a1],[a1,a1-b1+1,a1-b2+1],[a1-a2+1],-1/z + T3 = [-z],[-a2],[b1,b2,a1-a2],[a1,b1-a2,b2-a2],[a2,a2-b1+1,a2-b2+1],[-a1+a2+1],-1/z + return T1, T2, T3 + v = ctx.hypercomb(h, [a1,a2,b1,b2], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,z]) == 5: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + return ctx.hypsum(2, 2, (a1type, a2type, b1type, b2type), [a1, a2, b1, b2], z, **kwargs) + + + +@defun +def _hyp1f2(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), = a_s + (b1, b1type), (b2, b2type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + orig = ctx.prec + + # Asymptotic expansion is ~ exp(sqrt(z)) + asymp_extraprec = z and magz//2 + + # Asymptotic series is in terms of 3F0 + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 19) and \ + (ctx.sqrt(absz) > 1.5*orig) # and \ + # ctx._hyp_check_convergence([a1, a1-b1+1, a1-b2+1], [], + # 1/absz, orig+40+asymp_extraprec) + + # TODO: much of the following could be shared with 2F3 instead of + # copypasted + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # 
Hypergeometric1F2/06/02/03/ + def h(a1,b1,b2): + X = ctx.mpq_1_2*(a1-b1-b2+ctx.mpq_1_2) + c = {} + c[0] = ctx.one + c[1] = 2*(ctx.mpq_1_4*(3*a1+b1+b2-2)*(a1-b1-b2)+b1*b2-ctx.mpq_3_16) + c[2] = 2*(b1*b2+ctx.mpq_1_4*(a1-b1-b2)*(3*a1+b1+b2-2)-ctx.mpq_3_16)**2+\ + ctx.mpq_1_16*(-16*(2*a1-3)*b1*b2 + \ + 4*(a1-b1-b2)*(-8*a1**2+11*a1+b1+b2-2)-3) + s1 = 0 + s2 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = (3*k**2+(-6*a1+2*b1+2*b2-4)*k + 3*a1**2 - \ + (b1-b2)**2 - 2*a1*(b1+b2-2) + ctx.mpq_1_4) + uu2 = (k-a1+b1-b2-ctx.mpq_1_2)*(k-a1-b1+b2-ctx.mpq_1_2)*\ + (k-a1+b1+b2-ctx.mpq_5_2) + c[k] = ctx.one/(2*k)*(uu1*c[k-1]-uu2*c[k-2]) + w = c[k] * (-z)**(-0.5*k) + t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w + t2 = ctx.j**k * ctx.mpf(2)**(-k) * w + if abs(t1) < 0.1*ctx.eps: + #print "Convergence :)" + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + #print "No convergence :(" + raise ctx.NoConvergence + s1 += t1 + s2 += t2 + tprev = t1 + k += 1 + S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ + ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 + T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2], [a1],\ + [], [], 0 + T2 = [-z], [-a1], [b1,b2],[b1-a1,b2-a1], \ + [a1,a1-b1+1,a1-b2+1], [], 1/z + return T1, T2 + v = ctx.hypercomb(h, [a1,b1,b2], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,b1,b2,z]) == 4: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + #print "not using asymp" + return ctx.hypsum(1, 2, (a1type, b1type, b2type), [a1, b1, b2], z, **kwargs) + + + +@defun +def _hyp2f3(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), (a2, a2type) = a_s + (b1, b1type), (b2, b2type), (b3, b3type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + + # Asymptotic expansion is ~ exp(sqrt(z)) + asymp_extraprec = z and magz//2 + orig = ctx.prec + + # Asymptotic series is in terms of 4F1 + # The square root below empirically provides a plausible criterion + # for the leading series to converge + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 19) and (ctx.sqrt(absz) > 1.5*orig) + + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric2F3/06/02/03/01/0002/ + def h(a1,a2,b1,b2,b3): + X = ctx.mpq_1_2*(a1+a2-b1-b2-b3+ctx.mpq_1_2) + A2 = a1+a2 + B3 = b1+b2+b3 + A = a1*a2 + B = b1*b2+b3*b2+b1*b3 + R = b1*b2*b3 + c = {} + c[0] = ctx.one + c[1] = 2*(B - A + ctx.mpq_1_4*(3*A2+B3-2)*(A2-B3) - ctx.mpq_3_16) + c[2] = ctx.mpq_1_2*c[1]**2 + ctx.mpq_1_16*(-16*(2*A2-3)*(B-A) + 32*R +\ + 4*(-8*A2**2 + 11*A2 + 8*A + B3 - 2)*(A2-B3)-3) + s1 = 0 + s2 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = (k-2*X-3)*(k-2*X-2*b1-1)*(k-2*X-2*b2-1)*\ + (k-2*X-2*b3-1) + uu2 = (4*(k-1)**3 - 6*(4*X+B3)*(k-1)**2 + \ + 2*(24*X**2+12*B3*X+4*B+B3-1)*(k-1) - 32*X**3 - \ + 24*B3*X**2 - 4*B - 8*R - 4*(4*B+B3-1)*X + 2*B3-1) + uu3 = (5*(k-1)**2+2*(-10*X+A2-3*B3+3)*(k-1)+2*c[1]) + c[k] = ctx.one/(2*k)*(uu1*c[k-3]-uu2*c[k-2]+uu3*c[k-1]) + w = c[k] * ctx.power(-z, -0.5*k) + t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w + t2 = ctx.j**k * ctx.mpf(2)**(-k) * w + if abs(t1) < 0.1*ctx.eps: + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + raise ctx.NoConvergence + s1 += t1 + s2 += t2 + tprev = t1 + k += 1 + S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ + ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 + T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2, b3], 
[a1, a2],\ + [], [], 0 + T2 = [-z], [-a1], [b1,b2,b3,a2-a1],[a2,b1-a1,b2-a1,b3-a1], \ + [a1,a1-b1+1,a1-b2+1,a1-b3+1], [a1-a2+1], 1/z + T3 = [-z], [-a2], [b1,b2,b3,a1-a2],[a1,b1-a2,b2-a2,b3-a2], \ + [a2,a2-b1+1,a2-b2+1,a2-b3+1],[-a1+a2+1], 1/z + return T1, T2, T3 + v = ctx.hypercomb(h, [a1,a2,b1,b2,b3], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,b3,z]) == 6: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + return ctx.hypsum(2, 3, (a1type, a2type, b1type, b2type, b3type), [a1, a2, b1, b2, b3], z, **kwargs) + +@defun +def _hyp2f0(ctx, a_s, b_s, z, **kwargs): + (a, atype), (b, btype) = a_s + # We want to try aggressively to use the asymptotic expansion, + # and fall back only when absolutely necessary + try: + kwargsb = kwargs.copy() + kwargsb['maxterms'] = kwargsb.get('maxterms', ctx.prec) + return ctx.hypsum(2, 0, (atype,btype), [a,b], z, **kwargsb) + except ctx.NoConvergence: + if kwargs.get('force_series'): + raise + pass + def h(a, b): + w = ctx.sinpi(b) + rz = -1/z + T1 = ([ctx.pi,w,rz],[1,-1,a],[],[a-b+1,b],[a],[b],rz) + T2 = ([-ctx.pi,w,rz],[1,-1,1+a-b],[],[a,2-b],[a-b+1],[2-b],rz) + return T1, T2 + return ctx.hypercomb(h, [a, 1+a-b], **kwargs) + +@defun +def meijerg(ctx, a_s, b_s, z, r=1, series=None, **kwargs): + an, ap = a_s + bm, bq = b_s + n = len(an) + p = n + len(ap) + m = len(bm) + q = m + len(bq) + a = an+ap + b = bm+bq + a = [ctx.convert(_) for _ in a] + b = [ctx.convert(_) for _ in b] + z = ctx.convert(z) + if series is None: + if p < q: series = 1 + if p > q: series = 2 + if p == q: + if m+n == p and abs(z) > 1: + series = 2 + else: + series = 1 + if kwargs.get('verbose'): + print("Meijer G m,n,p,q,series =", m,n,p,q,series) + if series == 1: + def h(*args): + a = args[:p] + b = args[p:] + terms = [] + for k in range(m): + bases = [z] + expts = [b[k]/r] + gn = [b[j]-b[k] for j in range(m) if j != k] + gn += [1-a[j]+b[k] for j in range(n)] + gd = [a[j]-b[k] for j in range(n,p)] + gd += [1-b[j]+b[k] for j in range(m,q)] + hn = [1-a[j]+b[k] for j in range(p)] + hd = [1-b[j]+b[k] for j in range(q) if j != k] + hz = (-ctx.one)**(p-m-n) * z**(ctx.one/r) + terms.append((bases, expts, gn, gd, hn, hd, hz)) + return terms + else: + def h(*args): + a = args[:p] + b = args[p:] + terms = [] + for k in range(n): + bases = [z] + if r == 1: + expts = [a[k]-1] + else: + expts = [(a[k]-1)/ctx.convert(r)] + gn = [a[k]-a[j] for j in range(n) if j != k] + gn += [1-a[k]+b[j] for j in range(m)] + gd = [a[k]-b[j] for j in range(m,q)] + gd += [1-a[k]+a[j] for j in range(n,p)] + hn = [1-a[k]+b[j] for j in range(q)] + hd = [1+a[j]-a[k] for j in range(p) if j != k] + hz = (-ctx.one)**(q-m-n) / z**(ctx.one/r) + terms.append((bases, expts, gn, gd, hn, hd, hz)) + return terms + return ctx.hypercomb(h, a+b, **kwargs) + +@defun_wrapped +def appellf1(ctx,a,b1,b2,c,x,y,**kwargs): + # Assume x smaller + # We will use x for the outer loop + if abs(x) > abs(y): + x, y = y, x + b1, b2 = b2, b1 + def ok(x): + return abs(x) < 0.99 + # Finite cases + if ctx.isnpint(a): + pass + elif ctx.isnpint(b1): + pass + elif ctx.isnpint(b2): + x, y, b1, b2 = y, x, b2, b1 + else: + #print x, y + # Note: ok if |y| > 1, because + # 2F1 implements analytic continuation + if not ok(x): + u1 = (x-y)/(x-1) + if not ok(u1): + raise ValueError("Analytic continuation not implemented") + #print "Using analytic continuation" + return (1-x)**(-b1)*(1-y)**(c-a-b2)*\ + ctx.appellf1(c-a,b1,c-b1-b2,c,u1,y,**kwargs) + return 
ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m+n':[c]}, x,y, **kwargs) + +@defun +def appellf2(ctx,a,b1,b2,c1,c2,x,y,**kwargs): + # TODO: continuation + return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, + {'m':[c1],'n':[c2]}, x,y, **kwargs) + +@defun +def appellf3(ctx,a1,a2,b1,b2,c,x,y,**kwargs): + outer_polynomial = ctx.isnpint(a1) or ctx.isnpint(b1) + inner_polynomial = ctx.isnpint(a2) or ctx.isnpint(b2) + if not outer_polynomial: + if inner_polynomial or abs(x) > abs(y): + x, y = y, x + a1,a2,b1,b2 = a2,a1,b2,b1 + return ctx.hyper2d({'m':[a1,b1],'n':[a2,b2]}, {'m+n':[c]},x,y,**kwargs) + +@defun +def appellf4(ctx,a,b,c1,c2,x,y,**kwargs): + # TODO: continuation + return ctx.hyper2d({'m+n':[a,b]}, {'m':[c1],'n':[c2]},x,y,**kwargs) + +@defun +def hyper2d(ctx, a, b, x, y, **kwargs): + r""" + Sums the generalized 2D hypergeometric series + + .. math :: + + \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{P((a),m,n)}{Q((b),m,n)} + \frac{x^m y^n} {m! n!} + + where `(a) = (a_1,\ldots,a_r)`, `(b) = (b_1,\ldots,b_s)` and where + `P` and `Q` are products of rising factorials such as `(a_j)_n` or + `(a_j)_{m+n}`. `P` and `Q` are specified in the form of dicts, with + the `m` and `n` dependence as keys and parameter lists as values. + The supported rising factorials are given in the following table + (note that only a few are supported in `Q`): + + +------------+-------------------+--------+ + | Key | Rising factorial | `Q` | + +============+===================+========+ + | ``'m'`` | `(a_j)_m` | Yes | + +------------+-------------------+--------+ + | ``'n'`` | `(a_j)_n` | Yes | + +------------+-------------------+--------+ + | ``'m+n'`` | `(a_j)_{m+n}` | Yes | + +------------+-------------------+--------+ + | ``'m-n'`` | `(a_j)_{m-n}` | No | + +------------+-------------------+--------+ + | ``'n-m'`` | `(a_j)_{n-m}` | No | + +------------+-------------------+--------+ + | ``'2m+n'`` | `(a_j)_{2m+n}` | No | + +------------+-------------------+--------+ + | ``'2m-n'`` | `(a_j)_{2m-n}` | No | + +------------+-------------------+--------+ + | ``'2n-m'`` | `(a_j)_{2n-m}` | No | + +------------+-------------------+--------+ + + For example, the Appell F1 and F4 functions + + .. math :: + + F_1 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_m (c)_n}{(d)_{m+n}} + \frac{x^m y^n}{m! n!} + + F_4 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_{m+n}}{(c)_m (d)_{n}} + \frac{x^m y^n}{m! n!} + + can be represented respectively as + + ``hyper2d({'m+n':[a], 'm':[b], 'n':[c]}, {'m+n':[d]}, x, y)`` + + ``hyper2d({'m+n':[a,b]}, {'m':[c], 'n':[d]}, x, y)`` + + More generally, :func:`~mpmath.hyper2d` can evaluate any of the 34 distinct + convergent second-order (generalized Gaussian) hypergeometric + series enumerated by Horn, as well as the Kampe de Feriet + function. + + The series is computed by rewriting it so that the inner + series (i.e. the series containing `n` and `y`) has the form of an + ordinary generalized hypergeometric series and thereby can be + evaluated efficiently using :func:`~mpmath.hyper`. If possible, + manually swapping `x` and `y` and the corresponding parameters + can sometimes give better results. 
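+
+    Concretely, the outer index `m` is peeled off using identities such as
+    `(a)_{m+n} = (a)_m (a+m)_n`, so that for each fixed `m` the inner sum
+    over `n` becomes an ordinary hypergeometric series that
+    :func:`~mpmath.hyper` can evaluate; the outer sum is then accumulated
+    until three successive terms fall below the working tolerance.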
+ + **Examples** + + Two separable cases: a product of two geometric series, and a + product of two Gaussian hypergeometric functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> x, y = mpf(0.25), mpf(0.5) + >>> hyper2d({'m':1,'n':1}, {}, x,y) + 2.666666666666666666666667 + >>> 1/(1-x)/(1-y) + 2.666666666666666666666667 + >>> hyper2d({'m':[1,2],'n':[3,4]}, {'m':[5],'n':[6]}, x,y) + 4.164358531238938319669856 + >>> hyp2f1(1,2,5,x)*hyp2f1(3,4,6,y) + 4.164358531238938319669856 + + Some more series that can be done in closed form:: + + >>> hyper2d({'m':1,'n':1},{'m+n':1},x,y) + 2.013417124712514809623881 + >>> (exp(x)*x-exp(y)*y)/(x-y) + 2.013417124712514809623881 + + Six of the 34 Horn functions, G1-G3 and H1-H3:: + + >>> from mpmath import * + >>> mp.dps = 10; mp.pretty = True + >>> x, y = 0.0625, 0.125 + >>> a1,a2,b1,b2,c1,c2,d = 1.1,-1.2,-1.3,-1.4,1.5,-1.6,1.7 + >>> hyper2d({'m+n':a1,'n-m':b1,'m-n':b2},{},x,y) # G1 + 1.139090746 + >>> nsum(lambda m,n: rf(a1,m+n)*rf(b1,n-m)*rf(b2,m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 1.139090746 + >>> hyper2d({'m':a1,'n':a2,'n-m':b1,'m-n':b2},{},x,y) # G2 + 0.9503682696 + >>> nsum(lambda m,n: rf(a1,m)*rf(a2,n)*rf(b1,n-m)*rf(b2,m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 0.9503682696 + >>> hyper2d({'2n-m':a1,'2m-n':a2},{},x,y) # G3 + 1.029372029 + >>> nsum(lambda m,n: rf(a1,2*n-m)*rf(a2,2*m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 1.029372029 + >>> hyper2d({'m-n':a1,'m+n':b1,'n':c1},{'m':d},x,y) # H1 + -1.605331256 + >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m+n)*rf(c1,n)/rf(d,m)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + -1.605331256 + >>> hyper2d({'m-n':a1,'m':b1,'n':[c1,c2]},{'m':d},x,y) # H2 + -2.35405404 + >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m)*rf(c1,n)*rf(c2,n)/rf(d,m)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + -2.35405404 + >>> hyper2d({'2m+n':a1,'n':b1},{'m+n':c1},x,y) # H3 + 0.974479074 + >>> nsum(lambda m,n: rf(a1,2*m+n)*rf(b1,n)/rf(c1,m+n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 0.974479074 + + **References** + + 1. [SrivastavaKarlsson]_ + 2. [Weisstein]_ http://mathworld.wolfram.com/HornFunction.html + 3. 
[Weisstein]_ http://mathworld.wolfram.com/AppellHypergeometricFunction.html + + """ + x = ctx.convert(x) + y = ctx.convert(y) + def parse(dct, key): + args = dct.pop(key, []) + try: + args = list(args) + except TypeError: + args = [args] + return [ctx.convert(arg) for arg in args] + a_s = dict(a) + b_s = dict(b) + a_m = parse(a, 'm') + a_n = parse(a, 'n') + a_m_add_n = parse(a, 'm+n') + a_m_sub_n = parse(a, 'm-n') + a_n_sub_m = parse(a, 'n-m') + a_2m_add_n = parse(a, '2m+n') + a_2m_sub_n = parse(a, '2m-n') + a_2n_sub_m = parse(a, '2n-m') + b_m = parse(b, 'm') + b_n = parse(b, 'n') + b_m_add_n = parse(b, 'm+n') + if a: raise ValueError("unsupported key: %r" % a.keys()[0]) + if b: raise ValueError("unsupported key: %r" % b.keys()[0]) + s = 0 + outer = ctx.one + m = ctx.mpf(0) + ok_count = 0 + prec = ctx.prec + maxterms = kwargs.get('maxterms', 20*prec) + try: + ctx.prec += 10 + tol = +ctx.eps + while 1: + inner_sign = 1 + outer_sign = 1 + inner_a = list(a_n) + inner_b = list(b_n) + outer_a = [a+m for a in a_m] + outer_b = [b+m for b in b_m] + # (a)_{m+n} = (a)_m (a+m)_n + for a in a_m_add_n: + a = a+m + inner_a.append(a) + outer_a.append(a) + # (b)_{m+n} = (b)_m (b+m)_n + for b in b_m_add_n: + b = b+m + inner_b.append(b) + outer_b.append(b) + # (a)_{n-m} = (a-m)_n / (a-m)_m + for a in a_n_sub_m: + inner_a.append(a-m) + outer_b.append(a-m-1) + # (a)_{m-n} = (-1)^(m+n) (1-a-m)_m / (1-a-m)_n + for a in a_m_sub_n: + inner_sign *= (-1) + outer_sign *= (-1)**(m) + inner_b.append(1-a-m) + outer_a.append(-a-m) + # (a)_{2m+n} = (a)_{2m} (a+2m)_n + for a in a_2m_add_n: + inner_a.append(a+2*m) + outer_a.append((a+2*m)*(1+a+2*m)) + # (a)_{2m-n} = (-1)^(2m+n) (1-a-2m)_{2m} / (1-a-2m)_n + for a in a_2m_sub_n: + inner_sign *= (-1) + inner_b.append(1-a-2*m) + outer_a.append((a+2*m)*(1+a+2*m)) + # (a)_{2n-m} = 4^n ((a-m)/2)_n ((a-m+1)/2)_n / (a-m)_m + for a in a_2n_sub_m: + inner_sign *= 4 + inner_a.append(0.5*(a-m)) + inner_a.append(0.5*(a-m+1)) + outer_b.append(a-m-1) + inner = ctx.hyper(inner_a, inner_b, inner_sign*y, + zeroprec=ctx.prec, **kwargs) + term = outer * inner * outer_sign + if abs(term) < tol: + ok_count += 1 + else: + ok_count = 0 + if ok_count >= 3 or not outer: + break + s += term + for a in outer_a: outer *= a + for b in outer_b: outer /= b + m += 1 + outer = outer * x / m + if m > maxterms: + raise ctx.NoConvergence("maxterms exceeded in hyper2d") + finally: + ctx.prec = prec + return +s + +""" +@defun +def kampe_de_feriet(ctx,a,b,c,d,e,f,x,y,**kwargs): + return ctx.hyper2d({'m+n':a,'m':b,'n':c}, + {'m+n':d,'m':e,'n':f}, x,y, **kwargs) +""" + +@defun +def bihyper(ctx, a_s, b_s, z, **kwargs): + r""" + Evaluates the bilateral hypergeometric series + + .. math :: + + \,_AH_B(a_1, \ldots, a_k; b_1, \ldots, b_B; z) = + \sum_{n=-\infty}^{\infty} + \frac{(a_1)_n \ldots (a_A)_n} + {(b_1)_n \ldots (b_B)_n} \, z^n + + where, for direct convergence, `A = B` and `|z| = 1`, although a + regularized sum exists more generally by considering the + bilateral series as a sum of two ordinary hypergeometric + functions. In order for the series to make sense, none of the + parameters may be integers. 
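+
+    Internally the bilateral sum is split at `n = 0`: the part with
+    `n \ge 0` is an ordinary hypergeometric series, and the part with
+    `n < 0` is rewritten as a second one by means of
+    `(a)_{-n} = (-1)^n / (1-a)_n`.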
+ + **Examples** + + The value of `\,_2H_2` at `z = 1` is given by Dougall's formula:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a,b,c,d = 0.5, 1.5, 2.25, 3.25 + >>> bihyper([a,b],[c,d],1) + -14.49118026212345786148847 + >>> gammaprod([c,d,1-a,1-b,c+d-a-b-1],[c-a,d-a,c-b,d-b]) + -14.49118026212345786148847 + + The regularized function `\,_1H_0` can be expressed as the + sum of one `\,_2F_0` function and one `\,_1F_1` function:: + + >>> a = mpf(0.25) + >>> z = mpf(0.75) + >>> bihyper([a], [], z) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + >>> hyper([a,1],[],z) + (hyper([1],[1-a],-1/z)-1) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + >>> hyper([a,1],[],z) + hyper([1],[2-a],-1/z)/z/(a-1) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + + **References** + + 1. [Slater]_ (chapter 6: "Bilateral Series", pp. 180-189) + 2. [Wikipedia]_ http://en.wikipedia.org/wiki/Bilateral_hypergeometric_series + + """ + z = ctx.convert(z) + c_s = a_s + b_s + p = len(a_s) + q = len(b_s) + if (p, q) == (0,0) or (p, q) == (1,1): + return ctx.zero * z + neg = (p-q) % 2 + def h(*c_s): + a_s = list(c_s[:p]) + b_s = list(c_s[p:]) + aa_s = [2-b for b in b_s] + bb_s = [2-a for a in a_s] + rp = [(-1)**neg * z] + [1-b for b in b_s] + [1-a for a in a_s] + rc = [-1] + [1]*len(b_s) + [-1]*len(a_s) + T1 = [], [], [], [], a_s + [1], b_s, z + T2 = rp, rc, [], [], aa_s + [1], bb_s, (-1)**neg / z + return T1, T2 + return ctx.hypercomb(h, c_s, **kwargs) diff --git a/MLPY/Lib/site-packages/mpmath/functions/orthogonal.py b/MLPY/Lib/site-packages/mpmath/functions/orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..aa33d8bd78290f55a970e78dab7a317d5f652dee --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/orthogonal.py @@ -0,0 +1,493 @@ +from .functions import defun, defun_wrapped + +def _hermite_param(ctx, n, z, parabolic_cylinder): + """ + Combined calculation of the Hermite polynomial H_n(z) (and its + generalization to complex n) and the parabolic cylinder + function D. 
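+    The two are connected by D_n(z) = 2**(-n/2) exp(-z**2/4) H_n(z/sqrt(2)),
+    so a single set of hypercomb terms, adjusted by the parabolic_cylinder
+    flag, serves for both.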
+ """ + n, ntyp = ctx._convert_param(n) + z = ctx.convert(z) + q = -ctx.mpq_1_2 + # For re(z) > 0, 2F0 -- http://functions.wolfram.com/ + # HypergeometricFunctions/HermiteHGeneral/06/02/0009/ + # Otherwise, there is a reflection formula + # 2F0 + http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/16/01/01/0006/ + # + # TODO: + # An alternative would be to use + # http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/06/02/0006/ + # + # Also, the 1F1 expansion + # http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/26/01/02/0001/ + # should probably be used for tiny z + if not z: + T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0 + if parabolic_cylinder: + T1[1][0] += q*n + return T1, + can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \ + (ctx.re(z) == 0 and ctx.im(z) > 0) + expprec = ctx.prec*4 + 20 + if parabolic_cylinder: + u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True) + w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec) + else: + w = z + w2 = ctx.fmul(w, w, prec=expprec) + rw2 = ctx.fdiv(1, w2, prec=expprec) + nrw2 = ctx.fneg(rw2, exact=True) + nw = ctx.fneg(w, exact=True) + if can_use_2f0: + T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 + terms = [T1] + else: + T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 + T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2 + terms = [T1,T2] + # Multiply by prefactor for D_n + if parabolic_cylinder: + expu = ctx.exp(u) + for i in range(len(terms)): + terms[i][1][0] += q*n + terms[i][0].append(expu) + terms[i][1].append(1) + return tuple(terms) + +@defun +def hermite(ctx, n, z, **kwargs): + return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 0), [], **kwargs) + +@defun +def pcfd(ctx, n, z, **kwargs): + r""" + Gives the parabolic cylinder function in Whittaker's notation + `D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`). + It solves the differential equation + + .. math :: + + y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0. + + and can be represented in terms of Hermite polynomials + (see :func:`~mpmath.hermite`) as + + .. math :: + + D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right). + + **Plots** + + .. literalinclude :: /plots/pcfd.py + .. image :: /plots/pcfd.png + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0) + 1.0 + 0.0 + -1.0 + 0.0 + >>> pcfd(4,0); pcfd(-3,0) + 3.0 + 0.6266570686577501256039413 + >>> pcfd('1/2', 2+3j) + (-5.363331161232920734849056 - 3.858877821790010714163487j) + >>> pcfd(2, -10) + 1.374906442631438038871515e-9 + + Verifying the differential equation:: + + >>> n = mpf(2.5) + >>> y = lambda z: pcfd(n,z) + >>> z = 1.75 + >>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z)) + 0.0 + + Rational Taylor series expansion when `n` is an integer:: + + >>> taylor(lambda z: pcfd(5,z), 0, 7) + [0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625] + + """ + return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs) + +@defun +def pcfu(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `U(a,z)`, which may be + defined for `\Re(z) > 0` in terms of the confluent + U-function (see :func:`~mpmath.hyperu`) by + + .. math :: + + U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2} + U\left(\frac{a}{2}+\frac{1}{4}, + \frac{1}{2}, \frac{1}{2}z^2\right) + + or, for arbitrary `z`, + + .. 
math :: + + e^{-\frac{1}{4}z^2} U(a,z) = + U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4}; + \tfrac{1}{2}; -\tfrac{1}{2}z^2\right) + + U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4}; + \tfrac{3}{2}; -\tfrac{1}{2}z^2\right). + + **Examples** + + Connection to other functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> z = mpf(3) + >>> pcfu(0.5,z) + 0.03210358129311151450551963 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2)) + 0.03210358129311151450551963 + >>> pcfu(0.5,-z) + 23.75012332835297233711255 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) + 23.75012332835297233711255 + >>> pcfu(0.5,-z) + 23.75012332835297233711255 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) + 23.75012332835297233711255 + + """ + n, _ = ctx._convert_param(a) + return ctx.pcfd(-n-ctx.mpq_1_2, z) + +@defun +def pcfv(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `V(a,z)`, which can be + represented in terms of :func:`~mpmath.pcfu` as + + .. math :: + + V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) (U(a,-z)-\sin(\pi a) U(a,z)}{\pi}. + + **Examples** + + Wronskian relation between `U` and `V`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a, z = 2, 3 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> sqrt(2/pi) + 0.7978845608028653558798921 + >>> a, z = 2.5, 3 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> a, z = 0.25, -1 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> a, z = 2+1j, 2+3j + >>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)) + 0.7978845608028653558798921 + + """ + n, ntype = ctx._convert_param(a) + z = ctx.convert(z) + q = ctx.mpq_1_2 + r = ctx.mpq_1_4 + if ntype == 'Q' and ctx.isint(n*2): + # Faster for half-integers + def h(): + jz = ctx.fmul(z, -1j, exact=True) + T1terms = _hermite_param(ctx, -n-q, z, 1) + T2terms = _hermite_param(ctx, n-q, jz, 1) + for T in T1terms: + T[0].append(1j) + T[1].append(1) + T[3].append(q-n) + u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi) + for T in T2terms: + T[0].append(u) + T[1].append(1) + return T1terms + T2terms + v = ctx.hypercomb(h, [], **kwargs) + if ctx._is_real_type(n) and ctx._is_real_type(z): + v = ctx._re(v) + return v + else: + def h(n): + w = ctx.square_exp_arg(z, -0.25) + u = ctx.square_exp_arg(z, 0.5) + e = ctx.exp(w) + l = [ctx.pi, q, ctx.exp(w)] + Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u + Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u + c, s = ctx.cospi_sinpi(r+q*n) + Y1[0].append(s) + Y2[0].append(c) + for Y in (Y1, Y2): + Y[1].append(1) + Y[3].append(q-n) + return Y1, Y2 + return ctx.hypercomb(h, [n], **kwargs) + + +@defun +def pcfw(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14). 
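+
+    It is computed here as a sum of two :func:`~mpmath.pcfu` values of
+    imaginary order, with the argument rotated by `e^{\pm i\pi/4}`; the
+    two terms are added with extra precision to limit cancellation.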
+ + **Examples** + + Value at the origin:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a = mpf(0.25) + >>> pcfw(a,0) + 0.9722833245718180765617104 + >>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a))) + 0.9722833245718180765617104 + >>> diff(pcfw,(a,0),(0,1)) + -0.5142533944210078966003624 + >>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a))) + -0.5142533944210078966003624 + + """ + n, _ = ctx._convert_param(a) + z = ctx.convert(z) + def terms(): + phi2 = ctx.arg(ctx.gamma(0.5 + ctx.j*n)) + phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j + rho = ctx.pi/8 + 0.5*phi2 + # XXX: cancellation computing k + k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n) + C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n) + yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25)) + yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25)) + v = ctx.sum_accurately(terms) + if ctx._is_real_type(n) and ctx._is_real_type(z): + v = ctx._re(v) + return v + +""" +Even/odd PCFs. Useful? + +@defun +def pcfy1(ctx, a, z, **kwargs): + a, _ = ctx._convert_param(n) + z = ctx.convert(z) + def h(): + w = ctx.square_exp_arg(z) + w1 = ctx.fmul(w, -0.25, exact=True) + w2 = ctx.fmul(w, 0.5, exact=True) + e = ctx.exp(w1) + return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def pcfy2(ctx, a, z, **kwargs): + a, _ = ctx._convert_param(n) + z = ctx.convert(z) + def h(): + w = ctx.square_exp_arg(z) + w1 = ctx.fmul(w, -0.25, exact=True) + w2 = ctx.fmul(w, 0.5, exact=True) + e = ctx.exp(w1) + return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \ + [ctx.mpq_3_2], w2 + return ctx.hypercomb(h, [], **kwargs) +""" + +@defun_wrapped +def gegenbauer(ctx, n, a, z, **kwargs): + # Special cases: a+0.5, a*2 poles + if ctx.isnpint(a): + return 0*(z+n) + if ctx.isnpint(a+0.5): + # TODO: something else is required here + # E.g.: gegenbauer(-2, -0.5, 3) == -12 + if ctx.isnpint(n+1): + raise NotImplementedError("Gegenbauer function with two limits") + def h(a): + a2 = 2*a + T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) + return [T] + return ctx.hypercomb(h, [a], **kwargs) + def h(n): + a2 = 2*a + T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) + return [T] + return ctx.hypercomb(h, [n], **kwargs) + +@defun_wrapped +def jacobi(ctx, n, a, b, x, **kwargs): + if not ctx.isnpint(a): + def h(n): + return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),) + return ctx.hypercomb(h, [n], **kwargs) + if not ctx.isint(b): + def h(n, a): + return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),) + return ctx.hypercomb(h, [n, a], **kwargs) + # XXX: determine appropriate limit + return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs) + +@defun_wrapped +def laguerre(ctx, n, a, z, **kwargs): + # XXX: limits, poles + #if ctx.isnpint(n): + # return 0*(a+z) + def h(a): + return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),) + return ctx.hypercomb(h, [a], **kwargs) + +@defun_wrapped +def legendre(ctx, n, x, **kwargs): + if ctx.isint(n): + n = int(n) + # Accuracy near zeros + if (n + (n < 0)) & 1: + if not x: + return x + mag = ctx.mag(x) + if mag < -2*ctx.prec-10: + return x + if mag < -5: + ctx.prec += -mag + return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs) + +@defun +def legenp(ctx, n, m, z, type=2, **kwargs): + # Legendre function, 1st kind + n = ctx.convert(n) + m = ctx.convert(m) + # Faster + if not m: + return 
ctx.legendre(n, z, **kwargs) + # TODO: correct evaluation at singularities + if type == 2: + def h(n,m): + g = m*0.5 + T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) + return (T,) + return ctx.hypercomb(h, [n,m], **kwargs) + if type == 3: + def h(n,m): + g = m*0.5 + T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) + return (T,) + return ctx.hypercomb(h, [n,m], **kwargs) + raise ValueError("requires type=2 or type=3") + +@defun +def legenq(ctx, n, m, z, type=2, **kwargs): + # Legendre function, 2nd kind + n = ctx.convert(n) + m = ctx.convert(m) + z = ctx.convert(z) + if z in (1, -1): + #if ctx.isint(m): + # return ctx.nan + #return ctx.inf # unsigned + return ctx.nan + if type == 2: + def h(n, m): + cos, sin = ctx.cospi_sinpi(m) + s = 2 * sin / ctx.pi + c = cos + a = 1+z + b = 1-z + u = m/2 + w = (1-z)/2 + T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ + [-n, n+1], [1-m], w + T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \ + [-n, n+1], [m+1], w + return T1, T2 + return ctx.hypercomb(h, [n, m], **kwargs) + if type == 3: + # The following is faster when there only is a single series + # Note: not valid for -1 < z < 0 (?) + if abs(z) > 1: + def h(n, m): + T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \ + [1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \ + [n+m+1], [n+1.5], \ + [0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2) + return [T1] + return ctx.hypercomb(h, [n, m], **kwargs) + else: + # not valid for 1 < z < inf ? + def h(n, m): + s = 2 * ctx.sinpi(m) / ctx.pi + c = ctx.expjpi(m) + a = 1+z + b = z-1 + u = m/2 + w = (1-z)/2 + T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ + [-n, n+1], [1-m], w + T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \ + [-n, n+1], [m+1], w + return T1, T2 + return ctx.hypercomb(h, [n, m], **kwargs) + raise ValueError("requires type=2 or type=3") + +@defun_wrapped +def chebyt(ctx, n, x, **kwargs): + if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: + return x * 0 + return ctx.hyp2f1(-n,n,(1,2),(1-x)/2, **kwargs) + +@defun_wrapped +def chebyu(ctx, n, x, **kwargs): + if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: + return x * 0 + return (n+1) * ctx.hyp2f1(-n, n+2, (3,2), (1-x)/2, **kwargs) + +@defun +def spherharm(ctx, l, m, theta, phi, **kwargs): + l = ctx.convert(l) + m = ctx.convert(m) + theta = ctx.convert(theta) + phi = ctx.convert(phi) + l_isint = ctx.isint(l) + l_natural = l_isint and l >= 0 + m_isint = ctx.isint(m) + if l_isint and l < 0 and m_isint: + return ctx.spherharm(-(l+1), m, theta, phi, **kwargs) + if theta == 0 and m_isint and m < 0: + return ctx.zero * 1j + if l_natural and m_isint: + if abs(m) > l: + return ctx.zero * 1j + # http://functions.wolfram.com/Polynomials/ + # SphericalHarmonicY/26/01/02/0004/ + def h(l,m): + absm = abs(m) + C = [-1, ctx.expj(m*phi), + (2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm), + ctx.sin(theta)**2, + ctx.fac(absm), 2] + P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1] + return ((C, P, [], [], [absm-l, l+absm+1], [absm+1], + ctx.sin(0.5*theta)**2),) + else: + # http://functions.wolfram.com/HypergeometricFunctions/ + # SphericalHarmonicYGeneral/26/01/02/0001/ + def h(l,m): + if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m): + return (([0], [-1], [], [], [], [], 0),) + cos, sin = ctx.cos_sin(0.5*theta) + C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi, + ctx.gamma(l-m+1), ctx.gamma(l+m+1), + cos**2, sin**2] + P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m] + return ((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),) + return ctx.hypercomb(h, [l,m], 
**kwargs) diff --git a/MLPY/Lib/site-packages/mpmath/functions/qfunctions.py b/MLPY/Lib/site-packages/mpmath/functions/qfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..5a20e53a8b6fa0d8fbc9ad098614d2694998f49a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/qfunctions.py @@ -0,0 +1,280 @@ +from .functions import defun, defun_wrapped + +@defun +def qp(ctx, a, q=None, n=None, **kwargs): + r""" + Evaluates the q-Pochhammer symbol (or q-rising factorial) + + .. math :: + + (a; q)_n = \prod_{k=0}^{n-1} (1-a q^k) + + where `n = \infty` is permitted if `|q| < 1`. Called with two arguments, + ``qp(a,q)`` computes `(a;q)_{\infty}`; with a single argument, ``qp(q)`` + computes `(q;q)_{\infty}`. The special case + + .. math :: + + \phi(q) = (q; q)_{\infty} = \prod_{k=1}^{\infty} (1-q^k) = + \sum_{k=-\infty}^{\infty} (-1)^k q^{(3k^2-k)/2} + + is also known as the Euler function, or (up to a factor `q^{-1/24}`) + the Dedekind eta function. + + **Examples** + + If `n` is a positive integer, the function amounts to a finite product:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qp(2,3,5) + -725305.0 + >>> fprod(1-2*3**k for k in range(5)) + -725305.0 + >>> qp(2,3,0) + 1.0 + + Complex arguments are allowed:: + + >>> qp(2-1j, 0.75j) + (0.4628842231660149089976379 + 4.481821753552703090628793j) + + The regular Pochhammer symbol `(a)_n` is obtained in the + following limit as `q \to 1`:: + + >>> a, n = 4, 7 + >>> limit(lambda q: qp(q**a,q,n) / (1-q)**n, 1) + 604800.0 + >>> rf(a,n) + 604800.0 + + The Taylor series of the reciprocal Euler function gives + the partition function `P(n)`, i.e. the number of ways of writing + `n` as a sum of positive integers:: + + >>> taylor(lambda q: 1/qp(q), 0, 10) + [1.0, 1.0, 2.0, 3.0, 5.0, 7.0, 11.0, 15.0, 22.0, 30.0, 42.0] + + Special values include:: + + >>> qp(0) + 1.0 + >>> findroot(diffun(qp), -0.4) # location of maximum + -0.4112484791779547734440257 + >>> qp(_) + 1.228348867038575112586878 + + The q-Pochhammer symbol is related to the Jacobi theta functions. + For example, the following identity holds:: + + >>> q = mpf(0.5) # arbitrary + >>> qp(q) + 0.2887880950866024212788997 + >>> root(3,-2)*root(q,-24)*jtheta(2,pi/6,root(q,6)) + 0.2887880950866024212788997 + + """ + a = ctx.convert(a) + if n is None: + n = ctx.inf + else: + n = ctx.convert(n) + if n < 0: + raise ValueError("n cannot be negative") + if q is None: + q = a + else: + q = ctx.convert(q) + if n == 0: + return ctx.one + 0*(a+q) + infinite = (n == ctx.inf) + same = (a == q) + if infinite: + if abs(q) >= 1: + if same and (q == -1 or q == 1): + return ctx.zero * q + raise ValueError("q-function only defined for |q| < 1") + elif q == 0: + return ctx.one - a + maxterms = kwargs.get('maxterms', 50*ctx.prec) + if infinite and same: + # Euler's pentagonal theorem + def terms(): + t = 1 + yield t + k = 1 + x1 = q + x2 = q**2 + while 1: + yield (-1)**k * x1 + yield (-1)**k * x2 + x1 *= q**(3*k+1) + x2 *= q**(3*k+2) + k += 1 + if k > maxterms: + raise ctx.NoConvergence + return ctx.sum_accurately(terms) + # return ctx.nprod(lambda k: 1-a*q**k, [0,n-1]) + def factors(): + k = 0 + r = ctx.one + while 1: + yield 1 - a*r + r *= q + k += 1 + if k >= n: + return + if k > maxterms: + raise ctx.NoConvergence + return ctx.mul_accurately(factors) + +@defun_wrapped +def qgamma(ctx, z, q, **kwargs): + r""" + Evaluates the q-gamma function + + .. math :: + + \Gamma_q(z) = \frac{(q; q)_{\infty}}{(q^z; q)_{\infty}} (1-q)^{1-z}. 
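+
+    This form applies for `|q| < 1`; for `|q| > 1` the implementation uses
+    the reflection `\Gamma_q(z) = q^{(z-1)(z-2)/2} \Gamma_{1/q}(z)`.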
+ + + **Examples** + + Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qgamma(4,0.75) + 4.046875 + >>> qgamma(6,6) + 121226245.0 + >>> qgamma(3+4j, 0.5j) + (0.1663082382255199834630088 + 0.01952474576025952984418217j) + + The q-gamma function satisfies a functional equation similar + to that of the ordinary gamma function:: + + >>> q = mpf(0.25) + >>> z = mpf(2.5) + >>> qgamma(z+1,q) + 1.428277424823760954685912 + >>> (1-q**z)/(1-q)*qgamma(z,q) + 1.428277424823760954685912 + + """ + if abs(q) > 1: + return ctx.qgamma(z,1/q)*q**((z-2)*(z-1)*0.5) + return ctx.qp(q, q, None, **kwargs) / \ + ctx.qp(q**z, q, None, **kwargs) * (1-q)**(1-z) + +@defun_wrapped +def qfac(ctx, z, q, **kwargs): + r""" + Evaluates the q-factorial, + + .. math :: + + [n]_q! = (1+q)(1+q+q^2)\cdots(1+q+\cdots+q^{n-1}) + + or more generally + + .. math :: + + [z]_q! = \frac{(q;q)_z}{(1-q)^z}. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qfac(0,0) + 1.0 + >>> qfac(4,3) + 2080.0 + >>> qfac(5,6) + 121226245.0 + >>> qfac(1+1j, 2+1j) + (0.4370556551322672478613695 + 0.2609739839216039203708921j) + + """ + if ctx.isint(z) and ctx._re(z) > 0: + n = int(ctx._re(z)) + return ctx.qp(q, q, n, **kwargs) / (1-q)**n + return ctx.qgamma(z+1, q, **kwargs) + +@defun +def qhyper(ctx, a_s, b_s, q, z, **kwargs): + r""" + Evaluates the basic hypergeometric series or hypergeometric q-series + + .. math :: + + \,_r\phi_s \left[\begin{matrix} + a_1 & a_2 & \ldots & a_r \\ + b_1 & b_2 & \ldots & b_s + \end{matrix} ; q,z \right] = + \sum_{n=0}^\infty + \frac{(a_1;q)_n, \ldots, (a_r;q)_n} + {(b_1;q)_n, \ldots, (b_s;q)_n} + \left((-1)^n q^{n\choose 2}\right)^{1+s-r} + \frac{z^n}{(q;q)_n} + + where `(a;q)_n` denotes the q-Pochhammer symbol (see :func:`~mpmath.qp`). 
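+
+    For `|q| < 1` the series converges for every `z` when `r \le s` and
+    for `|z| < 1` when `r = s+1`; for other parameter combinations only
+    terminating cases are generally meaningful.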
+ + **Examples** + + Evaluation works for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qhyper([0.5], [2.25], 0.25, 4) + -0.1975849091263356009534385 + >>> qhyper([0.5], [2.25], 0.25-0.25j, 4) + (2.806330244925716649839237 + 3.568997623337943121769938j) + >>> qhyper([1+j], [2,3+0.5j], 0.25, 3+4j) + (9.112885171773400017270226 - 1.272756997166375050700388j) + + Comparing with a summation of the defining series, using + :func:`~mpmath.nsum`:: + + >>> b, q, z = 3, 0.25, 0.5 + >>> qhyper([], [b], q, z) + 0.6221136748254495583228324 + >>> nsum(lambda n: z**n / qp(q,q,n)/qp(b,q,n) * q**(n*(n-1)), [0,inf]) + 0.6221136748254495583228324 + + """ + #a_s = [ctx._convert_param(a)[0] for a in a_s] + #b_s = [ctx._convert_param(b)[0] for b in b_s] + #q = ctx._convert_param(q)[0] + a_s = [ctx.convert(a) for a in a_s] + b_s = [ctx.convert(b) for b in b_s] + q = ctx.convert(q) + z = ctx.convert(z) + r = len(a_s) + s = len(b_s) + d = 1+s-r + maxterms = kwargs.get('maxterms', 50*ctx.prec) + def terms(): + t = ctx.one + yield t + qk = 1 + k = 0 + x = 1 + while 1: + for a in a_s: + p = 1 - a*qk + t *= p + for b in b_s: + p = 1 - b*qk + if not p: + raise ValueError + t /= p + t *= z + x *= (-1)**d * qk ** d + qk *= q + t /= (1 - qk) + k += 1 + yield t * x + if k > maxterms: + raise ctx.NoConvergence + return ctx.sum_accurately(terms) diff --git a/MLPY/Lib/site-packages/mpmath/functions/rszeta.py b/MLPY/Lib/site-packages/mpmath/functions/rszeta.py new file mode 100644 index 0000000000000000000000000000000000000000..19e2c9a251b81bafe8cf77a2b0180636b1078ee4 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/rszeta.py @@ -0,0 +1,1403 @@ +""" +--------------------------------------------------------------------- +.. sectionauthor:: Juan Arias de Reyna + +This module implements zeta-related functions using the Riemann-Siegel +expansion: zeta_offline(s,k=0) + +* coef(J, eps): Need in the computation of Rzeta(s,k) + +* Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously + for 0 <= k <= der. Used by zeta_offline and z_offline + +* Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by + z_half(t,k) and zeta_half + +* z_offline(w,k): Z(w) and its derivatives of order k <= 4 +* z_half(t,k): Z(t) (Riemann Siegel function) and its derivatives of order k <= 4 +* zeta_offline(s): zeta(s) and its derivatives of order k<= 4 +* zeta_half(1/2+it,k): zeta(s) and its derivatives of order k<= 4 + +* rs_zeta(s,k=0) Computes zeta^(k)(s) Unifies zeta_half and zeta_offline +* rs_z(w,k=0) Computes Z^(k)(w) Unifies z_offline and z_half +---------------------------------------------------------------------- + +This program uses Riemann-Siegel expansion even to compute +zeta(s) on points s = sigma + i t with sigma arbitrary not +necessarily equal to 1/2. + +It is founded on a new deduction of the formula, with rigorous +and sharp bounds for the terms and rest of this expansion. + +More information on the papers: + + J. Arias de Reyna, High Precision Computation of Riemann's + Zeta Function by the Riemann-Siegel Formula I, II + + We refer to them as I, II. + + In them we shall find detailed explanation of all the + procedure. + +The program uses Riemann-Siegel expansion. +This is useful when t is big, ( say t > 10000 ). +The precision is limited, roughly it can compute zeta(sigma+it) +with an error less than exp(-c t) for some constant c depending +on sigma. 
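In practice this means that at height t only on the order of t correct digits are attainable.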
The program gives an error when the Riemann-Siegel
+formula cannot compute to the wanted precision.
+
+"""
+
+import math
+
+class RSCache(object):
+    def __init__(ctx):
+        ctx._rs_cache = [0, 10, {}, {}]
+
+from .functions import defun
+
+#-------------------------------------------------------------------------------#
+#                                                                               #
+#   coef(ctx, J, eps, _cache=[0, 10, {} ] )                                     #
+#                                                                               #
+#-------------------------------------------------------------------------------#
+
+# This function computes the coefficients c[n] defined in (I, equation (47)),
+# but see also (II, Section 3.14).
+#
+# Since these coefficients are very expensive to compute, we save the values
+# in a cache. So if we compute several values of the function Rzeta(s) for
+# nearby values of s, we do not recompute these coefficients.
+#
+# c[n] are the Taylor coefficients of the function:
+#
+#   F(z) := (exp(pi*j*(z*z/2+3/8)) - j*sqrt(2)*cos(pi*z/2)) / (2*cos(pi*z))
+#
+
+def _coef(ctx, J, eps):
+    r"""
+    Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps.
+
+    **Definition**
+
+    The coefficients `c_n` are defined by
+
+    .. math ::
+
+        \begin{equation}
+        F(z)=\frac{e^{\pi i
+        \bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
+        z}=\sum_{n=0}^\infty c_{2n} z^{2n}.
+        \end{equation}
+
+    They are computed by applying the relation
+
+    .. math ::
+
+        \begin{multline}
+        c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
+        \sum_{k=0}^n\frac{(-1)^k}{(2k)!}
+        2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
+        +e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
+        E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
+        \end{multline}
+    """
+
+    newJ = J+2          # compute a few more coefficients than are needed
+    neweps6 = eps/2.    # compute with slightly more precision than needed
+
+    # PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
+    # See II Section 3.16
+    #
+    # Computing the exponent wpvw of the error, II equation (81)
+    wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))
+
+    # Preparation of Euler numbers (we need them up to index 2*newJ)
+    E = ctx._eulernum(2*newJ)
+
+    # Now we have in the cache all the needed Euler numbers.
+    #
+    # Computing the powers of pi
+    #
+    # We need to compute the powers pi**n for 1 <= n <= 2*J
+    # with relative error less than 2**(-wpvw).
+    # It is easy to show that this is obtained by
+    # taking wppi as the least d with
+    # 2**d > 40*J and 2**d > 4.24*newJ + 2**wpvw.
+    # In II Section 3.9 we also need
+    # wppi > wptcoef[0], and the powers
+    # computed here (0 <= k <= 2*newJ) are more
+    # than the 2*L-2 needed there,
+ # so we need J >= L this will be checked + # before computing tcoef[] + wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw) + ctx.prec = wppi + pipower = {} + pipower[0] = ctx.one + pipower[1] = ctx.pi + for n in range(2,2*newJ+1): + pipower[n] = pipower[n-1]*ctx.pi + + # COMPUTING THE COEFFICIENTS v(n) AND w(n) + # see II equation (61) and equations (81) and (82) + ctx.prec = wpvw+2 + v={} + w={} + for n in range(0,newJ+1): + va = (-1)**n * ctx._eulernum(2*n) + va = ctx.mpf(va)/ctx.fac(2*n) + v[n]=va*pipower[2*n] + for n in range(0,2*newJ+1): + wa = ctx.one/ctx.fac(n) + wa=wa/(2**n) + w[n]=wa*pipower[n] + + # COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2 + # See II Section 3.16 + ctx.prec = 15 + wpp1a = 9 - ctx.mag(neweps6) + P1 = {} + for n in range(0,newJ+1): + ctx.prec = 15 + wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a) + ctx.prec = wpp1 + sump = 0 + for k in range(0,n+1): + sump += ((-1)**k) * v[k]*w[2*n-2*k] + P1[n]=((-1)**(n+1))*ctx.j*sump + P2={} + for n in range(0,newJ+1): + ctx.prec = 15 + wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a) + ctx.prec = wpp2 + sump = 0 + for k in range(0,n+1): + sump += (ctx.j**(n-k)) * v[k]*w[n-k] + P2[n]=sump + # COMPUTING THE COEFFICIENTS c[2n] + # See II Section 3.14 + ctx.prec = 15 + wpc0 = 5 - ctx.mag(neweps6) + wpc = max(6,4*newJ+wpc0) + ctx.prec = wpc + mu = ctx.sqrt(ctx.mpf('2'))/2 + nu = ctx.expjpi(3./8)/2 + c={} + for n in range(0,newJ): + ctx.prec = 15 + wpc = max(6,4*n+wpc0) + ctx.prec = wpc + c[2*n] = mu*P1[n]+nu*P2[n] + for n in range(1,2*newJ,2): + c[n] = 0 + return [newJ, neweps6, c, pipower] + +def coef(ctx, J, eps): + _cache = ctx._rs_cache + if J <= _cache[0] and eps >= _cache[1]: + return _cache[2], _cache[3] + orig = ctx._mp.prec + try: + data = _coef(ctx._mp, J, eps) + finally: + ctx._mp.prec = orig + if ctx is not ctx._mp: + data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items()) + data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items()) + ctx._rs_cache[:] = data + return ctx._rs_cache[2], ctx._rs_cache[3] + +#-------------------------------------------------------------------------------# +# # +# Rzeta_simul(s,k=0) # +# # +#-------------------------------------------------------------------------------# +# This function return a list with the values: +# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)),Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)), +# .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it)) +# +# Useful to compute the function zeta(s) and Z(w) or its derivatives. +# + +def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL): + # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE + # See II Section 3.11 equations (47) and (48) + aux1 = 126.0657606*xA/xeps4 # 126.06.. = 316/sqrt(2*pi) + aux1 = ctx.ln(aux1) + aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2 + m = 3*xL-3 + aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.) + while((aux1 < m*aux2+ aux3)and (m>1)): + m = m - 1 + aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.) + xM = m + return xM + +def aux_J_needed(ctx, xA, xeps4, a, xB1, xM): + # DETERMINATION OF J THE NUMBER OF TERMS NEEDED + # IN THE TAYLOR SERIES OF F. 
+ # See II Section 3.11 equation (49)) + # Only determine one + h1 = xeps4/(632*xA) + h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3) + h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM + h3 = min(h1,h2) + return h3 + +def Rzeta_simul(ctx, s, der=0): + # First we take the value of ctx.prec + wpinitial = ctx.prec + + # INITIALIZATION + # Take the real and imaginary part of s + t = ctx._im(s) + xsigma = ctx._re(s) + ysigma = 1 - xsigma + + # Now compute several parameter that appear on the program + ctx.prec = 15 + a = ctx.sqrt(t/(2*ctx.pi)) + xasigma = a ** xsigma + yasigma = a ** ysigma + + # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3) + xA1=ctx.power(2, ctx.mag(xasigma)-1) + yA1=ctx.power(2, ctx.mag(yasigma)-1) + + # We compute various epsilon's (see II end of Section 3.1) + eps = ctx.power(2, -wpinitial) + eps1 = eps/6. + xeps2 = eps * xA1/3. + yeps2 = eps * yA1/3. + + # COMPUTING SOME COEFFICIENTS THAT DEPENDS + # ON sigma + # constant b and c (see I Theorem 2 formula (26) ) + # coefficients A and B1 (see I Section 6.1 equation (50)) + # + # here we not need high precision + ctx.prec = 15 + if xsigma > 0: + xb = 2. + xc = math.pow(9,xsigma)/4.44288 + # 4.44288 =(math.sqrt(2)*math.pi) + xA = math.pow(9,xsigma) + xB1 = 1 + else: + xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) + xc = math.pow(2,-xsigma)/4.44288 + xA = math.pow(2,-xsigma) + xB1 = 1.10789 # = 2*sqrt(1-log(2)) + + if(ysigma > 0): + yb = 2. + yc = math.pow(9,ysigma)/4.44288 + # 4.44288 =(math.sqrt(2)*math.pi) + yA = math.pow(9,ysigma) + yB1 = 1 + else: + yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) + yc = math.pow(2,-ysigma)/4.44288 + yA = math.pow(2,-ysigma) + yB1 = 1.10789 # = 2*sqrt(1-log(2)) + + # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL + # CORRECTION + # See II Section 3.2 + ctx.prec = 15 + xL = 1 + while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2: + xL = xL+1 + xL = max(2,xL) + yL = 1 + while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2: + yL = yL+1 + yL = max(2,yL) + + # The number L has to satify some conditions. + # If not RS can not compute Rzeta(s) with the prescribed precision + # (see II, Section 3.2 condition (20) ) and + # (II, Section 3.3 condition (22) ). Also we have added + # an additional technical condition in Section 3.17 Proposition 17 + if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \ + (3*yL >= 2*a*a/25.) 
or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)): + ctx.prec = wpinitial + raise NotImplementedError("Riemann-Siegel can not compute with such precision") + + # We take the maximum of the two values + L = max(xL, yL) + + # INITIALIZATION (CONTINUATION) + # + # eps3 is the constant defined on (II, Section 3.5 equation (27) ) + # each term of the RS correction must be computed with error <= eps3 + xeps3 = xeps2/(4*xL) + yeps3 = yeps2/(4*yL) + + # eps4 is defined on (II Section 3.6 equation (30) ) + # each component of the formula (II Section 3.6 equation (29) ) + # must be computed with error <= eps4 + xeps4 = xeps3/(3*xL) + yeps4 = yeps3/(3*yL) + + # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE + xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL) + yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL) + M = max(xM, yM) + + # COMPUTING NUMBER OF TERMS J NEEDED + h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM) + h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM) + h3 = min(h3,h4) + J = 12 + jvalue = (2*ctx.pi)**J / ctx.gamma(J+1) + while jvalue > h3: + J = J+1 + jvalue = (2*ctx.pi)*jvalue/J + + # COMPUTING eps5[m] for 1 <= m <= 21 + # See II Section 10 equation (43) + # We choose the minimum of the two possibilities + eps5={} + xforeps5 = math.pi*math.pi*xB1*a + yforeps5 = math.pi*math.pi*yB1*a + for m in range(0,22): + xaux1 = math.pow(xforeps5, m/3)/(316.*xA) + yaux1 = math.pow(yforeps5, m/3)/(316.*yA) + aux1 = min(xaux1, yaux1) + aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) + aux2 = math.sqrt(aux2) + eps5[m] = (aux1*aux2*min(xeps4,yeps4)) + + # COMPUTING wpfp + # See II Section 3.13 equation (59) + twenty = min(3*L-3, 21)+1 + aux = 6812*J + wpfp = ctx.mag(44*J) + for m in range(0,twenty): + wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) + + # COMPUTING N AND p + # See II Section + ctx.prec = wpfp + ctx.mag(t)+20 + a = ctx.sqrt(t/(2*ctx.pi)) + N = ctx.floor(a) + p = 1-2*(a-N) + + # now we get a rounded version of p + # to the precision wpfp + # this possibly is not necessary + num=ctx.floor(p*(ctx.mpf('2')**wpfp)) + difference = p * (ctx.mpf('2')**wpfp)-num + if (difference < 0.5): + num = num + else: + num = num+1 + p = ctx.convert(num * (ctx.mpf('2')**(-wpfp))) + + # COMPUTING THE COEFFICIENTS c[n] = cc[n] + # We shall use the notation cc[n], since there is + # a constant that is called c + # See II Section 3.14 + # We compute the coefficients and also save then in a + # cache. The bulk of the computation is passed to + # the function coef() + # + # eps6 is defined in II Section 3.13 equation (58) + eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J) + + # Now we compute the coefficients + cc = {} + cont = {} + cont, pipowers = coef(ctx, J, eps6) + cc=cont.copy() # we need a copy since we have to change his values. 
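+    # The block below computes Fp[m], the m-th derivative at p of the
+    # truncated Taylor series of F, by Horner evaluation of the polynomial
+    # with coefficients cc[k]; after each pass, cc[k] is replaced by
+    # (k+1)*cc[k+1], the coefficients of the derivative, so the same
+    # Horner loop yields the next derivative.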
+ Fp={} # this is the adequate locus of this + for n in range(M, 3*L-2): + Fp[n] = 0 + Fp={} + ctx.prec = wpfp + for m in range(0,M+1): + sumP = 0 + for k in range(2*J-m-1,-1,-1): + sumP = (sumP * p)+ cc[k] + Fp[m] = sumP + # preparation of the new coefficients + for k in range(0,2*J-m-1): + cc[k] = (k+1)* cc[k+1] + + # COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k] + # See II Section 3.17 + # + # First we compute the working precisions xwpd[k] + # Se II equation (92) + xwpd={} + d1 = max(6,ctx.mag(40*L*L)) + xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1 + xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2 + for n in range(0,L): + xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2 + xwpd[n]=max(xd3,d1) + + # procedure of II Section 3.17 + ctx.prec = xwpd[1]+10 + xpsigma = 1-(2*xsigma) + xd = {} + xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0 + xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0 + for n in range(1,L): + ctx.prec = xwpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if(m!=0): + m1 = ctx.one/m + c1= m1/4 + c2=(xpsigma*m1)/2 + c3=-(m+1) + xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1] + else: + xd[0,n,k]=0 + for r in range(0,k): + add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + xd[0,n,k] -= ((-1)**(k-r))*add + xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0 + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): + xd[mu,n,k] = 0 + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = xwpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3] + xd[mu,n,k] = aux - xd[mu-1,n-1,k-1] + + # Now we compute the working precisions ywpd[k] + # Se II equation (92) + ywpd={} + d1 = max(6,ctx.mag(40*L*L)) + yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1 + yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2 + for n in range(0,L): + yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2 + ywpd[n]=max(yd3,d1) + + # procedure of II Section 3.17 + ctx.prec = ywpd[1]+10 + ypsigma = 1-(2*ysigma) + yd = {} + yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0 + yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0 + for n in range(1,L): + ctx.prec = ywpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if(m!=0): + m1 = ctx.one/m + c1= m1/4 + c2=(ypsigma*m1)/2 + c3=-(m+1) + yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1] + else: + yd[0,n,k]=0 + for r in range(0,k): + add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + yd[0,n,k] -= ((-1)**(k-r))*add + yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0 + + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): + yd[mu,n,k] = 0 + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = ywpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3] + yd[mu,n,k] = aux - yd[mu-1,n-1,k-1] + + # COMPUTING THE COEFFICIENTS xtcoef[k,l] + # See II Section 3.9 + # + # computing the needed wp + xwptcoef={} + xwpterm={} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + xc2 = ctx.mag(68*(L+2)*xA) + xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2. 
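+        # xwptcoef[k] and xwpterm[k] are the working precisions used for
+        # the k-th coefficient and the k-th term (II Section 3.9); the
+        # added constants are guard digits.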
+ xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5 + xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20) + ywptcoef={} + ywpterm={} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + yc2 = ctx.mag(68*(L+2)*yA) + yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2. + ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5 + ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10 + + # check of power of pi + # computing the fortcoef[mu,k,ell] + xfortcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + xfortcoef[mu,k,ell]=0 + for mu in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell) + + def trunc_a(t): + wp = ctx.prec + ctx.prec = wp + 2 + aa = ctx.sqrt(t/(2*ctx.pi)) + ctx.prec = wp + return aa + + # computing the tcoef[k,ell] + xtcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + xtcoef[mu,k,ell]=0 + ctx.prec = max(xwptcoef[0],ywptcoef[0])+3 + aa= trunc_a(t) + la = -ctx.ln(aa) + + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xtcoef[chi,k,ell] =0 + for mu in range(0, chi+1): + tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell] + xtcoef[chi,k,ell] += tcoefter + + # COMPUTING THE COEFFICIENTS ytcoef[k,l] + # See II Section 3.9 + # + # computing the needed wp + # check of power of pi + # computing the fortcoef[mu,k,ell] + yfortcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + yfortcoef[mu,k,ell]=0 + for mu in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell) + # computing the tcoef[k,ell] + ytcoef={} + for chi in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + ytcoef[chi,k,ell]=0 + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + ytcoef[chi,k,ell] =0 + for mu in range(0, chi+1): + tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell] + ytcoef[chi,k,ell] += tcoefter + + # COMPUTING tv[k,ell] + # See II Section 3.8 + # + # a has a good value + ctx.prec = max(xwptcoef[0], ywptcoef[0])+2 + av = {} + av[0] = 1 + av[1] = av[0]/a + + ctx.prec = max(xwptcoef[0],ywptcoef[0]) + for k in range(2,L): + av[k] = av[k-1] * av[1] + + # Computing the quotients + xtv = {} + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k] + # Computing the quotients + ytv = {} + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k] + + # COMPUTING THE TERMS xterm[k] + # See II Section 3.6 + xterm = {} + for chi in range(0,der+1): + for n in range(0,L): + ctx.prec = xwpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += xtv[chi,n,k] + xterm[chi,n] = te + + # COMPUTING THE TERMS yterm[k] + # See II Section 3.6 + yterm = {} + for chi in range(0,der+1): + for n in range(0,L): + ctx.prec = ywpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += ytv[chi,n,k] + yterm[chi,n] = te + + # COMPUTING rssum + # See II 
Section 3.5 + xrssum={} + ctx.prec=15 + xrsbound = math.sqrt(ctx.pi) * xc /(xb*a) + ctx.prec=15 + xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2) + xwprssum = max(xwprssum, ctx.mag(10*(L+1))) + ctx.prec = xwprssum + for chi in range(0,der+1): + xrssum[chi] = 0 + for k in range(1,L+1): + xrssum[chi] += xterm[chi,L-k] + yrssum={} + ctx.prec=15 + yrsbound = math.sqrt(ctx.pi) * yc /(yb*a) + ctx.prec=15 + ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2) + ywprssum = max(ywprssum, ctx.mag(10*(L+1))) + ctx.prec = ywprssum + for chi in range(0,der+1): + yrssum[chi] = 0 + for k in range(1,L+1): + yrssum[chi] += yterm[chi,L-k] + + # COMPUTING S3 + # See II Section 3.19 + ctx.prec = 15 + A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0])))) + eps8 = eps/(3*A2) + T = t *ctx.ln(t/(2*ctx.pi)) + xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T) + ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T) + + ctx.prec = max(xwps3, ywps3) + + tpi = t/(2*ctx.pi) + arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 + U = ctx.expj(-arg) + a = trunc_a(t) + xasigma = ctx.power(a, -xsigma) + yasigma = ctx.power(a, -ysigma) + xS3 = ((-1)**(N-1)) * xasigma * U + yS3 = ((-1)**(N-1)) * yasigma * U + + # COMPUTING S1 the zetasum + # See II Section 3.18 + ctx.prec = 15 + xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1) + ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1) + wpsum = max(xwpsum, ywpsum) + + ctx.prec = wpsum +10 + ''' + # This can be improved + xS1={} + yS1={} + for chi in range(0,der+1): + xS1[chi] = 0 + yS1[chi] = 0 + for n in range(1,int(N)+1): + ln = ctx.ln(n) + xexpn = ctx.exp(-ln*(xsigma+ctx.j*t)) + yexpn = ctx.conj(1/(n*xexpn)) + for chi in range(0,der+1): + pown = ctx.power(-ln, chi) + xterm = pown*xexpn + yterm = pown*yexpn + xS1[chi] += xterm + yS1[chi] += yterm + ''' + xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True) + + # END OF COMPUTATION of xrz, yrz + # See II Section 3.1 + ctx.prec = 15 + xabsS1 = abs(xS1[der]) + xabsS2 = abs(xrssum[der] * xS3) + xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) ) + + ctx.prec = xwpend + xrz={} + for chi in range(0,der+1): + xrz[chi] = xS1[chi]+xrssum[chi]*xS3 + + ctx.prec = 15 + yabsS1 = abs(yS1[der]) + yabsS2 = abs(yrssum[der] * yS3) + ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) ) + + ctx.prec = ywpend + yrz={} + for chi in range(0,der+1): + yrz[chi] = yS1[chi]+yrssum[chi]*yS3 + yrz[chi] = ctx.conj(yrz[chi]) + ctx.prec = wpinitial + return xrz, yrz + +def Rzeta_set(ctx, s, derivatives=[0]): + r""" + Computes several derivatives of the auxiliary function of Riemann `R(s)`. + + **Definition** + + The function is defined by + + .. math :: + + \begin{equation} + {\mathop{\mathcal R }\nolimits}(s)= + \int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}- + e^{-\pi i x}}\,dx + \end{equation} + + To this function we apply the Riemann-Siegel expansion. 
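+
+    For instance (a sketch only; this is an internal helper, so it is
+    called through a context instance rather than as public API, and the
+    import path below follows this source tree)::
+
+        from mpmath import mp
+        from mpmath.functions.rszeta import Rzeta_set
+        mp.dps = 20
+        rz = Rzeta_set(mp, mp.mpc(0.75, 20000), derivatives=[0, 1])
+        # rz[0] approximates R(s); rz[1] its first derivative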
+ """ + der = max(derivatives) + # First we take the value of ctx.prec + # During the computation we will change ctx.prec, and finally we will + # restaurate the initial value + wpinitial = ctx.prec + # Take the real and imaginary part of s + t = ctx._im(s) + sigma = ctx._re(s) + # Now compute several parameter that appear on the program + ctx.prec = 15 + a = ctx.sqrt(t/(2*ctx.pi)) # Careful + asigma = ctx.power(a, sigma) # Careful + # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3) + A1 = ctx.power(2, ctx.mag(asigma)-1) + # We compute various epsilon's (see II end of Section 3.1) + eps = ctx.power(2, -wpinitial) + eps1 = eps/6. + eps2 = eps * A1/3. + # COMPUTING SOME COEFFICIENTS THAT DEPENDS + # ON sigma + # constant b and c (see I Theorem 2 formula (26) ) + # coefficients A and B1 (see I Section 6.1 equation (50)) + # here we not need high precision + ctx.prec = 15 + if sigma > 0: + b = 2. + c = math.pow(9,sigma)/4.44288 + # 4.44288 =(math.sqrt(2)*math.pi) + A = math.pow(9,sigma) + B1 = 1 + else: + b = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) + c = math.pow(2,-sigma)/4.44288 + A = math.pow(2,-sigma) + B1 = 1.10789 # = 2*sqrt(1-log(2)) + # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL + # CORRECTION + # See II Section 3.2 + ctx.prec = 15 + L = 1 + while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2: + L = L+1 + L = max(2,L) + # The number L has to satify some conditions. + # If not RS can not compute Rzeta(s) with the prescribed precision + # (see II, Section 3.2 condition (20) ) and + # (II, Section 3.3 condition (22) ). Also we have added + # an additional technical condition in Section 3.17 Proposition 17 + if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)): + #print 'Error Riemann-Siegel can not compute with such precision' + ctx.prec = wpinitial + raise NotImplementedError("Riemann-Siegel can not compute with such precision") + + # INITIALIZATION (CONTINUATION) + # + # eps3 is the constant defined on (II, Section 3.5 equation (27) ) + # each term of the RS correction must be computed with error <= eps3 + eps3 = eps2/(4*L) + + # eps4 is defined on (II Section 3.6 equation (30) ) + # each component of the formula (II Section 3.6 equation (29) ) + # must be computed with error <= eps4 + eps4 = eps3/(3*L) + + # COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE + M = aux_M_Fp(ctx, A, eps4, a, B1, L) + Fp = {} + for n in range(M, 3*L-2): + Fp[n] = 0 + + # But I have not seen an instance of M != 3*L-3 + # + # DETERMINATION OF J THE NUMBER OF TERMS NEEDED + # IN THE TAYLOR SERIES OF F. 
+ # See II Section 3.11 equation (49)) + h1 = eps4/(632*A) + h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e + h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M + h3 = min(h1,h2) + J=12 + jvalue = (2*ctx.pi)**J / ctx.gamma(J+1) + while jvalue > h3: + J = J+1 + jvalue = (2*ctx.pi)*jvalue/J + + # COMPUTING eps5[m] for 1 <= m <= 21 + # See II Section 10 equation (43) + eps5={} + foreps5 = math.pi*math.pi*B1*a + for m in range(0,22): + aux1 = math.pow(foreps5, m/3)/(316.*A) + aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) + aux2 = math.sqrt(aux2) + eps5[m] = aux1*aux2*eps4 + + # COMPUTING wpfp + # See II Section 3.13 equation (59) + twenty = min(3*L-3, 21)+1 + aux = 6812*J + wpfp = ctx.mag(44*J) + for m in range(0, twenty): + wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) + # COMPUTING N AND p + # See II Section + ctx.prec = wpfp + ctx.mag(t) + 20 + a = ctx.sqrt(t/(2*ctx.pi)) + N = ctx.floor(a) + p = 1-2*(a-N) + + # now we get a rounded version of p to the precision wpfp + # this possibly is not necessary + num = ctx.floor(p*(ctx.mpf(2)**wpfp)) + difference = p * (ctx.mpf(2)**wpfp)-num + if difference < 0.5: + num = num + else: + num = num+1 + p = ctx.convert(num * (ctx.mpf(2)**(-wpfp))) + + # COMPUTING THE COEFFICIENTS c[n] = cc[n] + # We shall use the notation cc[n], since there is + # a constant that is called c + # See II Section 3.14 + # We compute the coefficients and also save then in a + # cache. The bulk of the computation is passed to + # the function coef() + # + # eps6 is defined in II Section 3.13 equation (58) + eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J) + + # Now we compute the coefficients + cc={} + cont={} + cont, pipowers = coef(ctx, J, eps6) + cc = cont.copy() # we need a copy since we have + Fp={} + for n in range(M, 3*L-2): + Fp[n] = 0 + ctx.prec = wpfp + for m in range(0,M+1): + sumP = 0 + for k in range(2*J-m-1,-1,-1): + sumP = (sumP * p) + cc[k] + Fp[m] = sumP + # preparation of the new coefficients + for k in range(0, 2*J-m-1): + cc[k] = (k+1) * cc[k+1] + + # COMPUTING THE NUMBERS d[n,k] + # See II Section 3.17 + + # First we compute the working precisions wpd[k] + # Se II equation (92) + wpd = {} + d1 = max(6, ctx.mag(40*L*L)) + d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1 + const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2 + for n in range(0,L): + d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2 + wpd[n] = max(d3,d1) + + # procedure of II Section 3.17 + ctx.prec = wpd[1]+10 + psigma = 1-(2*sigma) + d = {} + d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0 + d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0 + for n in range(1,L): + ctx.prec = wpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if (m!=0): + m1 = ctx.one/m + c1 = m1/4 + c2 = (psigma*m1)/2 + c3 = -(m+1) + d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1] + else: + d[0,n,k]=0 + for r in range(0,k): + add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + d[0,n,k] -= ((-1)**(k-r))*add + d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0 + + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)): + d[mu,n,k] = 0 + + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = wpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3] + d[mu,n,k] = aux - d[mu-1,n-1,k-1] + + # COMPUTING THE COEFFICIENTS t[k,l] + # See II Section 3.9 + # + # computing the needed wp + wptcoef = {} + wpterm = {} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + c2 = ctx.mag(68*(L+2)*A) 
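+    # c1, c2 and c4 are magnitude estimates that enter the working
+    # precisions wptcoef[k] and wpterm[k] (II Section 3.9) computed in
+    # the loop below.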
+ c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2. + wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10 + wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10 + + # check of power of pi + + # computing the fortcoef[mu,k,ell] + fortcoef={} + for mu in derivatives: + for k in range(0,L): + for ell in range(-2,3*k//2+1): + fortcoef[mu,k,ell]=0 + + for mu in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell) + + def trunc_a(t): + wp = ctx.prec + ctx.prec = wp + 2 + aa = ctx.sqrt(t/(2*ctx.pi)) + ctx.prec = wp + return aa + + # computing the tcoef[chi,k,ell] + tcoef={} + for chi in derivatives: + for k in range(0,L): + for ell in range(-2,3*k//2+1): + tcoef[chi,k,ell]=0 + ctx.prec = wptcoef[0]+3 + aa = trunc_a(t) + la = -ctx.ln(aa) + + for chi in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + tcoef[chi,k,ell] = 0 + for mu in range(0, chi+1): + tcoefter = ctx.binomial(chi,mu) * la**mu * \ + fortcoef[chi-mu,k,ell] + tcoef[chi,k,ell] += tcoefter + + # COMPUTING tv[k,ell] + # See II Section 3.8 + + # Computing the powers av[k] = a**(-k) + ctx.prec = wptcoef[0] + 2 + + # a has a good value of a. + # See II Section 3.6 + av = {} + av[0] = 1 + av[1] = av[0]/a + + ctx.prec = wptcoef[0] + for k in range(2,L): + av[k] = av[k-1] * av[1] + + # Computing the quotients + tv = {} + for chi in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + tv[chi,k,ell] = tcoef[chi,k,ell]* av[k] + + # COMPUTING THE TERMS term[k] + # See II Section 3.6 + term = {} + for chi in derivatives: + for n in range(0,L): + ctx.prec = wpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += tv[chi,n,k] + term[chi,n] = te + + # COMPUTING rssum + # See II Section 3.5 + rssum={} + ctx.prec=15 + rsbound = math.sqrt(ctx.pi) * c /(b*a) + ctx.prec=15 + wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2) + wprssum = max(wprssum, ctx.mag(10*(L+1))) + ctx.prec = wprssum + for chi in derivatives: + rssum[chi] = 0 + for k in range(1,L+1): + rssum[chi] += term[chi,L-k] + + # COMPUTING S3 + # See II Section 3.19 + ctx.prec = 15 + A2 = 2**(ctx.mag(rssum[0])) + eps8 = eps/(3* A2) + T = t * ctx.ln(t/(2*ctx.pi)) + wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T) + + ctx.prec = wps3 + tpi = t/(2*ctx.pi) + arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 + U = ctx.expj(-arg) + a = trunc_a(t) + asigma = ctx.power(a, -sigma) + S3 = ((-1)**(N-1)) * asigma * U + + # COMPUTING S1 the zetasum + # See II Section 3.18 + ctx.prec = 15 + wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1) + + ctx.prec = wpsum + 10 + ''' + # This can be improved + S1 = {} + for chi in derivatives: + S1[chi] = 0 + for n in range(1,int(N)+1): + ln = ctx.ln(n) + expn = ctx.exp(-ln*(sigma+ctx.j*t)) + for chi in derivatives: + term = ctx.power(-ln, chi)*expn + S1[chi] += term + ''' + S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0] + + # END OF COMPUTATION + # See II Section 3.1 + ctx.prec = 15 + absS1 = abs(S1[der]) + absS2 = abs(rssum[der] * S3) + wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2))) + ctx.prec = wpend + rz = {} + for chi in derivatives: + rz[chi] = S1[chi]+rssum[chi]*S3 + ctx.prec = wpinitial + return rz + + +def z_half(ctx,t,der=0): + r""" + z_half(t,der=0) Computes Z^(der)(t) + """ + s=ctx.mpf('0.5')+ctx.j*t + wpinitial = ctx.prec + ctx.prec = 15 + tt = 
t/(2*ctx.pi) + wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt)) + wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt)) + ctx.prec = wptheta + theta = ctx.siegeltheta(t) + ctx.prec = wpz + rz = Rzeta_set(ctx,s, range(der+1)) + if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2) + if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4) + if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8) + if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16) + exptheta = ctx.expj(theta) + if der == 0: + z = 2*exptheta*rz[0] + if der == 1: + zf = 2j*exptheta + z = zf*(ps1*rz[0]+rz[1]) + if der == 2: + zf = 2 * exptheta + z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2) + if der == 3: + zf = -2j*exptheta + z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2] + z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3) + if der == 4: + zf = 2*exptheta + z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2] + z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2 + z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4 + z = zf*z + ctx.prec = wpinitial + return ctx._re(z) + +def zeta_half(ctx, s, k=0): + """ + zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5 + """ + wpinitial = ctx.prec + sigma = ctx._re(s) + t = ctx._im(s) + #--- compute wptheta, wpR, wpbasic --- + ctx.prec = 53 + # X see II Section 3.21 (109) and (110) + if sigma > 0: + X = ctx.sqrt(abs(s)) + else: + X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma) + # M1 see II Section 3.21 (111) and (112) + if sigma > 0: + M1 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M1 = 4 * t * X + # T see II Section 3.21 (113) + abst = abs(0.5-s) + T = 2* abst*math.log(abst) + # computing wpbasic, wptheta, wpR see II Section 3.21 + wpbasic = max(6,3+ctx.mag(t)) + wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1 + wpbasic = max(wpbasic, wpbasic2) + wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1) + wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 + ctx.prec = wptheta + theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) + if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4 + if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8 + if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16 + ctx.prec = wpR + xrz = Rzeta_set(ctx,s,range(k+1)) + yrz={} + for chi in range(0,k+1): + yrz[chi] = ctx.conj(xrz[chi]) + ctx.prec = wpbasic + exptheta = ctx.expj(-2*theta) + if k==0: + zv = xrz[0]+exptheta*yrz[0] + if k==1: + zv1 = -yrz[1] - 2*yrz[0]*ps1 + zv = xrz[1] + exptheta*zv1 + if k==2: + zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2 + zv = xrz[2]+exptheta*zv1 + if k==3: + zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 + zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 + zv = xrz[3]+exptheta*zv1 + if k == 4: + zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 + zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 + zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 + zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 + zv = xrz[4]+exptheta*zv1 + ctx.prec = wpinitial + return zv + +def zeta_offline(ctx, s, k=0): + """ + Computes zeta^(k)(s) off the line + """ + wpinitial = ctx.prec + sigma = ctx._re(s) + t = ctx._im(s) + #--- compute wptheta, wpR, wpbasic --- + ctx.prec = 53 + # X see II Section 3.21 (109) and (110) + if sigma > 0: + X = ctx.power(abs(s), 0.5) + else: + X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma) + # M1 see II Section 3.21 (111) and (112) + if (sigma > 0): + M1 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M1 = 4 * t * X + # M2 see 
II Section 3.21 (111) and (112) + if (1-sigma > 0): + M2 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5) + # T see II Section 3.21 (113) + abst = abs(0.5-s) + T = 2* abst*math.log(abst) + # computing wpbasic, wptheta, wpR see II Section 3.21 + wpbasic = max(6,3+ctx.mag(t)) + wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1 + wpbasic = max(wpbasic, wpbasic2) + wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1) + wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 + ctx.prec = wptheta + theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) + s1 = s + s2 = ctx.conj(1-s1) + ctx.prec = wpR + xrz, yrz = Rzeta_simul(ctx, s, k) + if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8 + if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16 + if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32 + ctx.prec = wpbasic + exptheta = ctx.expj(-2*theta) + if k == 0: + zv = xrz[0]+exptheta*yrz[0] + if k == 1: + zv1 = -yrz[1]-2*yrz[0]*ps1 + zv = xrz[1]+exptheta*zv1 + if k == 2: + zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2 + zv = xrz[2]+exptheta*zv1 + if k == 3: + zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 + zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 + zv = xrz[3]+exptheta*zv1 + if k == 4: + zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 + zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 + zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 + zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 + zv = xrz[4]+exptheta*zv1 + ctx.prec = wpinitial + return zv + +def z_offline(ctx, w, k=0): + r""" + Computes Z(w) and its derivatives off the line + """ + s = ctx.mpf('0.5')+ctx.j*w + s1 = s + s2 = ctx.conj(1-s1) + wpinitial = ctx.prec + ctx.prec = 35 + # X see II Section 3.21 (109) and (110) + # M1 see II Section 3.21 (111) and (112) + if (ctx._re(s1) >= 0): + M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi)) + X = ctx.sqrt(abs(s1)) + else: + X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1)) + M1 = 4 * ctx._im(s1)*X + # M2 see II Section 3.21 (111) and (112) + if (ctx._re(s2) >= 0): + M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi)) + else: + M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2)) + # T see II Section 3.21 Prop. 
27 + T = 2*abs(ctx.siegeltheta(w)) + # defining some precisions + # see II Section 3.22 (115), (116), (117) + aux1 = ctx.sqrt(X) + aux2 = aux1*(M1+M2) + aux3 = 3 +wpinitial + wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3) + wptheta = max(4,ctx.mag(2.04*aux2)+aux3) + wpR = ctx.mag(4*aux1)+aux3 + # now the computations + ctx.prec = wptheta + theta = ctx.siegeltheta(w) + ctx.prec = wpR + xrz, yrz = Rzeta_simul(ctx,s,k) + pta = 0.25 + 0.5j*w + ptb = 0.25 - 0.5j*w + if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb)) + if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb)) + if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb)) + ctx.prec = wpbasic + exptheta = ctx.expj(theta) + if k == 0: + zv = exptheta*xrz[0]+yrz[0]/exptheta + j = ctx.j + if k == 1: + zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta + if k == 2: + zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2) + zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta + if k == 3: + zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2 + zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta + zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2 + zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta + zv = zv1+zv2 + if k == 4: + zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2 + zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2 + zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3 + zv1 = zv1+xrz[4]+j*xrz[0]*ps4 + zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2 + zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2 + zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3 + zv2 = zv2+yrz[4]-j*yrz[0]*ps4 + zv = exptheta*zv1+zv2/exptheta + ctx.prec = wpinitial + return zv + +@defun +def rs_zeta(ctx, s, derivative=0, **kwargs): + if derivative > 4: + raise NotImplementedError + s = ctx.convert(s) + re = ctx._re(s); im = ctx._im(s) + if im < 0: + z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative)) + return z + critical_line = (re == 0.5) + if critical_line: + return zeta_half(ctx, s, derivative) + else: + return zeta_offline(ctx, s, derivative) + +@defun +def rs_z(ctx, w, derivative=0): + w = ctx.convert(w) + re = ctx._re(w); im = ctx._im(w) + if re < 0: + return rs_z(ctx, -w, derivative) + critical_line = (im == 0) + if critical_line : + return z_half(ctx, w, derivative) + else: + return z_offline(ctx, w, derivative) diff --git a/MLPY/Lib/site-packages/mpmath/functions/signals.py b/MLPY/Lib/site-packages/mpmath/functions/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..6fadafb2dbb44fe19a2defa8d807d81d7c8e2789 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/signals.py @@ -0,0 +1,32 @@ +from .functions import defun_wrapped + +@defun_wrapped +def squarew(ctx, t, amplitude=1, period=1): + P = period + A = amplitude + return A*((-1)**ctx.floor(2*t/P)) + +@defun_wrapped +def trianglew(ctx, t, amplitude=1, period=1): + A = amplitude + P = period + + return 2*A*(0.5 - ctx.fabs(1 - 2*ctx.frac(t/P + 0.25))) + +@defun_wrapped +def sawtoothw(ctx, t, amplitude=1, period=1): + A = amplitude + P = period + return A*ctx.frac(t/P) + +@defun_wrapped +def unit_triangle(ctx, t, amplitude=1): + A = amplitude + if t <= -1 or t >= 1: + return ctx.zero + return A*(-ctx.fabs(t) + 1) + +@defun_wrapped +def sigmoid(ctx, t, amplitude=1): + A = amplitude + return A / (1 + ctx.exp(-t)) diff 
--git a/MLPY/Lib/site-packages/mpmath/functions/theta.py b/MLPY/Lib/site-packages/mpmath/functions/theta.py new file mode 100644 index 0000000000000000000000000000000000000000..2b3d8323a163a43186b85417a1b40f3b656c30d0 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/theta.py @@ -0,0 +1,1049 @@ +from .functions import defun, defun_wrapped + +@defun +def _jacobi_theta2(ctx, z, q): + extra1 = 10 + extra2 = 20 + # the loops below break when the fixed precision quantities + # a and b go to zero; + # right shifting small negative numbers by wp one obtains -1, not zero, + # so the condition a**2 + b**2 > MIN is used to break the loops. + MIN = 2 + if z == ctx.zero: + if (not ctx._im(q)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + s = x2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + s += a + s = (1 << (wp+1)) + (s << 1) + s = ctx.ldexp(s, -wp) + else: + wp = ctx.prec + extra1 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp-1) + are = bre = x2re + aim = bim = x2im + sre = (1< MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + sre += are + sim += aim + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + else: + if (not ctx._im(q)) and (not ctx._im(z)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + s = c1 + ((a * cn) >> wp) + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + s += (a * cn) >> wp + s = (s << 1) + s = ctx.ldexp(s, -wp) + s *= ctx.nthroot(q, 4) + return s + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + sre = c1 + ((are * cn) >> wp) + sim = ((aim * cn) >> wp) + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + sre += ((are * cn) >> wp) + sim += ((aim * cn) >> wp) + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + #c2 = (c1*c1 - s1*s1) >> wp + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> 
(wp - 1) + #s2 = (c1 * s1) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre = c1re + ((a * cnre) >> wp) + sim = c1im + ((a * cnim) >> wp) + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre += ((a * cnre) >> wp) + sim += ((a * cnim) >> wp) + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + prec0 = ctx.prec + ctx.prec = wp + # cos(z), sin(z) with z complex + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + n = 1 + termre = c1re + termim = c1im + sre = c1re + ((are * cnre - aim * cnim) >> wp) + sim = c1im + ((are * cnim + aim * cnre) >> wp) + n = 3 + termre = ((are * cnre - aim * cnim) >> wp) + termim = ((are * cnim + aim * cnre) >> wp) + sre = c1re + ((are * cnre - aim * cnim) >> wp) + sim = c1im + ((are * cnim + aim * cnre) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + termre = ((are * cnre - aim * cnim) >> wp) + termim = ((aim * cnre + are * cnim) >> wp) + sre += ((are * cnre - aim * cnim) >> wp) + sim += ((aim * cnre + are * cnim) >> wp) + n += 2 + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + s *= ctx.nthroot(q, 4) + return s + +@defun +def _djacobi_theta2(ctx, z, q, nd): + MIN = 2 + extra1 = 10 + extra2 = 20 + if (not ctx._im(q)) and (not ctx._im(z)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + c1, s1 = 
ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if (nd&1): + s = s1 + ((a * sn * 3**nd) >> wp) + else: + s = c1 + ((a * cn * 3**nd) >> wp) + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if nd&1: + s += (a * sn * (2*n+1)**nd) >> wp + else: + s += (a * cn * (2*n+1)**nd) >> wp + n += 1 + s = -(s << 1) + s = ctx.ldexp(s, -wp) + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if (nd&1): + sre = s1 + ((are * sn * 3**nd) >> wp) + sim = ((aim * sn * 3**nd) >> wp) + else: + sre = c1 + ((are * cn * 3**nd) >> wp) + sim = ((aim * cn * 3**nd) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + + if (nd&1): + sre += ((are * sn * n**nd) >> wp) + sim += ((aim * sn * n**nd) >> wp) + else: + sre += ((are * cn * n**nd) >> wp) + sim += ((aim * cn * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + #c2 = (c1*c1 - s1*s1) >> wp + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + #s2 = (c1 * s1) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre = s1re + ((a * snre * 3**nd) >> wp) + sim = s1im + ((a * snim * 3**nd) >> wp) + else: + sre = c1re + ((a * cnre * 3**nd) >> wp) + sim = c1im + ((a * cnim * 3**nd) >> wp) + n = 5 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += ((a * snre * n**nd) >> wp) + sim += ((a * snim * n**nd) >> wp) + else: + sre += ((a * cnre * n**nd) >> wp) + sim += ((a * cnim * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = 
ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp) + sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp) + else: + sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp) + sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += (((are * snre - aim * snim) * n**nd) >> wp) + sim += (((aim * snre + are * snim) * n**nd) >> wp) + else: + sre += (((are * cnre - aim * cnim) * n**nd) >> wp) + sim += (((aim * cnre + are * cnim) * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + s *= ctx.nthroot(q, 4) + if (nd&1): + return (-1)**(nd//2) * s + else: + return (-1)**(1 + nd//2) * s + +@defun +def _jacobi_theta3(ctx, z, q): + extra1 = 10 + extra2 = 20 + MIN = 2 + if z == ctx.zero: + if not ctx._im(q): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + s = x + a = b = x + x2 = (x*x) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + s += a + s = (1 << wp) + (s << 1) + s = ctx.ldexp(s, -wp) + return s + else: + wp = ctx.prec + extra1 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + sre = are = bre = xre + sim = aim = bim = xim + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + sre += are + sim += aim + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + else: + if (not ctx._im(q)) and (not ctx._im(z)): + s = 0 + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + 
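+            # cn and sn track cos(2*n*z) and sin(2*n*z) in fixed point;
+            # each loop pass advances them via the angle-addition formulas
+            # with (c1, s1) = (cos(2z), sin(2z)).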
cn = c1 + sn = s1 + s += (a * cn) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + s += (a * cn) >> wp + s = (1 << wp) + (s << 1) + s = ctx.ldexp(s, -wp) + return s + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + sre = (are * cn) >> wp + sim = (aim * cn) >> wp + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + sre += (are * cn) >> wp + sim += (aim * cn) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + sre = (a * cnre) >> wp + sim = (a * cnim) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre += (a * cnre) >> wp + sim += (a * cnim) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + sre = (are * cnre - aim * cnim) >> wp + sim = (aim * cnre + are * cnim) >> wp + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre += (are * cnre - aim * cnim) >> wp + sim += (aim * cnre + are * cnim) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + +@defun +def _djacobi_theta3(ctx, z, q, nd): + """nd=1,2,3 order of the derivative with respect to z""" + MIN = 2 + extra1 = 
10 + extra2 = 20 + if (not ctx._im(q)) and (not ctx._im(z)): + s = 0 + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + if (nd&1): + s += (a * sn) >> wp + else: + s += (a * cn) >> wp + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + if nd&1: + s += (a * sn * n**nd) >> wp + else: + s += (a * cn * n**nd) >> wp + n += 1 + s = -(s << (nd+1)) + s = ctx.ldexp(s, -wp) + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + if (nd&1): + sre = (are * sn) >> wp + sim = (aim * sn) >> wp + else: + sre = (are * cn) >> wp + sim = (aim * cn) >> wp + n = 2 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + if nd&1: + sre += (are * sn * n**nd) >> wp + sim += (aim * sn * n**nd) >> wp + else: + sre += (are * cn * n**nd) >> wp + sim += (aim * cn * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + if (nd&1): + sre = (a * snre) >> wp + sim = (a * snim) >> wp + else: + sre = (a * cnre) >> wp + sim = (a * cnim) >> wp + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += (a * snre * n**nd) >> wp + sim += (a * snim * n**nd) >> wp + else: + sre += (a * cnre * n**nd) >> wp + sim += (a * cnim * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + if (nd&1): + sre = (are * snre - aim * snim) >> wp + sim = (aim * snre + are * snim) >> wp + else: + sre = (are * cnre - aim * cnim) >> wp + sim = (aim * cnre + are * cnim) >> wp + n = 2 
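+ # The loop below accumulates sum(n**nd * q**(n**2) * cos(2*n*z)) (sin(2*n*z) for odd + # nd) in fixed point; the remaining factor 2**(nd+1) and the sign coming from i**nd + # are applied after the loop and in the final return.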
+ while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if(nd&1): + sre += ((are * snre - aim * snim) * n**nd) >> wp + sim += ((aim * snre + are * snim) * n**nd) >> wp + else: + sre += ((are * cnre - aim * cnim) * n**nd) >> wp + sim += ((aim * cnre + are * cnim) * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + if (nd&1): + return (-1)**(nd//2) * s + else: + return (-1)**(1 + nd//2) * s + +@defun +def _jacobi_theta2a(ctx, z, q): + """ + case ctx._im(z) != 0 + theta(2, z, q) = + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf, inf) + max term for minimum (2*n+1)*log(q).real - 2* ctx._im(z) + n0 = int(ctx._im(z)/log(q).real - 1/2) + theta(2, z, q) = + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=n0, inf) + + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n, n0-1, -inf) + """ + n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj((2*n+1)*z) + a = q**(n*n + n) + # leading term + term = a * e + s = term + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + s = s * ctx.nthroot(q, 4) + return s + +@defun +def _jacobi_theta3a(ctx, z, q): + """ + case ctx._im(z) != 0 + theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n, -inf, inf) + max term for n*abs(log(q).real) + ctx._im(z) ~= 0 + n0 = int(- ctx._im(z)/abs(log(q).real)) + """ + n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj(2*n*z) + s = term = q**(n*n) * e + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = q**(n*n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = q**(n*n) * e + if abs(term) < eps1: + break + s += term + return s + +@defun +def _djacobi_theta2a(ctx, z, q, nd): + """ + case ctx._im(z) != 0 + dtheta(2, z, q, nd) = + j* q**1/4 * Sum(q**(n*n + n) * (2*n+1)*exp(j*(2*n + 1)*z), n=-inf, inf) + max term for (2*n0+1)*log(q).real - 2* ctx._im(z) ~= 0 + n0 = int(ctx._im(z)/log(q).real - 1/2) + """ + n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj((2*n + 1)*z) + a = q**(n*n + n) + # leading term + term = (2*n+1)**nd * a * e + s = term + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = (2*n+1)**nd * q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = (2*n+1)**nd * q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + return ctx.j**nd * s * ctx.nthroot(q, 4) + +@defun +def _djacobi_theta3a(ctx, z, q, nd): + """ + case ctx._im(z) != 0 + djtheta3(z, q, nd) = (2*j)**nd * + Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf) + max term for minimum n*abs(log(q).real) + ctx._im(z) + """ + n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) + e2 = ctx.expj(2*z) + e = e0 
= ctx.expj(2*n*z) + a = q**(n*n) * e + s = term = n**nd * a + if n != 0: + eps1 = ctx.eps*abs(term) + else: + eps1 = ctx.eps*abs(a) + while 1: + n += 1 + e = e * e2 + a = q**(n*n) * e + term = n**nd * a + if n != 0: + aterm = abs(term) + else: + aterm = abs(a) + if aterm < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + a = q**(n*n) * e + term = n**nd * a + if n != 0: + aterm = abs(term) + else: + aterm = abs(a) + if aterm < eps1: + break + s += term + return (2*ctx.j)**nd * s + +@defun +def jtheta(ctx, n, z, q, derivative=0): + if derivative: + return ctx._djtheta(n, z, q, derivative) + + z = ctx.convert(z) + q = ctx.convert(q) + + # Implementation note + # If ctx._im(z) is close to zero, _jacobi_theta2 and _jacobi_theta3 + # are used, + # which compute the series starting from n=0 using fixed precision + # numbers; + # otherwise _jacobi_theta2a and _jacobi_theta3a are used, which compute + # the series starting from n=n0, which is the largest term. + + # TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed-point + + if abs(q) > ctx.THETA_Q_LIM: + raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) + + extra = 10 + if z: + M = ctx.mag(z) + if M > 5 or (n == 1 and M < -5): + extra += 2*abs(M) + cz = 0.5 + extra2 = 50 + prec0 = ctx.prec + try: + ctx.prec += extra + if n == 1: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta2(z - ctx.pi/2, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta2a(z - ctx.pi/2, q) + else: + res = ctx._jacobi_theta2(z - ctx.pi/2, q) + elif n == 2: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta2(z, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta2a(z, q) + else: + res = ctx._jacobi_theta2(z, q) + elif n == 3: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta3(z, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta3a(z, q) + else: + res = ctx._jacobi_theta3(z, q) + elif n == 4: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta3(z, -q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta3a(z, -q) + else: + res = ctx._jacobi_theta3(z, -q) + else: + raise ValueError + finally: + ctx.prec = prec0 + return res + +@defun +def _djtheta(ctx, n, z, q, derivative=1): + z = ctx.convert(z) + q = ctx.convert(q) + nd = int(derivative) + + if abs(q) > ctx.THETA_Q_LIM: + raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) + extra = 10 + ctx.prec * nd // 10 + if z: + M = ctx.mag(z) + if M > 5 or (n != 1 and M < -5): + extra += 2*abs(M) + cz = 0.5 + extra2 = 50 + prec0 = ctx.prec + try: + ctx.prec += extra + if n == 1: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta2a(z - ctx.pi/2, q, nd) + else: + res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) + elif n == 2: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta2(z, q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta2a(z, q, nd) + else: + res = ctx._djacobi_theta2(z, q, nd) + elif n == 3: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta3(z, q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta3a(z, q, 
nd) + else: + res = ctx._djacobi_theta3(z, q, nd) + elif n == 4: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta3(z, -q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta3a(z, -q, nd) + else: + res = ctx._djacobi_theta3(z, -q, nd) + else: + raise ValueError + finally: + ctx.prec = prec0 + return +res diff --git a/MLPY/Lib/site-packages/mpmath/functions/zeta.py b/MLPY/Lib/site-packages/mpmath/functions/zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ede50d95e5b6eff511619620c934529942cbdd --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/zeta.py @@ -0,0 +1,1154 @@ +from __future__ import print_function + +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped, defun_static + +@defun +def stieltjes(ctx, n, a=1): + n = ctx.convert(n) + a = ctx.convert(a) + if n < 0: + return ctx.bad_domain("Stieltjes constants defined for n >= 0") + if hasattr(ctx, "stieltjes_cache"): + stieltjes_cache = ctx.stieltjes_cache + else: + stieltjes_cache = ctx.stieltjes_cache = {} + if a == 1: + if n == 0: + return +ctx.euler + if n in stieltjes_cache: + prec, s = stieltjes_cache[n] + if prec >= ctx.prec: + return +s + mag = 1 + def f(x): + xa = x/a + v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1) + return ctx._re(v) / mag + orig = ctx.prec + try: + # Normalize integrand by approx. magnitude to + # speed up quadrature (which uses absolute error) + if n > 50: + ctx.prec = 20 + mag = ctx.quad(f, [0,ctx.inf], maxdegree=3) + ctx.prec = orig + 10 + int(n**0.5) + s = ctx.quad(f, [0,ctx.inf], maxdegree=20) + v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag + finally: + ctx.prec = orig + if a == 1 and ctx.isint(n): + stieltjes_cache[n] = (ctx.prec, v) + return +v + +@defun_wrapped +def siegeltheta(ctx, t, derivative=0): + d = int(derivative) + if (t == ctx.inf or t == ctx.ninf): + if d < 2: + if t == ctx.ninf and d == 0: + return ctx.ninf + return ctx.inf + else: + return ctx.zero + if d == 0: + if ctx._im(t): + # XXX: cancellation occurs + a = ctx.loggamma(0.25+0.5j*t) + b = ctx.loggamma(0.25-0.5j*t) + return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b) + else: + if ctx.isinf(t): + return t + return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t + if d > 0: + a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t) + b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t) + if ctx._im(t): + if d == 1: + return -0.5*ctx.log(ctx.pi)+0.25*(a+b) + else: + return 0.25*(a+b) + else: + if d == 1: + return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b)) + else: + return ctx._re(0.25*(a+b)) + +@defun_wrapped +def grampoint(ctx, n): + # asymptotic expansion, from + # http://mathworld.wolfram.com/GramPoint.html + g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e))) + return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g) + + +@defun_wrapped +def siegelz(ctx, t, **kwargs): + d = int(kwargs.get("derivative", 0)) + t = ctx.convert(t) + t1 = ctx._re(t) + t2 = ctx._im(t) + prec = ctx.prec + try: + if abs(t1) > 500*prec and t2**2 < t1: + v = ctx.rs_z(t, d) + if ctx._is_real_type(t): + return ctx._re(v) + return v + except NotImplementedError: + pass + ctx.prec += 21 + e1 = ctx.expj(ctx.siegeltheta(t)) + z = ctx.zeta(0.5+ctx.j*t) + if d == 0: + v = e1*z + ctx.prec=prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + z1 = ctx.zeta(0.5+ctx.j*t, derivative=1) + theta1 = ctx.siegeltheta(t, derivative=1) + if d == 1: + v = ctx.j*e1*(z1+z*theta1) + ctx.prec=prec + if 
ctx._is_real_type(t): + return ctx._re(v) + return +v + z2 = ctx.zeta(0.5+ctx.j*t, derivative=2) + theta2 = ctx.siegeltheta(t, derivative=2) + comb1 = theta1**2-ctx.j*theta2 + if d == 2: + def terms(): + return [2*z1*theta1, z2, z*comb1] + v = ctx.sum_accurately(terms, 1) + v = -e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + ctx.prec += 10 + z3 = ctx.zeta(0.5+ctx.j*t, derivative=3) + theta3 = ctx.siegeltheta(t, derivative=3) + comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3 + if d == 3: + def terms(): + return [3*theta1*z2, 3*z1*comb1, z3+z*comb2] + v = ctx.sum_accurately(terms, 1) + v = -ctx.j*e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + z4 = ctx.zeta(0.5+ctx.j*t, derivative=4) + theta4 = ctx.siegeltheta(t, derivative=4) + def terms(): + return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2, + -4*theta1*theta3, ctx.j*theta4] + comb3 = ctx.sum_accurately(terms, 1) + if d == 4: + def terms(): + return [6*theta1**2*z2, -6*ctx.j*z2*theta2, 4*theta1*z3, + 4*z1*comb2, z4, z*comb3] + v = ctx.sum_accurately(terms, 1) + v = e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + if d > 4: + h = lambda x: ctx.siegelz(x, derivative=4) + return ctx.diff(h, t, n=d-4) + + +_zeta_zeros = [ +14.134725142,21.022039639,25.010857580,30.424876126,32.935061588, +37.586178159,40.918719012,43.327073281,48.005150881,49.773832478, +52.970321478,56.446247697,59.347044003,60.831778525,65.112544048, +67.079810529,69.546401711,72.067157674,75.704690699,77.144840069, +79.337375020,82.910380854,84.735492981,87.425274613,88.809111208, +92.491899271,94.651344041,95.870634228,98.831194218,101.317851006, +103.725538040,105.446623052,107.168611184,111.029535543,111.874659177, +114.320220915,116.226680321,118.790782866,121.370125002,122.946829294, +124.256818554,127.516683880,129.578704200,131.087688531,133.497737203, +134.756509753,138.116042055,139.736208952,141.123707404,143.111845808, +146.000982487,147.422765343,150.053520421,150.925257612,153.024693811, +156.112909294,157.597591818,158.849988171,161.188964138,163.030709687, +165.537069188,167.184439978,169.094515416,169.911976479,173.411536520, +174.754191523,176.441434298,178.377407776,179.916484020,182.207078484, +184.874467848,185.598783678,187.228922584,189.416158656,192.026656361, +193.079726604,195.265396680,196.876481841,198.015309676,201.264751944, +202.493594514,204.189671803,205.394697202,207.906258888,209.576509717, +211.690862595,213.347919360,214.547044783,216.169538508,219.067596349, +220.714918839,221.430705555,224.007000255,224.983324670,227.421444280, +229.337413306,231.250188700,231.987235253,233.693404179,236.524229666, +] + +def _load_zeta_zeros(url): + import urllib + d = urllib.urlopen(url) + L = [float(x) for x in d.readlines()] + # Sanity check + assert round(L[0]) == 14 + _zeta_zeros[:] = L + +@defun +def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'): + n = int(n) + if n < 0: + return ctx.zetazero(-n).conjugate() + if n == 0: + raise ValueError("n must be nonzero") + if n > len(_zeta_zeros) and n <= 100000: + _load_zeta_zeros(url) + if n > len(_zeta_zeros): + raise NotImplementedError("n too large for zetazeros") + return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1])) + +@defun_wrapped +def riemannr(ctx, x): + if x == 0: + return ctx.zero + # Check if a simple asymptotic estimate is accurate enough + if abs(x) > 1000: + a = ctx.li(x) + b = 0.5*ctx.li(ctx.sqrt(x)) + if abs(b) < abs(a)*ctx.eps: + 
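+ # The Gram series R(x) = sum(mu(n)/n * li(x**(1/n)), n>=1) has li(x) as its leading + # term; when the next term, li(sqrt(x))/2, is already below eps relative to li(x), + # the leading term alone is accurate to full precision.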
return a + if abs(x) < 0.01: + # XXX + ctx.prec += int(-ctx.log(abs(x),2)) + # Sum Gram's series + s = t = ctx.one + u = ctx.ln(x) + k = 1 + while abs(t) > abs(s)*ctx.eps: + t = t * u / k + s += t / (k * ctx._zeta_int(k+1)) + k += 1 + return s + +@defun_static +def primepi(ctx, x): + x = int(x) + if x < 2: + return 0 + return len(ctx.list_primes(x)) + +# TODO: fix the interface wrt contexts +@defun_wrapped +def primepi2(ctx, x): + x = int(x) + if x < 2: + return ctx._iv.zero + if x < 2657: + return ctx._iv.mpf(ctx.primepi(x)) + mid = ctx.li(x) + # Schoenfeld's estimate for x >= 2657, assuming RH + err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d') + a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d') + b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u') + return ctx._iv.mpf([a,b]) + +@defun_wrapped +def primezeta(ctx, s): + if ctx.isnan(s): + return s + if ctx.re(s) <= 0: + raise ValueError("prime zeta function defined only for re(s) > 0") + if s == 1: + return ctx.inf + if s == 0.5: + return ctx.mpc(ctx.ninf, ctx.pi) + r = ctx.re(s) + if r > ctx.prec: + return 0.5**s + else: + wp = ctx.prec + int(r) + def terms(): + orig = ctx.prec + # zeta ~ 1+eps; need to set precision + # to get logarithm accurately + k = 0 + while 1: + k += 1 + u = ctx.moebius(k) + if not u: + continue + ctx.prec = wp + t = u*ctx.ln(ctx.zeta(k*s))/k + if not t: + return + #print ctx.prec, ctx.nstr(t) + ctx.prec = orig + yield t + return ctx.sum_accurately(terms) + +# TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered + +@defun_wrapped +def bernpoly(ctx, n, z): + # Slow implementation: + #return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1)) + n = int(n) + if n < 0: + raise ValueError("Bernoulli polynomials only defined for n >= 0") + if z == 0 or (z == 1 and n > 1): + return ctx.bernoulli(n) + if z == 0.5: + return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n) + if n <= 3: + if n == 0: return z ** 0 + if n == 1: return z - 0.5 + if n == 2: return (6*z*(z-1)+1)/6 + if n == 3: return z*(z*(z-1.5)+0.5) + if ctx.isinf(z): + return z ** n + if ctx.isnan(z): + return z + if abs(z) > 2: + def terms(): + t = ctx.one + yield t + r = ctx.one/z + k = 1 + while k <= n: + t = t*(n+1-k)/k*r + if not (k > 2 and k & 1): + yield t*ctx.bernoulli(k) + k += 1 + return ctx.sum_accurately(terms) * z**n + else: + def terms(): + yield ctx.bernoulli(n) + t = ctx.one + k = 1 + while k <= n: + t = t*(n+1-k)/k * z + m = n-k + if not (m > 2 and m & 1): + yield t*ctx.bernoulli(m) + k += 1 + return ctx.sum_accurately(terms) + +@defun_wrapped +def eulerpoly(ctx, n, z): + n = int(n) + if n < 0: + raise ValueError("Euler polynomials only defined for n >= 0") + if n <= 2: + if n == 0: return z ** 0 + if n == 1: return z - 0.5 + if n == 2: return z*(z-1) + if ctx.isinf(z): + return z**n + if ctx.isnan(z): + return z + m = n+1 + if z == 0: + return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 + if z == 1: + return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 + if z == 0.5: + if n % 2: + return ctx.zero + # Use exact code for Euler numbers + if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25: + return ctx.ldexp(ctx._eulernum(n), -n) + # http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/ + def terms(): + t = ctx.one + k = 0 + w = ctx.ldexp(1,n+2) + while 1: + v = n-k+1 + if not (v > 2 and v & 1): + yield (2-w)*ctx.bernoulli(v)*t + k += 1 + if k > n: + break + t = t*z*(n-k+2)/k + w *= 0.5 + return ctx.sum_accurately(terms) / m + +@defun +def eulernum(ctx, n, 
exact=False): + n = int(n) + if exact: + return int(ctx._eulernum(n)) + if n < 100: + return ctx.mpf(ctx._eulernum(n)) + if n % 2: + return ctx.zero + return ctx.ldexp(ctx.eulerpoly(n,0.5), n) + +# TODO: this should be implemented low-level +def polylog_series(ctx, s, z): + tol = +ctx.eps + l = ctx.zero + k = 1 + zk = z + while 1: + term = zk / k**s + l += term + if abs(term) < tol: + break + zk *= z + k += 1 + return l + +def polylog_continuation(ctx, n, z): + if n < 0: + return z*0 + twopij = 2j * ctx.pi + a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij) + if ctx._is_real_type(z) and z < 0: + a = ctx._re(a) + if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1): + a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1) + return a + +def polylog_unitcircle(ctx, n, z): + tol = +ctx.eps + if n > 1: + l = ctx.zero + logz = ctx.ln(z) + logmz = ctx.one + m = 0 + while 1: + if (n-m) != 1: + term = ctx.zeta(n-m) * logmz / ctx.fac(m) + if term and abs(term) < tol: + break + l += term + logmz *= logz + m += 1 + l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z))) + elif n < 1: # else + l = ctx.fac(-n)*(-ctx.ln(z))**(n-1) + logz = ctx.ln(z) + logkz = ctx.one + k = 0 + while 1: + b = ctx.bernoulli(k-n+1) + if b: + term = b*logkz/(ctx.fac(k)*(k-n+1)) + if abs(term) < tol: + break + l -= term + logkz *= logz + k += 1 + else: + raise ValueError + if ctx._is_real_type(z) and z < 0: + l = ctx._re(l) + return l + +def polylog_general(ctx, s, z): + v = ctx.zero + u = ctx.ln(z) + if not abs(u) < 5: # theoretically |u| < 2*pi + j = ctx.j + v = 1-s + y = ctx.ln(-z)/(2*ctx.pi*j) + return ctx.gamma(v)*(j**v*ctx.zeta(v,0.5+y) + j**-v*ctx.zeta(v,0.5-y))/(2*ctx.pi)**v + t = 1 + k = 0 + while 1: + term = ctx.zeta(s-k) * t + if abs(term) < ctx.eps: + break + v += term + k += 1 + t *= u + t /= k + return ctx.gamma(1-s)*(-u)**(s-1) + v + +@defun_wrapped +def polylog(ctx, s, z): + s = ctx.convert(s) + z = ctx.convert(z) + if z == 1: + return ctx.zeta(s) + if z == -1: + return -ctx.altzeta(s) + if s == 0: + return z/(1-z) + if s == 1: + return -ctx.ln(1-z) + if s == -1: + return z/(1-z)**2 + if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9): + return polylog_series(ctx, s, z) + if abs(z) >= 1.4 and ctx.isint(s): + return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, int(ctx.re(s)), z) + if ctx.isint(s): + return polylog_unitcircle(ctx, int(ctx.re(s)), z) + return polylog_general(ctx, s, z) + +@defun_wrapped +def clsin(ctx, s, z, pi=False): + if ctx.isint(s) and s < 0 and int(s) % 2 == 1: + return z*0 + if pi: + a = ctx.expjpi(z) + else: + a = ctx.expj(z) + if ctx._is_real_type(z) and ctx._is_real_type(s): + return ctx.im(ctx.polylog(s,a)) + b = 1/a + return (-0.5j)*(ctx.polylog(s,a) - ctx.polylog(s,b)) + +@defun_wrapped +def clcos(ctx, s, z, pi=False): + if ctx.isint(s) and s < 0 and int(s) % 2 == 0: + return z*0 + if pi: + a = ctx.expjpi(z) + else: + a = ctx.expj(z) + if ctx._is_real_type(z) and ctx._is_real_type(s): + return ctx.re(ctx.polylog(s,a)) + b = 1/a + return 0.5*(ctx.polylog(s,a) + ctx.polylog(s,b)) + +@defun +def altzeta(ctx, s, **kwargs): + try: + return ctx._altzeta(s, **kwargs) + except NotImplementedError: + return ctx._altzeta_generic(s) + +@defun_wrapped +def _altzeta_generic(ctx, s): + if s == 1: + return ctx.ln2 + 0*s + return -ctx.powm1(2, 1-s) * ctx.zeta(s) + +@defun +def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs): + d = int(derivative) + if a == 1 and not (d or method): + try: + return ctx._zeta(s, **kwargs) + except 
NotImplementedError: + pass + s = ctx.convert(s) + prec = ctx.prec + method = kwargs.get('method') + verbose = kwargs.get('verbose') + if (not s) and (not derivative): + return ctx.mpf(0.5) - ctx._convert_param(a)[0] + if a == 1 and method != 'euler-maclaurin': + im = abs(ctx._im(s)) + re = abs(ctx._re(s)) + #if (im < prec or method == 'borwein') and not derivative: + # try: + # if verbose: + # print "zeta: Attempting to use the Borwein algorithm" + # return ctx._zeta(s, **kwargs) + # except NotImplementedError: + # if verbose: + # print "zeta: Could not use the Borwein algorithm" + # pass + if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \ + method == 'riemann-siegel': + try: # py2.4 compatible try block + try: + if verbose: + print("zeta: Attempting to use the Riemann-Siegel algorithm") + return ctx.rs_zeta(s, derivative, **kwargs) + except NotImplementedError: + if verbose: + print("zeta: Could not use the Riemann-Siegel algorithm") + pass + finally: + ctx.prec = prec + if s == 1: + return ctx.inf + abss = abs(s) + if abss == ctx.inf: + if ctx.re(s) == ctx.inf: + if d == 0: + return ctx.one + return ctx.zero + return s*0 + elif ctx.isnan(abss): + return 1/s + if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative: + return ctx.one + ctx.power(2, -s) + return +ctx._hurwitz(s, a, d, **kwargs) + +@defun +def _hurwitz(ctx, s, a=1, d=0, **kwargs): + prec = ctx.prec + verbose = kwargs.get('verbose') + try: + extraprec = 10 + ctx.prec += extraprec + # We strongly want to special-case rational a + a, atype = ctx._convert_param(a) + if ctx.re(s) < 0: + if verbose: + print("zeta: Attempting reflection formula") + try: + return _hurwitz_reflection(ctx, s, a, d, atype) + except NotImplementedError: + pass + if verbose: + print("zeta: Reflection formula failed") + if verbose: + print("zeta: Using the Euler-Maclaurin algorithm") + while 1: + ctx.prec = prec + extraprec + T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose) + cancellation = ctx.mag(T1) - ctx.mag(T1+T2) + if verbose: + print("Term 1:", T1) + print("Term 2:", T2) + print("Cancellation:", cancellation, "bits") + if cancellation < extraprec: + return T1 + T2 + else: + extraprec = max(2*extraprec, min(cancellation + 5, 100*prec)) + if extraprec > kwargs.get('maxprec', 100*prec): + raise ctx.NoConvergence("zeta: too much cancellation") + finally: + ctx.prec = prec + +def _hurwitz_reflection(ctx, s, a, d, atype): + # TODO: implement for derivatives + if d != 0: + raise NotImplementedError + res = ctx.re(s) + negs = -s + # Integer reflection formula + if ctx.isnpint(s): + n = int(res) + if n <= 0: + return ctx.bernpoly(1-n, a) / (n-1) + if not (atype == 'Q' or atype == 'Z'): + raise NotImplementedError + t = 1-s + # We now require a to be standardized + v = 0 + shift = 0 + b = a + while ctx.re(b) > 1: + b -= 1 + v -= b**negs + shift -= 1 + while ctx.re(b) <= 0: + v += b**negs + b += 1 + shift += 1 + # Rational reflection formula + try: + p, q = a._mpq_ + except: + assert a == int(a) + p = int(a) + q = 1 + p += shift*q + assert 1 <= p <= q + g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \ + for k in range(1,q+1)) + g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t + v += g + return v + +def _hurwitz_em(ctx, s, a, d, prec, verbose): + # May not be converted at this point + a = ctx.convert(a) + tol = -prec + # Estimate number of terms for Euler-Maclaurin summation; could be improved + M1 = 0 + M2 = prec // 3 + N = M2 + lsum = 0 + # This speeds up the recurrence for derivatives + if ctx.isint(s): + s = int(ctx._re(s)) + s1 = s-1 + 
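+ # Euler-Maclaurin summation: lsum accumulates the truncated sum over n < M2, tailsum + # the integral and boundary terms plus the Bernoulli-number corrections; M2 is + # doubled until the correction terms become negligible.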
while 1: + # Truncated L-series + l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0] + #if d: + # l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2)) + #else: + # l = ctx.fsum((n+a)**negs for n in range(M1,M2)) + lsum += l + M2a = M2+a + logM2a = ctx.ln(M2a) + logM2ad = logM2a**d + logs = [logM2ad] + logr = 1/logM2a + rM2a = 1/M2a + M2as = M2a**(-s) + if d: + tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1) + else: + tailsum = 1/((s1)*(M2a)**s1) + tailsum += 0.5 * logM2ad * M2as + U = [1] + r = M2as + fact = 2 + for j in range(1, N+1): + # TODO: the following could perhaps be tidied a bit + j2 = 2*j + if j == 1: + upds = [1] + else: + upds = [j2-2, j2-1] + for m in upds: + D = min(m,d+1) + if m <= d: + logs.append(logs[-1] * logr) + Un = [0]*(D+1) + for i in xrange(D): Un[i] = (1-m-s)*U[i] + for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1] + U = Un + r *= rM2a + t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact) + tailsum += t + if ctx.mag(t) < tol: + return lsum, (-1)**d * tailsum + fact *= (j2+1)*(j2+2) + if verbose: + print("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol) + M1, M2 = M2, M2*2 + if ctx.re(s) < 0: + N += N//2 + + + +@defun +def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False): + """ + Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where + + xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s ) + ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) ) + + D^k = kth derivative with respect to s, k ranges over the given list of + derivatives (which should consist of either a single element + or a range 0,1,...r). If reflect=False, the ydks are not computed. + """ + #print "zetasum", s, a, n + # don't use the fixed-point code if there are large exponentials + if abs(ctx.re(s)) < 0.5 * ctx.prec: + try: + return ctx._zetasum_fast(s, a, n, derivatives, reflect) + except NotImplementedError: + pass + negs = ctx.fneg(s, exact=True) + have_derivatives = derivatives != [0] + have_one_derivative = len(derivatives) == 1 + if not reflect: + if not have_derivatives: + return [ctx.fsum((a+k)**negs for k in xrange(n+1))], [] + if have_one_derivative: + d = derivatives[0] + x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1)) + return [(-1)**d * x], [] + maxd = max(derivatives) + if not have_one_derivative: + derivatives = range(maxd+1) + xs = [ctx.zero for d in derivatives] + if reflect: + ys = [ctx.zero for d in derivatives] + else: + ys = [] + for k in xrange(n+1): + w = a + k + xterm = w ** negs + if reflect: + yterm = ctx.conj(ctx.one / (w * xterm)) + if have_derivatives: + logw = -ctx.ln(w) + if have_one_derivative: + logw = logw ** maxd + xs[0] += xterm * logw + if reflect: + ys[0] += yterm * logw + else: + t = ctx.one + for d in derivatives: + xs[d] += xterm * t + if reflect: + ys[d] += yterm * t + t *= logw + else: + xs[0] += xterm + if reflect: + ys[0] += yterm + return xs, ys + +@defun +def dirichlet(ctx, s, chi=[1], derivative=0): + s = ctx.convert(s) + q = len(chi) + d = int(derivative) + if d > 2: + raise NotImplementedError("arbitrary order derivatives") + prec = ctx.prec + try: + ctx.prec += 10 + if s == 1: + have_pole = True + for x in chi: + if x and x != 1: + have_pole = False + h = +ctx.eps + ctx.prec *= 2*(d+1) + s += h + if have_pole: + return +ctx.inf + z = ctx.zero + for p in range(1,q+1): + if chi[p%q]: + if d == 1: + z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \ + ctx.zeta(s, (p,q))*ctx.log(q)) + else: + z += chi[p%q] * ctx.zeta(s, (p,q)) + z /= q**s + finally: + ctx.prec = prec + return +z + + +def 
secondzeta_main_term(ctx, s, a, **kwargs): + tol = ctx.eps + f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s) + totsum = term = ctx.zero + mg = ctx.inf + n = 0 + while mg > tol: + totsum += term + n += 1 + gamm = ctx.im(ctx.zetazero_memoized(n)) + term = f(n) + mg = abs(term) + err = 0 + if kwargs.get("error"): + sg = ctx.re(s) + err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\ + ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2)) + err = abs(err) + return +totsum, err, n + +def secondzeta_prime_term(ctx, s, a, **kwargs): + tol = ctx.eps + f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\ + ((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\ + (2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi)) + totsum = term = ctx.zero + mg = ctx.inf + n = 1 + while mg > tol or n < 9: + totsum += term + n += 1 + term = f(n) + if term == 0: + mg = ctx.inf + else: + mg = abs(term) + if kwargs.get("error"): + err = mg + return +totsum, err, n + +def secondzeta_exp_term(ctx, s, a): + if ctx.isint(s) and ctx.re(s) <= 0: + m = int(round(ctx.re(s))) + if not m & 1: + return ctx.mpf('-0.25')**(-m//2) + tol = ctx.eps + f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n)) + totsum = ctx.zero + term = f(0) + mg = ctx.inf + n = 0 + while mg > tol: + totsum += term + n += 1 + term = f(n) + mg = abs(term) + v = a**(0.5*s)*totsum/ctx.gamma(0.5*s) + return v + +def secondzeta_singular_term(ctx, s, a, **kwargs): + factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) + extraprec = ctx.mag(factor) + ctx.prec += extraprec + factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) + tol = ctx.eps + f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\ + ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n)) + totsum = ctx.zero + mg1 = ctx.inf + n = 1 + term = f(n) + mg2 = abs(term) + while mg2 > tol and mg2 <= mg1: + totsum += term + n += 1 + term = f(n) + totsum += term + n +=1 + term = f(n) + mg1 = mg2 + mg2 = abs(term) + totsum += term + pole = -2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1) + st = factor*(pole+totsum) + err = 0 + if kwargs.get("error"): + if not ((mg2 > tol) and (mg2 <= mg1)): + if mg2 <= tol: + err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10)) + if mg2 > mg1: + err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10)) + err = max(err, ctx.eps*1.) + ctx.prec -= extraprec + return +st, err + +@defun +def secondzeta(ctx, s, a = 0.015, **kwargs): + r""" + Evaluates the secondary zeta function `Z(s)`, defined for + `\mathrm{Re}(s)>1` by + + .. math :: + + Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s} + + where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with + imaginary part positive. + + `Z(s)` extends to a meromorphic function on `\mathbb{C}` with a + double pole at `s=1` and simple poles at the points `-2n` for + `n=0`, 1, 2, ... 
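+ + The defining series converges only for `\mathrm{Re}(s) > 1`; elsewhere `Z(s)` is + evaluated by combining the four auxiliary terms described in the implementation + notes below.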
+ + **Examples** + + >>> from mpmath import * + >>> mp.pretty = True; mp.dps = 15 + >>> secondzeta(2) + 0.023104993115419 + >>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s) + >>> Xi = lambda t: xi(0.5+t*j) + >>> chop(-0.5*diff(Xi,0,n=2)/Xi(0)) + 0.023104993115419 + + We may ask for an approximate error value:: + + >>> secondzeta(0.5+100j, error=True) + ((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16) + + The function has poles at the negative odd integers, + and dyadic rational values at the negative even integers:: + + >>> mp.dps = 30 + >>> secondzeta(-8) + -0.67236328125 + >>> secondzeta(-7) + +inf + + **Implementation notes** + + The function is computed as sum of four terms `Z(s)=A(s)-P(s)+E(s)-S(s)` + respectively main, prime, exponential and singular terms. + The main term `A(s)` is computed from the zeros of zeta. + The prime term depends on the von Mangoldt function. + The singular term is responsible for the poles of the function. + + The four terms depends on a small parameter `a`. We may change the + value of `a`. Theoretically this has no effect on the sum of the four + terms, but in practice may be important. + + A smaller value of the parameter `a` makes `A(s)` depend on + a smaller number of zeros of zeta, but `P(s)` uses more values of + von Mangoldt function. + + We may also add a verbose option to obtain data about the + values of the four terms. + + >>> mp.dps = 10 + >>> secondzeta(0.5 + 40j, error=True, verbose=True) + main term = (-30190318549.138656312556 - 13964804384.624622876523j) + computed using 19 zeros of zeta + prime term = (132717176.89212754625045 + 188980555.17563978290601j) + computed using 9 values of the von Mangoldt function + exponential term = (542447428666.07179812536 + 362434922978.80192435203j) + singular term = (512124392939.98154322355 + 348281138038.65531023921j) + ((0.059471043 + 0.3463514534j), 1.455191523e-11) + + >>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True) + main term = (-151962888.19606243907725 - 217930683.90210294051982j) + computed using 9 zeros of zeta + prime term = (2476659342.3038722372461 + 28711581821.921627163136j) + computed using 37 values of the von Mangoldt function + exponential term = (178506047114.7838188264 + 819674143244.45677330576j) + singular term = (175877424884.22441310708 + 790744630738.28669174871j) + ((0.059471043 + 0.3463514534j), 1.455191523e-11) + + Notice the great cancellation between the four terms. Changing `a`, the + four terms are very different numbers but the cancellation gives + the good value of Z(s). + + **References** + + A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier, + 53, (2003) 665--699. + + A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes + of the Unione Matematica Italiana, Springer, 2009. 
+ """ + s = ctx.convert(s) + a = ctx.convert(a) + tol = ctx.eps + if ctx.isint(s) and ctx.re(s) <= 1: + if abs(s-1) < tol*1000: + return ctx.inf + m = int(round(ctx.re(s))) + if m & 1: + return ctx.inf + else: + return ((-1)**(-m//2)*\ + ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3))) + prec = ctx.prec + try: + t3 = secondzeta_exp_term(ctx, s, a) + extraprec = max(ctx.mag(t3),0) + ctx.prec += extraprec + 3 + t1, r1, gt = secondzeta_main_term(ctx,s,a,error='True', verbose='True') + t2, r2, pt = secondzeta_prime_term(ctx,s,a,error='True', verbose='True') + t4, r4 = secondzeta_singular_term(ctx,s,a,error='True') + t3 = secondzeta_exp_term(ctx, s, a) + err = r1+r2+r4 + t = t1-t2+t3-t4 + if kwargs.get("verbose"): + print('main term =', t1) + print(' computed using', gt, 'zeros of zeta') + print('prime term =', t2) + print(' computed using', pt, 'values of the von Mangoldt function') + print('exponential term =', t3) + print('singular term =', t4) + finally: + ctx.prec = prec + if kwargs.get("error"): + w = max(ctx.mag(abs(t)),0) + err = max(err*2**w, ctx.eps*1.*2**w) + return +t, err + return +t + + +@defun_wrapped +def lerchphi(ctx, z, s, a): + r""" + Gives the Lerch transcendent, defined for `|z| < 1` and + `\Re{a} > 0` by + + .. math :: + + \Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s} + + and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}` + along with the integral representation valid for `\Re{a} > 0` + + .. math :: + + \Phi(z,s,a) = \frac{1}{2 a^s} + + \int_0^{\infty} \frac{z^t}{(a+t)^s} dt - + 2 \int_0^{\infty} \frac{\sin(t \log z - s + \operatorname{arctan}(t/a)}{(a^2 + t^2)^{s/2} + (e^{2 \pi t}-1)} dt. + + The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta` + (`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`). + + **Examples** + + Several evaluations in terms of simpler functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> lerchphi(-1,2,0.5); 4*catalan + 3.663862376708876060218414 + 3.663862376708876060218414 + >>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2) + 0.2131391994087528954617607 + 0.2131391994087528954617607 + >>> lerchphi(-4,1,1); log(5)/4 + 0.4023594781085250936501898 + 0.4023594781085250936501898 + >>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j) + (1.142423447120257137774002 + 0.2118232380980201350495795j) + (1.142423447120257137774002 + 0.2118232380980201350495795j) + + Evaluation works for complex arguments and `|z| \ge 1`:: + + >>> lerchphi(1+2j, 3-j, 4+2j) + (0.002025009957009908600539469 + 0.003327897536813558807438089j) + >>> lerchphi(-2,2,-2.5) + -12.28676272353094275265944 + >>> lerchphi(10,10,10) + (-4.462130727102185701817349e-11 - 1.575172198981096218823481e-12j) + >>> lerchphi(10,10,-10.5) + (112658784011940.5605789002 - 498113185.5756221777743631j) + + Some degenerate cases:: + + >>> lerchphi(0,1,2) + 0.5 + >>> lerchphi(0,1,-2) + -0.5 + + Reduction to simpler functions:: + + >>> lerchphi(1, 4.25+1j, 1) + (1.044674457556746668033975 - 0.04674508654012658932271226j) + >>> zeta(4.25+1j) + (1.044674457556746668033975 - 0.04674508654012658932271226j) + >>> lerchphi(1 - 0.5**10, 4.25+1j, 1) + (1.044629338021507546737197 - 0.04667768813963388181708101j) + >>> lerchphi(3, 4, 1) + (1.249503297023366545192592 - 0.2314252413375664776474462j) + >>> polylog(4, 3) / 3 + (1.249503297023366545192592 - 0.2314252413375664776474462j) + >>> lerchphi(3, 4, 1 - 0.5**10) + (1.253978063946663945672674 - 0.2316736622836535468765376j) + + **References** + + 1. 
[DLMF]_ section 25.14 + + """ + if z == 0: + return a ** (-s) + # Faster, but these cases are useful for testing right now + if z == 1: + return ctx.zeta(s, a) + if a == 1: + return ctx.polylog(s, z) / z + if ctx.re(a) < 1: + if ctx.isnpint(a): + raise ValueError("Lerch transcendent complex infinity") + m = int(ctx.ceil(1-ctx.re(a))) + v = ctx.zero + zpow = ctx.one + for n in xrange(m): + v += zpow / (a+n)**s + zpow *= z + return zpow * ctx.lerchphi(z,s, a+m) + v + g = ctx.ln(z) + v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a + h = s / 2 + r = 2*ctx.pi + f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \ + ((a**2+t**2)**h * ctx.expm1(r*t)) + v += 2*ctx.quad(f, [0, ctx.inf]) + if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1: + v = ctx.chop(v) + return v diff --git a/MLPY/Lib/site-packages/mpmath/functions/zetazeros.py b/MLPY/Lib/site-packages/mpmath/functions/zetazeros.py new file mode 100644 index 0000000000000000000000000000000000000000..37c11a29426b0114053ae61664541f7ae7de95d8 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/functions/zetazeros.py @@ -0,0 +1,1018 @@ +""" +The function zetazero(n) computes the n-th nontrivial zero of zeta(s). + +The general strategy is to locate a block of Gram intervals B where we +know exactly how many zeros it contains and which of them is the one +we seek. + +If n <= 400 000 000 we know exactly the Rosser exceptions, contained +in a list in this file. Hence for n<=400 000 000 we simply +look at this list of exceptions. If our zero is implicated in one of +these exceptions, we have our block B. Otherwise we simply locate +the good Rosser block containing our zero. + +For n > 400 000 000 we apply the method of Turing, as complemented by +Lehman, Brent and Trudgian, to find a suitable B. 
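+ +In both cases the zeros of the block B are then separated by counting sign +changes of the Z-function, and the selected zero is finally refined by +Newton iteration applied to zeta(s).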
+""" + +from .functions import defun, defun_wrapped + +def find_rosser_block_zero(ctx, n): + """for n<400 000 000 determines a block were one find our zero""" + for k in range(len(_ROSSER_EXCEPTIONS)//2): + a=_ROSSER_EXCEPTIONS[2*k][0] + b=_ROSSER_EXCEPTIONS[2*k][1] + if ((a<= n-2) and (n-1 <= b)): + t0 = ctx.grampoint(a) + t1 = ctx.grampoint(b) + v0 = ctx._fp.siegelz(t0) + v1 = ctx._fp.siegelz(t1) + my_zero_number = n-a-1 + zero_number_block = b-a + pattern = _ROSSER_EXCEPTIONS[2*k+1] + return (my_zero_number, [a,b], [t0,t1], [v0,v1]) + k = n-2 + t,v,b = compute_triple_tvb(ctx, k) + T = [t] + V = [v] + while b < 0: + k -= 1 + t,v,b = compute_triple_tvb(ctx, k) + T.insert(0,t) + V.insert(0,v) + my_zero_number = n-k-1 + m = n-1 + t,v,b = compute_triple_tvb(ctx, m) + T.append(t) + V.append(v) + while b < 0: + m += 1 + t,v,b = compute_triple_tvb(ctx, m) + T.append(t) + V.append(v) + return (my_zero_number, [k,m], T, V) + +def wpzeros(t): + """Precision needed to compute higher zeros""" + wp = 53 + if t > 3*10**8: + wp = 63 + if t > 10**11: + wp = 70 + if t > 10**14: + wp = 83 + return wp + +def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None, + fp_tolerance=None): + """Separate the zeros contained in the block T, limitloop + determines how long one must search""" + if limitloop is None: + limitloop = ctx.inf + loopnumber = 0 + variations = count_variations(V) + while ((variations < zero_number_block) and (loopnumber 0): + alpha = ctx.sqrt(u/v) + b= (alpha*a+b2)/(alpha+1) + else: + b = (a+b2)/2 + if fp_tolerance < 10: + w = ctx._fp.siegelz(b) + if abs(w)ITERATION_LIMIT)and(loopnumber>2)and(variations+2==zero_number_block): + dtMax=0 + dtSec=0 + kMax = 0 + for k1 in range(1,len(T)): + dt = T[k1]-T[k1-1] + if dt > dtMax: + kMax=k1 + dtSec = dtMax + dtMax = dt + elif (dtdtSec): + dtSec = dt + if dtMax>3*dtSec: + f = lambda x: ctx.rs_z(x,derivative=1) + t0=T[kMax-1] + t1 = T[kMax] + t=ctx.findroot(f, (t0,t1), solver ='illinois',verify=False, verbose=False) + v = ctx.siegelz(t) + if (t0 2*wpz: + index +=1 + precs = [precs[0] // 2 +3+2*index] + precs + ctx.prec = precs[0] + guard + r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False) + #print "first step at", ctx.dps, "digits" + z=ctx.mpc(0.5,r) + for prec in precs[1:]: + ctx.prec = prec + guard + #print "refining to", ctx.dps, "digits" + znew = z - ctx.zeta(z) / ctx.zeta(z, derivative=1) + #print "difference", ctx.nstr(abs(z-znew)) + z=ctx.mpc(0.5,ctx.im(znew)) + return ctx.im(z) + +def sure_number_block(ctx, n): + """The number of good Rosser blocks needed to apply + Turing method + References: + R. P. Brent, On the Zeros of the Riemann Zeta Function + in the Critical Strip, Math. Comp. 33 (1979) 1361--1372 + T. Trudgian, Improvements to Turing Method, Math. 
Comp.""" + if n < 9*10**5: + return(2) + g = ctx.grampoint(n-100) + lg = ctx._fp.ln(g) + brent = 0.0061 * lg**2 +0.08*lg + trudgian = 0.0031 * lg**2 +0.11*lg + N = ctx.ceil(min(brent,trudgian)) + N = int(N) + return N + +def compute_triple_tvb(ctx, n): + t = ctx.grampoint(n) + v = ctx._fp.siegelz(t) + if ctx.mag(abs(v))400 000 000""" + sb = sure_number_block(ctx, n) + number_goodblocks = 0 + m2 = n-1 + t, v, b = compute_triple_tvb(ctx, m2) + Tf = [t] + Vf = [v] + while b < 0: + m2 += 1 + t,v,b = compute_triple_tvb(ctx, m2) + Tf.append(t) + Vf.append(v) + goodpoints = [m2] + T = [t] + V = [v] + while number_goodblocks < 2*sb: + m2 += 1 + t, v, b = compute_triple_tvb(ctx, m2) + T.append(t) + V.append(v) + while b < 0: + m2 += 1 + t,v,b = compute_triple_tvb(ctx, m2) + T.append(t) + V.append(v) + goodpoints.append(m2) + zn = len(T)-1 + A, B, separated =\ + separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, + fp_tolerance=fp_tolerance) + Tf.pop() + Tf.extend(A) + Vf.pop() + Vf.extend(B) + if separated: + number_goodblocks += 1 + else: + number_goodblocks = 0 + T = [t] + V = [v] + # Now the same procedure to the left + number_goodblocks = 0 + m2 = n-2 + t, v, b = compute_triple_tvb(ctx, m2) + Tf.insert(0,t) + Vf.insert(0,v) + while b < 0: + m2 -= 1 + t,v,b = compute_triple_tvb(ctx, m2) + Tf.insert(0,t) + Vf.insert(0,v) + goodpoints.insert(0,m2) + T = [t] + V = [v] + while number_goodblocks < 2*sb: + m2 -= 1 + t, v, b = compute_triple_tvb(ctx, m2) + T.insert(0,t) + V.insert(0,v) + while b < 0: + m2 -= 1 + t,v,b = compute_triple_tvb(ctx, m2) + T.insert(0,t) + V.insert(0,v) + goodpoints.insert(0,m2) + zn = len(T)-1 + A, B, separated =\ + separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) + A.pop() + Tf = A+Tf + B.pop() + Vf = B+Vf + if separated: + number_goodblocks += 1 + else: + number_goodblocks = 0 + T = [t] + V = [v] + r = goodpoints[2*sb] + lg = len(goodpoints) + s = goodpoints[lg-2*sb-1] + tr, vr, br = compute_triple_tvb(ctx, r) + ar = Tf.index(tr) + ts, vs, bs = compute_triple_tvb(ctx, s) + as1 = Tf.index(ts) + T = Tf[ar:as1+1] + V = Vf[ar:as1+1] + zn = s-r + A, B, separated =\ + separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) + if separated: + return (n-r-1,[r,s],A,B) + q = goodpoints[sb] + lg = len(goodpoints) + t = goodpoints[lg-sb-1] + tq, vq, bq = compute_triple_tvb(ctx, q) + aq = Tf.index(tq) + tt, vt, bt = compute_triple_tvb(ctx, t) + at = Tf.index(tt) + T = Tf[aq:at+1] + V = Vf[aq:at+1] + return (n-q-1,[q,t],T,V) + +def count_variations(V): + count = 0 + vold = V[0] + for n in range(1, len(V)): + vnew = V[n] + if vold*vnew < 0: + count +=1 + vold = vnew + return count + +def pattern_construct(ctx, block, T, V): + pattern = '(' + a = block[0] + b = block[1] + t0,v0,b0 = compute_triple_tvb(ctx, a) + k = 0 + k0 = 0 + for n in range(a+1,b+1): + t1,v1,b1 = compute_triple_tvb(ctx, n) + lgT =len(T) + while (k < lgT) and (T[k] <= t1): + k += 1 + L = V[k0:k] + L.append(v1) + L.insert(0,v0) + count = count_variations(L) + pattern = pattern + ("%s" % count) + if b1 > 0: + pattern = pattern + ')(' + k0 = k + t0,v0,b0 = t1,v1,b1 + pattern = pattern[:-1] + return pattern + +@defun +def zetazero(ctx, n, info=False, round=True): + r""" + Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line, + i.e. returns an approximation of the `n`-th largest complex number + `s = \frac{1}{2} + ti` for which `\zeta(s) = 0`. 
Equivalently, the + imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`). + + **Examples** + + The first few zeros:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> zetazero(1) + (0.5 + 14.13472514173469379045725j) + >>> zetazero(2) + (0.5 + 21.02203963877155499262848j) + >>> zetazero(20) + (0.5 + 77.14484006887480537268266j) + + Verifying that the values are zeros:: + + >>> for n in range(1,5): + ... s = zetazero(n) + ... chop(zeta(s)), chop(siegelz(s.imag)) + ... + (0.0, 0.0) + (0.0, 0.0) + (0.0, 0.0) + (0.0, 0.0) + + Negative indices give the conjugate zeros (`n = 0` is undefined):: + + >>> zetazero(-1) + (0.5 - 14.13472514173469379045725j) + + :func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision:: + + >>> mp.dps = 15 + >>> zetazero(1234567) + (0.5 + 727690.906948208j) + >>> mp.dps = 50 + >>> zetazero(1234567) + (0.5 + 727690.9069482075392389420041147142092708393819935j) + >>> chop(zeta(_)/_) + 0.0 + + With *info=True*, :func:`~mpmath.zetazero` gives additional information:: + + >>> mp.dps = 15 + >>> zetazero(542964976,info=True) + ((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)') + + This means that the zero is between Gram points 542964969 and 542964978; + it is the 6-th zero between them. Finally (013111110) is the pattern + of zeros in this interval. The numbers indicate the number of zeros + in each Gram interval (Rosser blocks between parentheses). In this case + there is only one Rosser block of length nine. + """ + n = int(n) + if n < 0: + return ctx.zetazero(-n).conjugate() + if n == 0: + raise ValueError("n must be nonzero") + wpinitial = ctx.prec + try: + wpz, fp_tolerance = comp_fp_tolerance(ctx, n) + ctx.prec = wpz + if n < 400000000: + my_zero_number, block, T, V =\ + find_rosser_block_zero(ctx, n) + else: + my_zero_number, block, T, V =\ + search_supergood_block(ctx, n, fp_tolerance) + zero_number_block = block[1]-block[0] + T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V, + limitloop=ctx.inf, fp_tolerance=fp_tolerance) + if info: + pattern = pattern_construct(ctx,block,T,V) + prec = max(wpinitial, wpz) + t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec) + v = ctx.mpc(0.5,t) + finally: + ctx.prec = wpinitial + if round: + v = +v + if info: + return (v,block,my_zero_number,pattern) + else: + return v + +def gram_index(ctx, t): + if t > 10**13: + wp = 3*ctx.log(t, 10) + else: + wp = 0 + prec = ctx.prec + try: + ctx.prec += wp + h = int(ctx.siegeltheta(t)/ctx.pi) + finally: + ctx.prec = prec + return(h) + +def count_to(ctx, t, T, V): + count = 0 + vold = V[0] + told = T[0] + tnew = T[1] + k = 1 + while tnew < t: + vnew = V[k] + if vold*vnew < 0: + count += 1 + vold = vnew + k += 1 + tnew = T[k] + a = ctx.siegelz(t) + if a*vold < 0: + count += 1 + return count + +def comp_fp_tolerance(ctx, n): + wpz = wpzeros(n*ctx.log(n)) + if n < 15*10**8: + fp_tolerance = 0.0005 + elif n <= 10**14: + fp_tolerance = 0.1 + else: + fp_tolerance = 100 + return wpz, fp_tolerance + +@defun +def nzeros(ctx, t): + r""" + Computes the number of zeros of the Riemann zeta function in + `(0,1) \times (0,t]`, usually denoted by `N(t)`. 
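+ + For large `t`, `N(t)` is well approximated by the Riemann-von Mangoldt formula + `N(t) \approx \frac{t}{2\pi}\log\frac{t}{2\pi} - \frac{t}{2\pi} + \frac{7}{8}`.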
+ + **Examples** + + The first zero has imaginary part between 14 and 15:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nzeros(14) + 0 + >>> nzeros(15) + 1 + >>> zetazero(1) + (0.5 + 14.1347251417347j) + + Some closely spaced zeros:: + + >>> nzeros(10**7) + 21136125 + >>> zetazero(21136125) + (0.5 + 9999999.32718175j) + >>> zetazero(21136126) + (0.5 + 10000000.2400236j) + >>> nzeros(545439823.215) + 1500000001 + >>> zetazero(1500000001) + (0.5 + 545439823.201985j) + >>> zetazero(1500000002) + (0.5 + 545439823.325697j) + + This confirms the data given by J. van de Lune, + H. J. J. te Riele and D. T. Winter in 1986. + """ + if t < 14.1347251417347: + return 0 + x = gram_index(ctx, t) + k = int(ctx.floor(x)) + wpinitial = ctx.prec + wpz, fp_tolerance = comp_fp_tolerance(ctx, k) + ctx.prec = wpz + a = ctx.siegelz(t) + if k == -1 and a < 0: + return 0 + elif k == -1 and a > 0: + return 1 + if k+2 < 400000000: + Rblock = find_rosser_block_zero(ctx, k+2) + else: + Rblock = search_supergood_block(ctx, k+2, fp_tolerance) + n1, n2 = Rblock[1] + if n2-n1 == 1: + b = Rblock[3][0] + if a*b > 0: + ctx.prec = wpinitial + return k+1 + else: + ctx.prec = wpinitial + return k+2 + my_zero_number,block, T, V = Rblock + zero_number_block = n2-n1 + T, V, separated = separate_zeros_in_block(ctx,\ + zero_number_block, T, V,\ + limitloop=ctx.inf,\ + fp_tolerance=fp_tolerance) + n = count_to(ctx, t, T, V) + ctx.prec = wpinitial + return n+n1+1 + +@defun_wrapped +def backlunds(ctx, t): + r""" + Computes the function + `S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`. + + See Titchmarsh Section 9.3 for details of the definition. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> backlunds(217.3) + 0.16302205431184 + + Generally, the value is a small number. At Gram points it is an integer, + frequently equal to 0:: + + >>> chop(backlunds(grampoint(200))) + 0.0 + >>> backlunds(extraprec(10)(grampoint)(211)) + 1.0 + >>> backlunds(extraprec(10)(grampoint)(232)) + -1.0 + + The number of zeros of the Riemann zeta function up to height `t` + satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:`nzeros` and + :func:`siegeltheta`):: + + >>> t = 1234.55 + >>> nzeros(t) + 842 + >>> siegeltheta(t)/pi+1+backlunds(t) + 842.0 + + """ + return ctx.nzeros(t)-1-ctx.siegeltheta(t)/ctx.pi + + +""" +_ROSSER_EXCEPTIONS is a list of all exceptions to +Rosser's rule for n <= 400 000 000. + +The entries alternate between an index pair [n,m] and a string. +The string is the zero pattern of the block and the relevant +adjacent Gram intervals. For example (010)3 corresponds to a block +composed of three Gram intervals, the first and third without +a zero and the intermediate one with a zero. The next Gram interval +contains three zeros, so in total we have 4 zeros in 4 Gram +intervals. n and m are the indices of the Gram points of this +interval of four Gram intervals. The Rosser exception is therefore +formed by the three Gram intervals that are signaled between +parentheses. + +We have also included some Rosser exceptions beyond n=400 000 000 +that are noted in the literature for various reasons. + +The list is composed from the data published in the references: + +R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter, +'On the Zeros of the Riemann Zeta Function in the Critical Strip. II', +Math. Comp. 39 (1982) 681--688. +See also Corrigenda in Math. Comp. 46 (1986) 771. + +J. van de Lune, H. J. J. te Riele, +'On the Zeros of the Riemann Zeta Function in the Critical Strip. III', +Math. Comp. 
41 (1983) 759--767. +See also Corrigenda in Math. Comp. 46 (1986) 771. + +J. van de Lune, +'Sums of Equal Powers of Positive Integers', +Dissertation, +Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica, +Amsterdam, 1984. + +Thanks to the authors of all these papers, and to the others who have +contributed to make this possible. +""" + + + + + + + +_ROSSER_EXCEPTIONS = \ +[[13999525, 13999528], '(00)3', +[30783329, 30783332], '(00)3', +[30930926, 30930929], '3(00)', +[37592215, 37592218], '(00)3', +[40870156, 40870159], '(00)3', +[43628107, 43628110], '(00)3', +[46082042, 46082045], '(00)3', +[46875667, 46875670], '(00)3', +[49624540, 49624543], '3(00)', +[50799238, 50799241], '(00)3', +[55221453, 55221456], '3(00)', +[56948779, 56948782], '3(00)', +[60515663, 60515666], '(00)3', +[61331766, 61331770], '(00)40', +[69784843, 69784846], '3(00)', +[75052114, 75052117], '(00)3', +[79545240, 79545243], '3(00)', +[79652247, 79652250], '3(00)', +[83088043, 83088046], '(00)3', +[83689522, 83689525], '3(00)', +[85348958, 85348961], '(00)3', +[86513820, 86513823], '(00)3', +[87947596, 87947599], '3(00)', +[88600095, 88600098], '(00)3', +[93681183, 93681186], '(00)3', +[100316551, 100316554], '3(00)', +[100788444, 100788447], '(00)3', +[106236172, 106236175], '(00)3', +[106941327, 106941330], '3(00)', +[107287955, 107287958], '(00)3', +[107532016, 107532019], '3(00)', +[110571044, 110571047], '(00)3', +[111885253, 111885256], '3(00)', +[113239783, 113239786], '(00)3', +[120159903, 120159906], '(00)3', +[121424391, 121424394], '3(00)', +[121692931, 121692934], '3(00)', +[121934170, 121934173], '3(00)', +[122612848, 122612851], '3(00)', +[126116567, 126116570], '(00)3', +[127936513, 127936516], '(00)3', +[128710277, 128710280], '3(00)', +[129398902, 129398905], '3(00)', +[130461096, 130461099], '3(00)', +[131331947, 131331950], '3(00)', +[137334071, 137334074], '3(00)', +[137832603, 137832606], '(00)3', +[138799471, 138799474], '3(00)', +[139027791, 139027794], '(00)3', +[141617806, 141617809], '(00)3', +[144454931, 144454934], '(00)3', +[145402379, 145402382], '3(00)', +[146130245, 146130248], '3(00)', +[147059770, 147059773], '(00)3', +[147896099, 147896102], '3(00)', +[151097113, 151097116], '(00)3', +[152539438, 152539441], '(00)3', +[152863168, 152863171], '3(00)', +[153522726, 153522729], '3(00)', +[155171524, 155171527], '3(00)', +[155366607, 155366610], '(00)3', +[157260686, 157260689], '3(00)', +[157269224, 157269227], '(00)3', +[157755123, 157755126], '(00)3', +[158298484, 158298487], '3(00)', +[160369050, 160369053], '3(00)', +[162962787, 162962790], '(00)3', +[163724709, 163724712], '(00)3', +[164198113, 164198116], '3(00)', +[164689301, 164689305], '(00)40', +[164880228, 164880231], '3(00)', +[166201932, 166201935], '(00)3', +[168573836, 168573839], '(00)3', +[169750763, 169750766], '(00)3', +[170375507, 170375510], '(00)3', +[170704879, 170704882], '3(00)', +[172000992, 172000995], '3(00)', +[173289941, 173289944], '(00)3', +[173737613, 173737616], '3(00)', +[174102513, 174102516], '(00)3', +[174284990, 174284993], '(00)3', +[174500513, 174500516], '(00)3', +[175710609, 175710612], '(00)3', +[176870843, 176870846], '3(00)', +[177332732, 177332735], '3(00)', +[177902861, 177902864], '3(00)', +[179979095, 179979098], '(00)3', +[181233726, 181233729], '3(00)', +[181625435, 181625438], '(00)3', +[182105255, 182105259], '22(00)', +[182223559, 182223562], '3(00)', +[191116404, 191116407], '3(00)', +[191165599, 191165602], '3(00)', +[191297535, 191297539], '(00)22', +[192485616, 
192485619], '(00)3', +[193264634, 193264638], '22(00)', +[194696968, 194696971], '(00)3', +[195876805, 195876808], '(00)3', +[195916548, 195916551], '3(00)', +[196395160, 196395163], '3(00)', +[196676303, 196676306], '(00)3', +[197889882, 197889885], '3(00)', +[198014122, 198014125], '(00)3', +[199235289, 199235292], '(00)3', +[201007375, 201007378], '(00)3', +[201030605, 201030608], '3(00)', +[201184290, 201184293], '3(00)', +[201685414, 201685418], '(00)22', +[202762875, 202762878], '3(00)', +[202860957, 202860960], '3(00)', +[203832577, 203832580], '3(00)', +[205880544, 205880547], '(00)3', +[206357111, 206357114], '(00)3', +[207159767, 207159770], '3(00)', +[207167343, 207167346], '3(00)', +[207482539, 207482543], '3(010)', +[207669540, 207669543], '3(00)', +[208053426, 208053429], '(00)3', +[208110027, 208110030], '3(00)', +[209513826, 209513829], '3(00)', +[212623522, 212623525], '(00)3', +[213841715, 213841718], '(00)3', +[214012333, 214012336], '(00)3', +[214073567, 214073570], '(00)3', +[215170600, 215170603], '3(00)', +[215881039, 215881042], '3(00)', +[216274604, 216274607], '3(00)', +[216957120, 216957123], '3(00)', +[217323208, 217323211], '(00)3', +[218799264, 218799267], '(00)3', +[218803557, 218803560], '3(00)', +[219735146, 219735149], '(00)3', +[219830062, 219830065], '3(00)', +[219897904, 219897907], '(00)3', +[221205545, 221205548], '(00)3', +[223601929, 223601932], '(00)3', +[223907076, 223907079], '3(00)', +[223970397, 223970400], '(00)3', +[224874044, 224874048], '22(00)', +[225291157, 225291160], '(00)3', +[227481734, 227481737], '(00)3', +[228006442, 228006445], '3(00)', +[228357900, 228357903], '(00)3', +[228386399, 228386402], '(00)3', +[228907446, 228907449], '(00)3', +[228984552, 228984555], '3(00)', +[229140285, 229140288], '3(00)', +[231810024, 231810027], '(00)3', +[232838062, 232838065], '3(00)', +[234389088, 234389091], '3(00)', +[235588194, 235588197], '(00)3', +[236645695, 236645698], '(00)3', +[236962876, 236962879], '3(00)', +[237516723, 237516727], '04(00)', +[240004911, 240004914], '(00)3', +[240221306, 240221309], '3(00)', +[241389213, 241389217], '(010)3', +[241549003, 241549006], '(00)3', +[241729717, 241729720], '(00)3', +[241743684, 241743687], '3(00)', +[243780200, 243780203], '3(00)', +[243801317, 243801320], '(00)3', +[244122072, 244122075], '(00)3', +[244691224, 244691227], '3(00)', +[244841577, 244841580], '(00)3', +[245813461, 245813464], '(00)3', +[246299475, 246299478], '(00)3', +[246450176, 246450179], '3(00)', +[249069349, 249069352], '(00)3', +[250076378, 250076381], '(00)3', +[252442157, 252442160], '3(00)', +[252904231, 252904234], '3(00)', +[255145220, 255145223], '(00)3', +[255285971, 255285974], '3(00)', +[256713230, 256713233], '(00)3', +[257992082, 257992085], '(00)3', +[258447955, 258447959], '22(00)', +[259298045, 259298048], '3(00)', +[262141503, 262141506], '(00)3', +[263681743, 263681746], '3(00)', +[266527881, 266527885], '(010)3', +[266617122, 266617125], '(00)3', +[266628044, 266628047], '3(00)', +[267305763, 267305766], '(00)3', +[267388404, 267388407], '3(00)', +[267441672, 267441675], '3(00)', +[267464886, 267464889], '(00)3', +[267554907, 267554910], '3(00)', +[269787480, 269787483], '(00)3', +[270881434, 270881437], '(00)3', +[270997583, 270997586], '3(00)', +[272096378, 272096381], '3(00)', +[272583009, 272583012], '(00)3', +[274190881, 274190884], '3(00)', +[274268747, 274268750], '(00)3', +[275297429, 275297432], '3(00)', +[275545476, 275545479], '3(00)', +[275898479, 275898482], '3(00)', +[275953000, 
275953003], '(00)3', +[277117197, 277117201], '(00)22', +[277447310, 277447313], '3(00)', +[279059657, 279059660], '3(00)', +[279259144, 279259147], '3(00)', +[279513636, 279513639], '3(00)', +[279849069, 279849072], '3(00)', +[280291419, 280291422], '(00)3', +[281449425, 281449428], '3(00)', +[281507953, 281507956], '3(00)', +[281825600, 281825603], '(00)3', +[282547093, 282547096], '3(00)', +[283120963, 283120966], '3(00)', +[283323493, 283323496], '(00)3', +[284764535, 284764538], '3(00)', +[286172639, 286172642], '3(00)', +[286688824, 286688827], '(00)3', +[287222172, 287222175], '3(00)', +[287235534, 287235537], '3(00)', +[287304861, 287304864], '3(00)', +[287433571, 287433574], '(00)3', +[287823551, 287823554], '(00)3', +[287872422, 287872425], '3(00)', +[288766615, 288766618], '3(00)', +[290122963, 290122966], '3(00)', +[290450849, 290450853], '(00)22', +[291426141, 291426144], '3(00)', +[292810353, 292810356], '3(00)', +[293109861, 293109864], '3(00)', +[293398054, 293398057], '3(00)', +[294134426, 294134429], '3(00)', +[294216438, 294216441], '(00)3', +[295367141, 295367144], '3(00)', +[297834111, 297834114], '3(00)', +[299099969, 299099972], '3(00)', +[300746958, 300746961], '3(00)', +[301097423, 301097426], '(00)3', +[301834209, 301834212], '(00)3', +[302554791, 302554794], '(00)3', +[303497445, 303497448], '3(00)', +[304165344, 304165347], '3(00)', +[304790218, 304790222], '3(010)', +[305302352, 305302355], '(00)3', +[306785996, 306785999], '3(00)', +[307051443, 307051446], '3(00)', +[307481539, 307481542], '3(00)', +[308605569, 308605572], '3(00)', +[309237610, 309237613], '3(00)', +[310509287, 310509290], '(00)3', +[310554057, 310554060], '3(00)', +[310646345, 310646348], '3(00)', +[311274896, 311274899], '(00)3', +[311894272, 311894275], '3(00)', +[312269470, 312269473], '(00)3', +[312306601, 312306605], '(00)40', +[312683193, 312683196], '3(00)', +[314499804, 314499807], '3(00)', +[314636802, 314636805], '(00)3', +[314689897, 314689900], '3(00)', +[314721319, 314721322], '3(00)', +[316132890, 316132893], '3(00)', +[316217470, 316217474], '(010)3', +[316465705, 316465708], '3(00)', +[316542790, 316542793], '(00)3', +[320822347, 320822350], '3(00)', +[321733242, 321733245], '3(00)', +[324413970, 324413973], '(00)3', +[325950140, 325950143], '(00)3', +[326675884, 326675887], '(00)3', +[326704208, 326704211], '3(00)', +[327596247, 327596250], '3(00)', +[328123172, 328123175], '3(00)', +[328182212, 328182215], '(00)3', +[328257498, 328257501], '3(00)', +[328315836, 328315839], '(00)3', +[328800974, 328800977], '(00)3', +[328998509, 328998512], '3(00)', +[329725370, 329725373], '(00)3', +[332080601, 332080604], '(00)3', +[332221246, 332221249], '(00)3', +[332299899, 332299902], '(00)3', +[332532822, 332532825], '(00)3', +[333334544, 333334548], '(00)22', +[333881266, 333881269], '3(00)', +[334703267, 334703270], '3(00)', +[334875138, 334875141], '3(00)', +[336531451, 336531454], '3(00)', +[336825907, 336825910], '(00)3', +[336993167, 336993170], '(00)3', +[337493998, 337494001], '3(00)', +[337861034, 337861037], '3(00)', +[337899191, 337899194], '(00)3', +[337958123, 337958126], '(00)3', +[342331982, 342331985], '3(00)', +[342676068, 342676071], '3(00)', +[347063781, 347063784], '3(00)', +[347697348, 347697351], '3(00)', +[347954319, 347954322], '3(00)', +[348162775, 348162778], '3(00)', +[349210702, 349210705], '(00)3', +[349212913, 349212916], '3(00)', +[349248650, 349248653], '(00)3', +[349913500, 349913503], '3(00)', +[350891529, 350891532], '3(00)', +[351089323, 351089326], 
'3(00)', +[351826158, 351826161], '3(00)', +[352228580, 352228583], '(00)3', +[352376244, 352376247], '3(00)', +[352853758, 352853761], '(00)3', +[355110439, 355110442], '(00)3', +[355808090, 355808094], '(00)40', +[355941556, 355941559], '3(00)', +[356360231, 356360234], '(00)3', +[356586657, 356586660], '3(00)', +[356892926, 356892929], '(00)3', +[356908232, 356908235], '3(00)', +[357912730, 357912733], '3(00)', +[358120344, 358120347], '3(00)', +[359044096, 359044099], '(00)3', +[360819357, 360819360], '3(00)', +[361399662, 361399666], '(010)3', +[362361315, 362361318], '(00)3', +[363610112, 363610115], '(00)3', +[363964804, 363964807], '3(00)', +[364527375, 364527378], '(00)3', +[365090327, 365090330], '(00)3', +[365414539, 365414542], '3(00)', +[366738474, 366738477], '3(00)', +[368714778, 368714783], '04(010)', +[368831545, 368831548], '(00)3', +[368902387, 368902390], '(00)3', +[370109769, 370109772], '3(00)', +[370963333, 370963336], '3(00)', +[372541136, 372541140], '3(010)', +[372681562, 372681565], '(00)3', +[373009410, 373009413], '(00)3', +[373458970, 373458973], '3(00)', +[375648658, 375648661], '3(00)', +[376834728, 376834731], '3(00)', +[377119945, 377119948], '(00)3', +[377335703, 377335706], '(00)3', +[378091745, 378091748], '3(00)', +[379139522, 379139525], '3(00)', +[380279160, 380279163], '(00)3', +[380619442, 380619445], '3(00)', +[381244231, 381244234], '3(00)', +[382327446, 382327450], '(010)3', +[382357073, 382357076], '3(00)', +[383545479, 383545482], '3(00)', +[384363766, 384363769], '(00)3', +[384401786, 384401790], '22(00)', +[385198212, 385198215], '3(00)', +[385824476, 385824479], '(00)3', +[385908194, 385908197], '3(00)', +[386946806, 386946809], '3(00)', +[387592175, 387592179], '22(00)', +[388329293, 388329296], '(00)3', +[388679566, 388679569], '3(00)', +[388832142, 388832145], '3(00)', +[390087103, 390087106], '(00)3', +[390190926, 390190930], '(00)22', +[390331207, 390331210], '3(00)', +[391674495, 391674498], '3(00)', +[391937831, 391937834], '3(00)', +[391951632, 391951636], '(00)22', +[392963986, 392963989], '(00)3', +[393007921, 393007924], '3(00)', +[393373210, 393373213], '3(00)', +[393759572, 393759575], '(00)3', +[394036662, 394036665], '(00)3', +[395813866, 395813869], '(00)3', +[395956690, 395956693], '3(00)', +[396031670, 396031673], '3(00)', +[397076433, 397076436], '3(00)', +[397470601, 397470604], '3(00)', +[398289458, 398289461], '3(00)', +# +[368714778, 368714783], '04(010)', +[437953499, 437953504], '04(010)', +[526196233, 526196238], '032(00)', +[744719566, 744719571], '(010)40', +[750375857, 750375862], '032(00)', +[958241932, 958241937], '04(010)', +[983377342, 983377347], '(00)410', +[1003780080, 1003780085], '04(010)', +[1070232754, 1070232759], '(00)230', +[1209834865, 1209834870], '032(00)', +[1257209100, 1257209105], '(00)410', +[1368002233, 1368002238], '(00)230' +] diff --git a/MLPY/Lib/site-packages/mpmath/identification.py b/MLPY/Lib/site-packages/mpmath/identification.py new file mode 100644 index 0000000000000000000000000000000000000000..226f62d3fe9cacedbd9ba2b1e66ff0ad017fa604 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/identification.py @@ -0,0 +1,844 @@ +""" +Implements the PSLQ algorithm for integer relation detection, +and derivative algorithms for constant recognition. +""" + +from .libmp.backend import xrange +from .libmp import int_types, sqrt_fixed + +# round to nearest integer (can be done more elegantly...) 
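+# A small worked illustration of the fixed-point convention used in this
+# module (values here are only an example): a real number v is stored as
+# the integer round(v * 2**prec). With prec = 8, v = 3.7 is stored as
+# int(3.7 * 2**8) == 947, and round_fixed(947, 8) == ((947 + 128) >> 8) << 8
+# == 1024 == 4 << 8, i.e. 3.7 rounded to the nearest integer, still in
+# fixed-point form.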
+def round_fixed(x, prec):
+    return ((x + (1<<(prec-1))) >> prec) << prec
+
+class IdentificationMethods(object):
+    pass
+
+
+def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
+    r"""
+    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
+    uses the PSLQ algorithm to find a list of integers
+    `[c_0, c_1, ..., c_n]` such that
+
+    .. math ::
+
+        |c_0 x_0 + c_1 x_1 + ... + c_n x_n| < \mathrm{tol}
+
+    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
+    exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults
+    to 3/4 of the working precision, i.e. to `2^{-0.75\,\mathrm{prec}}`.
+
+    **Examples**
+
+    Find rational approximations for `\pi`::
+
+        >>> from mpmath import *
+        >>> mp.dps = 15; mp.pretty = True
+        >>> pslq([-1, pi], tol=0.01)
+        [22, 7]
+        >>> pslq([-1, pi], tol=0.001)
+        [355, 113]
+        >>> mpf(22)/7; mpf(355)/113; +pi
+        3.14285714285714
+        3.14159292035398
+        3.14159265358979
+
+    Pi is not a rational number with denominator less than 1000::
+
+        >>> pslq([-1, pi])
+        >>>
+
+    To within the standard precision, it can however be approximated
+    by at least one rational number with denominator less than `10^{12}`::
+
+        >>> p, q = pslq([-1, pi], maxcoeff=10**12)
+        >>> print(p); print(q)
+        238410049439
+        75888275702
+        >>> mpf(p)/q
+        3.14159265358979
+
+    The PSLQ algorithm can be applied to long vectors. For example,
+    we can investigate the rational (in)dependence of integer square
+    roots::
+
+        >>> mp.dps = 30
+        >>> pslq([sqrt(n) for n in range(2, 5+1)])
+        >>>
+        >>> pslq([sqrt(n) for n in range(2, 6+1)])
+        >>>
+        >>> pslq([sqrt(n) for n in range(2, 8+1)])
+        [2, 0, 0, 0, 0, 0, -1]
+
+    **Machin formulas**
+
+    A famous formula for `\pi` is Machin's,
+
+    .. math ::
+
+        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239
+
+    There are actually infinitely many formulas of this type. Two
+    others are
+
+    .. math ::
+
+        \frac{\pi}{4} = \operatorname{acot} 1
+
+        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
+            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443
+
+    We can easily verify the formulas using the PSLQ algorithm::
+
+        >>> mp.dps = 30
+        >>> pslq([pi/4, acot(1)])
+        [1, -1]
+        >>> pslq([pi/4, acot(5), acot(239)])
+        [1, -4, 1]
+        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
+        [1, -12, -32, 5, -12]
+
+    We could try to generate a custom Machin-like formula by running
+    the PSLQ algorithm with a few inverse cotangent values, for example
+    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
+    dependence among these values, resulting in only that dependence
+    being detected, with a zero coefficient for `\pi`::
+
+        >>> pslq([pi] + [acot(n) for n in range(2,11)])
+        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]
+
+    We get better luck by removing linearly dependent terms::
+
+        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
+        [1, -8, 0, 0, 4, 0, 0, 0]
+
+    In other words, we found the following formula::
+
+        >>> 8*acot(2) - 4*acot(7)
+        3.14159265358979323846264338328
+        >>> +pi
+        3.14159265358979323846264338328
+
+    **Algorithm**
+
+    This is a fairly direct translation to Python of the pseudocode given by
+    David Bailey, "The PSLQ Integer Relation Algorithm":
+    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html
+
+    The present implementation uses fixed-point instead of floating-point
+    arithmetic, since this is significantly (about 7x) faster.
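+
+    (Concretely, each real quantity below is stored in fixed point as an
+    integer scaled by `2^{\mathrm{prec}}`; the ``<<`` and ``>>`` shifts in
+    the code are exact multiplications and divisions by powers of two.)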
+ """ + + n = len(x) + if n < 2: + raise ValueError("n cannot be less than 2") + + # At too low precision, the algorithm becomes meaningless + prec = ctx.prec + if prec < 53: + raise ValueError("prec cannot be less than 53") + + if verbose and prec // max(2,n) < 5: + print("Warning: precision for PSLQ may be too low") + + target = int(prec * 0.75) + + if tol is None: + tol = ctx.mpf(2)**(-target) + else: + tol = ctx.convert(tol) + + extra = 60 + prec += extra + + if verbose: + print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol))) + + tol = ctx.to_fixed(tol, prec) + assert tol + + # Convert to fixed-point numbers. The dummy None is added so we can + # use 1-based indexing. (This just allows us to be consistent with + # Bailey's indexing. The algorithm is 100 lines long, so debugging + # a single wrong index can be painful.) + x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x] + + # Sanity check on magnitudes + minx = min(abs(xx) for xx in x[1:]) + if not minx: + raise ValueError("PSLQ requires a vector of nonzero numbers") + if minx < tol//100: + if verbose: + print("STOPPING: (one number is too small)") + return None + + g = sqrt_fixed((4<> prec) + s[k] = sqrt_fixed(t, prec) + t = s[1] + y = x[:] + for k in xrange(1, n+1): + y[k] = (x[k] << prec) // t + s[k] = (s[k] << prec) // t + # step 3 + for i in xrange(1, n+1): + for j in xrange(i+1, n): + H[i,j] = 0 + if i <= n-1: + if s[i]: + H[i,i] = (s[i+1] << prec) // s[i] + else: + H[i,i] = 0 + for j in range(1, i): + sjj1 = s[j]*s[j+1] + if sjj1: + H[i,j] = ((-y[i]*y[j])<> prec) + for k in xrange(1, j+1): + H[i,k] = H[i,k] - (t*H[j,k] >> prec) + for k in xrange(1, n+1): + A[i,k] = A[i,k] - (t*A[j,k] >> prec) + B[k,j] = B[k,j] + (t*B[k,i] >> prec) + # Main algorithm + for REP in range(maxsteps): + # Step 1 + m = -1 + szmax = -1 + for i in range(1, n): + h = H[i,i] + sz = (g**i * abs(h)) >> (prec*(i-1)) + if sz > szmax: + m = i + szmax = sz + # Step 2 + y[m], y[m+1] = y[m+1], y[m] + for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i] + for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i] + for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m] + # Step 3 + if m <= n - 2: + t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec) + # A zero element probably indicates that the precision has + # been exhausted. XXX: this could be spurious, due to + # using fixed-point arithmetic + if not t0: + break + t1 = (H[m,m] << prec) // t0 + t2 = (H[m,m+1] << prec) // t0 + for i in xrange(m, n+1): + t3 = H[i,m] + t4 = H[i,m+1] + H[i,m] = (t1*t3+t2*t4) >> prec + H[i,m+1] = (-t2*t3+t1*t4) >> prec + # Step 4 + for i in xrange(m+1, n+1): + for j in xrange(min(i-1, m+1), 0, -1): + try: + t = round_fixed((H[i,j] << prec)//H[j,j], prec) + # Precision probably exhausted + except ZeroDivisionError: + break + y[j] = y[j] + ((t*y[i]) >> prec) + for k in xrange(1, j+1): + H[i,k] = H[i,k] - (t*H[j,k] >> prec) + for k in xrange(1, n+1): + A[i,k] = A[i,k] - (t*A[j,k] >> prec) + B[k,j] = B[k,j] + (t*B[k,i] >> prec) + # Until a relation is found, the error typically decreases + # slowly (e.g. a factor 1-10) with each step TODO: we could + # compare err from two successive iterations. If there is a + # large drop (several orders of magnitude), that indicates a + # "high quality" relation was detected. Reporting this to + # the user somehow might be useful. 
+        best_err = maxcoeff<<prec
+        for i in xrange(1, n+1):
+            err = abs(y[i])
+            # Maybe we are done?
+            if err < tol:
+                # We are done if the coefficients are acceptable
+                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
+                    range(1,n+1)]
+                if max(abs(v) for v in vec) < maxcoeff:
+                    if verbose:
+                        print("FOUND relation at iter %i/%i, error: %s" % \
+                            (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1)))
+                    return vec
+            best_err = min(err, best_err)
+        # Calculate a lower bound for the norm. We could do this
+        # more exactly (using the Euclidean norm) but there is probably
+        # no practical benefit.
+        recnorm = max(abs(h) for h in H.values())
+        if recnorm:
+            norm = ((1 << (2*prec)) // recnorm) >> prec
+            norm //= 100
+        else:
+            norm = ctx.inf
+        if verbose:
+            print("%i/%i:  Error: %8s   Norm: %s" % \
+                (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm))
+        if norm >= maxcoeff:
+            break
+    if verbose:
+        print("CANCELLING after step %i/%i." % (REP, maxsteps))
+        print("Could not find an integer relation. Norm bound: %s" % norm)
+    return None
+
+def findpoly(ctx, x, n=1, **kwargs):
+    r"""
+    ``findpoly(x, n)`` returns the coefficients of an integer
+    polynomial `P` of degree at most `n` such that `P(x) \approx 0`.
+    If no polynomial having `x` as a root can be found,
+    :func:`~mpmath.findpoly` returns ``None``.
+
+    :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with
+    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
+    `[1, x, x^2, ..., x^n]` as input. Keyword arguments given to
+    :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In
+    particular, you can specify a tolerance for `P(x)` with ``tol``
+    and a maximum permitted coefficient size with ``maxcoeff``.
+
+    For large values of `n`, it is recommended to run :func:`~mpmath.findpoly`
+    at high precision; preferably 50 digits or more.
+
+    **Examples**
+
+    By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear
+    polynomial with a rational root::
+
+        >>> from mpmath import *
+        >>> mp.dps = 15; mp.pretty = True
+        >>> findpoly(0.7)
+        [-10, 7]
+
+    The generated coefficient list is valid input to ``polyval`` and
+    ``polyroots``::
+
+        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
+        -2.0e-16
+        >>> for r in polyroots(findpoly(phi, 2)):
+        ...     print(r)
+        ...
+        -0.618033988749895
+        1.61803398874989
+
+    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
+    solutions to quadratic equations. As we find here, `1+\sqrt 2`
+    is a root of the polynomial `x^2 - 2x - 1`::
+
+        >>> findpoly(1+sqrt(2), 2)
+        [1, -2, -1]
+        >>> findroot(lambda x: x**2 - 2*x - 1, 1)
+        2.4142135623731
+
+    Despite only containing square roots, the following number results
+    in a polynomial of degree 4::
+
+        >>> findpoly(sqrt(2)+sqrt(3), 4)
+        [1, 0, -10, 0, 1]
+
+    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
+    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of
+    lower degree having `r` as a root does not exist. Given sufficient
+    precision, :func:`~mpmath.findpoly` will usually find the correct
+    minimal polynomial of a given algebraic number.
+
+    **Non-algebraic numbers**
+
+    If :func:`~mpmath.findpoly` fails to find a polynomial with given
+    coefficient size and tolerance constraints, that means no such
+    polynomial exists.
+
+    We can verify that `\pi` is not an algebraic number of degree 3 with
+    coefficients less than 1000::
+
+        >>> mp.dps = 15
+        >>> findpoly(pi, 3)
+        >>>
+
+    It is always possible to find an algebraic approximation of a number
+    using one (or several) of the following methods:
+
+    1. Increasing the permitted degree
+    2. Allowing larger coefficients
+    3. Loosening the tolerance (permitting a larger error)
+
+    One example of each method is shown below::
+
+        >>> mp.dps = 15
+        >>> findpoly(pi, 4)
+        [95, -545, 863, -183, -298]
+        >>> findpoly(pi, 3, maxcoeff=10000)
+        [836, -1734, -2658, -457]
+        >>> findpoly(pi, 3, tol=1e-7)
+        [-4, 22, -29, -2]
+
+    It is unknown whether Euler's constant is transcendental (or even
+    irrational). We can use :func:`~mpmath.findpoly` to check that if it is
+    an algebraic number, its minimal polynomial must have degree
+    at least 7 and a coefficient of magnitude at least 1000000::
+
+        >>> mp.dps = 200
+        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
+        >>>
+
+    Note that the high precision and strict tolerance are necessary
+    for such high-degree runs, since otherwise unwanted low-accuracy
+    approximations will be detected. It may also be necessary to set
+    maxsteps high to prevent a premature exit (before the coefficient
+    bound has been reached). Running with ``verbose=True`` to see
+    what is happening can be useful.
+    """
+    x = ctx.mpf(x)
+    if n < 1:
+        raise ValueError("n cannot be less than 1")
+    if x == 0:
+        return [1, 0]
+    xs = [ctx.mpf(1)]
+    for i in range(1,n+1):
+        xs.append(x**i)
+        a = ctx.pslq(xs, **kwargs)
+        if a is not None:
+            return a[::-1]
+
+def fracgcd(p, q):
+    x, y = p, q
+    while y:
+        x, y = y, x % y
+    if x != 1:
+        p //= x
+        q //= x
+    if q == 1:
+        return p
+    return p, q
+
+def pslqstring(r, constants):
+    q = r[0]
+    r = r[1:]
+    s = []
+    for i in range(len(r)):
+        p = r[i]
+        if p:
+            z = fracgcd(-p,q)
+            cs = constants[i][1]
+            if cs == '1':
+                cs = ''
+            else:
+                cs = '*' + cs
+            if isinstance(z, int_types):
+                if z > 0: term = str(z) + cs
+                else:     term = ("(%s)" % z) + cs
+            else:
+                term = ("(%s/%s)" % z) + cs
+            s.append(term)
+    s = ' + '.join(s)
+    if '+' in s or '*' in s:
+        s = '(' + s + ')'
+    return s or '0'
+
+def prodstring(r, constants):
+    q = r[0]
+    r = r[1:]
+    num = []
+    den = []
+    for i in range(len(r)):
+        p = r[i]
+        if p:
+            z = fracgcd(-p,q)
+            cs = constants[i][1]
+            if isinstance(z, int_types):
+                if abs(z) == 1: t = cs
+                else:           t = '%s**%s' % (cs, abs(z))
+                ([num,den][z<0]).append(t)
+            else:
+                t = '%s**(%s/%s)' % (cs, abs(z[0]), z[1])
+                ([num,den][z[0]<0]).append(t)
+    num = '*'.join(num)
+    den = '*'.join(den)
+    if num and den: return "(%s)/(%s)" % (num, den)
+    if num: return num
+    if den: return "1/(%s)" % den
+
+def quadraticstring(ctx,t,a,b,c):
+    if c < 0:
+        a,b,c = -a,-b,-c
+    u1 = (-b+ctx.sqrt(b**2-4*a*c))/(2*c)
+    u2 = (-b-ctx.sqrt(b**2-4*a*c))/(2*c)
+    if abs(u1-t) < abs(u2-t):
+        if b:  s = '((%s+sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
+        else:  s = '(sqrt(%s)/%s)' % (-4*a*c,2*c)
+    else:
+        if b:  s = '((%s-sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
+        else:  s = '(-sqrt(%s)/%s)' % (-4*a*c,2*c)
+    return s
+
+# Transformation y = f(x,c); each string spells out the inverse, i.e.
+# x expressed in terms of $y and $c.
+# The third entry indicates whether the transformation is
+# redundant when c = 1
+transforms = [
+  (lambda ctx,x,c: x*c, '$y/$c', 0),
+  (lambda ctx,x,c: x/c, '$c*$y', 1),
+  (lambda ctx,x,c: c/x, '$c/$y', 0),
+  (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0),
+  (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1),
+  (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0),
+  (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
+  (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
+  (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
+  (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0),
+  (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1),
+  (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0),
+  (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1),
+  (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1),
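+  # (identify() below substitutes '$y' with the matched subexpression
+  # and '$c' with the constant's name, so for y = x*c the stored string
+  # '$y/$c' reconstructs x = y/c. Entries flagged 1 reduce to an earlier
+  # transform when c = 1 and are skipped for the base constant '1'.)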
(lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1), + (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0), + (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1), + (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0), + (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1), + (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1), + (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0), + (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0), + (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1), + (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0), + (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1), + (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1), + (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0), +] + +def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False, + verbose=False): + r""" + Given a real number `x`, ``identify(x)`` attempts to find an exact + formula for `x`. This formula is returned as a string. If no match + is found, ``None`` is returned. With ``full=True``, a list of + matching formulas is returned. + + As a simple example, :func:`~mpmath.identify` will find an algebraic + formula for the golden ratio:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> identify(phi) + '((1+sqrt(5))/2)' + + :func:`~mpmath.identify` can identify simple algebraic numbers and simple + combinations of given base constants, as well as certain basic + transformations thereof. More specifically, :func:`~mpmath.identify` + looks for the following: + + 1. Fractions + 2. Quadratic algebraic numbers + 3. Rational linear combinations of the base constants + 4. Any of the above after first transforming `x` into `f(x)` where + `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either + directly or with `x` or `f(x)` multiplied or divided by one of + the base constants + 5. Products of fractional powers of the base constants and + small integers + + Base constants can be given as a list of strings representing mpmath + expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical + values and use the original strings for the output), or as a dict of + formula:value pairs. + + In order not to produce spurious results, :func:`~mpmath.identify` should + be used with high precision; preferably 50 digits or more. + + **Examples** + + Simple identifications can be performed safely at standard + precision. Here the default recognition of rational, algebraic, + and exp/log of algebraic numbers is demonstrated:: + + >>> mp.dps = 15 + >>> identify(0.22222222222222222) + '(2/9)' + >>> identify(1.9662210973805663) + 'sqrt(((24+sqrt(48))/8))' + >>> identify(4.1132503787829275) + 'exp((sqrt(8)/2))' + >>> identify(0.881373587019543) + 'log(((2+sqrt(8))/2))' + + By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard + precision it finds a not too useful approximation. At slightly + increased precision, this approximation is no longer accurate + enough and :func:`~mpmath.identify` more correctly returns ``None``:: + + >>> identify(pi) + '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))' + >>> mp.dps = 30 + >>> identify(pi) + >>> + + Numbers such as `\pi`, and simple combinations of user-defined + constants, can be identified if they are provided explicitly:: + + >>> identify(3*pi-2*e, ['pi', 'e']) + '(3*pi + (-2)*e)' + + Here is an example using a dict of constants. 
Note that the + constants need not be "atomic"; :func:`~mpmath.identify` can just + as well express the given number in terms of expressions + given by formulas:: + + >>> identify(pi+e, {'a':pi+2, 'b':2*e}) + '((-2) + 1*a + (1/2)*b)' + + Next, we attempt some identifications with a set of base constants. + It is necessary to increase the precision a bit. + + >>> mp.dps = 50 + >>> base = ['sqrt(2)','pi','log(2)'] + >>> identify(0.25, base) + '(1/4)' + >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base) + '(2*sqrt(2) + 3*pi + (5/7)*log(2))' + >>> identify(exp(pi+2), base) + 'exp((2 + 1*pi))' + >>> identify(1/(3+sqrt(2)), base) + '((3/7) + (-1/7)*sqrt(2))' + >>> identify(sqrt(2)/(3*pi+4), base) + 'sqrt(2)/(4 + 3*pi)' + >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base) + '5**(1/3)*pi*log(2)**2' + + An example of an erroneous solution being found when too low + precision is used:: + + >>> mp.dps = 15 + >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) + '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))' + >>> mp.dps = 50 + >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) + '1/(3*pi + (-4)*e + 2*sqrt(2))' + + **Finding approximate solutions** + + The tolerance ``tol`` defaults to 3/4 of the working precision. + Lowering the tolerance is useful for finding approximate matches. + We can for example try to generate approximations for pi:: + + >>> mp.dps = 15 + >>> identify(pi, tol=1e-2) + '(22/7)' + >>> identify(pi, tol=1e-3) + '(355/113)' + >>> identify(pi, tol=1e-10) + '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))' + + With ``full=True``, and by supplying a few base constants, + ``identify`` can generate almost endless lists of approximations + for any number (the output below has been truncated to show only + the first few):: + + >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True): + ... print(p) + ... # doctest: +ELLIPSIS + e/log((6 + (-4/3)*e)) + (3**3*5*e*catalan**2)/(2*7**2) + sqrt(((-13) + 1*e + 22*catalan)) + log(((-6) + 24*e + 4*catalan)/e) + exp(catalan*((-1/5) + (8/15)*e)) + catalan*(6 + (-6)*e + 15*catalan) + sqrt((5 + 26*e + (-3)*catalan))/e + e*sqrt(((-27) + 2*e + 25*catalan)) + log(((-1) + (-11)*e + 59*catalan)) + ((3/20) + (21/20)*e + (3/20)*catalan) + ... + + The numerical values are roughly as close to `\pi` as permitted by the + specified tolerance: + + >>> e/log(6-4*e/3) + 3.14157719846001 + >>> 135*e*catalan**2/98 + 3.14166950419369 + >>> sqrt(e-13+22*catalan) + 3.14158000062992 + >>> log(24*e-6+4*catalan)-1 + 3.14158791577159 + + **Symbolic processing** + + The output formula can be evaluated as a Python expression. + Note however that if fractions (like '2/3') are present in + the formula, Python's :func:`~mpmath.eval()` may erroneously perform + integer division. Note also that the output is not necessarily + in the algebraically simplest form:: + + >>> identify(sqrt(2)) + '(sqrt(8)/2)' + + As a solution to both problems, consider using SymPy's + :func:`~mpmath.sympify` to convert the formula into a symbolic expression. 
+ SymPy can be used to pretty-print or further simplify the formula + symbolically:: + + >>> from sympy import sympify # doctest: +SKIP + >>> sympify(identify(sqrt(2))) # doctest: +SKIP + 2**(1/2) + + Sometimes :func:`~mpmath.identify` can simplify an expression further than + a symbolic algorithm:: + + >>> from sympy import simplify # doctest: +SKIP + >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)') # doctest: +SKIP + >>> x # doctest: +SKIP + (3/2 - 5**(1/2)/2)**(-1/2) + >>> x = simplify(x) # doctest: +SKIP + >>> x # doctest: +SKIP + 2/(6 - 2*5**(1/2))**(1/2) + >>> mp.dps = 30 # doctest: +SKIP + >>> x = sympify(identify(x.evalf(30))) # doctest: +SKIP + >>> x # doctest: +SKIP + 1/2 + 5**(1/2)/2 + + (In fact, this functionality is available directly in SymPy as the + function :func:`~mpmath.nsimplify`, which is essentially a wrapper for + :func:`~mpmath.identify`.) + + **Miscellaneous issues and limitations** + + The input `x` must be a real number. All base constants must be + positive real numbers and must not be rationals or rational linear + combinations of each other. + + The worst-case computation time grows quickly with the number of + base constants. Already with 3 or 4 base constants, + :func:`~mpmath.identify` may require several seconds to finish. To search + for relations among a large number of constants, you should + consider using :func:`~mpmath.pslq` directly. + + The extended transformations are applied to x, not the constants + separately. As a result, ``identify`` will for example be able to + recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but + not ``2*exp(pi)+3``. It will be able to recognize the latter if + ``exp(pi)`` is given explicitly as a base constant. + + """ + + solutions = [] + + def addsolution(s): + if verbose: print("Found: ", s) + solutions.append(s) + + x = ctx.mpf(x) + + # Further along, x will be assumed positive + if x == 0: + if full: return ['0'] + else: return '0' + if x < 0: + sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose) + if sol is None: + return sol + if full: + return ["-(%s)"%s for s in sol] + else: + return "-(%s)" % sol + + if tol: + tol = ctx.mpf(tol) + else: + tol = ctx.eps**0.7 + M = maxcoeff + + if constants: + if isinstance(constants, dict): + constants = [(ctx.mpf(v), name) for (name, v) in sorted(constants.items())] + else: + namespace = dict((name, getattr(ctx,name)) for name in dir(ctx)) + constants = [(eval(p, namespace), p) for p in constants] + else: + constants = [] + + # We always want to find at least rational terms + if 1 not in [value for (name, value) in constants]: + constants = [(ctx.mpf(1), '1')] + constants + + # PSLQ with simple algebraic and functional transformations + for ft, ftn, red in transforms: + for c, cn in constants: + if red and cn == '1': + continue + t = ft(ctx,x,c) + # Prevent exponential transforms from wreaking havoc + if abs(t) > M**2 or abs(t) < tol: + continue + # Linear combination of base constants + r = ctx.pslq([t] + [a[0] for a in constants], tol, M) + s = None + if r is not None and max(abs(uw) for uw in r) <= M and r[0]: + s = pslqstring(r, constants) + # Quadratic algebraic numbers + else: + q = ctx.pslq([ctx.one, t, t**2], tol, M) + if q is not None and len(q) == 3 and q[2]: + aa, bb, cc = q + if max(abs(aa),abs(bb),abs(cc)) <= M: + s = quadraticstring(ctx,t,aa,bb,cc) + if s: + if cn == '1' and ('/$c' in ftn): + s = ftn.replace('$y', s).replace('/$c', '') + else: + s = ftn.replace('$y', s).replace('$c', cn) + addsolution(s) + if not 
full: return solutions[0] + + if verbose: + print(".") + + # Check for a direct multiplicative formula + if x != 1: + # Allow fractional powers of fractions + ilogs = [2,3,5,7] + # Watch out for existing fractional powers of fractions + logs = [] + for a, s in constants: + if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs): + logs.append((ctx.ln(a), s)) + logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs + r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M) + if r is not None and max(abs(uw) for uw in r) <= M and r[0]: + addsolution(prodstring(r, logs)) + if not full: return solutions[0] + + if full: + return sorted(solutions, key=len) + else: + return None + +IdentificationMethods.pslq = pslq +IdentificationMethods.findpoly = findpoly +IdentificationMethods.identify = identify + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__init__.py b/MLPY/Lib/site-packages/mpmath/libmp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1573114afc4fbce73f2ba9d2ddc99882c00027c0 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/__init__.py @@ -0,0 +1,77 @@ +from .libmpf import (prec_to_dps, dps_to_prec, repr_dps, + round_down, round_up, round_floor, round_ceiling, round_nearest, + to_pickable, from_pickable, ComplexResult, + fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf, + math_float_inf, round_int, normalize, normalize1, + from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor, + mpf_nint, mpf_frac, + from_float, from_npfloat, from_Decimal, to_float, from_rational, to_rational, to_fixed, + mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge, + mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum, + mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp, + mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int, + mpf_perturb, + to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr, + mpf_sqrt, mpf_hypot) + +from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half, + mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash, + mpc_conjugate, mpc_is_nonzero, mpc_add, mpc_add_mpf, + mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs, + mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square, + mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int, + mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div, + complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int, + mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin, + mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh, + mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh, + mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi, + mpc_cos_sin, mpc_cos_sin_pi) + +from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10, + pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi, + degree_fixed, mpf_degree, + mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed, + mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan, + mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh, + mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin, + mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci) + +from .libhyper import (NoConvergence, make_hyp_summator, + mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint, + mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn, + mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1, + mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe) + +from .gammazeta import 
(catalan_fixed, mpf_catalan, + khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher, + apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed, + mpf_mertens, twinprime_fixed, mpf_twinprime, + mpf_bernoulli, bernfrac, mpf_gamma_int, + mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma, + mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma, + mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0, + mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta, + mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum) + +from .libmpi import (mpi_str, + mpi_from_str, mpi_to_str, + mpi_eq, mpi_ne, + mpi_lt, mpi_le, mpi_gt, mpi_ge, + mpi_add, mpi_sub, mpi_delta, mpi_mid, + mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp, + mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin, + mpi_cos, mpi_sin, mpi_tan, mpi_cot, + mpi_atan, mpi_atan2, + mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow, + mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin, + mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma, + mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial) + +from .libintmath import (trailing, bitcount, numeral, bin_to_radix, + isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac, + list_primes, isprime, moebius, gcd, eulernum, stirling1, stirling2) + +from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE, + MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types, + HASH_MODULUS, HASH_BITS) diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0617a360d9f588474f788e97a14bdefd943b8022 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/backend.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/backend.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cba9d45906be63a2d6cf2e78037e477318ddea97 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/backend.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..984318531ab944f12d00523f919ce7cfa4b1e6db Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..170e8f19febcfc63be272309e6564afa09e8b20b Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..556c4a4fbf4dd0d7372f1aacdb47cb0e9aab6761 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..f2a6c2ffc8d42b179d33f5c56279acacd694bad9
Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49ac834e5d6915d345333b453b2fa0386308fa3b
Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6714b0bb04511e3d931960b70bded75c32c275a5
Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74d355de1bdfcefd4b36a3440bb08e6d339d5057
Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/mpmath/libmp/backend.py b/MLPY/Lib/site-packages/mpmath/libmp/backend.py
new file mode 100644
index 0000000000000000000000000000000000000000..5610221290a05078f21f09df3c1a76b0e4ccdc02
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/libmp/backend.py
@@ -0,0 +1,115 @@
+import os
+import sys
+
+#----------------------------------------------------------------------------#
+# Support GMPY for high-speed large integer arithmetic.                       #
+#                                                                             #
+# To allow an external module to handle arithmetic, we need to make sure     #
+# that all high-precision variables are declared of the correct type. MPZ    #
+# is the constructor for the high-precision type. It defaults to Python's    #
+# long type but can be assigned another type, typically gmpy.mpz.            #
+#                                                                             #
+# MPZ must be used for the mantissa component of an mpf and must be used     #
+# for internal fixed-point operations.                                        #
+#                                                                             #
+# Side-effects                                                                #
+# 1) "is" cannot be used to test for special values. Must use "==".          #
+# 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later.    #
+#----------------------------------------------------------------------------#
+
+# So we can import it from this module
+gmpy = None
+sage = None
+sage_utils = None
+
+if sys.version_info[0] < 3:
+    python3 = False
+else:
+    python3 = True
+
+BACKEND = 'python'
+
+if not python3:
+    MPZ = long
+    xrange = xrange
+    basestring = basestring
+
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+else:
+    MPZ = int
+    xrange = range
+    basestring = str
+
+    import builtins
+    exec_ = getattr(builtins, "exec")
+
+# Define constants for calculating hash on Python 3.2.
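+# (These mirror CPython's numeric hashing scheme, which reduces values
+# modulo the Mersenne prime 2**61 - 1 on 64-bit builds and 2**31 - 1 on
+# 32-bit builds, so that hash(mpf(x)) can agree with hash(x) when x is
+# exactly representable; the width is read from sys.hash_info below.)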
+if sys.version_info >= (3, 2): + HASH_MODULUS = sys.hash_info.modulus + if sys.hash_info.width == 32: + HASH_BITS = 31 + else: + HASH_BITS = 61 +else: + HASH_MODULUS = None + HASH_BITS = None + +if 'MPMATH_NOGMPY' not in os.environ: + try: + try: + import gmpy2 as gmpy + except ImportError: + try: + import gmpy + except ImportError: + raise ImportError + if gmpy.version() >= '1.03': + BACKEND = 'gmpy' + MPZ = gmpy.mpz + except: + pass + +if ('MPMATH_NOSAGE' not in os.environ and 'SAGE_ROOT' in os.environ or + 'MPMATH_SAGE' in os.environ): + try: + import sage.all + import sage.libs.mpmath.utils as _sage_utils + sage = sage.all + sage_utils = _sage_utils + BACKEND = 'sage' + MPZ = sage.Integer + except: + pass + +if 'MPMATH_STRICT' in os.environ: + STRICT = True +else: + STRICT = False + +MPZ_TYPE = type(MPZ(0)) +MPZ_ZERO = MPZ(0) +MPZ_ONE = MPZ(1) +MPZ_TWO = MPZ(2) +MPZ_THREE = MPZ(3) +MPZ_FIVE = MPZ(5) + +try: + if BACKEND == 'python': + int_types = (int, long) + else: + int_types = (int, long, MPZ_TYPE) +except NameError: + if BACKEND == 'python': + int_types = (int,) + else: + int_types = (int, MPZ_TYPE) diff --git a/MLPY/Lib/site-packages/mpmath/libmp/gammazeta.py b/MLPY/Lib/site-packages/mpmath/libmp/gammazeta.py new file mode 100644 index 0000000000000000000000000000000000000000..3b05cc63c5f00e6c76d8383853dba06f15e46030 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/gammazeta.py @@ -0,0 +1,2167 @@ +""" +----------------------------------------------------------------------- +This module implements gamma- and zeta-related functions: + +* Bernoulli numbers +* Factorials +* The gamma function +* Polygamma functions +* Harmonic numbers +* The Riemann zeta function +* Constants related to these functions + +----------------------------------------------------------------------- +""" + +import math +import sys + +from .backend import xrange +from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy + +from .libintmath import list_primes, ifac, ifac2, moebius + +from .libmpf import (\ + round_floor, round_ceiling, round_down, round_up, + round_nearest, round_fast, + lshift, sqrt_fixed, isqrt_fast, + fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan, + from_int, to_int, to_fixed, from_man_exp, from_rational, + mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub, + mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int, + mpf_rdiv_int, + mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift, + negative_rnd, reciprocal_rnd, + bitcount, to_float, mpf_floor, mpf_sign, ComplexResult +) + +from .libelefun import (\ + constant_memo, + def_mpf_constant, + mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2, + mpf_exp, mpf_log, mpf_pow, mpf_cosh, + mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, + ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi, + cos_sin_fixed, exp_fixed +) + +from .libmpc import (\ + mpc_zero, mpc_one, mpc_half, mpc_two, + mpc_abs, mpc_shift, mpc_pos, mpc_neg, + mpc_add, mpc_sub, mpc_mul, mpc_div, + mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div, + mpc_mul_int, mpc_pow_int, + mpc_log, mpc_exp, mpc_pow, + mpc_cos_pi, mpc_sin_pi, + mpc_reciprocal, mpc_square, + mpc_sub_mpf +) + + + +# Catalan's constant is computed using Lupas's rapidly convergent series +# (listed on http://mathworld.wolfram.com/CatalansConstant.html) +# oo +# ___ n-1 8n 2 3 2 +# 1 \ (-1) 2 (40n - 24n + 3) [(2n)!] (n!) +# K = --- ) ----------------------------------------- +# 64 /___ 3 2 +# n (2n-1) [(4n)!] 
+# n = 1 + +@constant_memo +def catalan_fixed(prec): + prec = prec + 20 + a = one = MPZ_ONE << prec + s, t, n = 0, 1, 1 + while t: + a *= 32 * n**3 * (2*n-1) + a //= (3-16*n+16*n**2)**2 + t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1)) + s += t + n += 1 + return s >> (20 + 6) + +# Khinchin's constant is relatively difficult to compute. Here +# we use the rational zeta series + +# oo 2*n-1 +# ___ ___ +# \ ` zeta(2*n)-1 \ ` (-1)^(k+1) +# log(K)*log(2) = ) ------------ ) ---------- +# /___. n /___. k +# n = 1 k = 1 + +# which adds half a digit per term. The essential trick for achieving +# reasonable efficiency is to recycle both the values of the zeta +# function (essentially Bernoulli numbers) and the partial terms of +# the inner sum. + +# An alternative might be to use K = 2*exp[1/log(2) X] where + +# / 1 1 [ pi*x*(1-x^2) ] +# X = | ------ log [ ------------ ]. +# / 0 x(1+x) [ sin(pi*x) ] + +# and integrate numerically. In practice, this seems to be slightly +# slower than the zeta series at high precision. + +@constant_memo +def khinchin_fixed(prec): + wp = int(prec + prec**0.5 + 15) + s = MPZ_ZERO + fac = from_int(4) + t = ONE = MPZ_ONE << wp + pi = mpf_pi(wp) + pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2) + n = 1 + while 1: + zeta2n = mpf_abs(mpf_bernoulli(2*n, wp)) + zeta2n = mpf_mul(zeta2n, pipow, wp) + zeta2n = mpf_div(zeta2n, fac, wp) + zeta2n = to_fixed(zeta2n, wp) + term = (((zeta2n - ONE) * t) // n) >> wp + if term < 100: + break + #if not n % 10: + # print n, math.log(int(abs(term))) + s += term + t += ONE//(2*n+1) - ONE//(2*n) + n += 1 + fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp) + pipow = mpf_mul(pipow, twopi2, wp) + s = (s << wp) // ln2_fixed(wp) + K = mpf_exp(from_man_exp(s, -wp), wp) + K = to_fixed(K, prec) + return K + + +# Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)). +# One way to compute it would be to perform direct numerical +# differentiation, but computing arbitrary Riemann zeta function +# values at high precision is expensive. We instead use the formula + +# A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12) + +# and compute zeta'(2) from the series representation + +# oo +# ___ +# \ log k +# -zeta'(2) = ) ----- +# /___ 2 +# k +# k = 2 + +# This series converges exceptionally slowly, but can be accelerated +# using Euler-Maclaurin formula. The important insight is that the +# E-M integral can be done in closed form and that the high order +# are given by + +# n / \ +# d | log x | a + b log x +# --- | ----- | = ----------- +# n | 2 | 2 + n +# dx \ x / x + +# where a and b are integers given by a simple recurrence. Note +# that just one logarithm is needed. However, lots of integer +# logarithms are required for the initial summation. + +# This algorithm could possibly be turned into a faster algorithm +# for general evaluation of zeta(s) or zeta'(s); this should be +# looked into. + +@constant_memo +def glaisher_fixed(prec): + wp = prec + 30 + # Number of direct terms to sum before applying the Euler-Maclaurin + # formula to the tail. 
TODO: choose more intelligently + N = int(0.33*prec + 5) + ONE = MPZ_ONE << wp + # Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1 + s = MPZ_ZERO + for k in range(2, N): + #print k, N + s += log_int_fixed(k, wp) // k**2 + logN = log_int_fixed(N, wp) + #logN = to_fixed(mpf_log(from_int(N), wp+20), wp) + # E-M step 2: integral of log(x)/x**2 from N to inf + s += (ONE + logN) // N + # E-M step 3: endpoint correction term f(N)/2 + s += logN // (N**2 * 2) + # E-M step 4: the series of derivatives + pN = N**3 + a = 1 + b = -2 + j = 3 + fac = from_int(2) + k = 1 + while 1: + # D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative] + D = ((a << wp) + b*logN) // pN + D = from_man_exp(D, -wp) + B = mpf_bernoulli(2*k, wp) + term = mpf_mul(B, D, wp) + term = mpf_div(term, fac, wp) + term = to_fixed(term, wp) + if abs(term) < 100: + break + #if not k % 10: + # print k, math.log(int(abs(term)), 10) + s -= term + # Advance derivative twice + a, b, pN, j = b-a*j, -j*b, pN*N, j+1 + a, b, pN, j = b-a*j, -j*b, pN*N, j+1 + k += 1 + fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp) + # A = exp((6*s/pi**2 + log(2*pi) + euler)/12) + pi = pi_fixed(wp) + s *= 6 + s = (s << wp) // (pi**2 >> wp) + s += euler_fixed(wp) + s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp) + s //= 12 + A = mpf_exp(from_man_exp(s, -wp), wp) + return to_fixed(A, prec) + +# Apery's constant can be computed using the very rapidly convergent +# series +# oo +# ___ 2 10 +# \ n 205 n + 250 n + 77 (n!) +# zeta(3) = ) (-1) ------------------- ---------- +# /___ 64 5 +# n = 0 ((2n+1)!) + +@constant_memo +def apery_fixed(prec): + prec += 20 + d = MPZ_ONE << prec + term = MPZ(77) << prec + n = 1 + s = MPZ_ZERO + while term: + s += term + d *= (n**10) + d //= (((2*n+1)**5) * (2*n)**5) + term = (-1)**n * (205*(n**2) + 250*n + 77) * d + n += 1 + return s >> (20 + 6) + +""" +Euler's constant (gamma) is computed using the Brent-McMillan formula, +gamma ~= I(n)/J(n) - log(n), where + + I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k) + J(n) = sum_{k=0,1,2,...} (n**k / k!)**2 + H(k) = 1 + 1/2 + 1/3 + ... + 1/k + +The error is bounded by O(exp(-4n)). Choosing n to be a power +of two, 2**p, the logarithm becomes particularly easy to calculate.[1] + +We use the formulation of Algorithm 3.9 in [2] to make the summation +more efficient. 
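+
+The choice of p in the code below follows from this error bound:
+requiring exp(-4*2**p) < 2**(-prec) means 2**p > prec*log(2)/4, i.e.
+p > log2(prec*log(2)/4), which is what
+p = int(math.log((prec/4) * math.log(2), 2)) + 1 computes.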
+ +Reference: +[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma +http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf + +[2] [BorweinBailey]_ +""" + +@constant_memo +def euler_fixed(prec): + extra = 30 + prec += extra + # choose p such that exp(-4*(2**p)) < 2**-n + p = int(math.log((prec/4) * math.log(2), 2)) + 1 + n = 2**p + A = U = -p*ln2_fixed(prec) + B = V = MPZ_ONE << prec + k = 1 + while 1: + B = B*n**2//k**2 + A = (A*n**2//k + B)//k + U += A + V += B + if max(abs(A), abs(B)) < 100: + break + k += 1 + return (U<<(prec-extra))//V + +# Use zeta accelerated formulas for the Mertens and twin +# prime constants; see +# http://mathworld.wolfram.com/MertensConstant.html +# http://mathworld.wolfram.com/TwinPrimesConstant.html + +@constant_memo +def mertens_fixed(prec): + wp = prec + 20 + m = 2 + s = mpf_euler(wp) + while 1: + t = mpf_zeta_int(m, wp) + if t == fone: + break + t = mpf_log(t, wp) + t = mpf_mul_int(t, moebius(m), wp) + t = mpf_div(t, from_int(m), wp) + s = mpf_add(s, t) + m += 1 + return to_fixed(s, prec) + +@constant_memo +def twinprime_fixed(prec): + def I(n): + return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n + wp = 2*prec + 30 + res = fone + primes = [from_rational(1,p,wp) for p in [2,3,5,7]] + ppowers = [mpf_mul(p,p,wp) for p in primes] + n = 2 + while 1: + a = mpf_zeta_int(n, wp) + for i in range(4): + a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp) + ppowers[i] = mpf_mul(ppowers[i], primes[i], wp) + a = mpf_pow_int(a, -I(n), wp) + if mpf_pos(a, prec+10, 'n') == fone: + break + #from libmpf import to_str + #print n, to_str(mpf_sub(fone, a), 6) + res = mpf_mul(res, a, wp) + n += 1 + res = mpf_mul(res, from_int(3*15*35), wp) + res = mpf_div(res, from_int(4*16*36), wp) + return to_fixed(res, prec) + + +mpf_euler = def_mpf_constant(euler_fixed) +mpf_apery = def_mpf_constant(apery_fixed) +mpf_khinchin = def_mpf_constant(khinchin_fixed) +mpf_glaisher = def_mpf_constant(glaisher_fixed) +mpf_catalan = def_mpf_constant(catalan_fixed) +mpf_mertens = def_mpf_constant(mertens_fixed) +mpf_twinprime = def_mpf_constant(twinprime_fixed) + + +#-----------------------------------------------------------------------# +# # +# Bernoulli numbers # +# # +#-----------------------------------------------------------------------# + +MAX_BERNOULLI_CACHE = 3000 + + +r""" +Small Bernoulli numbers and factorials are used in numerous summations, +so it is critical for speed that sequential computation is fast and that +values are cached up to a fairly high threshold. + +On the other hand, we also want to support fast computation of isolated +large numbers. Currently, no such acceleration is provided for integer +factorials (though it is for large floating-point factorials, which are +computed via gamma if the precision is low enough). + +For sequential computation of Bernoulli numbers, we use Ramanujan's formula + + / n + 3 \ + B = (A(n) - S(n)) / | | + n \ n / + +where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6 +when n = 4 (mod 6), and + + [n/6] + ___ + \ / n + 3 \ + S(n) = ) | | * B + /___ \ n - 6*k / n-6*k + k = 1 + +For isolated large Bernoulli numbers, we use the Riemann zeta function +to calculate a numerical value for B_n. The von Staudt-Clausen theorem +can then be used to optionally find the exact value of the +numerator and denominator. 
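+
+For example, for n = 12 the primes p with (p - 1) dividing 12 are
+2, 3, 5, 7 and 13, so von Staudt-Clausen gives 2*3*5*7*13 = 2730 for
+the denominator of B_12, in agreement with B_12 = -691/2730. This is
+how bernfrac() below recovers the exact fraction from a numerical
+value.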
+""" + +bernoulli_cache = {} +f3 = from_int(3) +f6 = from_int(6) + +def bernoulli_size(n): + """Accurately estimate the size of B_n (even n > 2 only)""" + lgn = math.log(n,2) + return int(2.326 + 0.5*lgn + n*(lgn - 4.094)) + +BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE) + +def mpf_bernoulli(n, prec, rnd=None): + """Computation of Bernoulli numbers (numerically)""" + if n < 2: + if n < 0: + raise ValueError("Bernoulli numbers only defined for n >= 0") + if n == 0: + return fone + if n == 1: + return mpf_neg(fhalf) + # For odd n > 1, the Bernoulli numbers are zero + if n & 1: + return fzero + # If precision is extremely high, we can save time by computing + # the Bernoulli number at a lower precision that is sufficient to + # obtain the exact fraction, round to the exact fraction, and + # convert the fraction back to an mpf value at the original precision + if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000: + p, q = bernfrac(n) + return from_rational(p, q, prec, rnd or round_floor) + if n > MAX_BERNOULLI_CACHE: + return mpf_bernoulli_huge(n, prec, rnd) + wp = prec + 30 + # Reuse nearby precisions + wp += 32 - (prec & 31) + cached = bernoulli_cache.get(wp) + if cached: + numbers, state = cached + if n in numbers: + if not rnd: + return numbers[n] + return mpf_pos(numbers[n], prec, rnd) + m, bin, bin1 = state + if n - m > 10: + return mpf_bernoulli_huge(n, prec, rnd) + else: + if n > 10: + return mpf_bernoulli_huge(n, prec, rnd) + numbers = {0:fone} + m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE] + bernoulli_cache[wp] = (numbers, state) + while m <= n: + #print m + case = m % 6 + # Accurately estimate size of B_m so we can use + # fixed point math without using too much precision + szbm = bernoulli_size(m) + s = 0 + sexp = max(0, szbm) - wp + if m < 6: + a = MPZ_ZERO + else: + a = bin1 + for j in xrange(1, m//6+1): + usign, uman, uexp, ubc = u = numbers[m-6*j] + if usign: + uman = -uman + s += lshift(a*uman, uexp-sexp) + # Update inner binomial coefficient + j6 = 6*j + a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6)) + a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6)) + if case == 0: b = mpf_rdiv_int(m+3, f3, wp) + if case == 2: b = mpf_rdiv_int(m+3, f3, wp) + if case == 4: b = mpf_rdiv_int(-m-3, f6, wp) + s = from_man_exp(s, sexp, wp) + b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp) + numbers[m] = b + m += 2 + # Update outer binomial coefficient + bin = bin * ((m+2)*(m+3)) // (m*(m-1)) + if m > 6: + bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6)) + state[:] = [m, bin, bin1] + return numbers[n] + +def mpf_bernoulli_huge(n, prec, rnd=None): + wp = prec + 10 + piprec = wp + int(math.log(n,2)) + v = mpf_gamma_int(n+1, wp) + v = mpf_mul(v, mpf_zeta_int(n, wp), wp) + v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp)) + v = mpf_shift(v, 1-n) + if not n & 3: + v = mpf_neg(v) + return mpf_pos(v, prec, rnd or round_fast) + +def bernfrac(n): + r""" + Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly, + where `B_n` denotes the `n`-th Bernoulli number. The fraction is + always reduced to lowest terms. Note that for `n > 1` and `n` odd, + `B_n = 0`, and `(0, 1)` is returned. + + **Examples** + + The first few Bernoulli numbers are exactly:: + + >>> from mpmath import * + >>> for n in range(15): + ... p, q = bernfrac(n) + ... print("%s %s/%s" % (n, p, q)) + ... 
+ 0 1/1 + 1 -1/2 + 2 1/6 + 3 0/1 + 4 -1/30 + 5 0/1 + 6 1/42 + 7 0/1 + 8 -1/30 + 9 0/1 + 10 5/66 + 11 0/1 + 12 -691/2730 + 13 0/1 + 14 7/6 + + This function works for arbitrarily large `n`:: + + >>> p, q = bernfrac(10**4) + >>> print(q) + 2338224387510 + >>> print(len(str(p))) + 27692 + >>> mp.dps = 15 + >>> print(mpf(p) / q) + -9.04942396360948e+27677 + >>> print(bernoulli(10**4)) + -9.04942396360948e+27677 + + .. note :: + + :func:`~mpmath.bernoulli` computes a floating-point approximation + directly, without computing the exact fraction first. + This is much faster for large `n`. + + **Algorithm** + + :func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically + and then using the von Staudt-Clausen theorem [1] to reconstruct + the exact fraction. For large `n`, this is significantly faster than + computing `B_1, B_2, \ldots, B_2` recursively with exact arithmetic. + The implementation has been tested for `n = 10^m` up to `m = 6`. + + In practice, :func:`~mpmath.bernfrac` appears to be about three times + slower than the specialized program calcbn.exe [2] + + **References** + + 1. MathWorld, von Staudt-Clausen Theorem: + http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html + + 2. The Bernoulli Number Page: + http://www.bernoulli.org/ + + """ + n = int(n) + if n < 3: + return [(1, 1), (-1, 2), (1, 6)][n] + if n & 1: + return (0, 1) + q = 1 + for k in list_primes(n+1): + if not (n % (k-1)): + q *= k + prec = bernoulli_size(n) + int(math.log(q,2)) + 20 + b = mpf_bernoulli(n, prec) + p = mpf_mul(b, from_int(q)) + pint = to_int(p, round_nearest) + return (pint, q) + + +#-----------------------------------------------------------------------# +# # +# Polygamma functions # +# # +#-----------------------------------------------------------------------# + +r""" +For all polygamma (psi) functions, we use the Euler-Maclaurin summation +formula. It looks slightly different in the m = 0 and m > 0 cases. + +For m = 0, we have + oo + ___ B + (0) 1 \ 2 k -2 k + psi (z) ~ log z + --- - ) ------ z + 2 z /___ (2 k)! + k = 1 + +Experiment shows that the minimum term of the asymptotic series +reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence +for psi (equivalent, in fact, to summing to the first few terms +directly before applying E-M) to obtain z large enough. + +Since, very crudely, log z ~= 1 for Re(z) > 1, we can use +fixed-point arithmetic (if z is extremely large, log(z) itself +is a sufficient approximation, so we can stop there already). + +For Re(z) << 0, we could use recurrence, but this is of course +inefficient for large negative z, so there we use the +reflection formula instead. + +For m > 0, we have + + N - 1 + ___ + ~~~(m) [ \ 1 ] 1 1 + psi (z) ~ [ ) -------- ] + ---------- + -------- + + [ /___ m+1 ] m+1 m + k = 1 (z+k) ] 2 (z+N) m (z+N) + + oo + ___ B + \ 2 k (m+1) (m+2) ... (m+2k-1) + + ) ------ ------------------------ + /___ (2 k)! m + 2 k + k = 1 (z+N) + +where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!). + +Here again N is chosen to make z+N large enough for the minimum +term in the last series to become smaller than eps. + +TODO: the current estimation of N for m > 0 is *very suboptimal*. + +TODO: implement the reflection formula for m > 0, Re(z) << 0. +It is generally a combination of multiple cotangents. Need to +figure out a reasonably simple way to generate these formulas +on the fly. + +TODO: maybe use exact algorithms to compute psi for integral +and certain rational arguments, as this can be much more +efficient. 
(On the other hand, the availability of these +special values provides a convenient way to test the general +algorithm.) +""" + +# Harmonic numbers are just shifted digamma functions +# We should calculate these exactly when x is an integer +# and when doing so is faster. + +def mpf_harmonic(x, prec, rnd): + if x in (fzero, fnan, finf): + return x + a = mpf_psi0(mpf_add(fone, x, prec+5), prec) + return mpf_add(a, mpf_euler(prec+5, rnd), prec, rnd) + +def mpc_harmonic(z, prec, rnd): + if z[1] == fzero: + return (mpf_harmonic(z[0], prec, rnd), fzero) + a = mpc_psi0(mpc_add_mpf(z, fone, prec+5), prec) + return mpc_add_mpf(a, mpf_euler(prec+5, rnd), prec, rnd) + +def mpf_psi0(x, prec, rnd=round_fast): + """ + Computation of the digamma function (psi function of order 0) + of a real argument. + """ + sign, man, exp, bc = x + wp = prec + 10 + if not man: + if x == finf: return x + if x == fninf or x == fnan: return fnan + if x == fzero or (exp >= 0 and sign): + raise ValueError("polygamma pole") + # Near 0 -- fixed-point arithmetic becomes bad + if exp+bc < -5: + v = mpf_psi0(mpf_add(x, fone, prec, rnd), prec, rnd) + return mpf_sub(v, mpf_div(fone, x, wp, rnd), prec, rnd) + # Reflection formula + if sign and exp+bc > 3: + c, s = mpf_cos_sin_pi(x, wp) + q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp) + p = mpf_psi0(mpf_sub(fone, x, wp), wp) + return mpf_sub(p, q, prec, rnd) + # The logarithmic term is accurate enough + if (not sign) and bc + exp > wp: + return mpf_log(mpf_sub(x, fone, wp), prec, rnd) + # Initial recurrence to obtain a large enough x + m = to_int(x) + n = int(0.11*wp) + 2 + s = MPZ_ZERO + x = to_fixed(x, wp) + one = MPZ_ONE << wp + if m < n: + for k in xrange(m, n): + s -= (one << wp) // x + x += one + x -= one + # Logarithmic term + s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp) + # Endpoint term in Euler-Maclaurin expansion + s += (one << wp) // (2*x) + # Euler-Maclaurin remainder sum + x2 = (x*x) >> wp + t = one + prev = 0 + k = 1 + while 1: + t = (t*x2) >> wp + bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp) + offset = (bexp + 2*wp) + if offset >= 0: term = (bman << offset) // (t*(2*k)) + else: term = (bman >> (-offset)) // (t*(2*k)) + if k & 1: s -= term + else: s += term + if k > 2 and term >= prev: + break + prev = term + k += 1 + return from_man_exp(s, -wp, wp, rnd) + +def mpc_psi0(z, prec, rnd=round_fast): + """ + Computation of the digamma function (psi function of order 0) + of a complex argument. 
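+
+    The Euler-Maclaurin scheme is the same as in the real case above:
+    a recurrence to push the argument to the right, then logarithmic,
+    endpoint and Bernoulli-number terms. For orientation, a plain
+    floating-point sketch of that scheme (hypothetical helper, not
+    part of this module)::
+
+        import math
+
+        def digamma_sketch(x, N=20, K=8):
+            # psi(x) = psi(x+N) - sum(1/(x+k) for k = 0..N-1)
+            s = -sum(1.0/(x+k) for k in range(N))
+            x += N
+            # log term plus Euler-Maclaurin endpoint term
+            s += math.log(x) - 0.5/x
+            # remainder sum over B_2, B_4, ..., B_16
+            bern = [1/6., -1/30., 1/42., -1/30., 5/66.,
+                    -691/2730., 7/6., -3617/510.]
+            for k, b in enumerate(bern[:K]):
+                s -= b/((2*k+2)*x**(2*k+2))
+            return s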
+ """ + re, im = z + # Fall back to the real case + if im == fzero: + return (mpf_psi0(re, prec, rnd), fzero) + wp = prec + 20 + sign, man, exp, bc = re + # Reflection formula + if sign and exp+bc > 3: + c = mpc_cos_pi(z, wp) + s = mpc_sin_pi(z, wp) + q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp) + p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp) + return mpc_sub(p, q, prec, rnd) + # Just the logarithmic term + if (not sign) and bc + exp > wp: + return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd) + # Initial recurrence to obtain a large enough z + w = to_int(re) + n = int(0.11*wp) + 2 + s = mpc_zero + if w < n: + for k in xrange(w, n): + s = mpc_sub(s, mpc_reciprocal(z, wp), wp) + z = mpc_add_mpf(z, fone, wp) + z = mpc_sub(z, mpc_one, wp) + # Logarithmic and endpoint term + s = mpc_add(s, mpc_log(z, wp), wp) + s = mpc_add(s, mpc_div(mpc_half, z, wp), wp) + # Euler-Maclaurin remainder sum + z2 = mpc_square(z, wp) + t = mpc_one + prev = mpc_zero + szprev = fzero + k = 1 + eps = mpf_shift(fone, -wp+2) + while 1: + t = mpc_mul(t, z2, wp) + bern = mpf_bernoulli(2*k, wp) + term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp) + s = mpc_sub(s, term, wp) + szterm = mpc_abs(term, 10) + if k > 2 and (mpf_le(szterm, eps) or mpf_le(szprev, szterm)): + break + prev = term + szprev = szterm + k += 1 + return s + +# Currently unoptimized +def mpf_psi(m, x, prec, rnd=round_fast): + """ + Computation of the polygamma function of arbitrary integer order + m >= 0, for a real argument x. + """ + if m == 0: + return mpf_psi0(x, prec, rnd=round_fast) + return mpc_psi(m, (x, fzero), prec, rnd)[0] + +def mpc_psi(m, z, prec, rnd=round_fast): + """ + Computation of the polygamma function of arbitrary integer order + m >= 0, for a complex argument z. + """ + if m == 0: + return mpc_psi0(z, prec, rnd) + re, im = z + wp = prec + 20 + sign, man, exp, bc = re + if not im[1]: + if im in (finf, fninf, fnan): + return (fnan, fnan) + if not man: + if re == finf and im == fzero: + return (fzero, fzero) + if re == fnan: + return (fnan, fnan) + # Recurrence + w = to_int(re) + n = int(0.4*wp + 4*m) + s = mpc_zero + if w < n: + for k in xrange(w, n): + t = mpc_pow_int(z, -m-1, wp) + s = mpc_add(s, t, wp) + z = mpc_add_mpf(z, fone, wp) + zm = mpc_pow_int(z, -m, wp) + z2 = mpc_pow_int(z, -2, wp) + # 1/m*(z+N)^m + integral_term = mpc_div_mpf(zm, from_int(m), wp) + s = mpc_add(s, integral_term, wp) + # 1/2*(z+N)^(-(m+1)) + s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp) + a = m + 1 + b = 2 + k = 1 + # Important: we want to sum up to the *relative* error, + # not the absolute error, because psi^(m)(z) might be tiny + magn = mpc_abs(s, 10) + magn = magn[2]+magn[3] + eps = mpf_shift(fone, magn-wp+2) + while 1: + zm = mpc_mul(zm, z2, wp) + bern = mpf_bernoulli(2*k, wp) + scal = mpf_mul_int(bern, a, wp) + scal = mpf_div(scal, from_int(b), wp) + term = mpc_mul_mpf(zm, scal, wp) + s = mpc_add(s, term, wp) + szterm = mpc_abs(term, 10) + if k > 2 and mpf_le(szterm, eps): + break + #print k, to_str(szterm, 10), to_str(eps, 10) + a *= (m+2*k)*(m+2*k+1) + b *= (2*k+1)*(2*k+2) + k += 1 + # Scale and sign factor + v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd) + if not (m & 1): + v = mpf_neg(v[0]), mpf_neg(v[1]) + return v + + +#-----------------------------------------------------------------------# +# # +# Riemann zeta function # +# # +#-----------------------------------------------------------------------# + +r""" +We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation + + n-1 + ___ k + -1 \ (-1) (d_k - 
d_n) + eta(s) ~= ---- ) ------------------ + d_n /___ s + k = 0 (k + 1) +where + k + ___ i + \ (n + i - 1)! 4 + d_k = n ) ---------------. + /___ (n - i)! (2i)! + i = 0 + +If s = a + b*I, the absolute error for eta(s) is bounded by + + 3 (1 + 2|b|) + ------------ * exp(|b| pi/2) + n + (3+sqrt(8)) + +Disregarding the linear term, we have approximately, + + log(err) ~= log(exp(1.58*|b|)) - log(5.8**n) + log(err) ~= 1.58*|b| - log(5.8)*n + log(err) ~= 1.58*|b| - 1.76*n + log2(err) ~= 2.28*|b| - 2.54*n + +So for p bits, we should choose n > (p + 2.28*|b|) / 2.54. + +References: +----------- + +Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function" +http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps + +http://en.wikipedia.org/wiki/Dirichlet_eta_function +""" + +borwein_cache = {} + +def borwein_coefficients(n): + if n in borwein_cache: + return borwein_cache[n] + ds = [MPZ_ZERO] * (n+1) + d = MPZ_ONE + s = ds[0] = MPZ_ONE + for i in range(1, n+1): + d = d * 4 * (n+i-1) * (n-i+1) + d //= ((2*i) * ((2*i)-1)) + s += d + ds[i] = s + borwein_cache[n] = ds + return ds + +ZETA_INT_CACHE_MAX_PREC = 1000 +zeta_int_cache = {} + +def mpf_zeta_int(s, prec, rnd=round_fast): + """ + Optimized computation of zeta(s) for an integer s. + """ + wp = prec + 20 + s = int(s) + if s in zeta_int_cache and zeta_int_cache[s][0] >= wp: + return mpf_pos(zeta_int_cache[s][1], prec, rnd) + if s < 2: + if s == 1: + raise ValueError("zeta(1) pole") + if not s: + return mpf_neg(fhalf) + return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd) + # 2^-s term vanishes? + if s >= wp: + return mpf_perturb(fone, 0, prec, rnd) + # 5^-s term vanishes? + elif s >= wp*0.431: + t = one = 1 << wp + t += 1 << (wp - s) + t += one // (MPZ_THREE ** s) + t += 1 << max(0, wp - s*2) + return from_man_exp(t, -wp, prec, rnd) + else: + # Fast enough to sum directly? + # Even better, we use the Euler product (idea stolen from pari) + m = (float(wp)/(s-1) + 1) + if m < 30: + needed_terms = int(2.0**m + 1) + if needed_terms < int(wp/2.54 + 5) / 10: + t = fone + for k in list_primes(needed_terms): + #print k, needed_terms + powprec = int(wp - s*math.log(k,2)) + if powprec < 2: + break + a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp) + t = mpf_mul(t, a, wp) + return mpf_div(fone, t, wp) + # Use Borwein's algorithm + n = int(wp/2.54 + 5) + d = borwein_coefficients(n) + t = MPZ_ZERO + s = MPZ(s) + for k in xrange(n): + t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s + t = (t << wp) // (-d[n]) + t = (t << wp) // ((1 << wp) - (1 << (wp+1-s))) + if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache): + zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp)) + return from_man_exp(t, -wp-wp, prec, rnd) + +def mpf_zeta(s, prec, rnd=round_fast, alt=0): + sign, man, exp, bc = s + if not man: + if s == fzero: + if alt: + return fhalf + else: + return mpf_neg(fhalf) + if s == finf: + return fone + return fnan + wp = prec + 20 + # First term vanishes? + if (not sign) and (exp + bc > (math.log(wp,2) + 2)): + return mpf_perturb(fone, alt, prec, rnd) + # Optimize for integer arguments + elif exp >= 0: + if alt: + if s == fone: + return mpf_ln2(prec, rnd) + z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd]) + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_mul(z, q, prec, rnd) + else: + return mpf_zeta_int(to_int(s), prec, rnd) + # Negative: use the reflection formula + # Borwein only proves the accuracy bound for x >= 1/2. 
However, based on + # tests, the accuracy without reflection is quite good even some distance + # to the left of 1/2. XXX: verify this. + if sign: + # XXX: could use the separate refl. formula for Dirichlet eta + if alt: + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_mul(mpf_zeta(s, wp), q, prec, rnd) + # XXX: -1 should be done exactly + y = mpf_sub(fone, s, 10*wp) + a = mpf_gamma(y, wp) + b = mpf_zeta(y, wp) + c = mpf_sin_pi(mpf_shift(s, -1), wp) + wp2 = wp + max(0,exp+bc) + pi = mpf_pi(wp+wp2) + d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2) + return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd) + + # Near pole + r = mpf_sub(fone, s, wp) + asign, aman, aexp, abc = mpf_abs(r) + pole_dist = -2*(aexp+abc) + if pole_dist > wp: + if alt: + return mpf_ln2(prec, rnd) + else: + q = mpf_neg(mpf_div(fone, r, wp)) + return mpf_add(q, mpf_euler(wp), prec, rnd) + else: + wp += max(0, pole_dist) + + t = MPZ_ZERO + #wp += 16 - (prec & 15) + # Use Borwein's algorithm + n = int(wp/2.54 + 5) + d = borwein_coefficients(n) + t = MPZ_ZERO + sf = to_fixed(s, wp) + ln2 = ln2_fixed(wp) + for k in xrange(n): + u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp + #esign, eman, eexp, ebc = mpf_exp(u, wp) + #offset = eexp + wp + #if offset >= 0: + # w = ((d[k] - d[n]) * eman) << offset + #else: + # w = ((d[k] - d[n]) * eman) >> (-offset) + eman = exp_fixed(u, wp, ln2) + w = (d[k] - d[n]) * eman + if k & 1: + t -= w + else: + t += w + t = t // (-d[n]) + t = from_man_exp(t, -wp, wp) + if alt: + return mpf_pos(t, prec, rnd) + else: + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_div(t, q, prec, rnd) + +def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False): + re, im = s + if im == fzero: + return mpf_zeta(re, prec, rnd, alt), fzero + + # slow for large s + if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)): + raise NotImplementedError + + wp = prec + 20 + + # Near pole + r = mpc_sub(mpc_one, s, wp) + asign, aman, aexp, abc = mpc_abs(r, 10) + pole_dist = -2*(aexp+abc) + if pole_dist > wp: + if alt: + q = mpf_ln2(wp) + y = mpf_mul(q, mpf_euler(wp), wp) + g = mpf_shift(mpf_mul(q, q, wp), -1) + g = mpf_sub(y, g) + z = mpc_mul_mpf(r, mpf_neg(g), wp) + z = mpc_add_mpf(z, q, wp) + return mpc_pos(z, prec, rnd) + else: + q = mpc_neg(mpc_div(mpc_one, r, wp)) + q = mpc_add_mpf(q, mpf_euler(wp), wp) + return mpc_pos(q, prec, rnd) + else: + wp += max(0, pole_dist) + + # Reflection formula. To be rigorous, we should reflect to the left of + # re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary + # slowdown for interesting values of s + if mpf_lt(re, fzero): + # XXX: could use the separate refl. 
formula for Dirichlet eta
+        if alt:
+            q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
+                wp), wp)
+            return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
+        # XXX: -1 should be done exactly
+        y = mpc_sub(mpc_one, s, 10*wp)
+        a = mpc_gamma(y, wp)
+        b = mpc_zeta(y, wp)
+        c = mpc_sin_pi(mpc_shift(s, -1), wp)
+        rsign, rman, rexp, rbc = re
+        isign, iman, iexp, ibc = im
+        mag = max(rexp+rbc, iexp+ibc)
+        wp2 = wp + max(0, mag)
+        pi = mpf_pi(wp+wp2)
+        pi2 = (mpf_shift(pi, 1), fzero)
+        d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
+        return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
+    n = int(wp/2.54 + 5)
+    n += int(0.9*abs(to_int(im)))
+    d = borwein_coefficients(n)
+    ref = to_fixed(re, wp)
+    imf = to_fixed(im, wp)
+    tre = MPZ_ZERO
+    tim = MPZ_ZERO
+    one = MPZ_ONE << wp
+    one_2wp = MPZ_ONE << (2*wp)
+    critical_line = re == fhalf
+    ln2 = ln2_fixed(wp)
+    pi2 = pi_fixed(wp-1)
+    wp2 = wp+wp
+    for k in xrange(n):
+        log = log_int_fixed(k+1, wp, ln2)
+        # A square root is much cheaper than an exp
+        if critical_line:
+            w = one_2wp // isqrt_fast((k+1) << wp2)
+        else:
+            w = exp_fixed((-ref*log) >> wp, wp)
+        if k & 1:
+            w *= (d[n] - d[k])
+        else:
+            w *= (d[k] - d[n])
+        wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2)
+        tre += (w * wre) >> wp
+        tim += (w * wim) >> wp
+    tre //= (-d[n])
+    tim //= (-d[n])
+    tre = from_man_exp(tre, -wp, wp)
+    tim = from_man_exp(tim, -wp, wp)
+    if alt:
+        return mpc_pos((tre, tim), prec, rnd)
+    else:
+        q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
+        return mpc_div((tre, tim), q, prec, rnd)
+
+def mpf_altzeta(s, prec, rnd=round_fast):
+    return mpf_zeta(s, prec, rnd, 1)
+
+def mpc_altzeta(s, prec, rnd=round_fast):
+    return mpc_zeta(s, prec, rnd, 1)
+
+# Not optimized currently
+mpf_zetasum = None
+
+
+def pow_fixed(x, n, wp):
+    if n == 1:
+        return x
+    y = MPZ_ONE << wp
+    while n:
+        if n & 1:
+            y = (y*x) >> wp
+            n -= 1
+        x = (x*x) >> wp
+        n //= 2
+    return y
+
+# TODO: optimize / cleanup interface / unify with list_primes
+sieve_cache = []
+primes_cache = []
+mult_cache = []
+
+def primesieve(n):
+    global sieve_cache, primes_cache, mult_cache
+    if n < len(sieve_cache):
+        sieve = sieve_cache#[:n+1]
+        primes = primes_cache[:primes_cache.index(max(sieve))+1]
+        mult = mult_cache#[:n+1]
+        return sieve, primes, mult
+    sieve = [0] * (n+1)
+    mult = [0] * (n+1)
+    primes = list_primes(n)
+    for p in primes:
+        #sieve[p::p] = p
+        for k in xrange(p,n+1,p):
+            sieve[k] = p
+    for i, p in enumerate(sieve):
+        if i >= 2:
+            m = 1
+            n = i // p
+            while not n % p:
+                n //= p
+                m += 1
+            mult[i] = m
+    sieve_cache = sieve
+    primes_cache = primes
+    mult_cache = mult
+    return sieve, primes, mult
+
+def zetasum_sieved(critical_line, sre, sim, a, n, wp):
+    if a < 1:
+        raise ValueError("a cannot be less than 1")
+    sieve, primes, mult = primesieve(a+n)
+    basic_powers = {}
+    one = MPZ_ONE << wp
+    one_2wp = MPZ_ONE << (2*wp)
+    wp2 = wp+wp
+    ln2 = ln2_fixed(wp)
+    pi2 = pi_fixed(wp-1)
+    for p in primes:
+        if p*2 > a+n:
+            break
+        log = log_int_fixed(p, wp, ln2)
+        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
+        if critical_line:
+            u = one_2wp // isqrt_fast(p<<wp2)
+        else:
+            u = exp_fixed((-sre*log)>>wp, wp)
+        pre = (u*cos) >> wp
+        pim = (u*sin) >> wp
+        basic_powers[p] = [(pre, pim)]
+        tre, tim = pre, pim
+        for m in range(1,int(math.log(a+n,p)+0.01)+1):
+            tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
+            basic_powers[p].append((tre,tim))
+    xre = MPZ_ZERO
+    xim = MPZ_ZERO
+    if a == 1:
+        xre += one
+    aa = max(a,2)
+    for k in xrange(aa, a+n+1):
+        p = sieve[k]
+        if p in basic_powers:
+            m = mult[k]
+            tre, tim = basic_powers[p][m-1]
+            while 1:
+                k //= p**m
+                if k == 1:
+                    break
+                p = sieve[k]
+                m = mult[k]
+                pre, pim = basic_powers[p][m-1]
+                tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
+        else:
+            log = log_int_fixed(k, wp, ln2)
+            cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
+            if critical_line:
+                u = one_2wp // isqrt_fast(k<<wp2)
+            else:
+                u = exp_fixed((-sre*log)>>wp, wp)
+            tre = (u*cos) >> wp
+            tim = (u*sin) >> wp
+        xre += tre
+        xim += tim
+    return xre, xim
+
+# Set to something large to disable
+ZETASUM_SIEVE_CUTOFF = 10
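+
+# For orientation, Borwein's approximation from the comment block above,
+# restated in ordinary floating point for real s. This is an illustrative
+# sketch (hypothetical helper, not used elsewhere); the fixed-point loops
+# in mpf_zeta_int/mpf_zeta/mpc_zeta implement the same sum with precision
+# management on top.
+def _borwein_zeta_sketch(s, n=30):
+    # d_k coefficients, exactly as in borwein_coefficients()
+    d = [1]*(n+1)
+    t = 1
+    for i in range(1, n+1):
+        t = t * 4 * (n+i-1) * (n-i+1) // ((2*i) * (2*i-1))
+        d[i] = d[i-1] + t
+    # eta(s), then zeta(s) = eta(s) / (1 - 2**(1-s))
+    eta = sum((-1)**k * (d[k]-d[n]) / float(k+1)**s
+              for k in range(n)) / float(-d[n])
+    return eta / (1.0 - 2.0**(1-s))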
+def mpc_zetasum(s, a, n, derivatives, reflect, prec):
+    """
+    Fast version of mp._zetasum, assuming s = complex, a = integer.
+    """
+
+    wp = prec + 10
+    derivatives = list(derivatives)
+    have_derivatives = derivatives != [0]
+    have_one_derivative = len(derivatives) == 1
+
+    # parse s
+    sre, sim = s
+    critical_line = (sre == fhalf)
+    sre = to_fixed(sre, wp)
+    sim = to_fixed(sim, wp)
+
+    if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives \
+            and not reflect and (n < 4e7 or sys.maxsize > 2**32):
+        re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp)
+        xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))]
+        return xs, []
+
+    maxd = max(derivatives)
+    if not have_one_derivative:
+        derivatives = range(maxd+1)
+
+    # x_d = 0, y_d = 0
+    xre = [MPZ_ZERO for d in derivatives]
+    xim = [MPZ_ZERO for d in derivatives]
+    if reflect:
+        yre = [MPZ_ZERO for d in derivatives]
+        yim = [MPZ_ZERO for d in derivatives]
+    else:
+        yre = yim = []
+
+    one = MPZ_ONE << wp
+    one_2wp = MPZ_ONE << (2*wp)
+
+    ln2 = ln2_fixed(wp)
+    pi2 = pi_fixed(wp-1)
+    wp2 = wp+wp
+
+    for w in xrange(a, a+n+1):
+        log = log_int_fixed(w, wp, ln2)
+        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
+        if critical_line:
+            u = one_2wp // isqrt_fast(w<<wp2)
+        else:
+            u = exp_fixed((-sre*log)>>wp, wp)
+        xterm_re = (u * cos) >> wp
+        xterm_im = (u * sin) >> wp
+        if reflect:
+            reciprocal = (one_2wp // (u*w))
+            yterm_re = (reciprocal * cos) >> wp
+            yterm_im = (reciprocal * sin) >> wp
+
+        if have_derivatives:
+            if have_one_derivative:
+                log = pow_fixed(log, maxd, wp)
+                xre[0] += (xterm_re * log) >> wp
+                xim[0] += (xterm_im * log) >> wp
+                if reflect:
+                    yre[0] += (yterm_re * log) >> wp
+                    yim[0] += (yterm_im * log) >> wp
+            else:
+                t = MPZ_ONE << wp
+                for d in derivatives:
+                    xre[d] += (xterm_re * t) >> wp
+                    xim[d] += (xterm_im * t) >> wp
+                    if reflect:
+                        yre[d] += (yterm_re * t) >> wp
+                        yim[d] += (yterm_im * t) >> wp
+                    t = (t * log) >> wp
+        else:
+            xre[0] += xterm_re
+            xim[0] += xterm_im
+            if reflect:
+                yre[0] += yterm_re
+                yim[0] += yterm_im
+    if have_derivatives:
+        if have_one_derivative:
+            if maxd % 2:
+                xre[0] = -xre[0]
+                xim[0] = -xim[0]
+                if reflect:
+                    yre[0] = -yre[0]
+                    yim[0] = -yim[0]
+        else:
+            xre = [(-1)**d * xre[d] for d in derivatives]
+            xim = [(-1)**d * xim[d] for d in derivatives]
+            if reflect:
+                yre = [(-1)**d * yre[d] for d in derivatives]
+                yim = [(-1)**d * yim[d] for d in derivatives]
+    xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
+        for (xa, xb) in zip(xre, xim)]
+    ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
+        for (ya, yb) in zip(yre, yim)]
+    return xs, ys
+
+
+#-----------------------------------------------------------------------#
+#                                                                       #
+#              The gamma function  (NEW IMPLEMENTATION)                 #
+#                                                                       #
+#-----------------------------------------------------------------------#
+
+# Higher means faster, but more precomputation time
+MAX_GAMMA_TAYLOR_PREC = 5000
+# Need to derive higher bounds for Taylor series to go higher
+assert MAX_GAMMA_TAYLOR_PREC < 15000
+
+# Use Stirling's series if abs(x) > beta*prec
+# Important: must be
large enough for convergence! +GAMMA_STIRLING_BETA = 0.2 + +SMALL_FACTORIAL_CACHE_SIZE = 150 + +gamma_taylor_cache = {} +gamma_stirling_cache = {} + +small_factorial_cache = [from_int(ifac(n)) for \ + n in range(SMALL_FACTORIAL_CACHE_SIZE+1)] + +def zeta_array(N, prec): + """ + zeta(n) = A * pi**n / n! + B + + where A is a rational number (A = Bernoulli number + for n even) and B is an infinite sum over powers of exp(2*pi). + (B = 0 for n even). + + TODO: this is currently only used for gamma, but could + be very useful elsewhere. + """ + extra = 30 + wp = prec+extra + zeta_values = [MPZ_ZERO] * (N+2) + pi = pi_fixed(wp) + # STEP 1: + one = MPZ_ONE << wp + zeta_values[0] = -one//2 + f_2pi = mpf_shift(mpf_pi(wp),1) + exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp) + # Compute exponential series + # Store values of 1/(exp(2*pi*k)-1), + # exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2 + # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2 + exps3 = [] + k = 1 + while 1: + tp = wp - 9*k + if tp < 1: + break + # 1/(exp(2*pi*k-1) + q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp) + # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2 + q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp) + q1 = to_fixed(q1, wp) + q2 = to_fixed(q2, wp) + q2 = (k * q2 * pi) >> wp + exps3.append((q1, q2)) + # Multiply for next round + exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp) + k += 1 + # Exponential sum + for n in xrange(3, N+1, 2): + s = MPZ_ZERO + k = 1 + for e1, e2 in exps3: + if n%4 == 3: + t = e1 // k**n + else: + U = (n-1)//4 + t = (e1 + e2//U) // k**n + if not t: + break + s += t + k += 1 + zeta_values[n] = -2*s + # Even zeta values + B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)] + pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp) + pi_pow = mpf_div(pi_pow, from_int(4), wp) + for n in xrange(2,N+2,2): + z = mpf_mul(B[n], pi_pow, wp) + zeta_values[n] = to_fixed(z, wp) + pi_pow = mpf_mul(pi_pow, fpi, wp) + pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp) + # Zeta sum + reciprocal_pi = (one << wp) // pi + for n in xrange(3, N+1, 4): + U = (n-3)//4 + s = zeta_values[4*U+4]*(4*U+7)//4 + for k in xrange(1, U+1): + s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp + zeta_values[n] += (2*s*reciprocal_pi) >> wp + for n in xrange(5, N+1, 4): + U = (n-1)//4 + s = zeta_values[4*U+2]*(2*U+1) + for k in xrange(1, 2*U+1): + s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp + zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U) + return [x>>extra for x in zeta_values] + +def gamma_taylor_coefficients(inprec): + """ + Gives the Taylor coefficients of 1/gamma(1+x) as + a list of fixed-point numbers. Enough coefficients are returned + to ensure that the series converges to the given precision + when x is in [0.5, 1.5]. 
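+
+    Up to the fixed-point scaling (and the reversal into Horner order
+    below), these are the Maclaurin coefficients of 1/gamma(1+x). For
+    illustration, they can be cross-checked against the high-level API
+    (a sketch only, not used by this module)::
+
+        from mpmath import mp, taylor, gamma
+        mp.dps = 30
+        coeffs = taylor(lambda t: 1/gamma(1+t), 0, 8)
+        # coeffs[0] = 1, coeffs[1] = euler = 0.5772..., etc.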
+    """
+    # Reuse nearby cache values (small case)
+    if inprec < 400:
+        prec = inprec + (10-(inprec%10))
+    elif inprec < 1000:
+        prec = inprec + (30-(inprec%30))
+    else:
+        prec = inprec
+    if prec in gamma_taylor_cache:
+        return gamma_taylor_cache[prec], prec
+
+    # Experimentally determined bounds
+    if prec < 1000:
+        N = int(prec**0.76 + 2)
+    else:
+        # Valid to at least 15000 bits
+        N = int(prec**0.787 + 2)
+
+    # Reuse higher precision values
+    for cprec in gamma_taylor_cache:
+        if cprec > prec:
+            coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
+            if inprec < 1000:
+                gamma_taylor_cache[prec] = coeffs
+            return coeffs, prec
+
+    # Cache at a higher precision (large case)
+    if prec > 1000:
+        prec = int(prec * 1.2)
+
+    wp = prec + 20
+    A = [0] * N
+    A[0] = MPZ_ZERO
+    A[1] = MPZ_ONE << wp
+    A[2] = euler_fixed(wp)
+    # SLOW, reference implementation
+    #zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
+    zeta_values = zeta_array(N, wp)
+    for k in xrange(3, N):
+        a = (-A[2]*A[k-1])>>wp
+        for j in xrange(2,k):
+            a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
+        a //= (1-k)
+        A[k] = a
+    A = [a>>20 for a in A]
+    A = A[::-1]
+    A = A[:-1]
+    gamma_taylor_cache[prec] = A
+    #return A, prec
+    return gamma_taylor_coefficients(inprec)
+
+def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
+    # Determine nearest multiple of N/2
+    #n = int(x >> (wp-1))
+    #steps = (n-1)>>1
+    nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
+    one = MPZ_ONE << wp
+    coeffs, cwp = gamma_taylor_coefficients(wp)
+    if nearest_int > 0:
+        r = one
+        for i in xrange(nearest_int-1):
+            x -= one
+            r = (r*x) >> wp
+        x -= one
+        p = MPZ_ZERO
+        for c in coeffs:
+            p = c + ((x*p)>>wp)
+        p >>= (cwp-wp)
+        if type == 0:
+            return from_man_exp((r<<wp)//p, -wp, prec, rnd)
+        if type == 2:
+            return from_man_exp((p<<wp)//r, -wp, prec, rnd)
+        if type == 3:
+            return mpf_log(mpf_abs(from_man_exp((r<<wp)//p, -wp)), prec, rnd)
+    else:
+        r = one
+        for i in xrange(-nearest_int):
+            r = (r*x) >> wp
+            x += one
+        p = MPZ_ZERO
+        for c in coeffs:
+            p = c + ((x*p)>>wp)
+        p >>= (cwp-wp)
+        if wp - bitcount(abs(x)) > 10:
+            # pass very close to 0, so do floating-point multiply
+            g = mpf_add(xmpf, from_int(-nearest_int)) # exact
+            r = from_man_exp(p*r,-wp-wp)
+            r = mpf_mul(r, g, wp)
+            if type == 0:
+                return mpf_div(fone, r, prec, rnd)
+            if type == 2:
+                return mpf_pos(r, prec, rnd)
+            if type == 3:
+                return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd)
+        else:
+            r = from_man_exp(x*p*r,-3*wp)
+            if type == 0: return mpf_div(fone, r, prec, rnd)
+            if type == 2: return mpf_pos(r, prec, rnd)
+            if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd))
+
+def stirling_coefficient(n):
+    if n in gamma_stirling_cache:
+        return gamma_stirling_cache[n]
+    p, q = bernfrac(n)
+    q *= MPZ(n*(n-1))
+    gamma_stirling_cache[n] = p, q, bitcount(abs(p)), bitcount(q)
+    return gamma_stirling_cache[n]
+
+def real_stirling_series(x, prec):
+    """
+    Sums the rational part of Stirling's expansion,
+
+    log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ...
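+
+    The callers supply the remaining (z-1/2)*log(z) term of Stirling's
+    expansion. Folding the two parts together in ordinary floating
+    point gives the familiar approximation of log(gamma(z)) (a sketch
+    with a hypothetical helper; assumes z is already large enough)::
+
+        import math
+
+        def log_gamma_stirling(z, K=6):
+            bern = [1/6., -1/30., 1/42., -1/30., 5/66., -691/2730.]
+            s = 0.5*math.log(2*math.pi) - z + (z-0.5)*math.log(z)
+            for k, b in enumerate(bern[:K]):
+                # B_(2k+2) / ((2k+2)*(2k+1)*z^(2k+1))
+                s += b/((2*k+2)*(2*k+1)*z**(2*k+1))
+            return s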
+ + """ + t = (MPZ_ONE<<(prec+prec)) // x # t = 1/x + u = (t*t)>>prec # u = 1/x**2 + s = ln_sqrt2pi_fixed(prec) - x + # Add initial terms of Stirling's series + s += t//12; t = (t*u)>>prec + s -= t//360; t = (t*u)>>prec + s += t//1260; t = (t*u)>>prec + s -= t//1680; t = (t*u)>>prec + if not t: return s + s += t//1188; t = (t*u)>>prec + s -= 691*t//360360; t = (t*u)>>prec + s += t//156; t = (t*u)>>prec + if not t: return s + s -= 3617*t//122400; t = (t*u)>>prec + s += 43867*t//244188; t = (t*u)>>prec + s -= 174611*t//125400; t = (t*u)>>prec + if not t: return s + k = 22 + # From here on, the coefficients are growing, so we + # have to keep t at a roughly constant size + usize = bitcount(abs(u)) + tsize = bitcount(abs(t)) + texp = 0 + while 1: + p, q, pb, qb = stirling_coefficient(k) + term_mag = tsize + pb + texp + shift = -texp + m = pb - term_mag + if m > 0 and shift < m: + p >>= m + shift -= m + m = tsize - term_mag + if m > 0 and shift < m: + w = t >> m + shift -= m + else: + w = t + term = (t*p//q) >> shift + if not term: + break + s += term + t = (t*u) >> usize + texp -= (prec - usize) + k += 2 + return s + +def complex_stirling_series(x, y, prec): + # t = 1/z + _m = (x*x + y*y) >> prec + tre = (x << prec) // _m + tim = (-y << prec) // _m + # u = 1/z**2 + ure = (tre*tre - tim*tim) >> prec + uim = tim*tre >> (prec-1) + # s = log(sqrt(2*pi)) - z + sre = ln_sqrt2pi_fixed(prec) - x + sim = -y + + # Add initial terms of Stirling's series + sre += tre//12; sim += tim//12; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= tre//360; sim -= tim//360; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre += tre//1260; sim += tim//1260; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= tre//1680; sim -= tim//1680; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + if abs(tre) + abs(tim) < 5: return sre, sim + sre += tre//1188; sim += tim//1188; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= 691*tre//360360; sim -= 691*tim//360360; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre += tre//156; sim += tim//156; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + if abs(tre) + abs(tim) < 5: return sre, sim + sre -= 3617*tre//122400; sim -= 3617*tim//122400; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre += 43867*tre//244188; sim += 43867*tim//244188; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= 174611*tre//125400; sim -= 174611*tim//125400; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + if abs(tre) + abs(tim) < 5: return sre, sim + + k = 22 + # From here on, the coefficients are growing, so we + # have to keep t at a roughly constant size + usize = bitcount(max(abs(ure), abs(uim))) + tsize = bitcount(max(abs(tre), abs(tim))) + texp = 0 + while 1: + p, q, pb, qb = stirling_coefficient(k) + term_mag = tsize + pb + texp + shift = -texp + m = pb - term_mag + if m > 0 and shift < m: + p >>= m + shift -= m + m = tsize - term_mag + if m > 0 and shift < m: + wre = tre >> m + wim = tim >> m + shift -= m + else: + wre = tre + wim = tim + termre = (tre*p//q) >> shift + termim = (tim*p//q) >> shift + if abs(termre) + abs(termim) < 5: + break + sre += termre + sim += termim + tre, tim = ((tre*ure - tim*uim)>>usize), \ + ((tre*uim + tim*ure)>>usize) + texp -= (prec - usize) + k += 2 + return sre, sim + + +def mpf_gamma(x, prec, rnd='d', type=0): + """ + This function implements 
multipurpose evaluation of the gamma + function, G(x), as well as the following versions of the same: + + type = 0 -- G(x) [standard gamma function] + type = 1 -- G(x+1) = x*G(x+1) = x! [factorial] + type = 2 -- 1/G(x) [reciprocal gamma function] + type = 3 -- log(|G(x)|) [log-gamma function, real part] + """ + + # Specal values + sign, man, exp, bc = x + if not man: + if x == fzero: + if type == 1: return fone + if type == 2: return fzero + raise ValueError("gamma function pole") + if x == finf: + if type == 2: return fzero + return finf + return fnan + + # First of all, for log gamma, numbers can be well beyond the fixed-point + # range, so we must take care of huge numbers before e.g. trying + # to convert x to the nearest integer + if type == 3: + wp = prec+20 + if exp+bc > wp and not sign: + return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd) + + # We strongly want to special-case small integers + is_integer = exp >= 0 + if is_integer: + # Poles + if sign: + if type == 2: + return fzero + raise ValueError("gamma function pole") + # n = x + n = man << exp + if n < SMALL_FACTORIAL_CACHE_SIZE: + if type == 0: + return mpf_pos(small_factorial_cache[n-1], prec, rnd) + if type == 1: + return mpf_pos(small_factorial_cache[n], prec, rnd) + if type == 2: + return mpf_div(fone, small_factorial_cache[n-1], prec, rnd) + if type == 3: + return mpf_log(small_factorial_cache[n-1], prec, rnd) + else: + # floor(abs(x)) + n = int(man >> (-exp)) + + # Estimate size and precision + # Estimate log(gamma(|x|),2) as x*log(x,2) + mag = exp + bc + gamma_size = n*mag + + if type == 3: + wp = prec + 20 + else: + wp = prec + bitcount(gamma_size) + 20 + + # Very close to 0, pole + if mag < -wp: + if type == 0: + return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd) + if type == 1: return mpf_sub(fone, x, prec, rnd) + if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd) + if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd)) + + # From now on, we assume having a gamma function + if type == 1: + return mpf_gamma(mpf_add(x, fone), prec, rnd, 0) + + # Special case integers (those not small enough to be caught above, + # but still small enough for an exact factorial to be faster + # than an approximate algorithm), and half-integers + if exp >= -1: + if is_integer: + if gamma_size < 10*wp: + if type == 0: + return from_int(ifac(n-1), prec, rnd) + if type == 2: + return from_rational(MPZ_ONE, ifac(n-1), prec, rnd) + if type == 3: + return mpf_log(from_int(ifac(n-1)), prec, rnd) + # half-integer + if n < 100 or gamma_size < 10*wp: + if sign: + w = sqrtpi_fixed(wp) + if n % 2: f = ifac2(2*n+1) + else: f = -ifac2(2*n+1) + if type == 0: + return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1) + if type == 2: + return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1) + if type == 3: + return mpf_log(mpf_shift(from_rational(w, abs(f), + prec, rnd), -wp+n+1), prec, rnd) + elif n == 0: + if type == 0: return mpf_sqrtpi(prec, rnd) + if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd) + if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd) + else: + w = sqrtpi_fixed(wp) + w = from_man_exp(w * ifac2(2*n-1), -wp-n) + if type == 0: return mpf_pos(w, prec, rnd) + if type == 2: return mpf_div(fone, w, prec, rnd) + if type == 3: return mpf_log(mpf_abs(w), prec, rnd) + + # Convert to fixed point + offset = exp + wp + if offset >= 0: absxman = man << offset + else: absxman = man >> (-offset) + + # For log gamma, provide accurate evaluation for x = 1+eps and 2+eps + if type == 3 and 
not sign: + one = MPZ_ONE << wp + one_dist = abs(absxman-one) + two_dist = abs(absxman-2*one) + cancellation = (wp - bitcount(min(one_dist, two_dist))) + if cancellation > 10: + xsub1 = mpf_sub(fone, x) + xsub2 = mpf_sub(ftwo, x) + xsub1mag = xsub1[2]+xsub1[3] + xsub2mag = xsub2[2]+xsub2[3] + if xsub1mag < -wp: + return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd) + if xsub2mag < -wp: + return mpf_mul(mpf_sub(fone, mpf_euler(wp)), + mpf_sub(x, ftwo), prec, rnd) + # Proceed but increase precision + wp += max(-xsub1mag, -xsub2mag) + offset = exp + wp + if offset >= 0: absxman = man << offset + else: absxman = man >> (-offset) + + # Use Taylor series if appropriate + n_for_stirling = int(GAMMA_STIRLING_BETA*wp) + if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC: + if sign: + absxman = -absxman + return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type) + + # Use Stirling's series + # First ensure that |x| is large enough for rapid convergence + xorig = x + + # Argument reduction + r = 0 + if n < n_for_stirling: + r = one = MPZ_ONE << wp + d = n_for_stirling - n + for k in xrange(d): + r = (r * absxman) >> wp + absxman += one + x = xabs = from_man_exp(absxman, -wp) + if sign: + x = mpf_neg(x) + else: + xabs = mpf_abs(x) + + # Asymptotic series + y = real_stirling_series(absxman, wp) + u = to_fixed(mpf_log(xabs, wp), wp) + u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp + y += u + w = from_man_exp(y, -wp) + + # Compute final value + if sign: + # Reflection formula + A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp) + B = mpf_neg(mpf_pi(wp)) + if type == 0 or type == 2: + A = mpf_mul(A, mpf_exp(w, wp)) + if r: + B = mpf_mul(B, from_man_exp(r, -wp), wp) + if type == 0: + return mpf_div(B, A, prec, rnd) + if type == 2: + return mpf_div(A, B, prec, rnd) + if type == 3: + if r: + B = mpf_mul(B, from_man_exp(r, -wp), wp) + A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp) + return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd) + else: + if type == 0: + if r: + return mpf_div(mpf_exp(w, wp), + from_man_exp(r, -wp), prec, rnd) + return mpf_exp(w, prec, rnd) + if type == 2: + if r: + return mpf_div(from_man_exp(r, -wp), + mpf_exp(w, wp), prec, rnd) + return mpf_exp(mpf_neg(w), prec, rnd) + if type == 3: + if r: + return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd) + return mpf_pos(w, prec, rnd) + + +def mpc_gamma(z, prec, rnd='d', type=0): + a, b = z + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + + if b == fzero: + # Imaginary part on negative half-axis for log-gamma function + if type == 3 and asign: + re = mpf_gamma(a, prec, rnd, 3) + n = (-aman) >> (-aexp) + im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd) + return re, im + return mpf_gamma(a, prec, rnd, type), fzero + + # Some kind of complex inf/nan + if (not aman and aexp) or (not bman and bexp): + return (fnan, fnan) + + # Initial working precision + wp = prec + 20 + + amag = aexp+abc + bmag = bexp+bbc + if aman: + mag = max(amag, bmag) + else: + mag = bmag + + # Close to 0 + if mag < -8: + if mag < -wp: + # 1/gamma(z) = z + euler*z^2 + O(z^3) + v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp) + if type == 0: return mpc_reciprocal(v, prec, rnd) + if type == 1: return mpc_div(z, v, prec, rnd) + if type == 2: return mpc_pos(v, prec, rnd) + if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd) + elif type != 1: + wp += (-mag) + + # Handle huge log-gamma values; must do this before converting to + # a fixed-point value. 
TODO: determine a precise cutoff of validity + # depending on amag and bmag + if type == 3 and mag > wp and ((not asign) or (bmag >= amag)): + return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd) + + # From now on, we assume having a gamma function + if type == 1: + return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0) + + an = abs(to_int(a)) + bn = abs(to_int(b)) + absn = max(an, bn) + gamma_size = absn*mag + if type == 3: + pass + else: + wp += bitcount(gamma_size) + + # Reflect to the right half-plane. Note that Stirling's expansion + # is valid in the left half-plane too, as long as we're not too close + # to the real axis, but in order to use this argument reduction + # in the negative direction must be implemented. + #need_reflection = asign and ((bmag < 0) or (amag-bmag > 4)) + need_reflection = asign + zorig = z + if need_reflection: + z = mpc_neg(z) + asign, aman, aexp, abc = a = z[0] + bsign, bman, bexp, bbc = b = z[1] + + # Imaginary part very small compared to real one? + yfinal = 0 + balance_prec = 0 + if bmag < -10: + # Check z ~= 1 and z ~= 2 for loggamma + if type == 3: + zsub1 = mpc_sub_mpf(z, fone) + if zsub1[0] == fzero: + cancel1 = -bmag + else: + cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag) + if cancel1 > wp: + pi = mpf_pi(wp) + x = mpc_mul_mpf(zsub1, pi, wp) + x = mpc_mul(x, x, wp) + x = mpc_div_mpf(x, from_int(12), wp) + y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp) + yfinal = mpc_add(x, y, wp) + if not need_reflection: + return mpc_pos(yfinal, prec, rnd) + elif cancel1 > 0: + wp += cancel1 + zsub2 = mpc_sub_mpf(z, ftwo) + if zsub2[0] == fzero: + cancel2 = -bmag + else: + cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag) + if cancel2 > wp: + pi = mpf_pi(wp) + t = mpf_sub(mpf_mul(pi, pi), from_int(6)) + x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp) + x = mpc_div_mpf(x, from_int(12), wp) + y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp) + yfinal = mpc_add(x, y, wp) + if not need_reflection: + return mpc_pos(yfinal, prec, rnd) + elif cancel2 > 0: + wp += cancel2 + if bmag < -wp: + # Compute directly from the real gamma function. 
+ pp = 2*(wp+10) + aabs = mpf_abs(a) + eps = mpf_shift(fone, amag-wp) + x1 = mpf_gamma(aabs, pp, type=type) + x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type) + xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp) + y = mpf_mul(b, xprime, prec, rnd) + yfinal = (x1, y) + # Note: we still need to use the reflection formula for + # near-poles, and the correct branch of the log-gamma function + if not need_reflection: + return mpc_pos(yfinal, prec, rnd) + else: + balance_prec += (-bmag) + + wp += balance_prec + n_for_stirling = int(GAMMA_STIRLING_BETA*wp) + need_reduction = absn < n_for_stirling + + afix = to_fixed(a, wp) + bfix = to_fixed(b, wp) + + r = 0 + if not yfinal: + zprered = z + # Argument reduction + if absn < n_for_stirling: + absn = complex(an, bn) + d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an) + rre = one = MPZ_ONE << wp + rim = MPZ_ZERO + for k in xrange(d): + rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp) + afix += one + r = from_man_exp(rre, -wp), from_man_exp(rim, -wp) + a = from_man_exp(afix, -wp) + z = a, b + + yre, yim = complex_stirling_series(afix, bfix, wp) + # (z-1/2)*log(z) + S + lre, lim = mpc_log(z, wp) + lre = to_fixed(lre, wp) + lim = to_fixed(lim, wp) + yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre + yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim + y = from_man_exp(yre, -wp), from_man_exp(yim, -wp) + + if r and type == 3: + # If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z) + # and log(gamma(z)) coincide. Otherwise, use the zeroth order + # Stirling expansion to compute the correct imaginary part. + y = mpc_sub(y, mpc_log(r, wp), wp) + zfa = to_float(zprered[0]) + zfb = to_float(zprered[1]) + zfabs = math.hypot(zfa,zfb) + #if not (zfa > 0.0 and zfabs <= 4): + yfb = to_float(y[1]) + u = math.atan2(zfb, zfa) + if zfabs <= 0.5: + gi = 0.577216*zfb - u + else: + gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs) + n = int(math.floor((gi-yfb)/(2*math.pi)+0.5)) + y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp)) + + if need_reflection: + if type == 0 or type == 2: + A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp) + B = (mpf_neg(mpf_pi(wp)), fzero) + if yfinal: + if type == 2: + A = mpc_div(A, yfinal, wp) + else: + A = mpc_mul(A, yfinal, wp) + else: + A = mpc_mul(A, mpc_exp(y, wp), wp) + if r: + B = mpc_mul(B, r, wp) + if type == 0: return mpc_div(B, A, prec, rnd) + if type == 2: return mpc_div(A, B, prec, rnd) + + # Reflection formula for the log-gamma function with correct branch + # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/ + # LogGamma[z] == -LogGamma[-z] - Log[-z] + + # Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] - + # Log[Sin[Pi (z - Floor[Re[z]])]] - + # Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]] + if type == 3: + if yfinal: + s1 = mpc_neg(yfinal) + else: + s1 = mpc_neg(y) + # s -= log(-z) + s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp) + # floor(re(z)) + rezfloor = mpf_floor(zorig[0]) + imzsign = mpf_sign(zorig[1]) + pi = mpf_pi(wp) + t = mpf_mul(pi, rezfloor) + t = mpf_mul_int(t, imzsign, wp) + s1 = (s1[0], mpf_add(s1[1], t, wp)) + s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp) + t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp) + t = mpc_log(t, wp) + s1 = mpc_sub(s1, t, wp) + # Note: may actually be unused, because we fall back + # to the mpf_ function for real arguments + if not imzsign: + t = mpf_mul(pi, mpf_floor(rezfloor), wp) + s1 = (s1[0], mpf_sub(s1[1], t, wp)) + return mpc_pos(s1, prec, rnd) + else: + if type == 0: + if r: + return mpc_div(mpc_exp(y, wp), r, prec, rnd) + return 
mpc_exp(y, prec, rnd) + if type == 2: + if r: + return mpc_div(r, mpc_exp(y, wp), prec, rnd) + return mpc_exp(mpc_neg(y), prec, rnd) + if type == 3: + return mpc_pos(y, prec, rnd) + +def mpf_factorial(x, prec, rnd='d'): + return mpf_gamma(x, prec, rnd, 1) + +def mpc_factorial(x, prec, rnd='d'): + return mpc_gamma(x, prec, rnd, 1) + +def mpf_rgamma(x, prec, rnd='d'): + return mpf_gamma(x, prec, rnd, 2) + +def mpc_rgamma(x, prec, rnd='d'): + return mpc_gamma(x, prec, rnd, 2) + +def mpf_loggamma(x, prec, rnd='d'): + sign, man, exp, bc = x + if sign: + raise ComplexResult + return mpf_gamma(x, prec, rnd, 3) + +def mpc_loggamma(z, prec, rnd='d'): + a, b = z + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if b == fzero and asign: + re = mpf_gamma(a, prec, rnd, 3) + n = (-aman) >> (-aexp) + im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd) + return re, im + return mpc_gamma(z, prec, rnd, 3) + +def mpf_gamma_int(n, prec, rnd=round_fast): + if n < SMALL_FACTORIAL_CACHE_SIZE: + return mpf_pos(small_factorial_cache[n-1], prec, rnd) + return mpf_gamma(from_int(n), prec, rnd) diff --git a/MLPY/Lib/site-packages/mpmath/libmp/libelefun.py b/MLPY/Lib/site-packages/mpmath/libmp/libelefun.py new file mode 100644 index 0000000000000000000000000000000000000000..3de2e5aaef02296ed03dec3df3021b56823f3728 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/libelefun.py @@ -0,0 +1,1428 @@ +""" +This module implements computation of elementary transcendental +functions (powers, logarithms, trigonometric and hyperbolic +functions, inverse trigonometric and hyperbolic) for real +floating-point numbers. + +For complex and interval implementations of the same functions, +see libmpc and libmpi. + +""" + +import math +from bisect import bisect + +from .backend import xrange +from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, BACKEND + +from .libmpf import ( + round_floor, round_ceiling, round_down, round_up, + round_nearest, round_fast, + ComplexResult, + bitcount, bctable, lshift, rshift, giant_steps, sqrt_fixed, + from_int, to_int, from_man_exp, to_fixed, to_float, from_float, + from_rational, normalize, + fzero, fone, fnone, fhalf, finf, fninf, fnan, + mpf_cmp, mpf_sign, mpf_abs, + mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_div, mpf_shift, + mpf_rdiv_int, mpf_pow_int, mpf_sqrt, + reciprocal_rnd, negative_rnd, mpf_perturb, + isqrt_fast +) + +from .libintmath import ifib + + +#------------------------------------------------------------------------------- +# Tuning parameters +#------------------------------------------------------------------------------- + +# Cutoff for computing exp from cosh+sinh. This reduces the +# number of terms by half, but also requires a square root which +# is expensive with the pure-Python square root code. 
+if BACKEND == 'python':
+    EXP_COSH_CUTOFF = 600
+else:
+    EXP_COSH_CUTOFF = 400
+# Cutoff for using more than 2 series
+EXP_SERIES_U_CUTOFF = 1500
+
+# Also basically determined by sqrt
+if BACKEND == 'python':
+    COS_SIN_CACHE_PREC = 400
+else:
+    COS_SIN_CACHE_PREC = 200
+COS_SIN_CACHE_STEP = 8
+cos_sin_cache = {}
+
+# Number of integer logarithms to cache (for zeta sums)
+MAX_LOG_INT_CACHE = 2000
+log_int_cache = {}
+
+LOG_TAYLOR_PREC = 2500  # Use Taylor series with caching up to this prec
+LOG_TAYLOR_SHIFT = 9    # Cache log values in steps of size 2^-N
+log_taylor_cache = {}
+# prec/size ratio of x for fastest convergence in AGM formula
+LOG_AGM_MAG_PREC_RATIO = 20
+
+ATAN_TAYLOR_PREC = 3000  # Same as for log
+ATAN_TAYLOR_SHIFT = 7    # steps of size 2^-N
+atan_taylor_cache = {}
+
+
+# ~= next power of two + 20
+cache_prec_steps = [22,22]
+for k in xrange(1, bitcount(LOG_TAYLOR_PREC)+1):
+    cache_prec_steps += [min(2**k,LOG_TAYLOR_PREC)+20] * 2**(k-1)
+
+
+#----------------------------------------------------------------------------#
+#                                                                            #
+#                   Elementary mathematical constants                        #
+#                                                                            #
+#----------------------------------------------------------------------------#
+
+def constant_memo(f):
+    """
+    Decorator for caching computed values of mathematical
+    constants. This decorator should be applied to a
+    function taking a single argument prec as input and
+    returning a fixed-point value with the given precision.
+    """
+    f.memo_prec = -1
+    f.memo_val = None
+    def g(prec, **kwargs):
+        memo_prec = f.memo_prec
+        if prec <= memo_prec:
+            return f.memo_val >> (memo_prec-prec)
+        newprec = int(prec*1.05+10)
+        f.memo_val = f(newprec, **kwargs)
+        f.memo_prec = newprec
+        return f.memo_val >> (newprec-prec)
+    g.__name__ = f.__name__
+    g.__doc__ = f.__doc__
+    return g
+
+def def_mpf_constant(fixed):
+    """
+    Create a function that computes the mpf value for a mathematical
+    constant, given a function that computes the fixed-point value.
+
+    Assumptions: the constant is positive and has magnitude ~= 1;
+    the fixed-point function rounds to floor.
+    """
+    def f(prec, rnd=round_fast):
+        wp = prec + 20
+        v = fixed(wp)
+        if rnd in (round_up, round_ceiling):
+            v += 1
+        return normalize(0, v, -wp, bitcount(v), prec, rnd)
+    f.__doc__ = fixed.__doc__
+    return f
+
+def bsp_acot(q, a, b, hyperbolic):
+    if b - a == 1:
+        a1 = MPZ(2*a + 3)
+        if hyperbolic or a&1:
+            return MPZ_ONE, a1 * q**2, a1
+        else:
+            return -MPZ_ONE, a1 * q**2, a1
+    m = (a+b)//2
+    p1, q1, r1 = bsp_acot(q, a, m, hyperbolic)
+    p2, q2, r2 = bsp_acot(q, m, b, hyperbolic)
+    return q2*p1 + r1*p2, q1*q2, r1*r2
+
+# the acoth(x) series converges like the geometric series for x^2
+#   N = ceil(p*log(2)/(2*log(x)))
+def acot_fixed(a, prec, hyperbolic):
+    """
+    Compute acot(a) or acoth(a) for an integer a with binary splitting; see
+    http://numbers.computation.free.fr/Constants/Algorithms/splitting.html
+    """
+    N = int(0.35 * prec/math.log(a) + 20)
+    p, q, r = bsp_acot(a, 0,N, hyperbolic)
+    return ((p+q)<<prec)//(q*a)
+
+def machin(coefs, prec, hyperbolic=False):
+    """
+    Evaluate a Machin-like formula, i.e., a linear combination of
+    acot(n) or acoth(n) for specific integer values of n, using fixed-
+    point arithmetic. The input should be a list [(c, n), ...], giving
+    c*acot[h](n) + ...
+    """
+    extraprec = 10
+    s = MPZ_ZERO
+    for a, b in coefs:
+        s += MPZ(a) * acot_fixed(MPZ(b), prec+extraprec, hyperbolic)
+    return (s >> extraprec)
+
+# Logarithms of integers are needed for various computations involving
+# logarithms, powers, radix conversion, etc
+
+@constant_memo
+def ln2_fixed(prec):
+    """
+    Computes ln(2). This is done with a hyperbolic Machin-type formula,
+    with binary splitting at high precision.
+    """
+    return machin([(18, 26), (-2, 4801), (8, 8749)], prec, True)
+
+@constant_memo
+def ln10_fixed(prec):
+    """
+    Computes ln(10). This is done with a hyperbolic Machin-type formula.
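+
+    With acoth(x) = log((x+1)/(x-1))/2, the formula used below is
+    ln(10) = 46*acoth(31) + 34*acoth(49) + 20*acoth(161), which can be
+    sanity-checked in ordinary floating point::
+
+        >>> import math
+        >>> acoth = lambda x: 0.5*math.log((x+1.0)/(x-1.0))
+        >>> abs(46*acoth(31) + 34*acoth(49) + 20*acoth(161)
+        ...     - math.log(10)) < 1e-13
+        True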
+ """ + return machin([(46, 31), (34, 49), (20, 161)], prec, True) + + +r""" +For computation of pi, we use the Chudnovsky series: + + oo + ___ k + 1 \ (-1) (6 k)! (A + B k) + ----- = ) ----------------------- + 12 pi /___ 3 3k+3/2 + (3 k)! (k!) C + k = 0 + +where A, B, and C are certain integer constants. This series adds roughly +14 digits per term. Note that C^(3/2) can be extracted so that the +series contains only rational terms. This makes binary splitting very +efficient. + +The recurrence formulas for the binary splitting were taken from +ftp://ftp.gmplib.org/pub/src/gmp-chudnovsky.c + +Previously, Machin's formula was used at low precision and the AGM iteration +was used at high precision. However, the Chudnovsky series is essentially as +fast as the Machin formula at low precision and in practice about 3x faster +than the AGM at high precision (despite theoretically having a worse +asymptotic complexity), so there is no reason not to use it in all cases. + +""" + +# Constants in Chudnovsky's series +CHUD_A = MPZ(13591409) +CHUD_B = MPZ(545140134) +CHUD_C = MPZ(640320) +CHUD_D = MPZ(12) + +def bs_chudnovsky(a, b, level, verbose): + """ + Computes the sum from a to b of the series in the Chudnovsky + formula. Returns g, p, q where p/q is the sum as an exact + fraction and g is a temporary value used to save work + for recursive calls. + """ + if b-a == 1: + g = MPZ((6*b-5)*(2*b-1)*(6*b-1)) + p = b**3 * CHUD_C**3 // 24 + q = (-1)**b * g * (CHUD_A+CHUD_B*b) + else: + if verbose and level < 4: + print(" binary splitting", a, b) + mid = (a+b)//2 + g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose) + g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose) + p = p1*p2 + g = g1*g2 + q = q1*p2 + q2*g1 + return g, p, q + +@constant_memo +def pi_fixed(prec, verbose=False, verbose_base=None): + """ + Compute floor(pi * 2**prec) as a big integer. + + This is done using Chudnovsky's series (see comments in + libelefun.py for details). + """ + # The Chudnovsky series gives 14.18 digits per term + N = int(prec/3.3219280948/14.181647462 + 2) + if verbose: + print("binary splitting with N =", N) + g, p, q = bs_chudnovsky(0, N, 0, verbose) + sqrtC = isqrt_fast(CHUD_C<<(2*prec)) + v = p*CHUD_C*sqrtC//((q+CHUD_A*p)*CHUD_D) + return v + +def degree_fixed(prec): + return pi_fixed(prec)//180 + +def bspe(a, b): + """ + Sum series for exp(1)-1 between a, b, returning the result + as an exact fraction (p, q). + """ + if b-a == 1: + return MPZ_ONE, MPZ(b) + m = (a+b)//2 + p1, q1 = bspe(a, m) + p2, q2 = bspe(m, b) + return p1*q2+p2, q1*q2 + +@constant_memo +def e_fixed(prec): + """ + Computes exp(1). This is done using the ordinary Taylor series for + exp, with binary splitting. For a description of the algorithm, + see: + + http://numbers.computation.free.fr/Constants/ + Algorithms/splitting.html + """ + # Slight overestimate of N needed for 1/N! < 2**(-prec) + # This could be tightened for large N. 
+    N = int(1.1*prec/math.log(prec) + 20)
+    p, q = bspe(0,N)
+    return ((p+q)<<prec)//q
+
+@constant_memo
+def phi_fixed(prec):
+    """
+    Computes the golden ratio, (1+sqrt(5))/2
+    """
+    prec += 10
+    a = isqrt_fast(MPZ_FIVE<<(2*prec)) + (MPZ_ONE << prec)
+    return a >> 11
+
+mpf_phi = def_mpf_constant(phi_fixed)
+mpf_pi = def_mpf_constant(pi_fixed)
+mpf_e = def_mpf_constant(e_fixed)
+mpf_degree = def_mpf_constant(degree_fixed)
+mpf_ln2 = def_mpf_constant(ln2_fixed)
+mpf_ln10 = def_mpf_constant(ln10_fixed)
+
+
+@constant_memo
+def ln_sqrt2pi_fixed(prec):
+    wp = prec + 10
+    # ln(sqrt(2*pi)) = ln(2*pi)/2
+    return to_fixed(mpf_log(mpf_shift(mpf_pi(wp), 1), wp), prec-1)
+
+@constant_memo
+def sqrtpi_fixed(prec):
+    return sqrt_fixed(pi_fixed(prec), prec)
+
+mpf_sqrtpi = def_mpf_constant(sqrtpi_fixed)
+mpf_ln_sqrt2pi = def_mpf_constant(ln_sqrt2pi_fixed)
+
+
+#----------------------------------------------------------------------------#
+#                                                                            #
+#                                    Powers                                  #
+#                                                                            #
+#----------------------------------------------------------------------------#
+
+def mpf_pow(s, t, prec, rnd=round_fast):
+    """
+    Compute s**t. Raises ComplexResult if s is negative and t is
+    fractional.
+    """
+    ssign, sman, sexp, sbc = s
+    tsign, tman, texp, tbc = t
+    if ssign and texp < 0:
+        raise ComplexResult("negative number raised to a fractional power")
+    if texp >= 0:
+        return mpf_pow_int(s, (-1)**tsign * (tman<<texp), prec, rnd)
+    # s**(n/2) = sqrt(s)**n
+    if texp == -1:
+        if tman == 1:
+            if tsign:
+                return mpf_div(fone, mpf_sqrt(s, prec+10,
+                    reciprocal_rnd[rnd]), prec, rnd)
+            return mpf_sqrt(s, prec, rnd)
+        else:
+            if tsign:
+                return mpf_pow_int(mpf_sqrt(s, prec+10,
+                    reciprocal_rnd[rnd]), -tman, prec, rnd)
+            return mpf_pow_int(mpf_sqrt(s, prec+10, rnd), tman, prec, rnd)
+    # General formula: s**t = exp(t*log(s))
+    mag = mpf_mul(mpf_log(s, prec+10), t, prec+10)
+    return mpf_exp(mag, prec, rnd)
+
+def int_pow_fixed(y, n, prec):
+    """n-th power of a fixed point number with precision prec
+
+       Returns the power in the form man, exp,
+       man * 2**exp ~= y**n
+    """
+    if n == 2:
+        return (y*y), 0
+    bc = bitcount(y)
+    exp = 0
+    workprec = 2 * (prec + 4*bitcount(n) + 4)
+    _, pm, pe, pbc = fone
+    while 1:
+        if n & 1:
+            pm = pm*y
+            pe = pe+exp
+            pbc += bc - 2
+            pbc = pbc + bctable[int(pm >> pbc)]
+            if pbc > workprec:
+                pm = pm >> (pbc-workprec)
+                pe += pbc - workprec
+                pbc = workprec
+            n -= 1
+            if not n:
+                break
+        y = y*y
+        exp = exp+exp
+        bc = bc + bc - 2
+        bc = bc + bctable[int(y >> bc)]
+        if bc > workprec:
+            y = y >> (bc-workprec)
+            exp += bc - workprec
+            bc = workprec
+        n = n // 2
+    return pm, pe
+
+# froot(s, n, prec, rnd) computes the real n-th root of a
+# positive mpf tuple s.
+# To compute the root we start from a 50-bit estimate for r
+# generated with ordinary floating-point arithmetic, and then refine
+# the value to full accuracy using the iteration
+
+#            1  /                     y       \
+#   r     = --- |  (n-1) * r   +  ----------- |
+#    n+1     n  \           n     r_n**(n-1)  /
+
+# which is simply Newton's method applied to the equation r**n = y.
+# With giant_steps(start, prec+extra) = [p0,...,pm, prec+extra]
+# and y = man * 2**-shift one has
+# (man * 2**exp)**(1/n) =
+# y**(1/n) * 2**(start-prec/n) * 2**(p0-start) * ... * 2**(prec+extra-pm) *
+# 2**((exp+shift-(n-1)*prec)/n -extra))
+# The last factor is accounted for in the last line of froot.
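+
+# The same Newton iteration restated in ordinary floating point, for
+# orientation (an illustrative sketch with a hypothetical helper;
+# nthroot_fixed below additionally doubles the working precision at
+# every step via giant_steps):
+def _nthroot_newton_sketch(y, n, steps=6):
+    r = y ** (1.0/n)                    # rough starting estimate
+    for _ in range(steps):
+        r = ((n-1)*r + y/r**(n-1)) / n  # Newton step for r**n = y
+    return r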
+ +def nthroot_fixed(y, n, prec, exp1): + start = 50 + try: + y1 = rshift(y, prec - n*start) + r = MPZ(int(y1**(1.0/n))) + except OverflowError: + y1 = from_int(y1, start) + fn = from_int(n) + fn = mpf_rdiv_int(1, fn, start) + r = mpf_pow(y1, fn, start) + r = to_int(r) + extra = 10 + extra1 = n + prevp = start + for p in giant_steps(start, prec+extra): + pm, pe = int_pow_fixed(r, n-1, prevp) + r2 = rshift(pm, (n-1)*prevp - p - pe - extra1) + B = lshift(y, 2*p-prec+extra1)//r2 + r = (B + (n-1) * lshift(r, p-prevp))//n + prevp = p + return r + +def mpf_nthroot(s, n, prec, rnd=round_fast): + """nth-root of a positive number + + Use the Newton method when faster, otherwise use x**(1/n) + """ + sign, man, exp, bc = s + if sign: + raise ComplexResult("nth root of a negative number") + if not man: + if s == fnan: + return fnan + if s == fzero: + if n > 0: + return fzero + if n == 0: + return fone + return finf + # Infinity + if not n: + return fnan + if n < 0: + return fzero + return finf + flag_inverse = False + if n < 2: + if n == 0: + return fone + if n == 1: + return mpf_pos(s, prec, rnd) + if n == -1: + return mpf_div(fone, s, prec, rnd) + # n < 0 + rnd = reciprocal_rnd[rnd] + flag_inverse = True + extra_inverse = 5 + prec += extra_inverse + n = -n + if n > 20 and (n >= 20000 or prec < int(233 + 28.3 * n**0.62)): + prec2 = prec + 10 + fn = from_int(n) + nth = mpf_rdiv_int(1, fn, prec2) + r = mpf_pow(s, nth, prec2, rnd) + s = normalize(r[0], r[1], r[2], r[3], prec, rnd) + if flag_inverse: + return mpf_div(fone, s, prec-extra_inverse, rnd) + else: + return s + # Convert to a fixed-point number with prec2 bits. + prec2 = prec + 2*n - (prec%n) + # a few tests indicate that + # for 10 < n < 10**4 a bit more precision is needed + if n > 10: + prec2 += prec2//10 + prec2 = prec2 - prec2%n + # Mantissa may have more bits than we need. Trim it down. + shift = bc - prec2 + # Adjust exponents to make prec2 and exp+shift multiples of n. + sign1 = 0 + es = exp+shift + if es < 0: + sign1 = 1 + es = -es + if sign1: + shift += es%n + else: + shift -= es%n + man = rshift(man, shift) + extra = 10 + exp1 = ((exp+shift-(n-1)*prec2)//n) - extra + rnd_shift = 0 + if flag_inverse: + if rnd == 'u' or rnd == 'c': + rnd_shift = 1 + else: + if rnd == 'd' or rnd == 'f': + rnd_shift = 1 + man = nthroot_fixed(man+rnd_shift, n, prec2, exp1) + s = from_man_exp(man, exp1, prec, rnd) + if flag_inverse: + return mpf_div(fone, s, prec-extra_inverse, rnd) + else: + return s + +def mpf_cbrt(s, prec, rnd=round_fast): + """cubic root of a positive number""" + return mpf_nthroot(s, 3, prec, rnd) + +#----------------------------------------------------------------------------# +# # +# Logarithms # +# # +#----------------------------------------------------------------------------# + + +def log_int_fixed(n, prec, ln2=None): + """ + Fast computation of log(n), caching the value for small n, + intended for zeta sums. + """ + if n in log_int_cache: + value, vprec = log_int_cache[n] + if vprec >= prec: + return value >> (vprec - prec) + wp = prec + 10 + if wp <= LOG_TAYLOR_SHIFT: + if ln2 is None: + ln2 = ln2_fixed(wp) + r = bitcount(n) + x = n << (wp-r) + v = log_taylor_cached(x, wp) + r*ln2 + else: + v = to_fixed(mpf_log(from_int(n), wp+5), wp) + if n < MAX_LOG_INT_CACHE: + log_int_cache[n] = (v, wp) + return v >> (wp-prec) + +def agm_fixed(a, b, prec): + """ + Fixed-point computation of agm(a,b), assuming + a, b both close to unit magnitude. 
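+
+    Illustration of the iteration in ordinary floating point::
+
+        >>> a, b = 1.0, 0.5
+        >>> for _ in range(5):
+        ...     a, b = (a+b)/2, (a*b)**0.5
+        >>> abs(a - b) < 1e-15
+        True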
+ """ + i = 0 + while 1: + anew = (a+b)>>1 + if i > 4 and abs(a-anew) < 8: + return a + b = isqrt_fast(a*b) + a = anew + i += 1 + return a + +def log_agm(x, prec): + """ + Fixed-point computation of -log(x) = log(1/x), suitable + for large precision. It is required that 0 < x < 1. The + algorithm used is the Sasaki-Kanada formula + + -log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1] + + For faster convergence in the theta functions, x should + be chosen closer to 0. + + Guard bits must be added by the caller. + + HYPOTHESIS: if x = 2^(-n), n bits need to be added to + account for the truncation to a fixed-point number, + and this is the only significant cancellation error. + + The number of bits lost to roundoff is small and can be + considered constant. + + [1] Richard P. Brent, "Fast Algorithms for High-Precision + Computation of Elementary Functions (extended abstract)", + http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf + + """ + x2 = (x*x) >> prec + # Compute jtheta2(x)**2 + s = a = b = x2 + while a: + b = (b*x2) >> prec + a = (a*b) >> prec + s += a + s += (MPZ_ONE<>(prec-2) + s = (s*isqrt_fast(x<>prec + # Compute jtheta3(x)**2 + t = a = b = x + while a: + b = (b*x2) >> prec + a = (a*b) >> prec + t += a + t = (MPZ_ONE<>prec + # Final formula + p = agm_fixed(s, t, prec) + return (pi_fixed(prec) << prec) // p + +def log_taylor(x, prec, r=0): + """ + Fixed-point calculation of log(x). It is assumed that x is close + enough to 1 for the Taylor series to converge quickly. Convergence + can be improved by specifying r > 0 to compute + log(x^(1/2^r))*2^r, at the cost of performing r square roots. + + The caller must provide sufficient guard bits. + """ + for i in xrange(r): + x = isqrt_fast(x<> prec + v4 = (v2*v2) >> prec + s0 = v + s1 = v//3 + v = (v*v4) >> prec + k = 5 + while v: + s0 += v // k + k += 2 + s1 += v // k + v = (v*v4) >> prec + k += 2 + s1 = (s1*v2) >> prec + s = (s0+s1) << (1+r) + if sign: + return -s + return s + +def log_taylor_cached(x, prec): + """ + Fixed-point computation of log(x), assuming x in (0.5, 2) + and prec <= LOG_TAYLOR_PREC. + """ + n = x >> (prec-LOG_TAYLOR_SHIFT) + cached_prec = cache_prec_steps[prec] + dprec = cached_prec - prec + if (n, cached_prec) in log_taylor_cache: + a, log_a = log_taylor_cache[n, cached_prec] + else: + a = n << (cached_prec - LOG_TAYLOR_SHIFT) + log_a = log_taylor(a, cached_prec, 8) + log_taylor_cache[n, cached_prec] = (a, log_a) + a >>= dprec + log_a >>= dprec + u = ((x - a) << prec) // a + v = (u << prec) // ((MPZ_TWO << prec) + u) + v2 = (v*v) >> prec + v4 = (v2*v2) >> prec + s0 = v + s1 = v//3 + v = (v*v4) >> prec + k = 5 + while v: + s0 += v//k + k += 2 + s1 += v//k + v = (v*v4) >> prec + k += 2 + s1 = (s1*v2) >> prec + s = (s0+s1) << 1 + return log_a + s + +def mpf_log(x, prec, rnd=round_fast): + """ + Compute the natural logarithm of the mpf value x. If x is negative, + ComplexResult is raised. + """ + sign, man, exp, bc = x + #------------------------------------------------------------------ + # Handle special values + if not man: + if x == fzero: return fninf + if x == finf: return finf + if x == fnan: return fnan + if sign: + raise ComplexResult("logarithm of a negative number") + wp = prec + 20 + #------------------------------------------------------------------ + # Handle log(2^n) = log(n)*2. 
+ # Here we catch the only possible exact value, log(1) = 0
+ if man == 1:
+ if not exp:
+ return fzero
+ return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
+ mag = exp+bc
+ abs_mag = abs(mag)
+ #------------------------------------------------------------------
+ # Handle x = 1+eps, where log(x) ~ x. We need to check for
+ # cancellation when moving to fixed-point math and compensate
+ # by increasing the precision. Note that abs_mag in (0, 1) <=>
+ # 0.5 < x < 2 and x != 1
+ if abs_mag <= 1:
+ # Calculate t = x-1 to measure distance from 1 in bits
+ tsign = 1-abs_mag
+ if tsign:
+ tman = (MPZ_ONE<<bc) - man
+ else:
+ tman = man - (MPZ_ONE<<(bc-1))
+ tbc = bitcount(tman)
+ cancellation = bc - tbc
+ if cancellation > wp:
+ t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')
+ return mpf_perturb(t, tsign, prec, rnd)
+ else:
+ wp += cancellation
+ # TODO: if close enough to 1, we could use Taylor series
+ # even in the AGM precision range, since the Taylor series
+ # converges rapidly
+ #------------------------------------------------------------------
+ # Another special case:
+ # n*log(2) is a good enough approximation
+ if abs_mag > 10000:
+ if bitcount(abs_mag) > wp:
+ return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
+ #------------------------------------------------------------------
+ # General case.
+ # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):
+ # If we are in the Taylor precision range, choose magnitude 0 or 1.
+ # If we are in the AGM precision range, choose magnitude -m for
+ # some large m; benchmarking on one machine showed m = prec/20 to be
+ # optimal between 1000 and 100,000 digits.
+ if wp <= LOG_TAYLOR_PREC:
+ m = log_taylor_cached(lshift(man, wp-bc), wp)
+ if mag:
+ m += mag*ln2_fixed(wp)
+ else:
+ optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO
+ n = optimal_mag - mag
+ x = mpf_shift(x, n)
+ wp += (-optimal_mag)
+ m = -log_agm(to_fixed(x, wp), wp)
+ m -= n*ln2_fixed(wp)
+ return from_man_exp(m, -wp, prec, rnd)
+
+def mpf_log_hypot(a, b, prec, rnd):
+ """
+ Computes log(sqrt(a^2+b^2)) accurately.
+ """
+ # If either a or b is inf/nan/0, assume it to be a
+ if not b[1]:
+ a, b = b, a
+ # a is inf/nan/0
+ if not a[1]:
+ # both are inf/nan/0
+ if not b[1]:
+ if a == b == fzero:
+ return fninf
+ if fnan in (a, b):
+ return fnan
+ # at least one term is (+/- inf)^2
+ return finf
+ # only a is inf/nan/0
+ if a == fzero:
+ # log(sqrt(0+b^2)) = log(|b|)
+ return mpf_log(mpf_abs(b), prec, rnd)
+ if a == fnan:
+ return fnan
+ return finf
+ # Exact
+ a2 = mpf_mul(a,a)
+ b2 = mpf_mul(b,b)
+ extra = 20
+ # Not exact
+ h2 = mpf_add(a2, b2, prec+extra)
+ cancelled = mpf_add(h2, fnone, 10)
+ mag_cancelled = cancelled[2]+cancelled[3]
+ # Just redo the sum exactly if necessary (could be smarter
+ # and avoid memory allocation when a or b is precisely 1
+ # and the other is tiny...)
+ if cancelled == fzero or mag_cancelled < -extra//2:
+ h2 = mpf_add(a2, b2, prec+extra-min(a2[2],b2[2]))
+ return mpf_shift(mpf_log(h2, prec, rnd), -1)
+
+
+#----------------------------------------------------------------------
+# Inverse tangent
+#
+
+def atan_newton(x, prec):
+ if prec >= 100:
+ r = math.atan(int((x>>(prec-53)))/2.0**53)
+ else:
+ r = math.atan(int(x)/2.0**prec)
+ prevp = 50
+ r = MPZ(int(r * 2.0**53) >> (53-prevp))
+ extra_p = 50
+ for wp in giant_steps(prevp, prec):
+ wp += extra_p
+ r = r << (wp-prevp)
+ cos, sin = cos_sin_fixed(r, wp)
+ tan = (sin << wp) // cos
+ a = ((tan-rshift(x, prec-wp)) << wp) // ((MPZ_ONE<<wp) + ((tan**2)>>wp))
+ r = r - a
+ prevp = wp
+ return rshift(r, prevp-prec)
+
+def atan_taylor_get_cached(n, prec):
+ # Taylor series with caching wins up to huge precisions
+ # To avoid unnecessary precomputation at low precision, we
+ # do it in steps
+ # Round to next power of 2
+ prec2 = (1<<(bitcount(prec-1))) + 20
+ dprec = prec2 - prec
+ if (n, prec2) in atan_taylor_cache:
+ a, atan_a = atan_taylor_cache[n, prec2]
+ else:
+ a = n << (prec2 - ATAN_TAYLOR_SHIFT)
+ atan_a = atan_newton(a, prec2)
+ atan_taylor_cache[n, prec2] = (a, atan_a)
+ return (a >> dprec), (atan_a >> dprec)
+
+def atan_taylor(x, prec):
+ n = (x >> (prec-ATAN_TAYLOR_SHIFT))
+ a, atan_a = atan_taylor_get_cached(n, prec)
+ d = x - a
+ s0 = v = (d << prec) // ((a**2 >> prec) + (a*d >> prec) + (MPZ_ONE << prec))
+ v2 = (v**2 >> prec)
+ v4 = (v2 * v2) >> prec
+ s1 = v//3
+ v = (v * v4) >> prec
+ k = 5
+ while v:
+ s0 += v // k
+ k += 2
+ s1 += v // k
+ v = (v * v4) >> prec
+ k += 2
+ s1 = (s1 * v2) >> prec
+ s = s0 - s1
+ return atan_a + s
+
+def atan_inf(sign, prec, rnd):
+ if not sign:
+ return mpf_shift(mpf_pi(prec, rnd), -1)
+ return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
+
+def mpf_atan(x, prec, rnd=round_fast):
+ sign, man, exp, bc = x
+ if not man:
+ if x == fzero: return fzero
+ if x == finf: return atan_inf(0, prec, rnd)
+ if x == fninf: return atan_inf(1, prec, rnd)
+ return fnan
+ mag = exp + bc
+ # Essentially infinity
+ if mag > prec+20:
+ return atan_inf(sign, prec, rnd)
+ # Essentially ~ x
+ if -mag > prec+20:
+ return mpf_perturb(x, 1-sign, prec, rnd)
+ wp = prec + 30 + abs(mag)
+ # For large x, use atan(x) = pi/2 - atan(1/x)
+ if mag >= 2:
+ x = mpf_rdiv_int(1, x, wp)
+ reciprocal = True
+ else:
+ reciprocal = False
+ t = to_fixed(x, wp)
+ if sign:
+ t = -t
+ if wp < ATAN_TAYLOR_PREC:
+ a = atan_taylor(t, wp)
+ else:
+ a = atan_newton(t, wp)
+ if reciprocal:
+ a = ((pi_fixed(wp)>>1)+1) - a
+ if sign:
+ a = -a
+ return from_man_exp(a, -wp, prec, rnd)
+
+# TODO: cleanup the special cases
+def mpf_atan2(y, x, prec, rnd=round_fast):
+ xsign, xman, xexp, xbc = x
+ ysign, yman, yexp, ybc = y
+ if not yman:
+ if y == fzero and x != fnan:
+ if mpf_sign(x) >= 0:
+ return fzero
+ return mpf_pi(prec, rnd)
+ if y in (finf, fninf):
+ if x in (finf, fninf):
+ return fnan
+ # pi/2
+ if y == finf:
+ return mpf_shift(mpf_pi(prec, rnd), -1)
+ # -pi/2
+ return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
+ return fnan
+ if ysign:
+ return mpf_neg(mpf_atan2(mpf_neg(y), x, prec, negative_rnd[rnd]))
+ if not xman:
+ if x == fnan:
+ return fnan
+ if x == finf:
+ return fzero
+ if x == fninf:
+ return mpf_pi(prec, rnd)
+ if y == fzero:
+ return fzero
+ return mpf_shift(mpf_pi(prec, rnd), -1)
+ tquo = mpf_atan(mpf_div(y, x, prec+4), prec+4)
+ if xsign:
+ return mpf_add(mpf_pi(prec+4), tquo, prec, rnd)
+ else:
+ return mpf_pos(tquo, prec, rnd)
+
+def mpf_asin(x, prec,
rnd=round_fast): + sign, man, exp, bc = x + if bc+exp > 0 and x not in (fone, fnone): + raise ComplexResult("asin(x) is real only for -1 <= x <= 1") + # asin(x) = 2*atan(x/(1+sqrt(1-x**2))) + wp = prec + 15 + a = mpf_mul(x, x) + b = mpf_add(fone, mpf_sqrt(mpf_sub(fone, a, wp), wp), wp) + c = mpf_div(x, b, wp) + return mpf_shift(mpf_atan(c, prec, rnd), 1) + +def mpf_acos(x, prec, rnd=round_fast): + # acos(x) = 2*atan(sqrt(1-x**2)/(1+x)) + sign, man, exp, bc = x + if bc + exp > 0: + if x not in (fone, fnone): + raise ComplexResult("acos(x) is real only for -1 <= x <= 1") + if x == fnone: + return mpf_pi(prec, rnd) + wp = prec + 15 + a = mpf_mul(x, x) + b = mpf_sqrt(mpf_sub(fone, a, wp), wp) + c = mpf_div(b, mpf_add(fone, x, wp), wp) + return mpf_shift(mpf_atan(c, prec, rnd), 1) + +def mpf_asinh(x, prec, rnd=round_fast): + wp = prec + 20 + sign, man, exp, bc = x + mag = exp+bc + if mag < -8: + if mag < -wp: + return mpf_perturb(x, 1-sign, prec, rnd) + wp += (-mag) + # asinh(x) = log(x+sqrt(x**2+1)) + # use reflection symmetry to avoid cancellation + q = mpf_sqrt(mpf_add(mpf_mul(x, x), fone, wp), wp) + q = mpf_add(mpf_abs(x), q, wp) + if sign: + return mpf_neg(mpf_log(q, prec, negative_rnd[rnd])) + else: + return mpf_log(q, prec, rnd) + +def mpf_acosh(x, prec, rnd=round_fast): + # acosh(x) = log(x+sqrt(x**2-1)) + wp = prec + 15 + if mpf_cmp(x, fone) == -1: + raise ComplexResult("acosh(x) is real only for x >= 1") + q = mpf_sqrt(mpf_add(mpf_mul(x,x), fnone, wp), wp) + return mpf_log(mpf_add(x, q, wp), prec, rnd) + +def mpf_atanh(x, prec, rnd=round_fast): + # atanh(x) = log((1+x)/(1-x))/2 + sign, man, exp, bc = x + if (not man) and exp: + if x in (fzero, fnan): + return x + raise ComplexResult("atanh(x) is real only for -1 <= x <= 1") + mag = bc + exp + if mag > 0: + if mag == 1 and man == 1: + return [finf, fninf][sign] + raise ComplexResult("atanh(x) is real only for -1 <= x <= 1") + wp = prec + 15 + if mag < -8: + if mag < -wp: + return mpf_perturb(x, sign, prec, rnd) + wp += (-mag) + a = mpf_add(x, fone, wp) + b = mpf_sub(fone, x, wp) + return mpf_shift(mpf_log(mpf_div(a, b, wp), prec, rnd), -1) + +def mpf_fibonacci(x, prec, rnd=round_fast): + sign, man, exp, bc = x + if not man: + if x == fninf: + return fnan + return x + # F(2^n) ~= 2^(2^n) + size = abs(exp+bc) + if exp >= 0: + # Exact + if size < 10 or size <= bitcount(prec): + return from_int(ifib(to_int(x)), prec, rnd) + # Use the modified Binet formula + wp = prec + size + 20 + a = mpf_phi(wp) + b = mpf_add(mpf_shift(a, 1), fnone, wp) + u = mpf_pow(a, x, wp) + v = mpf_cos_pi(x, wp) + v = mpf_div(v, u, wp) + u = mpf_sub(u, v, wp) + u = mpf_div(u, b, prec, rnd) + return u + + +#------------------------------------------------------------------------------- +# Exponential-type functions +#------------------------------------------------------------------------------- + +def exponential_series(x, prec, type=0): + """ + Taylor series for cosh/sinh or cos/sin. 
+
+ type = 0 -- returns exp(x) (slightly faster than cosh+sinh)
+ type = 1 -- returns (cosh(x), sinh(x))
+ type = 2 -- returns (cos(x), sin(x))
+ """
+ if x < 0:
+ x = -x
+ sign = 1
+ else:
+ sign = 0
+ r = int(0.5*prec**0.5)
+ xmag = bitcount(x) - prec
+ r = max(0, xmag + r)
+ extra = 10 + 2*max(r,-xmag)
+ wp = prec + extra
+ x <<= (extra - r)
+ one = MPZ_ONE << wp
+ alt = (type == 2)
+ if prec < EXP_SERIES_U_CUTOFF:
+ x2 = a = (x*x) >> wp
+ x4 = (x2*x2) >> wp
+ s0 = s1 = MPZ_ZERO
+ k = 2
+ while a:
+ a //= (k-1)*k; s0 += a; k += 2
+ a //= (k-1)*k; s1 += a; k += 2
+ a = (a*x4) >> wp
+ s1 = (x2*s1) >> wp
+ if alt:
+ c = s1 - s0 + one
+ else:
+ c = s1 + s0 + one
+ else:
+ u = int(0.3*prec**0.35)
+ x2 = a = (x*x) >> wp
+ xpowers = [one, x2]
+ for i in xrange(1, u):
+ xpowers.append((xpowers[-1]*x2)>>wp)
+ sums = [MPZ_ZERO] * u
+ k = 2
+ while a:
+ for i in xrange(u):
+ a //= (k-1)*k
+ if alt and k & 2: sums[i] -= a
+ else: sums[i] += a
+ k += 2
+ a = (a*xpowers[-1]) >> wp
+ for i in xrange(1, u):
+ sums[i] = (sums[i]*xpowers[i]) >> wp
+ c = sum(sums) + one
+ if type == 0:
+ s = isqrt_fast(c*c - (one<<wp))
+ if sign:
+ v = c - s
+ else:
+ v = c + s
+ for i in xrange(r):
+ v = (v*v) >> wp
+ return v >> extra
+ else:
+ # Repeatedly apply the double-angle formula
+ # cosh(2*x) = 2*cosh(x)^2 - 1
+ # cos(2*x) = 2*cos(x)^2 - 1
+ pshift = wp-1
+ for i in xrange(r):
+ c = ((c*c) >> pshift) - one
+ # With the abs, this is the same for sinh and sin
+ s = isqrt_fast(abs((one<<wp) - c*c))
+ if sign:
+ s = -s
+ return (c>>extra), (s>>extra)
+
+def exp_basecase(x, prec):
+ """
+ Compute exp(x) as a fixed-point number. Works for any x,
+ but for speed should have |x| < 1. For an arbitrary number,
+ use exp(x) = exp(x-m*log(2)) * 2^m where m = floor(x/log(2)).
+ """
+ if prec > EXP_COSH_CUTOFF:
+ return exponential_series(x, prec, 0)
+ r = int(prec**0.5)
+ prec += r
+ s0 = s1 = (MPZ_ONE << prec)
+ k = 2
+ a = x2 = (x*x) >> prec
+ while a:
+ a //= k; s0 += a; k += 1
+ a //= k; s1 += a; k += 1
+ a = (a*x2) >> prec
+ s1 = (s1*x) >> prec
+ s = s0 + s1
+ u = r
+ while r:
+ s = (s*s) >> prec
+ r -= 1
+ return s >> u
+
+def exp_expneg_basecase(x, prec):
+ """
+ Computation of exp(x), exp(-x)
+ """
+ if prec > EXP_COSH_CUTOFF:
+ cosh, sinh = exponential_series(x, prec, 1)
+ return cosh+sinh, cosh-sinh
+ a = exp_basecase(x, prec)
+ b = (MPZ_ONE << (prec+prec)) // a
+ return a, b
+
+def cos_sin_basecase(x, prec):
+ """
+ Compute cos(x), sin(x) as fixed-point numbers, assuming x
+ in [0, pi/2). For an arbitrary number, use x' = x - m*(pi/2)
+ where m = floor(x/(pi/2)) along with quarter-period symmetries.
+ """
+ if prec > COS_SIN_CACHE_PREC:
+ return exponential_series(x, prec, 2)
+ precs = prec - COS_SIN_CACHE_STEP
+ t = x >> precs
+ n = int(t)
+ if n not in cos_sin_cache:
+ w = t<<(10+COS_SIN_CACHE_PREC-COS_SIN_CACHE_STEP)
+ cos_t, sin_t = exponential_series(w, 10+COS_SIN_CACHE_PREC, 2)
+ cos_sin_cache[n] = (cos_t>>10), (sin_t>>10)
+ cos_t, sin_t = cos_sin_cache[n]
+ offset = COS_SIN_CACHE_PREC - prec
+ cos_t >>= offset
+ sin_t >>= offset
+ x -= t << precs
+ cos = MPZ_ONE << prec
+ sin = x
+ k = 2
+ a = -((x*x) >> prec)
+ while a:
+ a //= k; cos += a; k += 1; a = (a*x) >> prec
+ a //= k; sin += a; k += 1; a = -((a*x) >> prec)
+ return ((cos*cos_t-sin*sin_t) >> prec), ((sin*cos_t+cos*sin_t) >> prec)
+
+def mpf_exp(x, prec, rnd=round_fast):
+ sign, man, exp, bc = x
+ if man:
+ mag = bc + exp
+ wp = prec + 14
+ if sign:
+ man = -man
+ # TODO: the best cutoff depends on both x and the precision.
+ if prec > 600 and exp >= 0:
+ # Need about log2(exp(n)) ~= 1.45*mag extra precision
+ e = mpf_e(wp+int(1.45*mag))
+ return mpf_pow_int(e, man<<exp, prec, rnd)
+ if mag < -wp:
+ return mpf_perturb(fone, sign, prec, rnd)
+ # |x| >= 2
+ if mag > 1:
+ # For large arguments: exp(2^mag*(1+eps)) =
+ # exp(2^mag)*exp(2^mag*eps) = exp(2^mag)*(1 + 2^mag*eps + ...)
+ # so about mag extra bits is required.
+ wpmod = wp + mag
+ offset = exp + wpmod
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ lg2 = ln2_fixed(wpmod)
+ n, t = divmod(t, lg2)
+ n = int(n)
+ t >>= mag
+ else:
+ offset = exp + wp
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ n = 0
+ man = exp_basecase(t, wp)
+ return from_man_exp(man, n-wp, prec, rnd)
+ if not exp:
+ return fone
+ if x == fninf:
+ return fzero
+ return x
+
+
+def mpf_cosh_sinh(x, prec, rnd=round_fast, tanh=0):
+ """Simultaneously compute (cosh(x), sinh(x)) for real x"""
+ sign, man, exp, bc = x
+ if (not man) and exp:
+ if tanh:
+ if x == finf: return fone
+ if x == fninf: return fnone
+ return fnan
+ if x == finf: return (finf, finf)
+ if x == fninf: return (finf, fninf)
+ return fnan, fnan
+ mag = exp+bc
+ wp = prec+14
+ if mag < -4:
+ # Extremely close to 0, sinh(x) ~= x and cosh(x) ~= 1
+ if mag < -wp:
+ if tanh:
+ return mpf_perturb(x, 1-sign, prec, rnd)
+ cosh = mpf_perturb(fone, 0, prec, rnd)
+ sinh = mpf_perturb(x, sign, prec, rnd)
+ return cosh, sinh
+ # Fix for cancellation when computing sinh
+ wp += (-mag)
+ # Does exp(-2*x) vanish?
+ if mag > 10:
+ if 3*(1<<(mag-1)) > wp:
+ # XXX: rounding
+ if tanh:
+ return mpf_perturb([fone,fnone][sign], 1-sign, prec, rnd)
+ c = s = mpf_shift(mpf_exp(mpf_abs(x), prec, rnd), -1)
+ if sign:
+ s = mpf_neg(s)
+ return c, s
+ # |x| > 1
+ if mag > 1:
+ wpmod = wp + mag
+ offset = exp + wpmod
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ lg2 = ln2_fixed(wpmod)
+ n, t = divmod(t, lg2)
+ n = int(n)
+ t >>= mag
+ else:
+ offset = exp + wp
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ n = 0
+ a, b = exp_expneg_basecase(t, wp)
+ # TODO: optimize division precision
+ cosh = a + (b>>(2*n))
+ sinh = a - (b>>(2*n))
+ if sign:
+ sinh = -sinh
+ if tanh:
+ man = (sinh << wp) // cosh
+ return from_man_exp(man, -wp, prec, rnd)
+ else:
+ cosh = from_man_exp(cosh, n-wp-1, prec, rnd)
+ sinh = from_man_exp(sinh, n-wp-1, prec, rnd)
+ return cosh, sinh
+
+
+def mod_pi2(man, exp, mag, wp):
+ # Reduce to standard interval
+ if mag > 0:
+ i = 0
+ while 1:
+ cancellation_prec = 20 << i
+ wpmod = wp + mag + cancellation_prec
+ pi2 = pi_fixed(wpmod-1)
+ pi4 = pi2 >> 1
+ offset = wpmod + exp
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ n, y = divmod(t, pi2)
+ if y > pi4:
+ small = pi2 - y
+ else:
+ small = y
+ if small >> (wp+mag-10):
+ n = int(n)
+ t = y >> mag
+ wp = wpmod - mag
+ break
+ i += 1
+ else:
+ wp += (-mag)
+ offset = exp + wp
+ if offset >= 0:
+ t = man << offset
+ else:
+ t = man >> (-offset)
+ n = 0
+ return t, n, wp
+
+
+def mpf_cos_sin(x, prec, rnd=round_fast, which=0, pi=False):
+ """
+ which:
+ 0 -- return cos(x), sin(x)
+ 1 -- return cos(x)
+ 2 -- return sin(x)
+ 3 -- return tan(x)
+
+ if pi=True, compute for pi*x
+ """
+ sign, man, exp, bc = x
+ if not man:
+ if exp:
+ c, s = fnan, fnan
+ else:
+ c, s = fone, fzero
+ if which == 0: return c, s
+ if which == 1: return c
+ if which == 2: return s
+ if which == 3: return s
+
+ mag = bc + exp
+ wp = prec + 10
+
+ # Extremely small?
+ if mag < 0: + if mag < -wp: + if pi: + x = mpf_mul(x, mpf_pi(wp)) + c = mpf_perturb(fone, 1, prec, rnd) + s = mpf_perturb(x, 1-sign, prec, rnd) + if which == 0: return c, s + if which == 1: return c + if which == 2: return s + if which == 3: return mpf_perturb(x, sign, prec, rnd) + if pi: + if exp >= -1: + if exp == -1: + c = fzero + s = (fone, fnone)[bool(man & 2) ^ sign] + elif exp == 0: + c, s = (fnone, fzero) + else: + c, s = (fone, fzero) + if which == 0: return c, s + if which == 1: return c + if which == 2: return s + if which == 3: return mpf_div(s, c, prec, rnd) + # Subtract nearest half-integer (= mod by pi/2) + n = ((man >> (-exp-2)) + 1) >> 1 + man = man - (n << (-exp-1)) + mag2 = bitcount(man) + exp + wp = prec + 10 - mag2 + offset = exp + wp + if offset >= 0: + t = man << offset + else: + t = man >> (-offset) + t = (t*pi_fixed(wp)) >> wp + else: + t, n, wp = mod_pi2(man, exp, mag, wp) + c, s = cos_sin_basecase(t, wp) + m = n & 3 + if m == 1: c, s = -s, c + elif m == 2: c, s = -c, -s + elif m == 3: c, s = s, -c + if sign: + s = -s + if which == 0: + c = from_man_exp(c, -wp, prec, rnd) + s = from_man_exp(s, -wp, prec, rnd) + return c, s + if which == 1: + return from_man_exp(c, -wp, prec, rnd) + if which == 2: + return from_man_exp(s, -wp, prec, rnd) + if which == 3: + return from_rational(s, c, prec, rnd) + +def mpf_cos(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1) +def mpf_sin(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2) +def mpf_tan(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 3) +def mpf_cos_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 0, 1) +def mpf_cos_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1, 1) +def mpf_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2, 1) +def mpf_cosh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[0] +def mpf_sinh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[1] +def mpf_tanh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd, tanh=1) + + +# Low-overhead fixed-point versions + +def cos_sin_fixed(x, prec, pi2=None): + if pi2 is None: + pi2 = pi_fixed(prec-1) + n, t = divmod(x, pi2) + n = int(n) + c, s = cos_sin_basecase(t, prec) + m = n & 3 + if m == 0: return c, s + if m == 1: return -s, c + if m == 2: return -c, -s + if m == 3: return s, -c + +def exp_fixed(x, prec, ln2=None): + if ln2 is None: + ln2 = ln2_fixed(prec) + n, t = divmod(x, ln2) + n = int(n) + v = exp_basecase(t, prec) + if n >= 0: + return v << n + else: + return v >> (-n) + + +if BACKEND == 'sage': + try: + import sage.libs.mpmath.ext_libmp as _lbmp + mpf_sqrt = _lbmp.mpf_sqrt + mpf_exp = _lbmp.mpf_exp + mpf_log = _lbmp.mpf_log + mpf_cos = _lbmp.mpf_cos + mpf_sin = _lbmp.mpf_sin + mpf_pow = _lbmp.mpf_pow + exp_fixed = _lbmp.exp_fixed + cos_sin_fixed = _lbmp.cos_sin_fixed + log_int_fixed = _lbmp.log_int_fixed + except (ImportError, AttributeError): + print("Warning: Sage imports in libelefun failed") diff --git a/MLPY/Lib/site-packages/mpmath/libmp/libhyper.py b/MLPY/Lib/site-packages/mpmath/libmp/libhyper.py new file mode 100644 index 0000000000000000000000000000000000000000..04f52d59710be77819066aea5c1cf4b0883f72d7 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/libhyper.py @@ -0,0 +1,1150 @@ +""" +This module implements computation of hypergeometric and related +functions. In particular, it provides code for generic summation +of hypergeometric series. Optimized versions for various special +cases are also provided. 
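+
+For orientation (a rough pure-Python sketch, not the generated code
+itself; the helper name is hypothetical), a series pFq is summed term
+by term using the recurrence
+t(k+1) = t(k) * prod(a_i+k) / prod(b_j+k) * z / (k+1):
+
+    def hypsum_naive(a_s, b_s, z, terms=40):
+        s = t = 1.0
+        for k in range(terms):
+            num = 1.0
+            for a in a_s: num *= (a+k)   # rising numerator parameters
+            den = 1.0
+            for b in b_s: den *= (b+k)   # rising denominator parameters
+            t *= num/den * z/(k+1.0)     # next term from the previous one
+            s += t
+        return s
+
+    hypsum_naive([], [], 0.5)   # 0F0(z) = exp(z) -> 1.6487212707...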
+""" + +import operator +import math + +from .backend import MPZ_ZERO, MPZ_ONE, BACKEND, xrange, exec_ + +from .libintmath import gcd + +from .libmpf import (\ + ComplexResult, round_fast, round_nearest, + negative_rnd, bitcount, to_fixed, from_man_exp, from_int, to_int, + from_rational, + fzero, fone, fnone, ftwo, finf, fninf, fnan, + mpf_sign, mpf_add, mpf_abs, mpf_pos, + mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_min_max, + mpf_perturb, mpf_neg, mpf_shift, mpf_sub, mpf_mul, mpf_div, + sqrt_fixed, mpf_sqrt, mpf_rdiv_int, mpf_pow_int, + to_rational, +) + +from .libelefun import (\ + mpf_pi, mpf_exp, mpf_log, pi_fixed, mpf_cos_sin, mpf_cos, mpf_sin, + mpf_sqrt, agm_fixed, +) + +from .libmpc import (\ + mpc_one, mpc_sub, mpc_mul_mpf, mpc_mul, mpc_neg, complex_int_pow, + mpc_div, mpc_add_mpf, mpc_sub_mpf, + mpc_log, mpc_add, mpc_pos, mpc_shift, + mpc_is_infnan, mpc_zero, mpc_sqrt, mpc_abs, + mpc_mpf_div, mpc_square, mpc_exp +) + +from .libintmath import ifac +from .gammazeta import mpf_gamma_int, mpf_euler, euler_fixed + +class NoConvergence(Exception): + pass + + +#-----------------------------------------------------------------------# +# # +# Generic hypergeometric series # +# # +#-----------------------------------------------------------------------# + +""" +TODO: + +1. proper mpq parsing +2. imaginary z special-cased (also: rational, integer?) +3. more clever handling of series that don't converge because of stupid + upwards rounding +4. checking for cancellation + +""" + +def make_hyp_summator(key): + """ + Returns a function that sums a generalized hypergeometric series, + for given parameter types (integer, rational, real, complex). + + """ + p, q, param_types, ztype = key + + pstring = "".join(param_types) + fname = "hypsum_%i_%i_%s_%s_%s" % (p, q, pstring[:p], pstring[p:], ztype) + #print "generating hypsum", fname + + have_complex_param = 'C' in param_types + have_complex_arg = ztype == 'C' + have_complex = have_complex_param or have_complex_arg + + source = [] + add = source.append + + aint = [] + arat = [] + bint = [] + brat = [] + areal = [] + breal = [] + acomplex = [] + bcomplex = [] + + #add("wp = prec + 40") + add("MAX = kwargs.get('maxterms', wp*100)") + add("HIGH = MPZ_ONE<= 0:") + add(" ZRE = xm << offset") + add("else:") + add(" ZRE = xm >> (-offset)") + if have_complex_arg: + add("offset = ye + wp") + add("if offset >= 0:") + add(" ZIM = ym << offset") + add("else:") + add(" ZIM = ym >> (-offset)") + + for i, flag in enumerate(param_types): + W = ["A", "B"][i >= p] + if flag == 'Z': + ([aint,bint][i >= p]).append(i) + add("%sINT_%i = coeffs[%i]" % (W, i, i)) + elif flag == 'Q': + ([arat,brat][i >= p]).append(i) + add("%sP_%i, %sQ_%i = coeffs[%i]._mpq_" % (W, i, W, i, i)) + elif flag == 'R': + ([areal,breal][i >= p]).append(i) + add("xsign, xm, xe, xbc = coeffs[%i]._mpf_" % i) + add("if xsign: xm = -xm") + add("offset = xe + wp") + add("if offset >= 0:") + add(" %sREAL_%i = xm << offset" % (W, i)) + add("else:") + add(" %sREAL_%i = xm >> (-offset)" % (W, i)) + elif flag == 'C': + ([acomplex,bcomplex][i >= p]).append(i) + add("__re, __im = coeffs[%i]._mpc_" % i) + add("xsign, xm, xe, xbc = __re") + add("if xsign: xm = -xm") + add("ysign, ym, ye, ybc = __im") + add("if ysign: ym = -ym") + + add("offset = xe + wp") + add("if offset >= 0:") + add(" %sCRE_%i = xm << offset" % (W, i)) + add("else:") + add(" %sCRE_%i = xm >> (-offset)" % (W, i)) + add("offset = ye + wp") + add("if offset >= 0:") + add(" %sCIM_%i = ym << offset" % (W, i)) + add("else:") + add(" %sCIM_%i = ym >> 
(-offset)" % (W, i)) + else: + raise ValueError + + l_areal = len(areal) + l_breal = len(breal) + cancellable_real = min(l_areal, l_breal) + noncancellable_real_num = areal[cancellable_real:] + noncancellable_real_den = breal[cancellable_real:] + + # LOOP + add("for n in xrange(1,10**8):") + + add(" if n in magnitude_check:") + add(" p_mag = bitcount(abs(PRE))") + if have_complex: + add(" p_mag = max(p_mag, bitcount(abs(PIM)))") + add(" magnitude_check[n] = wp-p_mag") + + # Real factors + multiplier = " * ".join(["AINT_#".replace("#", str(i)) for i in aint] + \ + ["AP_#".replace("#", str(i)) for i in arat] + \ + ["BQ_#".replace("#", str(i)) for i in brat]) + + divisor = " * ".join(["BINT_#".replace("#", str(i)) for i in bint] + \ + ["BP_#".replace("#", str(i)) for i in brat] + \ + ["AQ_#".replace("#", str(i)) for i in arat] + ["n"]) + + if multiplier: + add(" mul = " + multiplier) + add(" div = " + divisor) + + # Check for singular terms + add(" if not div:") + if multiplier: + add(" if not mul:") + add(" break") + add(" raise ZeroDivisionError") + + # Update product + if have_complex: + + # TODO: when there are several real parameters and just a few complex + # (maybe just the complex argument), we only need to do about + # half as many ops if we accumulate the real factor in a single real variable + for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i))) + for k in range(cancellable_real): add(" PIM = PIM * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PIM = (PIM * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PIM = (PIM << wp) // BREAL_#".replace("#", str(i))) + + if multiplier: + if have_complex_arg: + add(" PRE, PIM = (mul*(PRE*ZRE-PIM*ZIM))//div, (mul*(PIM*ZRE+PRE*ZIM))//div") + add(" PRE >>= wp") + add(" PIM >>= wp") + else: + add(" PRE = ((mul * PRE * ZRE) >> wp) // div") + add(" PIM = ((mul * PIM * ZRE) >> wp) // div") + else: + if have_complex_arg: + add(" PRE, PIM = (PRE*ZRE-PIM*ZIM)//div, (PIM*ZRE+PRE*ZIM)//div") + add(" PRE >>= wp") + add(" PIM >>= wp") + else: + add(" PRE = ((PRE * ZRE) >> wp) // div") + add(" PIM = ((PIM * ZRE) >> wp) // div") + + for i in acomplex: + add(" PRE, PIM = PRE*ACRE_#-PIM*ACIM_#, PIM*ACRE_#+PRE*ACIM_#".replace("#", str(i))) + add(" PRE >>= wp") + add(" PIM >>= wp") + + for i in bcomplex: + add(" mag = BCRE_#*BCRE_#+BCIM_#*BCIM_#".replace("#", str(i))) + add(" re = PRE*BCRE_# + PIM*BCIM_#".replace("#", str(i))) + add(" im = PIM*BCRE_# - PRE*BCIM_#".replace("#", str(i))) + add(" PRE = (re << wp) // mag".replace("#", str(i))) + add(" PIM = (im << wp) // mag".replace("#", str(i))) + + else: + for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i))) + if multiplier: + add(" PRE = ((PRE * mul * ZRE) >> wp) // div") + else: + add(" PRE = ((PRE * ZRE) >> wp) // div") + + # Add product to sum + if have_complex: + add(" SRE += PRE") + add(" SIM += PIM") + add(" if (HIGH > PRE > LOW) and (HIGH > PIM > LOW):") + add(" break") + else: + add(" SRE += PRE") + add(" if HIGH > PRE > LOW:") + add(" break") + + #add(" from mpmath import 
nprint, log, ldexp") + #add(" nprint([n, log(abs(PRE),2), ldexp(PRE,-wp)])") + + add(" if n > MAX:") + add(" raise NoConvergence('Hypergeometric series converges too slowly. Try increasing maxterms.')") + + # +1 all parameters for next loop + for i in aint: add(" AINT_# += 1".replace("#", str(i))) + for i in bint: add(" BINT_# += 1".replace("#", str(i))) + for i in arat: add(" AP_# += AQ_#".replace("#", str(i))) + for i in brat: add(" BP_# += BQ_#".replace("#", str(i))) + for i in areal: add(" AREAL_# += one".replace("#", str(i))) + for i in breal: add(" BREAL_# += one".replace("#", str(i))) + for i in acomplex: add(" ACRE_# += one".replace("#", str(i))) + for i in bcomplex: add(" BCRE_# += one".replace("#", str(i))) + + if have_complex: + add("a = from_man_exp(SRE, -wp, prec, 'n')") + add("b = from_man_exp(SIM, -wp, prec, 'n')") + + add("if SRE:") + add(" if SIM:") + add(" magn = max(a[2]+a[3], b[2]+b[3])") + add(" else:") + add(" magn = a[2]+a[3]") + add("elif SIM:") + add(" magn = b[2]+b[3]") + add("else:") + add(" magn = -wp+1") + + add("return (a, b), True, magn") + else: + add("a = from_man_exp(SRE, -wp, prec, 'n')") + + add("if SRE:") + add(" magn = a[2]+a[3]") + add("else:") + add(" magn = -wp+1") + + add("return a, False, magn") + + source = "\n".join((" " + line) for line in source) + source = ("def %s(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):\n" % fname) + source + + namespace = {} + + exec_(source, globals(), namespace) + + #print source + return source, namespace[fname] + + +if BACKEND == 'sage': + + def make_hyp_summator(key): + """ + Returns a function that sums a generalized hypergeometric series, + for given parameter types (integer, rational, real, complex). + """ + from sage.libs.mpmath.ext_main import hypsum_internal + p, q, param_types, ztype = key + def _hypsum(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs): + return hypsum_internal(p, q, param_types, ztype, coeffs, z, + prec, wp, epsshift, magnitude_check, kwargs) + + return "(none)", _hypsum + + +#-----------------------------------------------------------------------# +# # +# Error functions # +# # +#-----------------------------------------------------------------------# + +# TODO: mpf_erf should call mpf_erfc when appropriate (currently +# only the converse delegation is implemented) + +def mpf_erf(x, prec, rnd=round_fast): + sign, man, exp, bc = x + if not man: + if x == fzero: return fzero + if x == finf: return fone + if x== fninf: return fnone + return fnan + size = exp + bc + lg = math.log + # The approximation erf(x) = 1 is accurate to > x^2 * log(e,2) bits + if size > 3 and 2*(size-1) + 0.528766 > lg(prec,2): + if sign: + return mpf_perturb(fnone, 0, prec, rnd) + else: + return mpf_perturb(fone, 1, prec, rnd) + # erf(x) ~ 2*x/sqrt(pi) close to 0 + if size < -prec: + # 2*x + x = mpf_shift(x,1) + c = mpf_sqrt(mpf_pi(prec+20), prec+20) + # TODO: interval rounding + return mpf_div(x, c, prec, rnd) + wp = prec + abs(size) + 25 + # Taylor series for erf, fixed-point summation + t = abs(to_fixed(x, wp)) + t2 = (t*t) >> wp + s, term, k = t, 12345, 1 + while term: + t = ((t * t2) >> wp) // k + term = t // (2*k+1) + if k & 1: + s -= term + else: + s += term + k += 1 + s = (s << (wp+1)) // sqrt_fixed(pi_fixed(wp), wp) + if sign: + s = -s + return from_man_exp(s, -wp, prec, rnd) + +# If possible, we use the asymptotic series for erfc. +# This is an alternating divergent asymptotic series, so +# the error is at most equal to the first omitted term. 
+# Here we check if the smallest term is small enough +# for a given x and precision +def erfc_check_series(x, prec): + n = to_int(x) + if n**2 * 1.44 > prec: + return True + return False + +def mpf_erfc(x, prec, rnd=round_fast): + sign, man, exp, bc = x + if not man: + if x == fzero: return fone + if x == finf: return fzero + if x == fninf: return ftwo + return fnan + wp = prec + 20 + mag = bc+exp + # Preserve full accuracy when exponent grows huge + wp += max(0, 2*mag) + regular_erf = sign or mag < 2 + if regular_erf or not erfc_check_series(x, wp): + if regular_erf: + return mpf_sub(fone, mpf_erf(x, prec+10, negative_rnd[rnd]), prec, rnd) + # 1-erf(x) ~ exp(-x^2), increase prec to deal with cancellation + n = to_int(x)+1 + return mpf_sub(fone, mpf_erf(x, prec + int(n**2*1.44) + 10), prec, rnd) + s = term = MPZ_ONE << wp + term_prev = 0 + t = (2 * to_fixed(x, wp) ** 2) >> wp + k = 1 + while 1: + term = ((term * (2*k - 1)) << wp) // t + if k > 4 and term > term_prev or not term: + break + if k & 1: + s -= term + else: + s += term + term_prev = term + #print k, to_str(from_man_exp(term, -wp, 50), 10) + k += 1 + s = (s << wp) // sqrt_fixed(pi_fixed(wp), wp) + s = from_man_exp(s, -wp, wp) + z = mpf_exp(mpf_neg(mpf_mul(x,x,wp),wp),wp) + y = mpf_div(mpf_mul(z, s, wp), x, prec, rnd) + return y + + +#-----------------------------------------------------------------------# +# # +# Exponential integrals # +# # +#-----------------------------------------------------------------------# + +def ei_taylor(x, prec): + s = t = x + k = 2 + while t: + t = ((t*x) >> prec) // k + s += t // k + k += 1 + return s + +def complex_ei_taylor(zre, zim, prec): + _abs = abs + sre = tre = zre + sim = tim = zim + k = 2 + while _abs(tre) + _abs(tim) > 5: + tre, tim = ((tre*zre-tim*zim)//k)>>prec, ((tre*zim+tim*zre)//k)>>prec + sre += tre // k + sim += tim // k + k += 1 + return sre, sim + +def ei_asymptotic(x, prec): + one = MPZ_ONE << prec + x = t = ((one << prec) // x) + s = one + x + k = 2 + while t: + t = (k*t*x) >> prec + s += t + k += 1 + return s + +def complex_ei_asymptotic(zre, zim, prec): + _abs = abs + one = MPZ_ONE << prec + M = (zim*zim + zre*zre) >> prec + # 1 / z + xre = tre = (zre << prec) // M + xim = tim = ((-zim) << prec) // M + sre = one + xre + sim = xim + k = 2 + while _abs(tre) + _abs(tim) > 1000: + #print tre, tim + tre, tim = ((tre*xre-tim*xim)*k)>>prec, ((tre*xim+tim*xre)*k)>>prec + sre += tre + sim += tim + k += 1 + if k > prec: + raise NoConvergence + return sre, sim + +def mpf_ei(x, prec, rnd=round_fast, e1=False): + if e1: + x = mpf_neg(x) + sign, man, exp, bc = x + if e1 and not sign: + if x == fzero: + return finf + raise ComplexResult("E1(x) for x < 0") + if man: + xabs = 0, man, exp, bc + xmag = exp+bc + wp = prec + 20 + can_use_asymp = xmag > wp + if not can_use_asymp: + if exp >= 0: + xabsint = man << exp + else: + xabsint = man >> (-exp) + can_use_asymp = xabsint > int(wp*0.693) + 10 + if can_use_asymp: + if xmag > wp: + v = fone + else: + v = from_man_exp(ei_asymptotic(to_fixed(x, wp), wp), -wp) + v = mpf_mul(v, mpf_exp(x, wp), wp) + v = mpf_div(v, x, prec, rnd) + else: + wp += 2*int(to_int(xabs)) + u = to_fixed(x, wp) + v = ei_taylor(u, wp) + euler_fixed(wp) + t1 = from_man_exp(v,-wp) + t2 = mpf_log(xabs,wp) + v = mpf_add(t1, t2, prec, rnd) + else: + if x == fzero: v = fninf + elif x == finf: v = finf + elif x == fninf: v = fzero + else: v = fnan + if e1: + v = mpf_neg(v) + return v + +def mpc_ei(z, prec, rnd=round_fast, e1=False): + if e1: + z = mpc_neg(z) + a, b = z + asign, 
aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if b == fzero: + if e1: + x = mpf_neg(mpf_ei(a, prec, rnd)) + if not asign: + y = mpf_neg(mpf_pi(prec, rnd)) + else: + y = fzero + return x, y + else: + return mpf_ei(a, prec, rnd), fzero + if a != fzero: + if not aman or not bman: + return (fnan, fnan) + wp = prec + 40 + amag = aexp+abc + bmag = bexp+bbc + zmag = max(amag, bmag) + can_use_asymp = zmag > wp + if not can_use_asymp: + zabsint = abs(to_int(a)) + abs(to_int(b)) + can_use_asymp = zabsint > int(wp*0.693) + 20 + try: + if can_use_asymp: + if zmag > wp: + v = fone, fzero + else: + zre = to_fixed(a, wp) + zim = to_fixed(b, wp) + vre, vim = complex_ei_asymptotic(zre, zim, wp) + v = from_man_exp(vre, -wp), from_man_exp(vim, -wp) + v = mpc_mul(v, mpc_exp(z, wp), wp) + v = mpc_div(v, z, wp) + if e1: + v = mpc_neg(v, prec, rnd) + else: + x, y = v + if bsign: + v = mpf_pos(x, prec, rnd), mpf_sub(y, mpf_pi(wp), prec, rnd) + else: + v = mpf_pos(x, prec, rnd), mpf_add(y, mpf_pi(wp), prec, rnd) + return v + except NoConvergence: + pass + #wp += 2*max(0,zmag) + wp += 2*int(to_int(mpc_abs(z, 5))) + zre = to_fixed(a, wp) + zim = to_fixed(b, wp) + vre, vim = complex_ei_taylor(zre, zim, wp) + vre += euler_fixed(wp) + v = from_man_exp(vre,-wp), from_man_exp(vim,-wp) + if e1: + u = mpc_log(mpc_neg(z),wp) + else: + u = mpc_log(z,wp) + v = mpc_add(v, u, prec, rnd) + if e1: + v = mpc_neg(v) + return v + +def mpf_e1(x, prec, rnd=round_fast): + return mpf_ei(x, prec, rnd, True) + +def mpc_e1(x, prec, rnd=round_fast): + return mpc_ei(x, prec, rnd, True) + +def mpf_expint(n, x, prec, rnd=round_fast, gamma=False): + """ + E_n(x), n an integer, x real + + With gamma=True, computes Gamma(n,x) (upper incomplete gamma function) + + Returns (real, None) if real, otherwise (real, imag) + The imaginary part is an optional branch cut term + + """ + sign, man, exp, bc = x + if not man: + if gamma: + if x == fzero: + # Actually gamma function pole + if n <= 0: + return finf, None + return mpf_gamma_int(n, prec, rnd), None + if x == finf: + return fzero, None + # TODO: could return finite imaginary value at -inf + return fnan, fnan + else: + if x == fzero: + if n > 1: + return from_rational(1, n-1, prec, rnd), None + else: + return finf, None + if x == finf: + return fzero, None + return fnan, fnan + n_orig = n + if gamma: + n = 1-n + wp = prec + 20 + xmag = exp + bc + # Beware of near-poles + if xmag < -10: + raise NotImplementedError + nmag = bitcount(abs(n)) + have_imag = n > 0 and sign + negx = mpf_neg(x) + # Skip series if direct convergence + if n == 0 or 2*nmag - xmag < -wp: + if gamma: + v = mpf_exp(negx, wp) + re = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), prec, rnd) + else: + v = mpf_exp(negx, wp) + re = mpf_div(v, x, prec, rnd) + else: + # Finite number of terms, or... + can_use_asymptotic_series = -3*wp < n <= 0 + # ...large enough? + if not can_use_asymptotic_series: + xi = abs(to_int(x)) + m = min(max(1, xi-n), 2*wp) + siz = -n*nmag + (m+n)*bitcount(abs(m+n)) - m*xmag - (144*m//100) + tol = -wp-10 + can_use_asymptotic_series = siz < tol + if can_use_asymptotic_series: + r = ((-MPZ_ONE) << (wp+wp)) // to_fixed(x, wp) + m = n + t = r*m + s = MPZ_ONE << wp + while m and t: + s += t + m += 1 + t = (m*r*t) >> wp + v = mpf_exp(negx, wp) + if gamma: + # ~ exp(-x) * x^(n-1) * (1 + ...) + v = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), wp) + else: + # ~ exp(-x)/x * (1 + ...) 
+ v = mpf_div(v, x, wp)
+ re = mpf_mul(v, from_man_exp(s, -wp), prec, rnd)
+ elif n == 1:
+ re = mpf_neg(mpf_ei(negx, prec, rnd))
+ elif n > 0 and n < 3*wp:
+ T1 = mpf_neg(mpf_ei(negx, wp))
+ if gamma:
+ if n_orig & 1:
+ T1 = mpf_neg(T1)
+ else:
+ T1 = mpf_mul(T1, mpf_pow_int(negx, n-1, wp), wp)
+ r = t = to_fixed(x, wp)
+ facs = [1] * (n-1)
+ for k in range(1,n-1):
+ facs[k] = facs[k-1] * k
+ facs = facs[::-1]
+ s = facs[0] << wp
+ for k in range(1, n-1):
+ if k & 1:
+ s -= facs[k] * t
+ else:
+ s += facs[k] * t
+ t = (t*r) >> wp
+ T2 = from_man_exp(s, -wp, wp)
+ T2 = mpf_mul(T2, mpf_exp(negx, wp))
+ if gamma:
+ T2 = mpf_mul(T2, mpf_pow_int(x, n_orig, wp), wp)
+ R = mpf_add(T1, T2)
+ re = mpf_div(R, from_int(ifac(n-1)), prec, rnd)
+ else:
+ raise NotImplementedError
+ if have_imag:
+ M = from_int(-ifac(n-1))
+ if gamma:
+ im = mpf_div(mpf_pi(wp), M, prec, rnd)
+ if n_orig & 1:
+ im = mpf_neg(im)
+ else:
+ im = mpf_div(mpf_mul(mpf_pi(wp), mpf_pow_int(negx, n_orig-1, wp), wp), M, prec, rnd)
+ return re, im
+ else:
+ return re, None
+
+def mpf_ci_si_taylor(x, wp, which=0):
+ """
+ 0 - Ci(x) - (euler+log(x))
+ 1 - Si(x)
+ """
+ x = to_fixed(x, wp)
+ x2 = -(x*x) >> wp
+ if which == 0:
+ s, t, k = 0, (MPZ_ONE<<wp), 2
+ else:
+ s, t, k = x, x, 3
+ while t:
+ t = (t*x2//(k*(k-1)))>>wp
+ s += t//k
+ k += 2
+ return from_man_exp(s, -wp)
+
+def mpc_ci_si_taylor(re, im, wp, which=0):
+ # The following code is only designed for small arguments,
+ # and not too small arguments (for relative accuracy)
+ if re[1]:
+ mag = re[2]+re[3]
+ elif im[1]:
+ mag = im[2]+im[3]
+ if im[1]:
+ mag = max(mag, im[2]+im[3])
+ if mag > 2 or mag < -wp:
+ raise NotImplementedError
+ wp += (2-mag)
+ zre = to_fixed(re, wp)
+ zim = to_fixed(im, wp)
+ z2re = (zim*zim-zre*zre)>>wp
+ z2im = (-2*zre*zim)>>wp
+ tre = zre
+ tim = zim
+ one = MPZ_ONE<<wp
+ if which == 0:
+ sre, sim, tre, tim, k = 0, 0, one, 0, 2
+ else:
+ sre, sim, k = zre, zim, 3
+ while max(abs(tre), abs(tim)) > 2:
+ f = k*(k-1)
+ tre, tim = ((tre*z2re-tim*z2im)//f)>>wp, ((tre*z2im+tim*z2re)//f)>>wp
+ sre += tre//k
+ sim += tim//k
+ k += 2
+ return from_man_exp(sre, -wp), from_man_exp(sim, -wp)
+
+def mpf_ci_si(x, prec, rnd=round_fast, which=2):
+ """
+ Calculation of Ci(x), Si(x) for real x.
+
+ which = 0 -- returns (Ci(x), -)
+ which = 1 -- returns (Si(x), -)
+ which = 2 -- returns (Ci(x), Si(x))
+
+ Note: if x < 0, Ci(x) needs an additional imaginary term, pi*i.
+ """
+ wp = prec + 20
+ sign, man, exp, bc = x
+ ci, si = None, None
+ if not man:
+ if x == fzero:
+ return (fninf, fzero)
+ if x == fnan:
+ return (x, x)
+ ci = fzero
+ if which != 0:
+ if x == finf:
+ si = mpf_shift(mpf_pi(prec, rnd), -1)
+ if x == fninf:
+ si = mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
+ return (ci, si)
+ # For small x: Ci(x) ~ euler + log(x), Si(x) ~ x
+ mag = exp+bc
+ if mag < -wp:
+ if which != 0:
+ si = mpf_perturb(x, 1-sign, prec, rnd)
+ if which != 1:
+ y = mpf_euler(wp)
+ xabs = mpf_abs(x)
+ ci = mpf_add(y, mpf_log(xabs, wp), prec, rnd)
+ return ci, si
+ # For huge x: Ci(x) ~ sin(x)/x, Si(x) ~ pi/2
+ elif mag > wp:
+ if which != 0:
+ if sign:
+ si = mpf_neg(mpf_pi(prec, negative_rnd[rnd]))
+ else:
+ si = mpf_pi(prec, rnd)
+ si = mpf_shift(si, -1)
+ if which != 1:
+ ci = mpf_div(mpf_sin(x, wp), x, prec, rnd)
+ return ci, si
+ else:
+ wp += abs(mag)
+ # Use an asymptotic series? The smallest value of n!/x^n
+ # occurs for n ~ x, where the magnitude is ~ exp(-x).
+ asymptotic = mag-1 > math.log(wp, 2)
+ # Case 1: convergent series near 0
+ if not asymptotic:
+ if which != 0:
+ si = mpf_pos(mpf_ci_si_taylor(x, wp, 1), prec, rnd)
+ if which != 1:
+ ci = mpf_ci_si_taylor(x, wp, 0)
+ ci = mpf_add(ci, mpf_euler(wp), wp)
+ ci = mpf_add(ci, mpf_log(mpf_abs(x), wp), prec, rnd)
+ return ci, si
+ x = mpf_abs(x)
+ # Case 2: asymptotic series for x >> 1
+ xf = to_fixed(x, wp)
+ xr = (MPZ_ONE<<(2*wp)) // xf # 1/x
+ s1 = (MPZ_ONE << wp)
+ s2 = xr
+ t = xr
+ k = 2
+ while t:
+ t = -t
+ t = (t*xr*k)>>wp
+ k += 1
+ s1 += t
+ t = (t*xr*k)>>wp
+ k += 1
+ s2 += t
+ s1 = from_man_exp(s1, -wp)
+ s2 = from_man_exp(s2, -wp)
+ s1 = mpf_div(s1, x, wp)
+ s2 = mpf_div(s2, x, wp)
+ cos, sin = mpf_cos_sin(x, wp)
+ # Ci(x) = sin(x)*s1-cos(x)*s2
+ # Si(x) = pi/2-cos(x)*s1-sin(x)*s2
+ if which != 0:
+ si = mpf_add(mpf_mul(cos, s1), mpf_mul(sin, s2), wp)
+ si = mpf_sub(mpf_shift(mpf_pi(wp), -1), si, wp)
+ if sign:
+ si = mpf_neg(si)
+ si = mpf_pos(si, prec, rnd)
+ if which != 1:
+ ci = mpf_sub(mpf_mul(sin, s1), mpf_mul(cos, s2), prec, rnd)
+ return ci, si
+
+def mpf_ci(x, prec, rnd=round_fast):
+ if mpf_sign(x) < 0:
+ raise ComplexResult
+ return mpf_ci_si(x, prec, rnd, 0)[0]
+
+def mpf_si(x, prec, rnd=round_fast):
+ return mpf_ci_si(x, prec, rnd, 1)[1]
+
+def mpc_ci(z, prec, rnd=round_fast):
+ re, im = z
+ if im == fzero:
+ ci = mpf_ci_si(re, prec, rnd, 0)[0]
+ if mpf_sign(re) < 0:
+ return (ci, mpf_pi(prec, rnd))
+ return (ci, fzero)
+ wp = prec + 20
+ cre, cim = mpc_ci_si_taylor(re, im, wp, 0)
+ cre = mpf_add(cre, mpf_euler(wp), wp)
+ ci = mpc_add((cre, cim), mpc_log(z, wp), prec, rnd)
+ return ci
+
+def mpc_si(z, prec, rnd=round_fast):
+ re, im = z
+ if im == fzero:
+ return (mpf_ci_si(re, prec, rnd, 1)[1], fzero)
+ wp = prec + 20
+ z = mpc_ci_si_taylor(re, im, wp, 1)
+ return mpc_pos(z, prec, rnd)
+
+
+#-----------------------------------------------------------------------#
+# #
+# Bessel functions #
+# #
+#-----------------------------------------------------------------------#
+
+# A Bessel function of the first kind of integer order, J_n(x), is
+# given by the power series
+
+# J_n(x) = sum_(k=0..oo) (-1)**k / (k! * (k+n)!) * (x/2)**(2*k+n)
+
+# Simplifying the quotient between two successive terms gives the
+# ratio x^2 / (-4*k*(k+n)). Hence, we only need one full-precision
+# multiplication and one division by a small integer per term.
+# The complex version is very similar, the only difference being
+# that the multiplication is actually 4 multiplies.
+
+# In the general case, we have
+# J_v(x) = (x/2)**v / v! * 0F1(v+1, (-1/4)*z**2)
+
+# TODO: for extremely large x, we could use an asymptotic
+# trigonometric approximation.
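+# As a quick sanity check (hypothetical helper, not part of this module),
+# the term ratio x^2 / (-4*k*(k+n)) can be exercised in ordinary
+# floating point before reading the fixed-point code below:
+#
+#     import math
+#     def float_besseljn(n, x, terms=30):
+#         t = s = (0.5*x)**n / math.factorial(n)   # k = 0 term of the series
+#         for k in range(1, terms):
+#             t *= x*x / (-4.0*k*(k+n))            # ratio of successive terms
+#             s += t
+#         return s
+#
+#     float_besseljn(0, 1.0)   # -> 0.7651976865579666 (= J_0(1))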
+ +# TODO: recompute at higher precision if the fixed-point mantissa +# is very small + +def mpf_besseljn(n, x, prec, rounding=round_fast): + prec += 50 + negate = n < 0 and n & 1 + mag = x[2]+x[3] + n = abs(n) + wp = prec + 20 + n*bitcount(n) + if mag < 0: + wp -= n * mag + x = to_fixed(x, wp) + x2 = (x**2) >> wp + if not n: + s = t = MPZ_ONE << wp + else: + s = t = (x**n // ifac(n)) >> ((n-1)*wp + n) + k = 1 + while t: + t = ((t * x2) // (-4*k*(k+n))) >> wp + s += t + k += 1 + if negate: + s = -s + return from_man_exp(s, -wp, prec, rounding) + +def mpc_besseljn(n, z, prec, rounding=round_fast): + negate = n < 0 and n & 1 + n = abs(n) + origprec = prec + zre, zim = z + mag = max(zre[2]+zre[3], zim[2]+zim[3]) + prec += 20 + n*bitcount(n) + abs(mag) + if mag < 0: + prec -= n * mag + zre = to_fixed(zre, prec) + zim = to_fixed(zim, prec) + z2re = (zre**2 - zim**2) >> prec + z2im = (zre*zim) >> (prec-1) + if not n: + sre = tre = MPZ_ONE << prec + sim = tim = MPZ_ZERO + else: + re, im = complex_int_pow(zre, zim, n) + sre = tre = (re // ifac(n)) >> ((n-1)*prec + n) + sim = tim = (im // ifac(n)) >> ((n-1)*prec + n) + k = 1 + while abs(tre) + abs(tim) > 3: + p = -4*k*(k+n) + tre, tim = tre*z2re - tim*z2im, tim*z2re + tre*z2im + tre = (tre // p) >> prec + tim = (tim // p) >> prec + sre += tre + sim += tim + k += 1 + if negate: + sre = -sre + sim = -sim + re = from_man_exp(sre, -prec, origprec, rounding) + im = from_man_exp(sim, -prec, origprec, rounding) + return (re, im) + +def mpf_agm(a, b, prec, rnd=round_fast): + """ + Computes the arithmetic-geometric mean agm(a,b) for + nonnegative mpf values a, b. + """ + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if asign or bsign: + raise ComplexResult("agm of a negative number") + # Handle inf, nan or zero in either operand + if not (aman and bman): + if a == fnan or b == fnan: + return fnan + if a == finf: + if b == fzero: + return fnan + return finf + if b == finf: + if a == fzero: + return fnan + return finf + # agm(0,x) = agm(x,0) = 0 + return fzero + wp = prec + 20 + amag = aexp+abc + bmag = bexp+bbc + mag_delta = amag - bmag + # Reduce to roughly the same magnitude using floating-point AGM + abs_mag_delta = abs(mag_delta) + if abs_mag_delta > 10: + while abs_mag_delta > 10: + a, b = mpf_shift(mpf_add(a,b,wp),-1), \ + mpf_sqrt(mpf_mul(a,b,wp),wp) + abs_mag_delta //= 2 + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + amag = aexp+abc + bmag = bexp+bbc + mag_delta = amag - bmag + #print to_float(a), to_float(b) + # Use agm(a,b) = agm(x*a,x*b)/x to obtain a, b ~= 1 + min_mag = min(amag,bmag) + max_mag = max(amag,bmag) + n = 0 + # If too small, we lose precision when going to fixed-point + if min_mag < -8: + n = -min_mag + # If too large, we waste time using fixed-point with large numbers + elif max_mag > 20: + n = -max_mag + if n: + a = mpf_shift(a, n) + b = mpf_shift(b, n) + #print to_float(a), to_float(b) + af = to_fixed(a, wp) + bf = to_fixed(b, wp) + g = agm_fixed(af, bf, wp) + return from_man_exp(g, -wp-n, prec, rnd) + +def mpf_agm1(a, prec, rnd=round_fast): + """ + Computes the arithmetic-geometric mean agm(1,a) for a nonnegative + mpf value a. + """ + return mpf_agm(fone, a, prec, rnd) + +def mpc_agm(a, b, prec, rnd=round_fast): + """ + Complex AGM. 
+ + TODO: + * check that convergence works as intended + * optimize + * select a nonarbitrary branch + """ + if mpc_is_infnan(a) or mpc_is_infnan(b): + return fnan, fnan + if mpc_zero in (a, b): + return fzero, fzero + if mpc_neg(a) == b: + return fzero, fzero + wp = prec+20 + eps = mpf_shift(fone, -wp+10) + while 1: + a1 = mpc_shift(mpc_add(a, b, wp), -1) + b1 = mpc_sqrt(mpc_mul(a, b, wp), wp) + a, b = a1, b1 + size = mpf_min_max([mpc_abs(a,10), mpc_abs(b,10)])[1] + err = mpc_abs(mpc_sub(a, b, 10), 10) + if size == fzero or mpf_lt(err, mpf_mul(eps, size)): + return a + +def mpc_agm1(a, prec, rnd=round_fast): + return mpc_agm(mpc_one, a, prec, rnd) + +def mpf_ellipk(x, prec, rnd=round_fast): + if not x[1]: + if x == fzero: + return mpf_shift(mpf_pi(prec, rnd), -1) + if x == fninf: + return fzero + if x == fnan: + return x + if x == fone: + return finf + # TODO: for |x| << 1/2, one could use fall back to + # pi/2 * hyp2f1_rat((1,2),(1,2),(1,1), x) + wp = prec + 15 + # Use K(x) = pi/2/agm(1,a) where a = sqrt(1-x) + # The sqrt raises ComplexResult if x > 0 + a = mpf_sqrt(mpf_sub(fone, x, wp), wp) + v = mpf_agm1(a, wp) + r = mpf_div(mpf_pi(wp), v, prec, rnd) + return mpf_shift(r, -1) + +def mpc_ellipk(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + if re == finf: + return mpc_zero + if mpf_le(re, fone): + return mpf_ellipk(re, prec, rnd), fzero + wp = prec + 15 + a = mpc_sqrt(mpc_sub(mpc_one, z, wp), wp) + v = mpc_agm1(a, wp) + r = mpc_mpf_div(mpf_pi(wp), v, prec, rnd) + return mpc_shift(r, -1) + +def mpf_ellipe(x, prec, rnd=round_fast): + # http://functions.wolfram.com/EllipticIntegrals/ + # EllipticK/20/01/0001/ + # E = (1-m)*(K'(m)*2*m + K(m)) + sign, man, exp, bc = x + if not man: + if x == fzero: + return mpf_shift(mpf_pi(prec, rnd), -1) + if x == fninf: + return finf + if x == fnan: + return x + if x == finf: + raise ComplexResult + if x == fone: + return fone + wp = prec+20 + mag = exp+bc + if mag < -wp: + return mpf_shift(mpf_pi(prec, rnd), -1) + # Compute a finite difference for K' + p = max(mag, 0) - wp + h = mpf_shift(fone, p) + K = mpf_ellipk(x, 2*wp) + Kh = mpf_ellipk(mpf_sub(x, h), 2*wp) + Kdiff = mpf_shift(mpf_sub(K, Kh), -p) + t = mpf_sub(fone, x) + b = mpf_mul(Kdiff, mpf_shift(x,1), wp) + return mpf_mul(t, mpf_add(K, b), prec, rnd) + +def mpc_ellipe(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + if re == finf: + return (fzero, finf) + if mpf_le(re, fone): + return mpf_ellipe(re, prec, rnd), fzero + wp = prec + 15 + mag = mpc_abs(z, 1) + p = max(mag[2]+mag[3], 0) - wp + h = mpf_shift(fone, p) + K = mpc_ellipk(z, 2*wp) + Kh = mpc_ellipk(mpc_add_mpf(z, h, 2*wp), 2*wp) + Kdiff = mpc_shift(mpc_sub(Kh, K, wp), -p) + t = mpc_sub(mpc_one, z, wp) + b = mpc_mul(Kdiff, mpc_shift(z,1), wp) + return mpc_mul(t, mpc_add(K, b, wp), prec, rnd) diff --git a/MLPY/Lib/site-packages/mpmath/libmp/libintmath.py b/MLPY/Lib/site-packages/mpmath/libmp/libintmath.py new file mode 100644 index 0000000000000000000000000000000000000000..7880546e135639208d136488408b102ad41682a2 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/libintmath.py @@ -0,0 +1,584 @@ +""" +Utility functions for integer math. 
+
+TODO: rename, cleanup, perhaps move the gmpy wrapper code
+here from settings.py
+
+"""
+
+import math
+from bisect import bisect
+
+from .backend import xrange
+from .backend import BACKEND, gmpy, sage, sage_utils, MPZ, MPZ_ONE, MPZ_ZERO
+
+small_trailing = [0] * 256
+for j in range(1,8):
+ small_trailing[1<<j::1<<(j+1)] = [j] * (2**(7-j))
+
+def giant_steps(start, target, n=2):
+ """
+ Return a list of integers ~=
+
+ [start, n*start, ..., target/n^2, target/n, target]
+
+ but conservatively rounded so that the quotient between two
+ successive elements is actually slightly less than n.
+
+ With n = 2, this describes suitable precision steps for a
+ quadratically convergent algorithm such as Newton's method;
+ with n = 3 steps for cubic convergence (Halley's method), etc.
+
+ >>> giant_steps(50,1000)
+ [66, 128, 253, 502, 1000]
+ >>> giant_steps(50,1000,4)
+ [65, 252, 1000]
+
+ """
+ L = [target]
+ while L[-1] > start*n:
+ L = L + [L[-1]//n + 2]
+ return L[::-1]
+
+def rshift(x, n):
+ """For an integer x, calculate x >> n with the fastest (floor)
+ rounding. Unlike the plain Python expression (x >> n), n is
+ allowed to be negative, in which case a left shift is performed."""
+ if n >= 0: return x >> n
+ else: return x << (-n)
+
+def lshift(x, n):
+ """For an integer x, calculate x << n. Unlike the plain Python
+ expression (x << n), n is allowed to be negative, in which case a
+ right shift with default (floor) rounding is performed."""
+ if n >= 0: return x << n
+ else: return x >> (-n)
+
+if BACKEND == 'sage':
+ import operator
+ rshift = operator.rshift
+ lshift = operator.lshift
+
+def python_trailing(n):
+ """Count the number of trailing zero bits in abs(n)."""
+ if not n:
+ return 0
+ low_byte = n & 0xff
+ if low_byte:
+ return small_trailing[low_byte]
+ t = 8
+ n >>= 8
+ while not n & 0xff:
+ n >>= 8
+ t += 8
+ return t + small_trailing[n & 0xff]
+
+if BACKEND == 'gmpy':
+ if gmpy.version() >= '2':
+ def gmpy_trailing(n):
+ """Count the number of trailing zero bits in abs(n) using gmpy."""
+ if n: return MPZ(n).bit_scan1()
+ else: return 0
+ else:
+ def gmpy_trailing(n):
+ """Count the number of trailing zero bits in abs(n) using gmpy."""
+ if n: return MPZ(n).scan1()
+ else: return 0
+
+# Small powers of 2
+powers = [1<<_ for _ in range(300)]
+
+def python_bitcount(n):
+ """Calculate bit size of the nonnegative integer n."""
+ bc = bisect(powers, n)
+ if bc != 300:
+ return bc
+ bc = int(math.log(n, 2)) - 4
+ return bc + bctable[n>>bc]
+
+def gmpy_bitcount(n):
+ """Calculate bit size of the nonnegative integer n."""
+ if n: return MPZ(n).numdigits(2)
+ else: return 0
+
+#def sage_bitcount(n):
+# if n: return MPZ(n).nbits()
+# else: return 0
+
+def sage_trailing(n):
+ return MPZ(n).trailing_zero_bits()
+
+if BACKEND == 'gmpy':
+ bitcount = gmpy_bitcount
+ trailing = gmpy_trailing
+elif BACKEND == 'sage':
+ sage_bitcount = sage_utils.bitcount
+ bitcount = sage_bitcount
+ trailing = sage_trailing
+else:
+ bitcount = python_bitcount
+ trailing = python_trailing
+
+if BACKEND == 'gmpy' and 'bit_length' in dir(gmpy):
+ bitcount = gmpy.bit_length
+
+# Used to avoid slow function calls as far as possible
+trailtable = [trailing(n) for n in range(256)]
+bctable = [bitcount(n) for n in range(1024)]
+
+# TODO: speed up for bases 2, 4, 8, 16, ...
+
+def bin_to_radix(x, xbits, base, bdigits):
+ """Changes radix of a fixed-point number; i.e., converts
+ x * 2**xbits to floor(x * 10**bdigits)."""
+ return x * (MPZ(base)**bdigits) >> xbits
+
+stddigits = '0123456789abcdefghijklmnopqrstuvwxyz'
+
+def small_numeral(n, base=10, digits=stddigits):
+ """Return the string numeral of a positive integer in an arbitrary
+ base. Most efficient for small input."""
+ if base == 10:
+ return str(n)
+ digs = []
+ while n:
+ n, digit = divmod(n, base)
+ digs.append(digits[digit])
+ return "".join(digs[::-1])
+
+def numeral_python(n, base=10, size=0, digits=stddigits):
+ """Represent the integer n as a string of digits in the given base.
+ Recursive division is used to make this function about 3x faster + than Python's str() for converting integers to decimal strings. + + The 'size' parameters specifies the number of digits in n; this + number is only used to determine splitting points and need not be + exact.""" + if n <= 0: + if not n: + return "0" + return "-" + numeral(-n, base, size, digits) + # Fast enough to do directly + if size < 250: + return small_numeral(n, base, digits) + # Divide in half + half = (size // 2) + (size & 1) + A, B = divmod(n, base**half) + ad = numeral(A, base, half, digits) + bd = numeral(B, base, half, digits).rjust(half, "0") + return ad + bd + +def numeral_gmpy(n, base=10, size=0, digits=stddigits): + """Represent the integer n as a string of digits in the given base. + Recursive division is used to make this function about 3x faster + than Python's str() for converting integers to decimal strings. + + The 'size' parameters specifies the number of digits in n; this + number is only used to determine splitting points and need not be + exact.""" + if n < 0: + return "-" + numeral(-n, base, size, digits) + # gmpy.digits() may cause a segmentation fault when trying to convert + # extremely large values to a string. The size limit may need to be + # adjusted on some platforms, but 1500000 works on Windows and Linux. + if size < 1500000: + return gmpy.digits(n, base) + # Divide in half + half = (size // 2) + (size & 1) + A, B = divmod(n, MPZ(base)**half) + ad = numeral(A, base, half, digits) + bd = numeral(B, base, half, digits).rjust(half, "0") + return ad + bd + +if BACKEND == "gmpy": + numeral = numeral_gmpy +else: + numeral = numeral_python + +_1_800 = 1<<800 +_1_600 = 1<<600 +_1_400 = 1<<400 +_1_200 = 1<<200 +_1_100 = 1<<100 +_1_50 = 1<<50 + +def isqrt_small_python(x): + """ + Correctly (floor) rounded integer square root, using + division. Fast up to ~200 digits. + """ + if not x: + return x + if x < _1_800: + # Exact with IEEE double precision arithmetic + if x < _1_50: + return int(x**0.5) + # Initial estimate can be any integer >= the true root; round up + r = int(x**0.5 * 1.00000000000001) + 1 + else: + bc = bitcount(x) + n = bc//2 + r = int((x>>(2*n-100))**0.5+2)<<(n-50) # +2 is to round up + # The following iteration now precisely computes floor(sqrt(x)) + # See e.g. Crandall & Pomerance, "Prime Numbers: A Computational + # Perspective" + while 1: + y = (r+x//r)>>1 + if y >= r: + return r + r = y + +def isqrt_fast_python(x): + """ + Fast approximate integer square root, computed using division-free + Newton iteration for large x. For random integers the result is almost + always correct (floor(sqrt(x))), but is 1 ulp too small with a roughly + 0.1% probability. If x is very close to an exact square, the answer is + 1 ulp wrong with high probability. + + With 0 guard bits, the largest error over a set of 10^5 random + inputs of size 1-10^5 bits was 3 ulp. The use of 10 guard bits + almost certainly guarantees a max 1 ulp error. 
+    """
+    # Use direct division-based iteration if sqrt(x) < 2^400
+    # Assume floating-point square root accurate to within 1 ulp, then:
+    # 0 Newton iterations good to 52 bits
+    # 1 Newton iterations good to 104 bits
+    # 2 Newton iterations good to 208 bits
+    # 3 Newton iterations good to 416 bits
+    if x < _1_800:
+        y = int(x**0.5)
+        if x >= _1_100:
+            y = (y + x//y) >> 1
+            if x >= _1_200:
+                y = (y + x//y) >> 1
+                if x >= _1_400:
+                    y = (y + x//y) >> 1
+        return y
+    bc = bitcount(x)
+    guard_bits = 10
+    x <<= 2*guard_bits
+    bc += 2*guard_bits
+    bc += (bc&1)
+    hbc = bc//2
+    startprec = min(50, hbc)
+    # Newton iteration for 1/sqrt(x), with floating-point starting value
+    r = int(2.0**(2*startprec) * (x >> (bc-2*startprec)) ** -0.5)
+    pp = startprec
+    for p in giant_steps(startprec, hbc):
+        # r**2, scaled from real size 2**(-bc) to 2**p
+        r2 = (r*r) >> (2*pp - p)
+        # x*r**2, scaled from real size ~1.0 to 2**p
+        xr2 = ((x >> (bc-p)) * r2) >> p
+        # New value of r, scaled from real size 2**(-bc/2) to 2**p
+        r = (r * ((3<<p) - xr2)) >> (pp+1)
+        pp = p
+    # (1/sqrt(x))*x = sqrt(x)
+    return (r*(x>>hbc)) >> (p+guard_bits)
+
+def sqrtrem_python(x):
+    """Correctly rounded integer (floor) square root with remainder."""
+    # to check cutoff:
+    # plot(lambda x: timing(isqrt, 2**int(x)), [0,2000])
+    if x < _1_600:
+        y = isqrt_small_python(x)
+        return y, x - y*y
+    y = isqrt_fast_python(x) + 1
+    rem = x - y*y
+    # Correct remainder
+    while rem < 0:
+        y -= 1
+        rem += (1+2*y)
+    else:
+        if rem:
+            while rem > 2*(1+y):
+                y += 1
+                rem -= (1+2*y)
+    return y, rem
+
+def isqrt_python(x):
+    """Integer square root with correct (floor) rounding."""
+    return sqrtrem_python(x)[0]
+
+def sqrt_fixed(x, prec):
+    return isqrt_fast(x<<prec)
+
+sqrt_fixed2 = sqrt_fixed
+
+if BACKEND == 'gmpy':
+    if gmpy.version() >= '2':
+        isqrt_small = isqrt_fast = isqrt = gmpy.isqrt
+        sqrtrem = gmpy.isqrt_rem
+    else:
+        isqrt_small = isqrt_fast = isqrt = gmpy.sqrt
+        sqrtrem = gmpy.sqrtrem
+elif BACKEND == 'sage':
+    isqrt_small = isqrt_fast = isqrt = \
+        getattr(sage_utils, "isqrt", lambda n: MPZ(n).isqrt())
+    sqrtrem = lambda n: MPZ(n).sqrtrem()
+else:
+    isqrt_small = isqrt_small_python
+    isqrt_fast = isqrt_fast_python
+    isqrt = isqrt_python
+    sqrtrem = sqrtrem_python
+
+
+def ifib(n, _cache={}):
+    """Computes the nth Fibonacci number as an integer, for
+    integer n."""
+    if n < 0:
+        return (-1)**(-n+1) * ifib(-n)
+    if n in _cache:
+        return _cache[n]
+    m = n
+    # Use Dijkstra's logarithmic algorithm
+    # The following implementation is basically equivalent to
+    # http://en.literateprograms.org/Fibonacci_numbers_(Scheme)
+    a, b, p, q = MPZ_ONE, MPZ_ZERO, MPZ_ZERO, MPZ_ONE
+    while n:
+        if n & 1:
+            aq = a*q
+            a, b = b*q+aq+a*p, b*p+aq
+            n -= 1
+        else:
+            qq = q*q
+            p, q = p*p+qq, qq+2*p*q
+            n >>= 1
+    if m < 250:
+        _cache[m] = b
+    return b
+
+MAX_FACTORIAL_CACHE = 1000
+
+def ifac(n, memo={0:1, 1:1}):
+    """Return n factorial (for integers n >= 0 only)."""
+    f = memo.get(n)
+    if f:
+        return f
+    k = len(memo)
+    p = memo[k-1]
+    MAX = MAX_FACTORIAL_CACHE
+    while k <= n:
+        p *= k
+        if k <= MAX:
+            memo[k] = p
+        k += 1
+    return p
+
+def ifac2(n, memo_pair=[{0:1}, {1:1}]):
+    """Return n!!
(double factorial), integers n >= 0 only.""" + memo = memo_pair[n&1] + f = memo.get(n) + if f: + return f + k = max(memo) + p = memo[k] + MAX = MAX_FACTORIAL_CACHE + while k < n: + k += 2 + p *= k + if k <= MAX: + memo[k] = p + return p + +if BACKEND == 'gmpy': + ifac = gmpy.fac +elif BACKEND == 'sage': + ifac = lambda n: int(sage.factorial(n)) + ifib = sage.fibonacci + +def list_primes(n): + n = n + 1 + sieve = list(xrange(n)) + sieve[:2] = [0, 0] + for i in xrange(2, int(n**0.5)+1): + if sieve[i]: + for j in xrange(i**2, n, i): + sieve[j] = 0 + return [p for p in sieve if p] + +if BACKEND == 'sage': + # Note: it is *VERY* important for performance that we convert + # the list to Python ints. + def list_primes(n): + return [int(_) for _ in sage.primes(n+1)] + +small_odd_primes = (3,5,7,11,13,17,19,23,29,31,37,41,43,47) +small_odd_primes_set = set(small_odd_primes) + +def isprime(n): + """ + Determines whether n is a prime number. A probabilistic test is + performed if n is very large. No special trick is used for detecting + perfect powers. + + >>> sum(list_primes(100000)) + 454396537 + >>> sum(n*isprime(n) for n in range(100000)) + 454396537 + + """ + n = int(n) + if not n & 1: + return n == 2 + if n < 50: + return n in small_odd_primes_set + for p in small_odd_primes: + if not n % p: + return False + m = n-1 + s = trailing(m) + d = m >> s + def test(a): + x = pow(a,d,n) + if x == 1 or x == m: + return True + for r in xrange(1,s): + x = x**2 % n + if x == m: + return True + return False + # See http://primes.utm.edu/prove/prove2_3.html + if n < 1373653: + witnesses = [2,3] + elif n < 341550071728321: + witnesses = [2,3,5,7,11,13,17] + else: + witnesses = small_odd_primes + for a in witnesses: + if not test(a): + return False + return True + +def moebius(n): + """ + Evaluates the Moebius function which is `mu(n) = (-1)^k` if `n` + is a product of `k` distinct primes and `mu(n) = 0` otherwise. + + TODO: speed up using factorization + """ + n = abs(int(n)) + if n < 2: + return n + factors = [] + for p in xrange(2, n+1): + if not (n % p): + if not (n % p**2): + return 0 + if not sum(p % f for f in factors): + factors.append(p) + return (-1)**len(factors) + +def gcd(*args): + a = 0 + for b in args: + if a: + while b: + a, b = b, a % b + else: + a = b + return a + + +# Comment by Juan Arias de Reyna: +# +# I learn this method to compute EulerE[2n] from van de Lune. +# +# We apply the formula EulerE[2n] = (-1)^n 2**(-2n) sum_{j=0}^n a(2n,2j+1) +# +# where the numbers a(n,j) vanish for j > n+1 or j <= -1 and satisfies +# +# a(0,-1) = a(0,0) = 0; a(0,1)= 1; a(0,2) = a(0,3) = 0 +# +# a(n,j) = a(n-1,j) when n+j is even +# a(n,j) = (j-1) a(n-1,j-1) + (j+1) a(n-1,j+1) when n+j is odd +# +# +# But we can use only one array unidimensional a(j) since to compute +# a(n,j) we only need to know a(n-1,k) where k and j are of different parity +# and we have not to conserve the used values. +# +# We cached up the values of Euler numbers to sufficiently high order. +# +# Important Observation: If we pretend to use the numbers +# EulerE[1], EulerE[2], ... , EulerE[n] +# it is convenient to compute first EulerE[n], since the algorithm +# computes first all +# the previous ones, and keeps them in the CACHE + +MAX_EULER_CACHE = 500 + +def eulernum(m, _cache={0:MPZ_ONE}): + r""" + Computes the Euler numbers `E(n)`, which can be defined as + coefficients of the Taylor expansion of `1/cosh x`: + + .. 
math :: + + \frac{1}{\cosh x} = \sum_{n=0}^\infty \frac{E_n}{n!} x^n + + Example:: + + >>> [int(eulernum(n)) for n in range(11)] + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + >>> [int(eulernum(n)) for n in range(11)] # test cache + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + + """ + # for odd m > 1, the Euler numbers are zero + if m & 1: + return MPZ_ZERO + f = _cache.get(m) + if f: + return f + MAX = MAX_EULER_CACHE + n = m + a = [MPZ(_) for _ in [0,0,1,0,0,0]] + for n in range(1, m+1): + for j in range(n+1, -1, -2): + a[j+1] = (j-1)*a[j] + (j+1)*a[j+2] + a.append(0) + suma = 0 + for k in range(n+1, -1, -2): + suma += a[k+1] + if n <= MAX: + _cache[n] = ((-1)**(n//2))*(suma // 2**n) + if n == m: + return ((-1)**(n//2))*suma // 2**n + +def stirling1(n, k): + """ + Stirling number of the first kind. + """ + if n < 0 or k < 0: + raise ValueError + if k >= n: + return MPZ(n == k) + if k < 1: + return MPZ_ZERO + L = [MPZ_ZERO] * (k+1) + L[1] = MPZ_ONE + for m in xrange(2, n+1): + for j in xrange(min(k, m), 0, -1): + L[j] = (m-1) * L[j] + L[j-1] + return (-1)**(n+k) * L[k] + +def stirling2(n, k): + """ + Stirling number of the second kind. + """ + if n < 0 or k < 0: + raise ValueError + if k >= n: + return MPZ(n == k) + if k <= 1: + return MPZ(k == 1) + s = MPZ_ZERO + t = MPZ_ONE + for j in xrange(k+1): + if (k + j) & 1: + s -= t * MPZ(j)**n + else: + s += t * MPZ(j)**n + t = t * (k - j) // (j + 1) + return s // ifac(k) diff --git a/MLPY/Lib/site-packages/mpmath/libmp/libmpc.py b/MLPY/Lib/site-packages/mpmath/libmp/libmpc.py new file mode 100644 index 0000000000000000000000000000000000000000..cc22d0e73674676c8a9249ebc2d48da7f3be8b0d --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/libmpc.py @@ -0,0 +1,835 @@ +""" +Low-level functions for complex arithmetic. 
+""" + +import sys + +from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, BACKEND + +from .libmpf import (\ + round_floor, round_ceiling, round_down, round_up, + round_nearest, round_fast, bitcount, + bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps, + negative_rnd, + to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int, + fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone, + mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, + mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot, + mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac, + mpf_sign, mpf_hash, + ComplexResult +) + +from .libelefun import (\ + mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int, + mpf_log_hypot, + mpf_cos_sin_pi, mpf_phi, + mpf_cos, mpf_sin, mpf_cos_pi, mpf_sin_pi, + mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh, + mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci +) + +# An mpc value is a (real, imag) tuple +mpc_one = fone, fzero +mpc_zero = fzero, fzero +mpc_two = ftwo, fzero +mpc_half = (fhalf, fzero) + +_infs = (finf, fninf) +_infs_nan = (finf, fninf, fnan) + +def mpc_is_inf(z): + """Check if either real or imaginary part is infinite""" + re, im = z + if re in _infs: return True + if im in _infs: return True + return False + +def mpc_is_infnan(z): + """Check if either real or imaginary part is infinite or nan""" + re, im = z + if re in _infs_nan: return True + if im in _infs_nan: return True + return False + +def mpc_to_str(z, dps, **kwargs): + re, im = z + rs = to_str(re, dps) + if im[0]: + return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j" + else: + return rs + " + " + to_str(im, dps, **kwargs) + "j" + +def mpc_to_complex(z, strict=False, rnd=round_fast): + re, im = z + return complex(to_float(re, strict, rnd), to_float(im, strict, rnd)) + +def mpc_hash(z): + if sys.version_info >= (3, 2): + re, im = z + h = mpf_hash(re) + sys.hash_info.imag * mpf_hash(im) + # Need to reduce either module 2^32 or 2^64 + h = h % (2**sys.hash_info.width) + return int(h) + else: + try: + return hash(mpc_to_complex(z, strict=True)) + except OverflowError: + return hash(z) + +def mpc_conjugate(z, prec, rnd=round_fast): + re, im = z + return re, mpf_neg(im, prec, rnd) + +def mpc_is_nonzero(z): + return z != mpc_zero + +def mpc_add(z, w, prec, rnd=round_fast): + a, b = z + c, d = w + return mpf_add(a, c, prec, rnd), mpf_add(b, d, prec, rnd) + +def mpc_add_mpf(z, x, prec, rnd=round_fast): + a, b = z + return mpf_add(a, x, prec, rnd), b + +def mpc_sub(z, w, prec=0, rnd=round_fast): + a, b = z + c, d = w + return mpf_sub(a, c, prec, rnd), mpf_sub(b, d, prec, rnd) + +def mpc_sub_mpf(z, p, prec=0, rnd=round_fast): + a, b = z + return mpf_sub(a, p, prec, rnd), b + +def mpc_pos(z, prec, rnd=round_fast): + a, b = z + return mpf_pos(a, prec, rnd), mpf_pos(b, prec, rnd) + +def mpc_neg(z, prec=None, rnd=round_fast): + a, b = z + return mpf_neg(a, prec, rnd), mpf_neg(b, prec, rnd) + +def mpc_shift(z, n): + a, b = z + return mpf_shift(a, n), mpf_shift(b, n) + +def mpc_abs(z, prec, rnd=round_fast): + """Absolute value of a complex number, |a+bi|. + Returns an mpf value.""" + a, b = z + return mpf_hypot(a, b, prec, rnd) + +def mpc_arg(z, prec, rnd=round_fast): + """Argument of a complex number. 
Returns an mpf value."""
+    a, b = z
+    return mpf_atan2(b, a, prec, rnd)
+
+def mpc_floor(z, prec, rnd=round_fast):
+    a, b = z
+    return mpf_floor(a, prec, rnd), mpf_floor(b, prec, rnd)
+
+def mpc_ceil(z, prec, rnd=round_fast):
+    a, b = z
+    return mpf_ceil(a, prec, rnd), mpf_ceil(b, prec, rnd)
+
+def mpc_nint(z, prec, rnd=round_fast):
+    a, b = z
+    return mpf_nint(a, prec, rnd), mpf_nint(b, prec, rnd)
+
+def mpc_frac(z, prec, rnd=round_fast):
+    a, b = z
+    return mpf_frac(a, prec, rnd), mpf_frac(b, prec, rnd)
+
+
+def mpc_mul(z, w, prec, rnd=round_fast):
+    """
+    Complex multiplication.
+
+    Returns the real and imaginary part of (a+bi)*(c+di), rounded to
+    the specified precision. The rounding mode applies to the real and
+    imaginary parts separately.
+    """
+    a, b = z
+    c, d = w
+    p = mpf_mul(a, c)
+    q = mpf_mul(b, d)
+    r = mpf_mul(a, d)
+    s = mpf_mul(b, c)
+    re = mpf_sub(p, q, prec, rnd)
+    im = mpf_add(r, s, prec, rnd)
+    return re, im
+
+def mpc_square(z, prec, rnd=round_fast):
+    # (a+b*I)**2 == a**2 - b**2 + 2*I*a*b
+    a, b = z
+    p = mpf_mul(a,a)
+    q = mpf_mul(b,b)
+    r = mpf_mul(a,b, prec, rnd)
+    re = mpf_sub(p, q, prec, rnd)
+    im = mpf_shift(r, 1)
+    return re, im
+
+def mpc_mul_mpf(z, p, prec, rnd=round_fast):
+    a, b = z
+    re = mpf_mul(a, p, prec, rnd)
+    im = mpf_mul(b, p, prec, rnd)
+    return re, im
+
+def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast):
+    """
+    Multiply the mpc value z by I*x where x is an mpf value.
+    """
+    a, b = z
+    re = mpf_neg(mpf_mul(b, x, prec, rnd))
+    im = mpf_mul(a, x, prec, rnd)
+    return re, im
+
+def mpc_mul_int(z, n, prec, rnd=round_fast):
+    a, b = z
+    re = mpf_mul_int(a, n, prec, rnd)
+    im = mpf_mul_int(b, n, prec, rnd)
+    return re, im
+
+def mpc_div(z, w, prec, rnd=round_fast):
+    a, b = z
+    c, d = w
+    wp = prec + 10
+    # mag = c*c + d*d
+    mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp)
+    # (a*c+b*d)/mag, (b*c-a*d)/mag
+    t = mpf_add(mpf_mul(a,c), mpf_mul(b,d), wp)
+    u = mpf_sub(mpf_mul(b,c), mpf_mul(a,d), wp)
+    return mpf_div(t,mag,prec,rnd), mpf_div(u,mag,prec,rnd)
+
+def mpc_div_mpf(z, p, prec, rnd=round_fast):
+    """Calculate z/p where p is real"""
+    a, b = z
+    re = mpf_div(a, p, prec, rnd)
+    im = mpf_div(b, p, prec, rnd)
+    return re, im
+
+def mpc_reciprocal(z, prec, rnd=round_fast):
+    """Calculate 1/z efficiently"""
+    a, b = z
+    m = mpf_add(mpf_mul(a,a),mpf_mul(b,b),prec+10)
+    re = mpf_div(a, m, prec, rnd)
+    im = mpf_neg(mpf_div(b, m, prec, rnd))
+    return re, im
+
+def mpc_mpf_div(p, z, prec, rnd=round_fast):
+    """Calculate p/z where p is real efficiently"""
+    a, b = z
+    m = mpf_add(mpf_mul(a,a),mpf_mul(b,b), prec+10)
+    re = mpf_div(mpf_mul(a,p), m, prec, rnd)
+    im = mpf_div(mpf_neg(mpf_mul(b,p)), m, prec, rnd)
+    return re, im
+
+def complex_int_pow(a, b, n):
+    """Complex integer power: computes (a+b*I)**n exactly for
+    nonnegative n (a and b must be Python ints)."""
+    wre = 1
+    wim = 0
+    while n:
+        if n & 1:
+            wre, wim = wre*a - wim*b, wim*a + wre*b
+            n -= 1
+        a, b = a*a - b*b, 2*a*b
+        n //= 2
+    return wre, wim
+
+def mpc_pow(z, w, prec, rnd=round_fast):
+    if w[1] == fzero:
+        return mpc_pow_mpf(z, w[0], prec, rnd)
+    return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd)
+
+def mpc_pow_mpf(z, p, prec, rnd=round_fast):
+    psign, pman, pexp, pbc = p
+    if pexp >= 0:
+        return mpc_pow_int(z, (-1)**psign * (pman<<pexp), prec, rnd)
+    if pexp == -1:
+        sqrtz = mpc_sqrt(z, prec+10)
+        return mpc_pow_int(sqrtz, (-1)**psign * pman, prec, rnd)
+    return mpc_exp(mpc_mul_mpf(mpc_log(z, prec+10), p, prec+10), prec, rnd)
+
+def mpc_pow_int(z, n, prec, rnd=round_fast):
+    a, b = z
+    if b == fzero:
+        return mpf_pow_int(a, n, prec, rnd), fzero
+    if a == fzero:
+        v = mpf_pow_int(b, n, prec, rnd)
+        n %= 4
+        if n == 0:
+            return v, fzero
+        elif n == 1:
+            return fzero, v
+        elif n == 2:
+            return mpf_neg(v), fzero
+        elif n == 3:
+            return fzero, mpf_neg(v)
+    if n == 0: return mpc_one
+    if n == 1: return mpc_pos(z, prec, rnd)
+    if n == 2: return mpc_square(z, prec, rnd)
+    if n == -1: return mpc_reciprocal(z, prec, rnd)
+    if n < 0: return mpc_reciprocal(mpc_pow_int(z, -n, prec+4), prec, rnd)
+    asign, aman, aexp, abc = a
+    bsign, bman, bexp, bbc = b
+    if asign:
+        aman = -aman
+    if bsign:
+        bman = -bman
+    de = aexp - bexp
+    abs_de = abs(de)
+    exact_size = n*(abs_de + max(abc, bbc))
+    if exact_size < 10000:
+        if de > 0:
+            aman <<= de
+            aexp = bexp
+        else:
+            bman <<= (-de)
+            bexp = aexp
+        re, im = complex_int_pow(aman, bman, n)
+        re = from_man_exp(re, int(n*aexp), prec, rnd)
+        im = from_man_exp(im, int(n*bexp), prec, rnd)
+        return re, im
+    return
mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd) + +def mpc_sqrt(z, prec, rnd=round_fast): + """Complex square root (principal branch). + + We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where + r = abs(a+bi), when a+bi is not a negative real number.""" + a, b = z + if b == fzero: + if a == fzero: + return (a, b) + # When a+bi is a negative real number, we get a real sqrt times i + if a[0]: + im = mpf_sqrt(mpf_neg(a), prec, rnd) + return (fzero, im) + else: + re = mpf_sqrt(a, prec, rnd) + return (re, fzero) + wp = prec+20 + if not a[0]: # case a positive + t = mpf_add(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) + a + u = mpf_shift(t, -1) # u = t/2 + re = mpf_sqrt(u, prec, rnd) # re = sqrt(u) + v = mpf_shift(t, 1) # v = 2*t + w = mpf_sqrt(v, wp) # w = sqrt(v) + im = mpf_div(b, w, prec, rnd) # im = b / w + else: # case a negative + t = mpf_sub(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) - a + u = mpf_shift(t, -1) # u = t/2 + im = mpf_sqrt(u, prec, rnd) # im = sqrt(u) + v = mpf_shift(t, 1) # v = 2*t + w = mpf_sqrt(v, wp) # w = sqrt(v) + re = mpf_div(b, w, prec, rnd) # re = b/w + if b[0]: + re = mpf_neg(re) + im = mpf_neg(im) + return re, im + +def mpc_nthroot_fixed(a, b, n, prec): + # a, b signed integers at fixed precision prec + start = 50 + a1 = int(rshift(a, prec - n*start)) + b1 = int(rshift(b, prec - n*start)) + try: + r = (a1 + 1j * b1)**(1.0/n) + re = r.real + im = r.imag + re = MPZ(int(re)) + im = MPZ(int(im)) + except OverflowError: + a1 = from_int(a1, start) + b1 = from_int(b1, start) + fn = from_int(n) + nth = mpf_rdiv_int(1, fn, start) + re, im = mpc_pow((a1, b1), (nth, fzero), start) + re = to_int(re) + im = to_int(im) + extra = 10 + prevp = start + extra1 = n + for p in giant_steps(start, prec+extra): + # this is slow for large n, unlike int_pow_fixed + re2, im2 = complex_int_pow(re, im, n-1) + re2 = rshift(re2, (n-1)*prevp - p - extra1) + im2 = rshift(im2, (n-1)*prevp - p - extra1) + r4 = (re2*re2 + im2*im2) >> (p + extra1) + ap = rshift(a, prec - p) + bp = rshift(b, prec - p) + rec = (ap * re2 + bp * im2) >> p + imc = (-ap * im2 + bp * re2) >> p + reb = (rec << p) // r4 + imb = (imc << p) // r4 + re = (reb + (n-1)*lshift(re, p-prevp))//n + im = (imb + (n-1)*lshift(im, p-prevp))//n + prevp = p + return re, im + +def mpc_nthroot(z, n, prec, rnd=round_fast): + """ + Complex n-th root. 
+
+
+    Use Newton's method as in the real case when it is faster,
+    otherwise use z**(1/n)
+    """
+    a, b = z
+    if a[0] == 0 and b == fzero:
+        re = mpf_nthroot(a, n, prec, rnd)
+        return (re, fzero)
+    if n < 2:
+        if n == 0:
+            return mpc_one
+        if n == 1:
+            return mpc_pos((a, b), prec, rnd)
+        if n == -1:
+            return mpc_div(mpc_one, (a, b), prec, rnd)
+        inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd])
+        return mpc_div(mpc_one, inverse, prec, rnd)
+    if n <= 20:
+        prec2 = int(1.2 * (prec + 10))
+        asign, aman, aexp, abc = a
+        bsign, bman, bexp, bbc = b
+        pf = mpc_abs((a,b), prec)
+        if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec:
+            af = to_fixed(a, prec2)
+            bf = to_fixed(b, prec2)
+            re, im = mpc_nthroot_fixed(af, bf, n, prec2)
+            extra = 10
+            re = from_man_exp(re, -prec2-extra, prec2, rnd)
+            im = from_man_exp(im, -prec2-extra, prec2, rnd)
+            return re, im
+    fn = from_int(n)
+    prec2 = prec+10 + 10
+    nth = mpf_rdiv_int(1, fn, prec2)
+    re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd)
+    re = normalize(re[0], re[1], re[2], re[3], prec, rnd)
+    im = normalize(im[0], im[1], im[2], im[3], prec, rnd)
+    return re, im
+
+def mpc_cbrt(z, prec, rnd=round_fast):
+    """
+    Complex cubic root.
+    """
+    return mpc_nthroot(z, 3, prec, rnd)
+
+def mpc_exp(z, prec, rnd=round_fast):
+    """
+    Complex exponential function.
+
+    We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i)
+    for the computation. This formula is very nice because it is
+    perfectly stable; since we just do real multiplications, the only
+    numerical errors that can creep in are single-ulp rounding errors.
+
+    The formula is efficient since mpmath's real exp is quite fast and
+    since we can compute cos and sin simultaneously.
+
+    It is no problem if a and b are large; if the implementations of
+    exp/cos/sin are accurate and efficient for all real numbers, then
+    so is this function for all complex numbers.
+    """
+    a, b = z
+    if a == fzero:
+        return mpf_cos_sin(b, prec, rnd)
+    if b == fzero:
+        return mpf_exp(a, prec, rnd), fzero
+    mag = mpf_exp(a, prec+4, rnd)
+    c, s = mpf_cos_sin(b, prec+4, rnd)
+    re = mpf_mul(mag, c, prec, rnd)
+    im = mpf_mul(mag, s, prec, rnd)
+    return re, im
+
+def mpc_log(z, prec, rnd=round_fast):
+    re = mpf_log_hypot(z[0], z[1], prec, rnd)
+    im = mpc_arg(z, prec, rnd)
+    return re, im
+
+def mpc_cos(z, prec, rnd=round_fast):
+    """Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) -
+    sin(a)*sinh(b)*i.
+
+    The same comments apply as for the complex exp: only real
+    multiplications are performed, so no cancellation errors are
+    possible. The formula is also efficient since we can compute both
+    pairs (cos, sin) and (cosh, sinh) in single steps."""
+    a, b = z
+    if b == fzero:
+        return mpf_cos(a, prec, rnd), fzero
+    if a == fzero:
+        return mpf_cosh(b, prec, rnd), fzero
+    wp = prec + 6
+    c, s = mpf_cos_sin(a, wp)
+    ch, sh = mpf_cosh_sinh(b, wp)
+    re = mpf_mul(c, ch, prec, rnd)
+    im = mpf_mul(s, sh, prec, rnd)
+    return re, mpf_neg(im)
+
+def mpc_sin(z, prec, rnd=round_fast):
+    """Complex sine. We have sin(a+bi) = sin(a)*cosh(b) +
+    cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional
+    comments."""
+    a, b = z
+    if b == fzero:
+        return mpf_sin(a, prec, rnd), fzero
+    if a == fzero:
+        return fzero, mpf_sinh(b, prec, rnd)
+    wp = prec + 6
+    c, s = mpf_cos_sin(a, wp)
+    ch, sh = mpf_cosh_sinh(b, wp)
+    re = mpf_mul(s, ch, prec, rnd)
+    im = mpf_mul(c, sh, prec, rnd)
+    return re, im
+
+def mpc_tan(z, prec, rnd=round_fast):
+    """Complex tangent.
Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i + where M = cos(2a) + cosh(2b).""" + a, b = z + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if b == fzero: return mpf_tan(a, prec, rnd), fzero + if a == fzero: return fzero, mpf_tanh(b, prec, rnd) + wp = prec + 15 + a = mpf_shift(a, 1) + b = mpf_shift(b, 1) + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + # TODO: handle cancellation when c ~= -1 and ch ~= 1 + mag = mpf_add(c, ch, wp) + re = mpf_div(s, mag, prec, rnd) + im = mpf_div(sh, mag, prec, rnd) + return re, im + +def mpc_cos_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + return mpf_cos_pi(a, prec, rnd), fzero + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + return mpf_cosh(b, prec, rnd), fzero + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(c, ch, prec, rnd) + im = mpf_mul(s, sh, prec, rnd) + return re, mpf_neg(im) + +def mpc_sin_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + return mpf_sin_pi(a, prec, rnd), fzero + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + return fzero, mpf_sinh(b, prec, rnd) + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(s, ch, prec, rnd) + im = mpf_mul(c, sh, prec, rnd) + return re, im + +def mpc_cos_sin(z, prec, rnd=round_fast): + a, b = z + if a == fzero: + ch, sh = mpf_cosh_sinh(b, prec, rnd) + return (ch, fzero), (fzero, sh) + if b == fzero: + c, s = mpf_cos_sin(a, prec, rnd) + return (c, fzero), (s, fzero) + wp = prec + 6 + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + cre = mpf_mul(c, ch, prec, rnd) + cim = mpf_mul(s, sh, prec, rnd) + sre = mpf_mul(s, ch, prec, rnd) + sim = mpf_mul(c, sh, prec, rnd) + return (cre, mpf_neg(cim)), (sre, sim) + +def mpc_cos_sin_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + c, s = mpf_cos_sin_pi(a, prec, rnd) + return (c, fzero), (s, fzero) + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + ch, sh = mpf_cosh_sinh(b, prec, rnd) + return (ch, fzero), (fzero, sh) + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + cre = mpf_mul(c, ch, prec, rnd) + cim = mpf_mul(s, sh, prec, rnd) + sre = mpf_mul(s, ch, prec, rnd) + sim = mpf_mul(c, sh, prec, rnd) + return (cre, mpf_neg(cim)), (sre, sim) + +def mpc_cosh(z, prec, rnd=round_fast): + """Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i).""" + a, b = z + return mpc_cos((b, mpf_neg(a)), prec, rnd) + +def mpc_sinh(z, prec, rnd=round_fast): + """Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i).""" + a, b = z + b, a = mpc_sin((b, a), prec, rnd) + return a, b + +def mpc_tanh(z, prec, rnd=round_fast): + """Complex hyperbolic tangent. 
Computed as tanh(z) = -i*tan(z*i)."""
+    a, b = z
+    b, a = mpc_tan((b, a), prec, rnd)
+    return a, b
+
+# TODO: avoid loss of accuracy
+def mpc_atan(z, prec, rnd=round_fast):
+    a, b = z
+    # atan(z) = (I/2)*(log(1-I*z) - log(1+I*z))
+    # x = 1-I*z = 1 + b - I*a
+    # y = 1+I*z = 1 - b + I*a
+    wp = prec + 15
+    x = mpf_add(fone, b, wp), mpf_neg(a)
+    y = mpf_sub(fone, b, wp), a
+    l1 = mpc_log(x, wp)
+    l2 = mpc_log(y, wp)
+    a, b = mpc_sub(l1, l2, prec, rnd)
+    # (I/2) * (a+b*I) = (-b/2 + a/2*I)
+    v = mpf_neg(mpf_shift(b,-1)), mpf_shift(a,-1)
+    # Subtraction at infinity gives correct real part but
+    # wrong imaginary part (should be zero)
+    if v[1] == fnan and mpc_is_inf(z):
+        v = (v[0], fzero)
+    return v
+
+beta_crossover = from_float(0.6417)
+alpha_crossover = from_float(1.5)
+
+def acos_asin(z, prec, rnd, n):
+    """ complex acos for n = 0, asin for n = 1
+    The algorithm is described in
+    T.E. Hull, T.F. Fairgrieve and P.T.P. Tang
+    'Implementing the Complex Arcsine and Arccosine Functions
+    using Exception Handling',
+    ACM Trans. on Math. Software Vol. 23 (1997), p299
+    The complex acos and asin can be defined as
+    acos(z) = acos(beta) - I*sign(a)* log(alpha + sqrt(alpha**2 -1))
+    asin(z) = asin(beta) + I*sign(a)* log(alpha + sqrt(alpha**2 -1))
+    where z = a + I*b
+    alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha
+    r = sqrt((a+1)**2 + b**2); s = sqrt((a-1)**2 + b**2)
+    These expressions are rewritten in different ways in different
+    regions, delimited by two crossovers alpha_crossover and beta_crossover,
+    and by abs(a) <= 1, in order to improve the numerical accuracy.
+    """
+    a, b = z
+    wp = prec + 10
+    # special cases with real argument
+    if b == fzero:
+        am = mpf_sub(fone, mpf_abs(a), wp)
+        # case abs(a) <= 1
+        if not am[0]:
+            if n == 0:
+                return mpf_acos(a, prec, rnd), fzero
+            else:
+                return mpf_asin(a, prec, rnd), fzero
+        # cases abs(a) > 1
+        else:
+            # case a < -1
+            if a[0]:
+                pi = mpf_pi(prec, rnd)
+                c = mpf_acosh(mpf_neg(a), prec, rnd)
+                if n == 0:
+                    return pi, mpf_neg(c)
+                else:
+                    return mpf_neg(mpf_shift(pi, -1)), c
+            # case a > 1
+            else:
+                c = mpf_acosh(a, prec, rnd)
+                if n == 0:
+                    return fzero, c
+                else:
+                    pi = mpf_pi(prec, rnd)
+                    return mpf_shift(pi, -1), mpf_neg(c)
+    asign = bsign = 0
+    if a[0]:
+        a = mpf_neg(a)
+        asign = 1
+    if b[0]:
+        b = mpf_neg(b)
+        bsign = 1
+    am = mpf_sub(fone, a, wp)
+    ap = mpf_add(fone, a, wp)
+    r = mpf_hypot(ap, b, wp)
+    s = mpf_hypot(am, b, wp)
+    alpha = mpf_shift(mpf_add(r, s, wp), -1)
+    beta = mpf_div(a, alpha, wp)
+    b2 = mpf_mul(b,b, wp)
+    # case beta <= beta_crossover
+    if not mpf_sub(beta_crossover, beta, wp)[0]:
+        if n == 0:
+            re = mpf_acos(beta, wp)
+        else:
+            re = mpf_asin(beta, wp)
+    else:
+        # to compute the real part in this region use the identity
+        # asin(beta) = atan(beta/sqrt(1-beta**2))
+        # beta/sqrt(1-beta**2) = (alpha + a) * (alpha - a)
+        # alpha + a is numerically accurate; alpha - a can have
+        # cancellations leading to numerical inaccuracies, so rewrite
+        # it in different ways according to the region
+        Ax = mpf_add(alpha, a, wp)
+        # case a <= 1
+        if not am[0]:
+            # c = b*b/(r + (a+1)); d = (s + (1-a))
+            # alpha - a = (1/2)*(c + d)
+            # case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a)
+            # case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d)))
+            c = mpf_div(b2, mpf_add(r, ap, wp), wp)
+            d = mpf_add(s, am, wp)
+            re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1)
+            if n == 0:
+                re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp)
+            else:
+                re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp)
+        else:
+            # c = Ax/(r + (a+1)); d = Ax/(s - (1-a))
+ # alpha - a = (1/2)*(c + d) + # case n = 0: re = atan(b*sqrt(c + d)/2/a) + # case n = 1: re = atan(a/(b*sqrt(c + d)/2) + c = mpf_div(Ax, mpf_add(r, ap, wp), wp) + d = mpf_div(Ax, mpf_sub(s, am, wp), wp) + re = mpf_shift(mpf_add(c, d, wp), -1) + re = mpf_mul(b, mpf_sqrt(re, wp), wp) + if n == 0: + re = mpf_atan(mpf_div(re, a, wp), wp) + else: + re = mpf_atan(mpf_div(a, re, wp), wp) + # to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover + # replace it with 1 + Am1 + sqrt(Am1*(alpha+1))) + # where Am1 = alpha -1 + # if alpha <= alpha_crossover: + if not mpf_sub(alpha_crossover, alpha, wp)[0]: + c1 = mpf_div(b2, mpf_add(r, ap, wp), wp) + # case a < 1 + if mpf_neg(am)[0]: + # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a)) + c2 = mpf_add(s, am, wp) + c2 = mpf_div(b2, c2, wp) + Am1 = mpf_shift(mpf_add(c1, c2, wp), -1) + else: + # Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a))) + c2 = mpf_sub(s, am, wp) + Am1 = mpf_shift(mpf_add(c1, c2, wp), -1) + # im = log(1 + Am1 + sqrt(Am1*(alpha+1))) + im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp) + im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp) + else: + # im = log(alpha + sqrt(alpha*alpha - 1)) + im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp) + im = mpf_log(mpf_add(alpha, im, wp), wp) + if asign: + if n == 0: + re = mpf_sub(mpf_pi(wp), re, wp) + else: + re = mpf_neg(re) + if not bsign and n == 0: + im = mpf_neg(im) + if bsign and n == 1: + im = mpf_neg(im) + re = normalize(re[0], re[1], re[2], re[3], prec, rnd) + im = normalize(im[0], im[1], im[2], im[3], prec, rnd) + return re, im + +def mpc_acos(z, prec, rnd=round_fast): + return acos_asin(z, prec, rnd, 0) + +def mpc_asin(z, prec, rnd=round_fast): + return acos_asin(z, prec, rnd, 1) + +def mpc_asinh(z, prec, rnd=round_fast): + # asinh(z) = I * asin(-I z) + a, b = z + a, b = mpc_asin((b, mpf_neg(a)), prec, rnd) + return mpf_neg(b), a + +def mpc_acosh(z, prec, rnd=round_fast): + # acosh(z) = -I * acos(z) for Im(acos(z)) <= 0 + # +I * acos(z) otherwise + a, b = mpc_acos(z, prec, rnd) + if b[0] or b == fzero: + return mpf_neg(b), a + else: + return b, mpf_neg(a) + +def mpc_atanh(z, prec, rnd=round_fast): + # atanh(z) = (log(1+z)-log(1-z))/2 + wp = prec + 15 + a = mpc_add(z, mpc_one, wp) + b = mpc_sub(mpc_one, z, wp) + a = mpc_log(a, wp) + b = mpc_log(b, wp) + v = mpc_shift(mpc_sub(a, b, wp), -1) + # Subtraction at infinity gives correct imaginary part but + # wrong real part (should be zero) + if v[0] == fnan and mpc_is_inf(z): + v = (fzero, v[1]) + return v + +def mpc_fibonacci(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + return (mpf_fibonacci(re, prec, rnd), fzero) + size = max(abs(re[2]+re[3]), abs(re[2]+re[3])) + wp = prec + size + 20 + a = mpf_phi(wp) + b = mpf_add(mpf_shift(a, 1), fnone, wp) + u = mpc_pow((a, fzero), z, wp) + v = mpc_cos_pi(z, wp) + v = mpc_div(v, u, wp) + u = mpc_sub(u, v, wp) + u = mpc_div_mpf(u, b, prec, rnd) + return u + +def mpf_expj(x, prec, rnd='f'): + raise ComplexResult + +def mpc_expj(z, prec, rnd='f'): + re, im = z + if im == fzero: + return mpf_cos_sin(re, prec, rnd) + if re == fzero: + return mpf_exp(mpf_neg(im), prec, rnd), fzero + ey = mpf_exp(mpf_neg(im), prec+10) + c, s = mpf_cos_sin(re, prec+10) + re = mpf_mul(ey, c, prec, rnd) + im = mpf_mul(ey, s, prec, rnd) + return re, im + +def mpf_expjpi(x, prec, rnd='f'): + raise ComplexResult + +def mpc_expjpi(z, prec, rnd='f'): + re, im = z + if im == fzero: + return mpf_cos_sin_pi(re, prec, rnd) + sign, man, exp, bc = im + wp = prec+10 + if man: + wp 
+= max(0, exp+bc) + im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp)) + if re == fzero: + return mpf_exp(im, prec, rnd), fzero + ey = mpf_exp(im, prec+10) + c, s = mpf_cos_sin_pi(re, prec+10) + re = mpf_mul(ey, c, prec, rnd) + im = mpf_mul(ey, s, prec, rnd) + return re, im + + +if BACKEND == 'sage': + try: + import sage.libs.mpmath.ext_libmp as _lbmp + mpc_exp = _lbmp.mpc_exp + mpc_sqrt = _lbmp.mpc_sqrt + except (ImportError, AttributeError): + print("Warning: Sage imports in libmpc failed") diff --git a/MLPY/Lib/site-packages/mpmath/libmp/libmpf.py b/MLPY/Lib/site-packages/mpmath/libmp/libmpf.py new file mode 100644 index 0000000000000000000000000000000000000000..5c162e17d4f688c71dc3476b944e2d31c65faab7 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/libmp/libmpf.py @@ -0,0 +1,1414 @@ +""" +Low-level functions for arbitrary-precision floating-point arithmetic. +""" + +__docformat__ = 'plaintext' + +import math + +from bisect import bisect + +import sys + +# Importing random is slow +#from random import getrandbits +getrandbits = None + +from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, + BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils) + +from .libintmath import (giant_steps, + trailtable, bctable, lshift, rshift, bitcount, trailing, + sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem, + bin_to_radix) + +# We don't pickle tuples directly for the following reasons: +# 1: pickle uses str() for ints, which is inefficient when they are large +# 2: pickle doesn't work for gmpy mpzs +# Both problems are solved by using hex() + +if BACKEND == 'sage': + def to_pickable(x): + sign, man, exp, bc = x + return sign, hex(man), exp, bc +else: + def to_pickable(x): + sign, man, exp, bc = x + return sign, hex(man)[2:], exp, bc + +def from_pickable(x): + sign, man, exp, bc = x + return (sign, MPZ(man, 16), exp, bc) + +class ComplexResult(ValueError): + pass + +try: + intern +except NameError: + intern = lambda x: x + +# All supported rounding modes +round_nearest = intern('n') +round_floor = intern('f') +round_ceiling = intern('c') +round_up = intern('u') +round_down = intern('d') +round_fast = round_down + +def prec_to_dps(n): + """Return number of accurate decimals that can be represented + with a precision of n bits.""" + return max(1, int(round(int(n)/3.3219280948873626)-1)) + +def dps_to_prec(n): + """Return the number of bits required to represent n decimals + accurately.""" + return max(1, int(round((int(n)+1)*3.3219280948873626))) + +def repr_dps(n): + """Return the number of decimal digits required to represent + a number with n-bit precision so that it can be uniquely + reconstructed from the representation.""" + dps = prec_to_dps(n) + if dps == 15: + return 17 + return dps + 3 + +#----------------------------------------------------------------------------# +# Some commonly needed float values # +#----------------------------------------------------------------------------# + +# Regular number format: +# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa +fzero = (0, MPZ_ZERO, 0, 0) +fnzero = (1, MPZ_ZERO, 0, 0) +fone = (0, MPZ_ONE, 0, 1) +fnone = (1, MPZ_ONE, 0, 1) +ftwo = (0, MPZ_ONE, 1, 1) +ften = (0, MPZ_FIVE, 1, 3) +fhalf = (0, MPZ_ONE, -1, 1) + +# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent +fnan = (0, MPZ_ZERO, -123, -1) +finf = (0, MPZ_ZERO, -456, -2) +fninf = (1, MPZ_ZERO, -789, -3) + +# Was 1e1000; this is broken in Python 2.4 +math_float_inf = 1e300 * 1e300 + + 
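+# For example, the canonical encodings of a few regular values (shown as a
+# doctest-style sketch; from_man_exp is defined further below):
+#
+#     >>> from_man_exp(10, 0)   # 10.0 = 5 * 2**1, mantissa 0b101
+#     (0, 5, 1, 3)
+#     >>> from_man_exp(-3, -2)  # -0.75 = -(3 * 2**-2), mantissa 0b11
+#     (1, 3, -2, 2)
+#
+# The special values above reuse a zero mantissa with a nonzero exponent,
+# a combination that no normalized regular value can produce, so they can
+# never collide with an ordinary number.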
+#----------------------------------------------------------------------------# +# Rounding # +#----------------------------------------------------------------------------# + +# This function can be used to round a mantissa generally. However, +# we will try to do most rounding inline for efficiency. +def round_int(x, n, rnd): + if rnd == round_nearest: + if x >= 0: + t = x >> (n-1) + if t & 1 and ((t & 2) or (x & h_mask[n<300][n])): + return (t>>1)+1 + else: + return t>>1 + else: + return -round_int(-x, n, rnd) + if rnd == round_floor: + return x >> n + if rnd == round_ceiling: + return -((-x) >> n) + if rnd == round_down: + if x >= 0: + return x >> n + return -((-x) >> n) + if rnd == round_up: + if x >= 0: + return -((-x) >> n) + return x >> n + +# These masks are used to pick out segments of numbers to determine +# which direction to round when rounding to nearest. +class h_mask_big: + def __getitem__(self, n): + return (MPZ_ONE<<(n-1))-1 + +h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)] +h_mask = [h_mask_big(), h_mask_small] + +# The >> operator rounds to floor. shifts_down[rnd][sign] +# tells whether this is the right direction to use, or if the +# number should be negated before shifting +shifts_down = {round_floor:(1,0), round_ceiling:(0,1), + round_down:(1,1), round_up:(0,0)} + + +#----------------------------------------------------------------------------# +# Normalization of raw mpfs # +#----------------------------------------------------------------------------# + +# This function is called almost every time an mpf is created. +# It has been optimized accordingly. + +def _normalize(sign, man, exp, bc, prec, rnd): + """ + Create a raw mpf tuple with value (-1)**sign * man * 2**exp and + normalized mantissa. The mantissa is rounded in the specified + direction if its size exceeds the precision. Trailing zero bits + are also stripped from the mantissa to ensure that the + representation is canonical. + + Conditions on the input: + * The input must represent a regular (finite) number + * The sign bit must be 0 or 1 + * The mantissa must be positive + * The exponent must be an integer + * The bitcount must be exact + + If these conditions are not met, use from_man_exp, mpf_pos, or any + of the conversion functions to create normalized raw mpf tuples. + """ + if not man: + return fzero + # Cut mantissa down to size if larger than target precision + n = bc - prec + if n > 0: + if rnd == round_nearest: + t = man >> (n-1) + if t & 1 and ((t & 2) or (man & h_mask[n<300][n])): + man = (t>>1)+1 + else: + man = t>>1 + elif shifts_down[rnd][sign]: + man >>= n + else: + man = -((-man)>>n) + exp += n + bc = prec + # Strip trailing bits + if not man & 1: + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + # Bit count can be wrong if the input mantissa was 1 less than + # a power of 2 and got rounded up, thereby adding an extra bit. + # With trailing bits removed, all powers of two have mantissa 1, + # so this is easy to check for. 
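+    # For example, rounding the 3-bit mantissa 0b111 up to 2 bits gives
+    # 0b1000: the carry adds a bit that the bookkeeping above does not see,
+    # so after the trailing zeros are stripped bc would be 0 instead of 1.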
+ if man == 1: + bc = 1 + return sign, man, exp, bc + +def _normalize1(sign, man, exp, bc, prec, rnd): + """same as normalize, but with the added condition that + man is odd or zero + """ + if not man: + return fzero + if bc <= prec: + return sign, man, exp, bc + n = bc - prec + if rnd == round_nearest: + t = man >> (n-1) + if t & 1 and ((t & 2) or (man & h_mask[n<300][n])): + man = (t>>1)+1 + else: + man = t>>1 + elif shifts_down[rnd][sign]: + man >>= n + else: + man = -((-man)>>n) + exp += n + bc = prec + # Strip trailing bits + if not man & 1: + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + # Bit count can be wrong if the input mantissa was 1 less than + # a power of 2 and got rounded up, thereby adding an extra bit. + # With trailing bits removed, all powers of two have mantissa 1, + # so this is easy to check for. + if man == 1: + bc = 1 + return sign, man, exp, bc + +try: + _exp_types = (int, long) +except NameError: + _exp_types = (int,) + +def strict_normalize(sign, man, exp, bc, prec, rnd): + """Additional checks on the components of an mpf. Enable tests by setting + the environment variable MPMATH_STRICT to Y.""" + assert type(man) == MPZ_TYPE + assert type(bc) in _exp_types + assert type(exp) in _exp_types + assert bc == bitcount(man) + return _normalize(sign, man, exp, bc, prec, rnd) + +def strict_normalize1(sign, man, exp, bc, prec, rnd): + """Additional checks on the components of an mpf. Enable tests by setting + the environment variable MPMATH_STRICT to Y.""" + assert type(man) == MPZ_TYPE + assert type(bc) in _exp_types + assert type(exp) in _exp_types + assert bc == bitcount(man) + assert (not man) or (man & 1) + return _normalize1(sign, man, exp, bc, prec, rnd) + +if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy): + _normalize = gmpy._mpmath_normalize + _normalize1 = gmpy._mpmath_normalize + +if BACKEND == 'sage': + _normalize = _normalize1 = sage_utils.normalize + +if STRICT: + normalize = strict_normalize + normalize1 = strict_normalize1 +else: + normalize = _normalize + normalize1 = _normalize1 + +#----------------------------------------------------------------------------# +# Conversion functions # +#----------------------------------------------------------------------------# + +def from_man_exp(man, exp, prec=None, rnd=round_fast): + """Create raw mpf from (man, exp) pair. The mantissa may be signed. + If no precision is specified, the mantissa is stored exactly.""" + man = MPZ(man) + sign = 0 + if man < 0: + sign = 1 + man = -man + if man < 1024: + bc = bctable[int(man)] + else: + bc = bitcount(man) + if not prec: + if not man: + return fzero + if not man & 1: + if man & 2: + return (sign, man >> 1, exp + 1, bc - 1) + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + return (sign, man, exp, bc) + return normalize(sign, man, exp, bc, prec, rnd) + +int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257)) + +if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy): + from_man_exp = gmpy._mpmath_create + +if BACKEND == 'sage': + from_man_exp = sage_utils.from_man_exp + +def from_int(n, prec=0, rnd=round_fast): + """Create a raw mpf from an integer. 
If no precision is specified, + the mantissa is stored exactly.""" + if not prec: + if n in int_cache: + return int_cache[n] + return from_man_exp(n, 0, prec, rnd) + +def to_man_exp(s): + """Return (man, exp) of a raw mpf. Raise an error if inf/nan.""" + sign, man, exp, bc = s + if (not man) and exp: + raise ValueError("mantissa and exponent are undefined for %s" % man) + return man, exp + +def to_int(s, rnd=None): + """Convert a raw mpf to the nearest int. Rounding is done down by + default (same as int(float) in Python), but can be changed. If the + input is inf/nan, an exception is raised.""" + sign, man, exp, bc = s + if (not man) and exp: + raise ValueError("cannot convert inf or nan to int") + if exp >= 0: + if sign: + return (-man) << exp + return man << exp + # Make default rounding fast + if not rnd: + if sign: + return -(man >> (-exp)) + else: + return man >> (-exp) + if sign: + return round_int(-man, -exp, rnd) + else: + return round_int(man, -exp, rnd) + +def mpf_round_int(s, rnd): + sign, man, exp, bc = s + if (not man) and exp: + return s + if exp >= 0: + return s + mag = exp+bc + if mag < 1: + if rnd == round_ceiling: + if sign: return fzero + else: return fone + elif rnd == round_floor: + if sign: return fnone + else: return fzero + elif rnd == round_nearest: + if mag < 0 or man == MPZ_ONE: return fzero + elif sign: return fnone + else: return fone + else: + raise NotImplementedError + return mpf_pos(s, min(bc, mag), rnd) + +def mpf_floor(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_floor) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_ceil(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_ceiling) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_nint(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_nearest) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_frac(s, prec=0, rnd=round_fast): + return mpf_sub(s, mpf_floor(s), prec, rnd) + +def from_float(x, prec=53, rnd=round_fast): + """Create a raw mpf from a Python float, rounding if necessary. + If prec >= 53, the result is guaranteed to represent exactly the + same number as the input. If prec is not specified, use prec=53.""" + # frexp only raises an exception for nan on some platforms + if x != x: + return fnan + # in Python2.5 math.frexp gives an exception for float infinity + # in Python2.6 it returns (float infinity, 0) + try: + m, e = math.frexp(x) + except: + if x == math_float_inf: return finf + if x == -math_float_inf: return fninf + return fnan + if x == math_float_inf: return finf + if x == -math_float_inf: return fninf + return from_man_exp(int(m*(1<<53)), e-53, prec, rnd) + +def from_npfloat(x, prec=113, rnd=round_fast): + """Create a raw mpf from a numpy float, rounding if necessary. + If prec >= 113, the result is guaranteed to represent exactly the + same number as the input. If prec is not specified, use prec=113.""" + y = float(x) + if x == y: # ldexp overflows for float16 + return from_float(y, prec, rnd) + import numpy as np + if np.isfinite(x): + m, e = np.frexp(x) + return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd) + if np.isposinf(x): return finf + if np.isneginf(x): return fninf + return fnan + +def from_Decimal(x, prec=None, rnd=round_fast): + """Create a raw mpf from a Decimal, rounding if necessary. 
+ If prec is not specified, use the equivalent bit precision + of the number of significant digits in x.""" + if x.is_nan(): return fnan + if x.is_infinite(): return fninf if x.is_signed() else finf + if prec is None: + prec = int(len(x.as_tuple()[1])*3.3219280948873626) + return from_str(str(x), prec, rnd) + +def to_float(s, strict=False, rnd=round_fast): + """ + Convert a raw mpf to a Python float. The result is exact if the + bitcount of s is <= 53 and no underflow/overflow occurs. + + If the number is too large or too small to represent as a regular + float, it will be converted to inf or 0.0. Setting strict=True + forces an OverflowError to be raised instead. + + Warning: with a directed rounding mode, the correct nearest representable + floating-point number in the specified direction might not be computed + in case of overflow or (gradual) underflow. + """ + sign, man, exp, bc = s + if not man: + if s == fzero: return 0.0 + if s == finf: return math_float_inf + if s == fninf: return -math_float_inf + return math_float_inf/math_float_inf + if bc > 53: + sign, man, exp, bc = normalize1(sign, man, exp, bc, 53, rnd) + if sign: + man = -man + try: + return math.ldexp(man, exp) + except OverflowError: + if strict: + raise + # Overflow to infinity + if exp + bc > 0: + if sign: + return -math_float_inf + else: + return math_float_inf + # Underflow to zero + return 0.0 + +def from_rational(p, q, prec, rnd=round_fast): + """Create a raw mpf from a rational number p/q, round if + necessary.""" + return mpf_div(from_int(p), from_int(q), prec, rnd) + +def to_rational(s): + """Convert a raw mpf to a rational number. Return integers (p, q) + such that s = p/q exactly.""" + sign, man, exp, bc = s + if sign: + man = -man + if bc == -1: + raise ValueError("cannot convert %s to a rational number" % man) + if exp >= 0: + return man * (1<= 0: return (-man) << offset + else: return (-man) >> (-offset) + else: + if offset >= 0: return man << offset + else: return man >> (-offset) + + +############################################################################## +############################################################################## + +#----------------------------------------------------------------------------# +# Arithmetic operations, etc. # +#----------------------------------------------------------------------------# + +def mpf_rand(prec): + """Return a raw mpf chosen randomly from [0, 1), with prec bits + in the mantissa.""" + global getrandbits + if not getrandbits: + import random + getrandbits = random.getrandbits + return from_man_exp(getrandbits(prec), -prec, prec, round_floor) + +def mpf_eq(s, t): + """Test equality of two raw mpfs. This is simply tuple comparison + unless either number is nan, in which case the result is False.""" + if not s[1] or not t[1]: + if s == fnan or t == fnan: + return False + return s == t + +def mpf_hash(s): + # Duplicate the new hash algorithm introduces in Python 3.2. 
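+    # (CPython hashes a number as man * 2**exp reduced modulo the Mersenne
+    # prime sys.hash_info.modulus, 2**61 - 1 on 64-bit builds. Reducing the
+    # exponent mod HASH_BITS below is valid because 2**HASH_BITS == 1
+    # modulo that prime; matching this scheme keeps hash(mpf(x)) equal to
+    # hash(x) for floats and ints.)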
+ if sys.version_info >= (3, 2): + ssign, sman, sexp, sbc = s + + # Handle special numbers + if not sman: + if s == fnan: return sys.hash_info.nan + if s == finf: return sys.hash_info.inf + if s == fninf: return -sys.hash_info.inf + h = sman % HASH_MODULUS + if sexp >= 0: + sexp = sexp % HASH_BITS + else: + sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS) + h = (h << sexp) % HASH_MODULUS + if ssign: h = -h + if h == -1: h = -2 + return int(h) + else: + try: + # Try to be compatible with hash values for floats and ints + return hash(to_float(s, strict=1)) + except OverflowError: + # We must unfortunately sacrifice compatibility with ints here. + # We could do hash(man << exp) when the exponent is positive, but + # this would cause unreasonable inefficiency for large numbers. + return hash(s) + +def mpf_cmp(s, t): + """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t, + and 1 if s > t. (Same convention as Python's cmp() function.)""" + + # In principle, a comparison amounts to determining the sign of s-t. + # A full subtraction is relatively slow, however, so we first try to + # look at the components. + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + + # Handle zeros and special numbers + if not sman or not tman: + if s == fzero: return -mpf_sign(t) + if t == fzero: return mpf_sign(s) + if s == t: return 0 + # Follow same convention as Python's cmp for float nan + if t == fnan: return 1 + if s == finf: return 1 + if t == fninf: return 1 + return -1 + # Different sides of zero + if ssign != tsign: + if not ssign: return 1 + return -1 + # This reduces to direct integer comparison + if sexp == texp: + if sman == tman: + return 0 + if sman > tman: + if ssign: return -1 + else: return 1 + else: + if ssign: return 1 + else: return -1 + # Check position of the highest set bit in each number. If + # different, there is certainly an inequality. + a = sbc + sexp + b = tbc + texp + if ssign: + if a < b: return 1 + if a > b: return -1 + else: + if a < b: return -1 + if a > b: return 1 + + # Both numbers have the same highest bit. Subtract to find + # how the lower bits compare. + delta = mpf_sub(s, t, 5, round_floor) + if delta[0]: + return -1 + return 1 + +def mpf_lt(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) < 0 + +def mpf_le(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) <= 0 + +def mpf_gt(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) > 0 + +def mpf_ge(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) >= 0 + +def mpf_min_max(seq): + min = max = seq[0] + for x in seq[1:]: + if mpf_lt(x, min): min = x + if mpf_gt(x, max): max = x + return min, max + +def mpf_pos(s, prec=0, rnd=round_fast): + """Calculate 0+s for a raw mpf (i.e., just round s to the specified + precision).""" + if prec: + sign, man, exp, bc = s + if (not man) and exp: + return s + return normalize1(sign, man, exp, bc, prec, rnd) + return s + +def mpf_neg(s, prec=None, rnd=round_fast): + """Negate a raw mpf (return -s), rounding the result to the + specified precision. The prec argument can be omitted to do the + operation exactly.""" + sign, man, exp, bc = s + if not man: + if exp: + if s == finf: return fninf + if s == fninf: return finf + return s + if not prec: + return (1-sign, man, exp, bc) + return normalize1(1-sign, man, exp, bc, prec, rnd) + +def mpf_abs(s, prec=None, rnd=round_fast): + """Return abs(s) of the raw mpf s, rounded to the specified + precision. 
The prec argument can be omitted to generate an + exact result.""" + sign, man, exp, bc = s + if (not man) and exp: + if s == fninf: + return finf + return s + if not prec: + if sign: + return (0, man, exp, bc) + return s + return normalize1(0, man, exp, bc, prec, rnd) + +def mpf_sign(s): + """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on + whether s is negative, zero, or positive. (Nan is taken to give 0.)""" + sign, man, exp, bc = s + if not man: + if s == finf: return 1 + if s == fninf: return -1 + return 0 + return (-1) ** sign + +def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0): + """ + Add the two raw mpf values s and t. + + With prec=0, no rounding is performed. Note that this can + produce a very large mantissa (potentially too large to fit + in memory) if exponents are far apart. + """ + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + tsign ^= _sub + # Standard case: two nonzero, regular numbers + if sman and tman: + offset = sexp - texp + if offset: + if offset > 0: + # Outside precision range; only need to perturb + if offset > 100 and prec: + delta = sbc + sexp - tbc - texp + if delta > prec + 4: + offset = prec + 4 + sman <<= offset + if tsign == ssign: sman += 1 + else: sman -= 1 + return normalize1(ssign, sman, sexp-offset, + bitcount(sman), prec, rnd) + # Add + if ssign == tsign: + man = tman + (sman << offset) + # Subtract + else: + if ssign: man = tman - (sman << offset) + else: man = (sman << offset) - tman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize1(ssign, man, texp, bc, prec or bc, rnd) + elif offset < 0: + # Outside precision range; only need to perturb + if offset < -100 and prec: + delta = tbc + texp - sbc - sexp + if delta > prec + 4: + offset = prec + 4 + tman <<= offset + if ssign == tsign: tman += 1 + else: tman -= 1 + return normalize1(tsign, tman, texp-offset, + bitcount(tman), prec, rnd) + # Add + if ssign == tsign: + man = sman + (tman << -offset) + # Subtract + else: + if tsign: man = sman - (tman << -offset) + else: man = (tman << -offset) - sman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize1(ssign, man, sexp, bc, prec or bc, rnd) + # Equal exponents; no shifting necessary + if ssign == tsign: + man = tman + sman + else: + if ssign: man = tman - sman + else: man = sman - tman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize(ssign, man, texp, bc, prec or bc, rnd) + # Handle zeros and special numbers + if _sub: + t = mpf_neg(t) + if not sman: + if sexp: + if s == t or tman or not texp: + return s + return fnan + if tman: + return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd) + return t + if texp: + return t + if sman: + return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd) + return s + +def mpf_sub(s, t, prec=0, rnd=round_fast): + """Return the difference of two raw mpfs, s-t. This function is + simply a wrapper of mpf_add that changes the sign of t.""" + return mpf_add(s, t, prec, rnd, 1) + +def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False): + """ + Sum a list of mpf values efficiently and accurately + (typically no temporary roundoff occurs). If prec=0, + the final result will not be rounded either. + + There may be roundoff error or cancellation if extremely + large exponent differences occur. + + With absolute=True, sums the absolute values. 
+ """ + man = 0 + exp = 0 + max_extra_prec = prec*2 or 1000000 # XXX + special = None + for x in xs: + xsign, xman, xexp, xbc = x + if xman: + if xsign and not absolute: + xman = -xman + delta = xexp - exp + if xexp >= exp: + # x much larger than existing sum? + # first: quick test + if (delta > max_extra_prec) and \ + ((not man) or delta-bitcount(abs(man)) > max_extra_prec): + man = xman + exp = xexp + else: + man += (xman << delta) + else: + delta = -delta + # x much smaller than existing sum? + if delta-xbc > max_extra_prec: + if not man: + man, exp = xman, xexp + else: + man = (man << delta) + xman + exp = xexp + elif xexp: + if absolute: + x = mpf_abs(x) + special = mpf_add(special or fzero, x, 1) + # Will be inf or nan + if special: + return special + return from_man_exp(man, exp, prec, rnd) + +def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast): + """Multiply two raw mpfs""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + sign = ssign ^ tsign + man = sman*tman + if man: + bc = bitcount(man) + if prec: + return normalize1(sign, man, sexp+texp, bc, prec, rnd) + else: + return (sign, man, sexp+texp, bc) + s_special = (not sman) and sexp + t_special = (not tman) and texp + if not s_special and not t_special: + return fzero + if fnan in (s, t): return fnan + if (not tman) and texp: s, t = t, s + if t == fzero: return fnan + return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)] + +def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast): + """Multiply by a Python integer.""" + sign, man, exp, bc = s + if not man: + return mpf_mul(s, from_int(n), prec, rnd) + if not n: + return fzero + if n < 0: + sign ^= 1 + n = -n + man *= n + return normalize(sign, man, exp, bitcount(man), prec, rnd) + +def python_mpf_mul(s, t, prec=0, rnd=round_fast): + """Multiply two raw mpfs""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + sign = ssign ^ tsign + man = sman*tman + if man: + bc = sbc + tbc - 1 + bc += int(man>>bc) + if prec: + return normalize1(sign, man, sexp+texp, bc, prec, rnd) + else: + return (sign, man, sexp+texp, bc) + s_special = (not sman) and sexp + t_special = (not tman) and texp + if not s_special and not t_special: + return fzero + if fnan in (s, t): return fnan + if (not tman) and texp: s, t = t, s + if t == fzero: return fnan + return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)] + +def python_mpf_mul_int(s, n, prec, rnd=round_fast): + """Multiply by a Python integer.""" + sign, man, exp, bc = s + if not man: + return mpf_mul(s, from_int(n), prec, rnd) + if not n: + return fzero + if n < 0: + sign ^= 1 + n = -n + man *= n + # Generally n will be small + if n < 1024: + bc += bctable[int(n)] - 1 + else: + bc += bitcount(n) - 1 + bc += int(man>>bc) + return normalize(sign, man, exp, bc, prec, rnd) + + +if BACKEND == 'gmpy': + mpf_mul = gmpy_mpf_mul + mpf_mul_int = gmpy_mpf_mul_int +else: + mpf_mul = python_mpf_mul + mpf_mul_int = python_mpf_mul_int + +def mpf_shift(s, n): + """Quickly multiply the raw mpf s by 2**n without rounding.""" + sign, man, exp, bc = s + if not man: + return s + return sign, man, exp+n, bc + +def mpf_frexp(x): + """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero""" + sign, man, exp, bc = x + if not man: + if x == fzero: + return (fzero, 0) + else: + raise ValueError + return mpf_shift(x, -bc-exp), bc+exp + +def mpf_div(s, t, prec, rnd=round_fast): + """Floating-point division""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + if not sman or not tman: + if s == fzero: + if t == fzero: raise ZeroDivisionError + if t == fnan: 
return fnan
+            return fzero
+        if t == fzero:
+            raise ZeroDivisionError
+        s_special = (not sman) and sexp
+        t_special = (not tman) and texp
+        if s_special and t_special:
+            return fnan
+        if s == fnan or t == fnan:
+            return fnan
+        if not t_special:
+            if t == fzero:
+                return fnan
+            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
+        return fzero
+    sign = ssign ^ tsign
+    if tman == 1:
+        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
+    # Same strategy as for addition: if there is a remainder, perturb
+    # the result a few bits outside the precision range before rounding
+    extra = prec - sbc + tbc + 5
+    if extra < 5:
+        extra = 5
+    quot, rem = divmod(sman<<extra, tman)
+    if rem:
+        quot = (quot<<1) + 1
+        extra += 1
+        return normalize1(sign, quot, sexp-texp-extra,
+            bitcount(quot), prec, rnd)
+    return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
+
+def mpf_rdiv_int(n, t, prec, rnd=round_fast):
+    """Floating-point division n/t where n is a Python integer"""
+    _, tman, texp, tbc = t
+    if not n or not tman:
+        return mpf_div(from_int(n), t, prec, rnd)
+    sign = 0
+    if n < 0:
+        sign = 1
+        n = -n
+    extra = prec + tbc + 5
+    quot, rem = divmod(n<<extra, tman)
+    if rem:
+        quot = (quot<<1) + 1
+        extra += 1
+        return normalize1(sign, quot, -texp-extra,
+            bitcount(quot), prec, rnd)
+    return normalize1(sign, quot, -texp-extra, bitcount(quot), prec, rnd)
+
+def mpf_mod(s, t, prec, rnd=round_fast):
+    ssign, sman, sexp, sbc = s
+    tsign, tman, texp, tbc = t
+    if ((not sman) and sexp) or ((not tman) and texp):
+        return fnan
+    # Important special case: do nothing if t is larger
+    if ssign == tsign and texp > sexp+sbc:
+        return s
+    # Another important special case: this allows us to do e.g. x % 1.0
+    # to find the fractional part of x, and it will work when x is huge.
+    if tman == 1 and sexp > texp+tbc:
+        return fzero
+    base = min(sexp, texp)
+    sman = (-1)**ssign * sman
+    tman = (-1)**tsign * tman
+    man = (sman << (sexp-base)) % (tman << (texp-base))
+    if man >= 0:
+        sign = 0
+    else:
+        man = -man
+        sign = 1
+    return normalize(sign, man, base, bitcount(man), prec, rnd)
+
+reciprocal_rnd = {
+  round_down : round_up,
+  round_up : round_down,
+  round_floor : round_ceiling,
+  round_ceiling : round_floor,
+  round_nearest : round_nearest
+}
+
+negative_rnd = {
+  round_down : round_down,
+  round_up : round_up,
+  round_floor : round_ceiling,
+  round_ceiling : round_floor,
+  round_nearest : round_nearest
+}
+
+def mpf_pow_int(s, n, prec, rnd=round_fast):
+    """Compute s**n, where s is a raw mpf and n is a Python integer."""
+    sign, man, exp, bc = s
+
+    if (not man) and exp:
+        if s == finf:
+            if n > 0: return s
+            if n == 0: return fnan
+            return fzero
+        if s == fninf:
+            if n > 0: return [finf, fninf][n & 1]
+            if n == 0: return fnan
+            return fzero
+        return fnan
+
+    n = int(n)
+    if n == 0: return fone
+    if n == 1: return mpf_pos(s, prec, rnd)
+    if n == 2:
+        _, man, exp, bc = s
+        if not man:
+            return fzero
+        man = man*man
+        if man == 1:
+            return (0, MPZ_ONE, exp+exp, 1)
+        bc = bc + bc - 2
+        bc += bctable[int(man>>bc)]
+        return normalize1(0, man, exp+exp, bc, prec, rnd)
+    if n == -1: return mpf_div(fone, s, prec, rnd)
+    if n < 0:
+        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
+        return mpf_div(fone, inverse, prec, rnd)
+
+    result_sign = sign & n
+
+    # Use exact integer power when the exact mantissa is small
+    if man == 1:
+        return (result_sign, MPZ_ONE, exp*n, 1)
+    if bc*n < 1000:
+        man **= n
+        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
+
+    # Use directed rounding all the way through to maintain rigorous
+    # bounds for interval arithmetic
+    rounds_down = (rnd == round_nearest) or \
+        shifts_down[rnd][result_sign]
+
+    # Now we perform binary exponentiation. Need to estimate precision
+    # to avoid rounding errors from temporary operations. Roughly log_2(n)
+    # operations are performed.
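+    # Each of the roughly log_2(n) squaring steps below can lose about
+    # one ulp to truncation, so prec + 4*bitcount(n) + 4 working bits
+    # leave a comfortable margin before the final rounding.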
+ workprec = prec + 4*bitcount(n) + 4 + _, pm, pe, pbc = fone + while 1: + if n & 1: + pm = pm*man + pe = pe+exp + pbc += bc - 2 + pbc = pbc + bctable[int(pm >> pbc)] + if pbc > workprec: + if rounds_down: + pm = pm >> (pbc-workprec) + else: + pm = -((-pm) >> (pbc-workprec)) + pe += pbc - workprec + pbc = workprec + n -= 1 + if not n: + break + man = man*man + exp = exp+exp + bc = bc + bc - 2 + bc = bc + bctable[int(man >> bc)] + if bc > workprec: + if rounds_down: + man = man >> (bc-workprec) + else: + man = -((-man) >> (bc-workprec)) + exp += bc - workprec + bc = workprec + n = n // 2 + + return normalize(result_sign, pm, pe, pbc, prec, rnd) + + +def mpf_perturb(x, eps_sign, prec, rnd): + """ + For nonzero x, calculate x + eps with directed rounding, where + eps < prec relatively and eps has the given sign (0 for + positive, 1 for negative). + + With rounding to nearest, this is taken to simply normalize + x to the given precision. + """ + if rnd == round_nearest: + return mpf_pos(x, prec, rnd) + sign, man, exp, bc = x + eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1) + if sign: + away = (rnd in (round_down, round_ceiling)) ^ eps_sign + else: + away = (rnd in (round_up, round_ceiling)) ^ eps_sign + if away: + return mpf_add(x, eps, prec, rnd) + else: + return mpf_pos(x, prec, rnd) + + +#----------------------------------------------------------------------------# +# Radix conversion # +#----------------------------------------------------------------------------# + +def to_digits_exp(s, dps): + """Helper function for representing the floating-point number s as + a decimal with dps digits. Returns (sign, string, exponent) where + sign is '' or '-', string is the digit string, and exponent is + the decimal exponent as an int. + + If inexact, the decimal representation is rounded toward zero.""" + + # Extract sign first so it doesn't mess up the string digit count + if s[0]: + sign = '-' + s = mpf_neg(s) + else: + sign = '' + _sign, man, exp, bc = s + + if not man: + return '', '0', 0 + + bitprec = int(dps * math.log(10,2)) + 10 + + # Cut down to size + # TODO: account for precision when doing this + exp_from_1 = exp + bc + if abs(exp_from_1) > 3500: + from .libelefun import mpf_ln2, mpf_ln10 + # Set b = int(exp * log(2)/log(10)) + # If exp is huge, we must use high-precision arithmetic to + # find the nearest power of ten + expprec = bitcount(abs(exp)) + 5 + tmp = from_int(exp) + tmp = mpf_mul(tmp, mpf_ln2(expprec)) + tmp = mpf_div(tmp, mpf_ln10(expprec), expprec) + b = to_int(tmp) + s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec) + _sign, man, exp, bc = s + exponent = b + else: + exponent = 0 + + # First, calculate mantissa digits by converting to a binary + # fixed-point number and then converting that number to + # a decimal fixed-point number. + fixprec = max(bitprec - exp - bc, 0) + fixdps = int(fixprec / math.log(10,2) + 0.5) + sf = to_fixed(s, fixprec) + sd = bin_to_radix(sf, fixprec, 10, fixdps) + digits = numeral(sd, base=10, size=dps) + + exponent += len(digits) - fixdps - 1 + return sign, digits, exponent + +def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None, + show_zero_exponent=False): + """ + Convert a raw mpf to a decimal floating-point literal with at + most `dps` decimal digits in the mantissa (not counting extra zeros + that may be inserted for visual purposes). + + The number will be printed in fixed-point format if the position + of the leading digit is strictly between min_fixed + (default = min(-dps/3,-5)) and max_fixed (default = dps). 
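+
+    For example, with dps = 5 the defaults are min_fixed = -5 and
+    max_fixed = 5, so magnitudes from roughly 1e-4 up to (but not
+    including) 1e5 are printed in fixed-point form, and anything
+    outside that range in exponential form.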
+ + To force fixed-point format always, set min_fixed = -inf, + max_fixed = +inf. To force floating-point format, set + min_fixed >= max_fixed. + + The literal is formatted so that it can be parsed back to a number + by to_str, float() or Decimal(). + """ + + # Special numbers + if not s[1]: + if s == fzero: + if dps: t = '0.0' + else: t = '.0' + if show_zero_exponent: + t += 'e+0' + return t + if s == finf: return '+inf' + if s == fninf: return '-inf' + if s == fnan: return 'nan' + raise ValueError + + if min_fixed is None: min_fixed = min(-(dps//3), -5) + if max_fixed is None: max_fixed = dps + + # to_digits_exp rounds to floor. + # This sometimes kills some instances of "...00001" + sign, digits, exponent = to_digits_exp(s, dps+3) + + # No digits: show only .0; round exponent to nearest + if not dps: + if digits[0] in '56789': + exponent += 1 + digits = ".0" + + else: + # Rounding up kills some instances of "...99999" + if len(digits) > dps and digits[dps] in '56789': + digits = digits[:dps] + i = dps - 1 + while i >= 0 and digits[i] == '9': + i -= 1 + if i >= 0: + digits = digits[:i] + str(int(digits[i]) + 1) + '0' * (dps - i - 1) + else: + digits = '1' + '0' * (dps - 1) + exponent += 1 + else: + digits = digits[:dps] + + # Prettify numbers close to unit magnitude + if min_fixed < exponent < max_fixed: + if exponent < 0: + digits = ("0"*int(-exponent)) + digits + split = 1 + else: + split = exponent + 1 + if split > dps: + digits += "0"*(split-dps) + exponent = 0 + else: + split = 1 + + digits = (digits[:split] + "." + digits[split:]) + + if strip_zeros: + # Clean up trailing zeros + digits = digits.rstrip('0') + if digits[-1] == ".": + digits += "0" + + if exponent == 0 and dps and not show_zero_exponent: return sign + digits + if exponent >= 0: return sign + digits + "e+" + str(exponent) + if exponent < 0: return sign + digits + "e" + str(exponent) + +def str_to_man_exp(x, base=10): + """Helper function for from_str.""" + x = x.lower().rstrip('l') + # Verify that the input is a valid float literal + float(x) + # Split into mantissa, exponent + parts = x.split('e') + if len(parts) == 1: + exp = 0 + else: # == 2 + x = parts[0] + exp = int(parts[1]) + # Look for radix point in mantissa + parts = x.split('.') + if len(parts) == 2: + a, b = parts[0], parts[1].rstrip('0') + exp -= len(b) + x = a + b + x = MPZ(int(x, base)) + return x, exp + +special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan} + +def from_str(x, prec, rnd=round_fast): + """Create a raw mpf from a decimal literal, rounding in the + specified direction if the input number cannot be represented + exactly as a binary floating-point number with the given number of + bits. The literal syntax accepted is the same as for Python + floats. + + TODO: the rounding does not work properly for large exponents. + """ + x = x.lower().strip() + if x in special_str: + return special_str[x] + + if '/' in x: + p, q = x.split('/') + p, q = p.rstrip('l'), q.rstrip('l') + return from_rational(int(p), int(q), prec, rnd) + + man, exp = str_to_man_exp(x, base=10) + + # XXX: appropriate cutoffs & track direction + # note no factors of 5 + if abs(exp) > 400: + s = from_int(man, prec+10) + s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd) + else: + if exp >= 0: + s = from_int(man * 10**exp, prec, rnd) + else: + s = from_rational(man, 10**-exp, prec, rnd) + return s + +# Binary string conversion. 
These are currently mainly used for debugging +# and could use some improvement in the future + +def from_bstr(x): + man, exp = str_to_man_exp(x, base=2) + man = MPZ(man) + sign = 0 + if man < 0: + man = -man + sign = 1 + bc = bitcount(man) + return normalize(sign, man, exp, bc, bc, round_floor) + +def to_bstr(x): + sign, man, exp, bc = x + return ['','-'][sign] + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp) + + +#----------------------------------------------------------------------------# +# Square roots # +#----------------------------------------------------------------------------# + + +def mpf_sqrt(s, prec, rnd=round_fast): + """ + Compute the square root of a nonnegative mpf value. The + result is correctly rounded. + """ + sign, man, exp, bc = s + if sign: + raise ComplexResult("square root of a negative number") + if not man: + return s + if exp & 1: + exp -= 1 + man <<= 1 + bc += 1 + elif man == 1: + return normalize1(sign, man, exp//2, bc, prec, rnd) + shift = max(4, 2*prec-bc+4) + shift += shift & 1 + if rnd in 'fd': + man = isqrt(man<= 0: + a = mpf_pos(sa, prec, round_floor) + b = mpf_pos(sb, prec, round_ceiling) + # Upper point nonnegative? + elif sbs >= 0: + a = fzero + negsa = mpf_neg(sa) + if mpf_lt(negsa, sb): + b = mpf_pos(sb, prec, round_ceiling) + else: + b = mpf_pos(negsa, prec, round_ceiling) + # Both negative? + else: + a = mpf_neg(sb, prec, round_floor) + b = mpf_neg(sa, prec, round_ceiling) + return a, b + +# TODO: optimize +def mpi_mul_mpf(s, t, prec): + return mpi_mul(s, (t, t), prec) + +def mpi_div_mpf(s, t, prec): + return mpi_div(s, (t, t), prec) + +def mpi_mul(s, t, prec=0): + sa, sb = s + ta, tb = t + sas = mpf_sign(sa) + sbs = mpf_sign(sb) + tas = mpf_sign(ta) + tbs = mpf_sign(tb) + if sas == sbs == 0: + # Should maybe be undefined + if ta == fninf or tb == finf: + return fninf, finf + return fzero, fzero + if tas == tbs == 0: + # Should maybe be undefined + if sa == fninf or sb == finf: + return fninf, finf + return fzero, fzero + if sas >= 0: + # positive * positive + if tas >= 0: + a = mpf_mul(sa, ta, prec, round_floor) + b = mpf_mul(sb, tb, prec, round_ceiling) + if a == fnan: a = fzero + if b == fnan: b = finf + # positive * negative + elif tbs <= 0: + a = mpf_mul(sb, ta, prec, round_floor) + b = mpf_mul(sa, tb, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = fzero + # positive * both signs + else: + a = mpf_mul(sb, ta, prec, round_floor) + b = mpf_mul(sb, tb, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = finf + elif sbs <= 0: + # negative * positive + if tas >= 0: + a = mpf_mul(sa, tb, prec, round_floor) + b = mpf_mul(sb, ta, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = fzero + # negative * negative + elif tbs <= 0: + a = mpf_mul(sb, tb, prec, round_floor) + b = mpf_mul(sa, ta, prec, round_ceiling) + if a == fnan: a = fzero + if b == fnan: b = finf + # negative * both signs + else: + a = mpf_mul(sa, tb, prec, round_floor) + b = mpf_mul(sa, ta, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = finf + else: + # General case: perform all cross-multiplications and compare + # Since the multiplications can be done exactly, we need only + # do 4 (instead of 8: two for each rounding mode) + cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)] + if fnan in cases: + a, b = (fninf, finf) + else: + a, b = mpf_min_max(cases) + a = mpf_pos(a, prec, round_floor) + b = mpf_pos(b, prec, round_ceiling) + return a, b + +def mpi_square(s, prec=0): + sa, sb 
= s
+    if mpf_ge(sa, fzero):
+        a = mpf_mul(sa, sa, prec, round_floor)
+        b = mpf_mul(sb, sb, prec, round_ceiling)
+    elif mpf_le(sb, fzero):
+        a = mpf_mul(sb, sb, prec, round_floor)
+        b = mpf_mul(sa, sa, prec, round_ceiling)
+    else:
+        sa = mpf_neg(sa)
+        sa, sb = mpf_min_max([sa, sb])
+        a = fzero
+        b = mpf_mul(sb, sb, prec, round_ceiling)
+    return a, b
+
+def mpi_div(s, t, prec):
+    sa, sb = s
+    ta, tb = t
+    sas = mpf_sign(sa)
+    sbs = mpf_sign(sb)
+    tas = mpf_sign(ta)
+    tbs = mpf_sign(tb)
+    # 0 / X
+    if sas == sbs == 0:
+        # 0 / <interval containing 0>
+        if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0):
+            return fninf, finf
+        return fzero, fzero
+    # Denominator contains both negative and positive numbers;
+    # this should properly be a multi-interval, but the closest
+    # match is the entire (extended) real line
+    if tas < 0 and tbs > 0:
+        return fninf, finf
+    # Assume denominator to be nonnegative
+    if tas < 0:
+        return mpi_div(mpi_neg(s), mpi_neg(t), prec)
+    # Division by zero
+    # XXX: make sure all results make sense
+    if tas == 0:
+        # Numerator contains both signs?
+        if sas < 0 and sbs > 0:
+            return fninf, finf
+        if tas == tbs:
+            return fninf, finf
+        # Numerator positive?
+        if sas >= 0:
+            a = mpf_div(sa, tb, prec, round_floor)
+            b = finf
+        if sbs <= 0:
+            a = fninf
+            b = mpf_div(sb, tb, prec, round_ceiling)
+    # Division with positive denominator
+    # We still have to handle nans resulting from inf/0 or inf/inf
+    else:
+        # Nonnegative numerator
+        if sas >= 0:
+            a = mpf_div(sa, tb, prec, round_floor)
+            b = mpf_div(sb, ta, prec, round_ceiling)
+            if a == fnan: a = fzero
+            if b == fnan: b = finf
+        # Nonpositive numerator
+        elif sbs <= 0:
+            a = mpf_div(sa, ta, prec, round_floor)
+            b = mpf_div(sb, tb, prec, round_ceiling)
+            if a == fnan: a = fninf
+            if b == fnan: b = fzero
+        # Numerator contains both signs?
+        else:
+            a = mpf_div(sa, ta, prec, round_floor)
+            b = mpf_div(sb, ta, prec, round_ceiling)
+            if a == fnan: a = fninf
+            if b == fnan: b = finf
+    return a, b
+
+def mpi_pi(prec):
+    a = mpf_pi(prec, round_floor)
+    b = mpf_pi(prec, round_ceiling)
+    return a, b
+
+def mpi_exp(s, prec):
+    sa, sb = s
+    # exp is monotonic
+    a = mpf_exp(sa, prec, round_floor)
+    b = mpf_exp(sb, prec, round_ceiling)
+    return a, b
+
+def mpi_log(s, prec):
+    sa, sb = s
+    # log is monotonic
+    a = mpf_log(sa, prec, round_floor)
+    b = mpf_log(sb, prec, round_ceiling)
+    return a, b
+
+def mpi_sqrt(s, prec):
+    sa, sb = s
+    # sqrt is monotonic
+    a = mpf_sqrt(sa, prec, round_floor)
+    b = mpf_sqrt(sb, prec, round_ceiling)
+    return a, b
+
+def mpi_atan(s, prec):
+    sa, sb = s
+    a = mpf_atan(sa, prec, round_floor)
+    b = mpf_atan(sb, prec, round_ceiling)
+    return a, b
+
+def mpi_pow_int(s, n, prec):
+    sa, sb = s
+    if n < 0:
+        return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec)
+    if n == 0:
+        return (fone, fone)
+    if n == 1:
+        return s
+    if n == 2:
+        return mpi_square(s, prec)
+    # Odd -- signs are preserved
+    if n & 1:
+        a = mpf_pow_int(sa, n, prec, round_floor)
+        b = mpf_pow_int(sb, n, prec, round_ceiling)
+    # Even -- important to ensure positivity
+    else:
+        sas = mpf_sign(sa)
+        sbs = mpf_sign(sb)
+        # Nonnegative?
+        if sas >= 0:
+            a = mpf_pow_int(sa, n, prec, round_floor)
+            b = mpf_pow_int(sb, n, prec, round_ceiling)
+        # Nonpositive?
+        elif sbs <= 0:
+            a = mpf_pow_int(sb, n, prec, round_floor)
+            b = mpf_pow_int(sa, n, prec, round_ceiling)
+        # Mixed signs?
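+        # sa < 0 < sb: an even power attains its minimum 0 at zero and
+        # its maximum at the endpoint of larger magnitude, max(-sa, sb)**n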
+        else:
+            a = fzero
+            # max(-a,b)**n
+            sa = mpf_neg(sa)
+            if mpf_ge(sa, sb):
+                b = mpf_pow_int(sa, n, prec, round_ceiling)
+            else:
+                b = mpf_pow_int(sb, n, prec, round_ceiling)
+    return a, b
+
+def mpi_pow(s, t, prec):
+    ta, tb = t
+    if ta == tb and ta not in (finf, fninf):
+        if ta == from_int(to_int(ta)):
+            return mpi_pow_int(s, to_int(ta), prec)
+        if ta == fhalf:
+            return mpi_sqrt(s, prec)
+    u = mpi_log(s, prec + 20)
+    v = mpi_mul(u, t, prec + 20)
+    return mpi_exp(v, prec)
+
+def MIN(x, y):
+    if mpf_le(x, y):
+        return x
+    return y
+
+def MAX(x, y):
+    if mpf_ge(x, y):
+        return x
+    return y
+
+def cos_sin_quadrant(x, wp):
+    sign, man, exp, bc = x
+    if x == fzero:
+        return fone, fzero, 0
+    # TODO: combine evaluation code to avoid duplicate modulo
+    c, s = mpf_cos_sin(x, wp)
+    t, n, wp_ = mod_pi2(man, exp, exp+bc, 15)
+    if sign:
+        n = -1-n
+    return c, s, n
+
+def mpi_cos_sin(x, prec):
+    a, b = x
+    if a == b == fzero:
+        return (fone, fone), (fzero, fzero)
+    # Guaranteed to contain both -1 and 1
+    if (finf in x) or (fninf in x):
+        return (fnone, fone), (fnone, fone)
+    wp = prec + 20
+    ca, sa, na = cos_sin_quadrant(a, wp)
+    cb, sb, nb = cos_sin_quadrant(b, wp)
+    ca, cb = mpf_min_max([ca, cb])
+    sa, sb = mpf_min_max([sa, sb])
+    # Both functions are monotonic within one quadrant
+    if na == nb:
+        pass
+    # Guaranteed to contain both -1 and 1
+    elif nb - na >= 4:
+        return (fnone, fone), (fnone, fone)
+    else:
+        # cos has maximum between a and b
+        if na//4 != nb//4:
+            cb = fone
+        # cos has minimum
+        if (na-2)//4 != (nb-2)//4:
+            ca = fnone
+        # sin has maximum
+        if (na-1)//4 != (nb-1)//4:
+            sb = fone
+        # sin has minimum
+        if (na-3)//4 != (nb-3)//4:
+            sa = fnone
+    # Perturb to force interval rounding
+    more = from_man_exp((MPZ_ONE<<wp) + (MPZ_ONE<<10), -wp)
+    less = from_man_exp((MPZ_ONE<<wp) - (MPZ_ONE<<10), -wp)
+    def finalize(v, rounding):
+        if bool(v[0]) == (rounding == round_floor):
+            p = more
+        else:
+            p = less
+        v = mpf_mul(v, p, prec, rounding)
+        sign, man, exp, bc = v
+        if exp+bc >= 1:
+            if sign:
+                return fnone
+            return fone
+        return v
+    ca = finalize(ca, round_floor)
+    cb = finalize(cb, round_ceiling)
+    sa = finalize(sa, round_floor)
+    sb = finalize(sb, round_ceiling)
+    return (ca,cb), (sa,sb)
+
+def mpi_cos(x, prec):
+    return mpi_cos_sin(x, prec)[0]
+
+def mpi_sin(x, prec):
+    return mpi_cos_sin(x, prec)[1]
+
+def mpi_tan(x, prec):
+    cos, sin = mpi_cos_sin(x, prec+20)
+    return mpi_div(sin, cos, prec)
+
+def mpi_cot(x, prec):
+    cos, sin = mpi_cos_sin(x, prec+20)
+    return mpi_div(cos, sin, prec)
+
+def mpi_from_str_a_b(x, y, percent, prec):
+    wp = prec + 20
+    xa = from_str(x, wp, round_floor)
+    xb = from_str(x, wp, round_ceiling)
+    #ya = from_str(y, wp, round_floor)
+    y = from_str(y, wp, round_ceiling)
+    assert mpf_ge(y, fzero)
+    if percent:
+        y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling)
+        y = mpf_div(y, from_int(100), wp, round_ceiling)
+    a = mpf_sub(xa, y, prec, round_floor)
+    b = mpf_add(xb, y, prec, round_ceiling)
+    return a, b
+
+def mpi_from_str(s, prec):
+    """
+    Parse an interval number given as a string.
+
+    Allowed forms are
+
+    "-1.23e-27"
+        Any single decimal floating-point literal.
+    "a +- b" or "a (b)"
+        a is the midpoint of the interval and b is the half-width
+    "a +- b%" or "a (b%)"
+        a is the midpoint of the interval and the half-width
+        is b percent of a (`a \times b / 100`).
+    "[a, b]"
+        The interval indicated directly.
+    "x[y,z]e"
+        x are shared digits, y and z are unequal digits, e is the exponent.
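+
+    For example, "1.0 +- 0.001", "1.0 (0.1%)" and "[0.999, 1.001]" all
+    denote intervals containing [0.999, 1.001], and "1.00[0,2]" denotes
+    [1.000, 1.002].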
+ + """ + e = ValueError("Improperly formed interval number '%s'" % s) + s = s.replace(" ", "") + wp = prec + 20 + if "+-" in s: + x, y = s.split("+-") + return mpi_from_str_a_b(x, y, False, prec) + # case 2 + elif "(" in s: + # Don't confuse with a complex number (x,y) + if s[0] == "(" or ")" not in s: + raise e + s = s.replace(")", "") + percent = False + if "%" in s: + if s[-1] != "%": + raise e + percent = True + s = s.replace("%", "") + x, y = s.split("(") + return mpi_from_str_a_b(x, y, percent, prec) + elif "," in s: + if ('[' not in s) or (']' not in s): + raise e + if s[0] == '[': + # case 3 + s = s.replace("[", "") + s = s.replace("]", "") + a, b = s.split(",") + a = from_str(a, prec, round_floor) + b = from_str(b, prec, round_ceiling) + return a, b + else: + # case 4 + x, y = s.split('[') + y, z = y.split(',') + if 'e' in s: + z, e = z.split(']') + else: + z, e = z.rstrip(']'), '' + a = from_str(x+y+e, prec, round_floor) + b = from_str(x+z+e, prec, round_ceiling) + return a, b + else: + a = from_str(s, prec, round_floor) + b = from_str(s, prec, round_ceiling) + return a, b + +def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs): + """ + Convert a mpi interval to a string. + + **Arguments** + + *dps* + decimal places to use for printing + *use_spaces* + use spaces for more readable output, defaults to true + *brackets* + pair of strings (or two-character string) giving left and right brackets + *mode* + mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff' + *error_dps* + limit the error to *error_dps* digits (mode 'plusminus and 'percent') + + Additional keyword arguments are forwarded to the mpf-to-string conversion + for the components of the output. + + **Examples** + + >>> from mpmath import mpi, mp + >>> mp.dps = 30 + >>> x = mpi(1, 2)._mpi_ + >>> mpi_to_str(x, 2, mode='plusminus') + '1.5 +- 0.5' + >>> mpi_to_str(x, 2, mode='percent') + '1.5 (33.33%)' + >>> mpi_to_str(x, 2, mode='brackets') + '[1.0, 2.0]' + >>> mpi_to_str(x, 2, mode='brackets' , brackets=('<', '>')) + '<1.0, 2.0>' + >>> x = mpi('5.2582327113062393041', '5.2582327113062749951')._mpi_ + >>> mpi_to_str(x, 15, mode='diff') + '5.2582327113062[4, 7]' + >>> mpi_to_str(mpi(0)._mpi_, 2, mode='percent') + '0.0 (0.0%)' + + """ + prec = dps_to_prec(dps) + wp = prec + 20 + a, b = x + mid = mpi_mid(x, prec) + delta = mpi_delta(x, prec) + a_str = to_str(a, dps, **kwargs) + b_str = to_str(b, dps, **kwargs) + mid_str = to_str(mid, dps, **kwargs) + sp = "" + if use_spaces: + sp = " " + br1, br2 = brackets + if mode == 'plusminus': + delta_str = to_str(mpf_shift(delta,-1), dps, **kwargs) + s = mid_str + sp + "+-" + sp + delta_str + elif mode == 'percent': + if mid == fzero: + p = fzero + else: + # p = 100 * delta(x) / (2*mid(x)) + p = mpf_mul(delta, from_int(100)) + p = mpf_div(p, mpf_mul(mid, from_int(2)), wp) + s = mid_str + sp + "(" + to_str(p, error_dps) + "%)" + elif mode == 'brackets': + s = br1 + a_str + "," + sp + b_str + br2 + elif mode == 'diff': + # use more digits if str(x.a) and str(x.b) are equal + if a_str == b_str: + a_str = to_str(a, dps+3, **kwargs) + b_str = to_str(b, dps+3, **kwargs) + # separate mantissa and exponent + a = a_str.split('e') + if len(a) == 1: + a.append('') + b = b_str.split('e') + if len(b) == 1: + b.append('') + if a[1] == b[1]: + if a[0] != b[0]: + for i in xrange(len(a[0]) + 1): + if a[0][i] != b[0][i]: + break + s = (a[0][:i] + br1 + a[0][i:] + ',' + sp + b[0][i:] + br2 + + 'e'*min(len(a[1]), 1) + a[1]) + else: # no difference 
+ s = a[0] + br1 + br2 + 'e'*min(len(a[1]), 1) + a[1] + else: + s = br1 + 'e'.join(a) + ',' + sp + 'e'.join(b) + br2 + else: + raise ValueError("'%s' is unknown mode for printing mpi" % mode) + return s + +def mpci_add(x, y, prec): + a, b = x + c, d = y + return mpi_add(a, c, prec), mpi_add(b, d, prec) + +def mpci_sub(x, y, prec): + a, b = x + c, d = y + return mpi_sub(a, c, prec), mpi_sub(b, d, prec) + +def mpci_neg(x, prec=0): + a, b = x + return mpi_neg(a, prec), mpi_neg(b, prec) + +def mpci_pos(x, prec): + a, b = x + return mpi_pos(a, prec), mpi_pos(b, prec) + +def mpci_mul(x, y, prec): + # TODO: optimize for real/imag cases + a, b = x + c, d = y + r1 = mpi_mul(a,c) + r2 = mpi_mul(b,d) + re = mpi_sub(r1,r2,prec) + i1 = mpi_mul(a,d) + i2 = mpi_mul(b,c) + im = mpi_add(i1,i2,prec) + return re, im + +def mpci_div(x, y, prec): + # TODO: optimize for real/imag cases + a, b = x + c, d = y + wp = prec+20 + m1 = mpi_square(c) + m2 = mpi_square(d) + m = mpi_add(m1,m2,wp) + re = mpi_add(mpi_mul(a,c), mpi_mul(b,d), wp) + im = mpi_sub(mpi_mul(b,c), mpi_mul(a,d), wp) + re = mpi_div(re, m, prec) + im = mpi_div(im, m, prec) + return re, im + +def mpci_exp(x, prec): + a, b = x + wp = prec+20 + r = mpi_exp(a, wp) + c, s = mpi_cos_sin(b, wp) + a = mpi_mul(r, c, prec) + b = mpi_mul(r, s, prec) + return a, b + +def mpi_shift(x, n): + a, b = x + return mpf_shift(a,n), mpf_shift(b,n) + +def mpi_cosh_sinh(x, prec): + # TODO: accuracy for small x + wp = prec+20 + e1 = mpi_exp(x, wp) + e2 = mpi_div(mpi_one, e1, wp) + c = mpi_add(e1, e2, prec) + s = mpi_sub(e1, e2, prec) + c = mpi_shift(c, -1) + s = mpi_shift(s, -1) + return c, s + +def mpci_cos(x, prec): + a, b = x + wp = prec+10 + c, s = mpi_cos_sin(a, wp) + ch, sh = mpi_cosh_sinh(b, wp) + re = mpi_mul(c, ch, prec) + im = mpi_mul(s, sh, prec) + return re, mpi_neg(im) + +def mpci_sin(x, prec): + a, b = x + wp = prec+10 + c, s = mpi_cos_sin(a, wp) + ch, sh = mpi_cosh_sinh(b, wp) + re = mpi_mul(s, ch, prec) + im = mpi_mul(c, sh, prec) + return re, im + +def mpci_abs(x, prec): + a, b = x + if a == mpi_zero: + return mpi_abs(b) + if b == mpi_zero: + return mpi_abs(a) + # Important: nonnegative + a = mpi_square(a) + b = mpi_square(b) + t = mpi_add(a, b, prec+20) + return mpi_sqrt(t, prec) + +def mpi_atan2(y, x, prec): + ya, yb = y + xa, xb = x + # Constrained to the real line + if ya == yb == fzero: + if mpf_ge(xa, fzero): + return mpi_zero + return mpi_pi(prec) + # Right half-plane + if mpf_ge(xa, fzero): + if mpf_ge(ya, fzero): + a = mpf_atan2(ya, xb, prec, round_floor) + else: + a = mpf_atan2(ya, xa, prec, round_floor) + if mpf_ge(yb, fzero): + b = mpf_atan2(yb, xa, prec, round_ceiling) + else: + b = mpf_atan2(yb, xb, prec, round_ceiling) + # Upper half-plane + elif mpf_ge(ya, fzero): + b = mpf_atan2(ya, xa, prec, round_ceiling) + if mpf_le(xb, fzero): + a = mpf_atan2(yb, xb, prec, round_floor) + else: + a = mpf_atan2(ya, xb, prec, round_floor) + # Lower half-plane + elif mpf_le(yb, fzero): + a = mpf_atan2(yb, xa, prec, round_floor) + if mpf_le(xb, fzero): + b = mpf_atan2(ya, xb, prec, round_ceiling) + else: + b = mpf_atan2(yb, xb, prec, round_ceiling) + # Covering the origin + else: + b = mpf_pi(prec, round_ceiling) + a = mpf_neg(b) + return a, b + +def mpci_arg(z, prec): + x, y = z + return mpi_atan2(y, x, prec) + +def mpci_log(z, prec): + x, y = z + re = mpi_log(mpci_abs(z, prec+20), prec) + im = mpci_arg(z, prec) + return re, im + +def mpci_pow(x, y, prec): + # TODO: recognize/speed up real cases, integer y + yre, yim = y + if yim == mpi_zero: + ya, yb = yre 
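+        # The exponent interval collapses to an exact real point: try the
+        # nonnegative-integer fast path first; otherwise fall back to the
+        # general exp(y*log(x)) evaluation below.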
+        if ya == yb:
+            sign, man, exp, bc = yb
+            if man and exp >= 0:
+                return mpci_pow_int(x, (-1)**sign * int(man<<exp), prec)
+            # x^0
+            if yb == fzero:
+                return mpci_pow_int(x, 0, prec)
+    wp = prec+20
+    return mpci_exp(mpci_mul(y, mpci_log(x, wp), wp), prec)
+
+def mpci_square(x, prec):
+    a, b = x
+    # (a+b*I)**2 = (a**2-b**2) + (2*a*b)*I
+    re = mpi_sub(mpi_square(a), mpi_square(b), prec)
+    im = mpi_mul(a, b, prec)
+    im = mpi_shift(im, 1)
+    return re, im
+
+def mpci_pow_int(x, n, prec):
+    if n < 0:
+        return mpci_div((mpi_one,mpi_zero), mpci_pow_int(x, -n, prec+20), prec)
+    if n == 0:
+        return mpi_one, mpi_zero
+    if n == 1:
+        return mpci_pos(x, prec)
+    if n == 2:
+        return mpci_square(x, prec)
+    wp = prec + 10
+    result = mpi_one, mpi_zero
+    while n:
+        if n & 1:
+            result = mpci_mul(result, x, wp)
+            n -= 1
+        x = mpci_square(x, wp)
+        n >>= 1
+    return mpci_pos(result, prec)
+
+gamma_min_a = from_float(1.46163214496)
+gamma_min_b = from_float(1.46163214497)
+gamma_min = (gamma_min_a, gamma_min_b)
+gamma_mono_imag_a = from_float(-1.1)
+gamma_mono_imag_b = from_float(1.1)
+
+def mpi_overlap(x, y):
+    a, b = x
+    c, d = y
+    if mpf_lt(d, a): return False
+    if mpf_gt(c, b): return False
+    return True
+
+# type = 0 -- gamma
+# type = 1 -- factorial
+# type = 2 -- 1/gamma
+# type = 3 -- log-gamma
+
+def mpi_gamma(z, prec, type=0):
+    a, b = z
+    wp = prec+20
+
+    if type == 1:
+        return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0)
+
+    # increasing
+    if mpf_gt(a, gamma_min_b):
+        if type == 0:
+            c = mpf_gamma(a, prec, round_floor)
+            d = mpf_gamma(b, prec, round_ceiling)
+        elif type == 2:
+            c = mpf_rgamma(b, prec, round_floor)
+            d = mpf_rgamma(a, prec, round_ceiling)
+        elif type == 3:
+            c = mpf_loggamma(a, prec, round_floor)
+            d = mpf_loggamma(b, prec, round_ceiling)
+    # decreasing
+    elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a):
+        if type == 0:
+            c = mpf_gamma(b, prec, round_floor)
+            d = mpf_gamma(a, prec, round_ceiling)
+        elif type == 2:
+            c = mpf_rgamma(a, prec, round_floor)
+            d = mpf_rgamma(b, prec, round_ceiling)
+        elif type == 3:
+            c = mpf_loggamma(b, prec, round_floor)
+            d = mpf_loggamma(a, prec, round_ceiling)
+    else:
+        # TODO: reflection formula
+        znew = mpi_add(z, mpi_one, wp)
+        if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec)
+        if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec)
+        if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec)
+    return c, d
+
+def mpci_gamma(z, prec, type=0):
+    (a1,a2), (b1,b2) = z
+
+    # Real case
+    if b1 == b2 == fzero and (type != 3 or mpf_gt(a1,fzero)):
+        return mpi_gamma(z, prec, type), mpi_zero
+
+    # Estimate precision
+    wp = prec+20
+    if type != 3:
+        amag = a2[2]+a2[3]
+        bmag = b2[2]+b2[3]
+        if a2 != fzero:
+            mag = max(amag, bmag)
+        else:
+            mag = bmag
+        an = abs(to_int(a2))
+        bn = abs(to_int(b2))
+        absn = max(an, bn)
+        gamma_size = max(0,absn*mag)
+        wp += bitcount(gamma_size)
+
+    # Assume type != 1
+    if type == 1:
+        (a1,a2) = mpi_add((a1,a2), mpi_one, wp); z = (a1,a2), (b1,b2)
+        type = 0
+
+    # Avoid non-monotonic region near the negative real axis
+    if mpf_lt(a1, gamma_min_b):
+        if mpi_overlap((b1,b2), (gamma_mono_imag_a, gamma_mono_imag_b)):
+            # TODO: reflection formula
+            #if mpf_lt(a2, mpf_shift(fone,-1)):
+            #    znew = mpci_sub((mpi_one,mpi_zero),z,wp)
+            #    ...
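+            # Applying the recurrence below (recursively) moves the real
+            # part right by one unit per step, out of the strip where the
+            # monotonicity argument used afterwards would fail.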
+ # Recurrence: + # gamma(z) = gamma(z+1)/z + znew = mpi_add((a1,a2), mpi_one, wp), (b1,b2) + if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec) + if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec) + if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z,prec+2), prec) + + # Use monotonicity (except for a small region close to the + # origin and near poles) + # upper half-plane + if mpf_ge(b1, fzero): + minre = mpc_loggamma((a1,b2), wp, round_floor) + maxre = mpc_loggamma((a2,b1), wp, round_ceiling) + minim = mpc_loggamma((a1,b1), wp, round_floor) + maxim = mpc_loggamma((a2,b2), wp, round_ceiling) + # lower half-plane + elif mpf_le(b2, fzero): + minre = mpc_loggamma((a1,b1), wp, round_floor) + maxre = mpc_loggamma((a2,b2), wp, round_ceiling) + minim = mpc_loggamma((a2,b1), wp, round_floor) + maxim = mpc_loggamma((a1,b2), wp, round_ceiling) + # crosses real axis + else: + maxre = mpc_loggamma((a2,fzero), wp, round_ceiling) + # stretches more into the lower half-plane + if mpf_gt(mpf_neg(b1), b2): + minre = mpc_loggamma((a1,b1), wp, round_ceiling) + else: + minre = mpc_loggamma((a1,b2), wp, round_ceiling) + minim = mpc_loggamma((a2,b1), wp, round_floor) + maxim = mpc_loggamma((a2,b2), wp, round_floor) + + w = (minre[0], maxre[0]), (minim[1], maxim[1]) + if type == 3: + return mpi_pos(w[0], prec), mpi_pos(w[1], prec) + if type == 2: + w = mpci_neg(w) + return mpci_exp(w, prec) + +def mpi_loggamma(z, prec): return mpi_gamma(z, prec, type=3) +def mpci_loggamma(z, prec): return mpci_gamma(z, prec, type=3) + +def mpi_rgamma(z, prec): return mpi_gamma(z, prec, type=2) +def mpci_rgamma(z, prec): return mpci_gamma(z, prec, type=2) + +def mpi_factorial(z, prec): return mpi_gamma(z, prec, type=1) +def mpci_factorial(z, prec): return mpci_gamma(z, prec, type=1) diff --git a/MLPY/Lib/site-packages/mpmath/math2.py b/MLPY/Lib/site-packages/mpmath/math2.py new file mode 100644 index 0000000000000000000000000000000000000000..302e25f509c18b2c76a2b62611f2765db84ab13e --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/math2.py @@ -0,0 +1,672 @@ +""" +This module complements the math and cmath builtin modules by providing +fast machine precision versions of some additional functions (gamma, ...) +and wrapping math/cmath functions so that they can be called with either +real or complex arguments. +""" + +import operator +import math +import cmath + +# Irrational (?) 
constants +pi = 3.1415926535897932385 +e = 2.7182818284590452354 +sqrt2 = 1.4142135623730950488 +sqrt5 = 2.2360679774997896964 +phi = 1.6180339887498948482 +ln2 = 0.69314718055994530942 +ln10 = 2.302585092994045684 +euler = 0.57721566490153286061 +catalan = 0.91596559417721901505 +khinchin = 2.6854520010653064453 +apery = 1.2020569031595942854 + +logpi = 1.1447298858494001741 + +def _mathfun_real(f_real, f_complex): + def f(x, **kwargs): + if type(x) is float: + return f_real(x) + if type(x) is complex: + return f_complex(x) + try: + x = float(x) + return f_real(x) + except (TypeError, ValueError): + x = complex(x) + return f_complex(x) + f.__name__ = f_real.__name__ + return f + +def _mathfun(f_real, f_complex): + def f(x, **kwargs): + if type(x) is complex: + return f_complex(x) + try: + return f_real(float(x)) + except (TypeError, ValueError): + return f_complex(complex(x)) + f.__name__ = f_real.__name__ + return f + +def _mathfun_n(f_real, f_complex): + def f(*args, **kwargs): + try: + return f_real(*(float(x) for x in args)) + except (TypeError, ValueError): + return f_complex(*(complex(x) for x in args)) + f.__name__ = f_real.__name__ + return f + +# Workaround for non-raising log and sqrt in Python 2.5 and 2.4 +# on Unix system +try: + math.log(-2.0) + def math_log(x): + if x <= 0.0: + raise ValueError("math domain error") + return math.log(x) + def math_sqrt(x): + if x < 0.0: + raise ValueError("math domain error") + return math.sqrt(x) +except (ValueError, TypeError): + math_log = math.log + math_sqrt = math.sqrt + +pow = _mathfun_n(operator.pow, lambda x, y: complex(x)**y) +log = _mathfun_n(math_log, cmath.log) +sqrt = _mathfun(math_sqrt, cmath.sqrt) +exp = _mathfun_real(math.exp, cmath.exp) + +cos = _mathfun_real(math.cos, cmath.cos) +sin = _mathfun_real(math.sin, cmath.sin) +tan = _mathfun_real(math.tan, cmath.tan) + +acos = _mathfun(math.acos, cmath.acos) +asin = _mathfun(math.asin, cmath.asin) +atan = _mathfun_real(math.atan, cmath.atan) + +cosh = _mathfun_real(math.cosh, cmath.cosh) +sinh = _mathfun_real(math.sinh, cmath.sinh) +tanh = _mathfun_real(math.tanh, cmath.tanh) + +floor = _mathfun_real(math.floor, + lambda z: complex(math.floor(z.real), math.floor(z.imag))) +ceil = _mathfun_real(math.ceil, + lambda z: complex(math.ceil(z.real), math.ceil(z.imag))) + + +cos_sin = _mathfun_real(lambda x: (math.cos(x), math.sin(x)), + lambda z: (cmath.cos(z), cmath.sin(z))) + +cbrt = _mathfun(lambda x: x**(1./3), lambda z: z**(1./3)) + +def nthroot(x, n): + r = 1./n + try: + return float(x) ** r + except (ValueError, TypeError): + return complex(x) ** r + +def _sinpi_real(x): + if x < 0: + return -_sinpi_real(-x) + n, r = divmod(x, 0.5) + r *= pi + n %= 4 + if n == 0: return math.sin(r) + if n == 1: return math.cos(r) + if n == 2: return -math.sin(r) + if n == 3: return -math.cos(r) + +def _cospi_real(x): + if x < 0: + x = -x + n, r = divmod(x, 0.5) + r *= pi + n %= 4 + if n == 0: return math.cos(r) + if n == 1: return -math.sin(r) + if n == 2: return -math.cos(r) + if n == 3: return math.sin(r) + +def _sinpi_complex(z): + if z.real < 0: + return -_sinpi_complex(-z) + n, r = divmod(z.real, 0.5) + z = pi*complex(r, z.imag) + n %= 4 + if n == 0: return cmath.sin(z) + if n == 1: return cmath.cos(z) + if n == 2: return -cmath.sin(z) + if n == 3: return -cmath.cos(z) + +def _cospi_complex(z): + if z.real < 0: + z = -z + n, r = divmod(z.real, 0.5) + z = pi*complex(r, z.imag) + n %= 4 + if n == 0: return cmath.cos(z) + if n == 1: return -cmath.sin(z) + if n == 2: return -cmath.cos(z) + if n 
== 3: return cmath.sin(z) + +cospi = _mathfun_real(_cospi_real, _cospi_complex) +sinpi = _mathfun_real(_sinpi_real, _sinpi_complex) + +def tanpi(x): + try: + return sinpi(x) / cospi(x) + except OverflowError: + if complex(x).imag > 10: + return 1j + if complex(x).imag < 10: + return -1j + raise + +def cotpi(x): + try: + return cospi(x) / sinpi(x) + except OverflowError: + if complex(x).imag > 10: + return -1j + if complex(x).imag < 10: + return 1j + raise + +INF = 1e300*1e300 +NINF = -INF +NAN = INF-INF +EPS = 2.2204460492503131e-16 + +_exact_gamma = (INF, 1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0, + 362880.0, 3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0, + 1307674368000.0, 20922789888000.0, 355687428096000.0, 6402373705728000.0, + 121645100408832000.0, 2432902008176640000.0) + +_max_exact_gamma = len(_exact_gamma)-1 + +# Lanczos coefficients used by the GNU Scientific Library +_lanczos_g = 7 +_lanczos_p = (0.99999999999980993, 676.5203681218851, -1259.1392167224028, + 771.32342877765313, -176.61502916214059, 12.507343278686905, + -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7) + +def _gamma_real(x): + _intx = int(x) + if _intx == x: + if _intx <= 0: + #return (-1)**_intx * INF + raise ZeroDivisionError("gamma function pole") + if _intx <= _max_exact_gamma: + return _exact_gamma[_intx] + if x < 0.5: + # TODO: sinpi + return pi / (_sinpi_real(x)*_gamma_real(1-x)) + else: + x -= 1.0 + r = _lanczos_p[0] + for i in range(1, _lanczos_g+2): + r += _lanczos_p[i]/(x+i) + t = x + _lanczos_g + 0.5 + return 2.506628274631000502417 * t**(x+0.5) * math.exp(-t) * r + +def _gamma_complex(x): + if not x.imag: + return complex(_gamma_real(x.real)) + if x.real < 0.5: + # TODO: sinpi + return pi / (_sinpi_complex(x)*_gamma_complex(1-x)) + else: + x -= 1.0 + r = _lanczos_p[0] + for i in range(1, _lanczos_g+2): + r += _lanczos_p[i]/(x+i) + t = x + _lanczos_g + 0.5 + return 2.506628274631000502417 * t**(x+0.5) * cmath.exp(-t) * r + +gamma = _mathfun_real(_gamma_real, _gamma_complex) + +def rgamma(x): + try: + return 1./gamma(x) + except ZeroDivisionError: + return x*0.0 + +def factorial(x): + return gamma(x+1.0) + +def arg(x): + if type(x) is float: + return math.atan2(0.0,x) + return math.atan2(x.imag,x.real) + +# XXX: broken for negatives +def loggamma(x): + if type(x) not in (float, complex): + try: + x = float(x) + except (ValueError, TypeError): + x = complex(x) + try: + xreal = x.real + ximag = x.imag + except AttributeError: # py2.5 + xreal = x + ximag = 0.0 + # Reflection formula + # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0003/ + if xreal < 0.0: + if abs(x) < 0.5: + v = log(gamma(x)) + if ximag == 0: + v = v.conjugate() + return v + z = 1-x + try: + re = z.real + im = z.imag + except AttributeError: # py2.5 + re = z + im = 0.0 + refloor = floor(re) + if im == 0.0: + imsign = 0 + elif im < 0.0: + imsign = -1 + else: + imsign = 1 + return (-pi*1j)*abs(refloor)*(1-abs(imsign)) + logpi - \ + log(sinpi(z-refloor)) - loggamma(z) + 1j*pi*refloor*imsign + if x == 1.0 or x == 2.0: + return x*0 + p = 0. 
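+    # Apply log(gamma(x)) = log(gamma(x+1)) - log(x) until |x| >= 11,
+    # where the Stirling series below converges rapidly; p accumulates
+    # the -log(x) correction terms.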
+ while abs(x) < 11: + p -= log(x) + x += 1.0 + s = 0.918938533204672742 + (x-0.5)*log(x) - x + r = 1./x + r2 = r*r + s += 0.083333333333333333333*r; r *= r2 + s += -0.0027777777777777777778*r; r *= r2 + s += 0.00079365079365079365079*r; r *= r2 + s += -0.0005952380952380952381*r; r *= r2 + s += 0.00084175084175084175084*r; r *= r2 + s += -0.0019175269175269175269*r; r *= r2 + s += 0.0064102564102564102564*r; r *= r2 + s += -0.02955065359477124183*r + return s + p + +_psi_coeff = [ +0.083333333333333333333, +-0.0083333333333333333333, +0.003968253968253968254, +-0.0041666666666666666667, +0.0075757575757575757576, +-0.021092796092796092796, +0.083333333333333333333, +-0.44325980392156862745, +3.0539543302701197438, +-26.456212121212121212] + +def _digamma_real(x): + _intx = int(x) + if _intx == x: + if _intx <= 0: + raise ZeroDivisionError("polygamma pole") + if x < 0.5: + x = 1.0-x + s = pi*cotpi(x) + else: + s = 0.0 + while x < 10.0: + s -= 1.0/x + x += 1.0 + x2 = x**-2 + t = x2 + for c in _psi_coeff: + s -= c*t + if t < 1e-20: + break + t *= x2 + return s + math_log(x) - 0.5/x + +def _digamma_complex(x): + if not x.imag: + return complex(_digamma_real(x.real)) + if x.real < 0.5: + x = 1.0-x + s = pi*cotpi(x) + else: + s = 0.0 + while abs(x) < 10.0: + s -= 1.0/x + x += 1.0 + x2 = x**-2 + t = x2 + for c in _psi_coeff: + s -= c*t + if abs(t) < 1e-20: + break + t *= x2 + return s + cmath.log(x) - 0.5/x + +digamma = _mathfun_real(_digamma_real, _digamma_complex) + +# TODO: could implement complex erf and erfc here. Need +# to find an accurate method (avoiding cancellation) +# for approx. 1 < abs(x) < 9. + +_erfc_coeff_P = [ + 1.0000000161203922312, + 2.1275306946297962644, + 2.2280433377390253297, + 1.4695509105618423961, + 0.66275911699770787537, + 0.20924776504163751585, + 0.045459713768411264339, + 0.0063065951710717791934, + 0.00044560259661560421715][::-1] + +_erfc_coeff_Q = [ + 1.0000000000000000000, + 3.2559100272784894318, + 4.9019435608903239131, + 4.4971472894498014205, + 2.7845640601891186528, + 1.2146026030046904138, + 0.37647108453729465912, + 0.080970149639040548613, + 0.011178148899483545902, + 0.00078981003831980423513][::-1] + +def _polyval(coeffs, x): + p = coeffs[0] + for c in coeffs[1:]: + p = c + x*p + return p + +def _erf_taylor(x): + # Taylor series assuming 0 <= x <= 1 + x2 = x*x + s = t = x + n = 1 + while abs(t) > 1e-17: + t *= x2/n + s -= t/(n+n+1) + n += 1 + t *= x2/n + s += t/(n+n+1) + n += 1 + return 1.1283791670955125739*s + +def _erfc_mid(x): + # Rational approximation assuming 0 <= x <= 9 + return exp(-x*x)*_polyval(_erfc_coeff_P,x)/_polyval(_erfc_coeff_Q,x) + +def _erfc_asymp(x): + # Asymptotic expansion assuming x >= 9 + x2 = x*x + v = exp(-x2)/x*0.56418958354775628695 + r = t = 0.5 / x2 + s = 1.0 + for n in range(1,22,4): + s -= t + t *= r * (n+2) + s += t + t *= r * (n+4) + if abs(t) < 1e-17: + break + return s * v + +def erf(x): + """ + erf of a real number. + """ + x = float(x) + if x != x: + return x + if x < 0.0: + return -erf(-x) + if x >= 1.0: + if x >= 6.0: + return 1.0 + return 1.0 - _erfc_mid(x) + return _erf_taylor(x) + +def erfc(x): + """ + erfc of a real number. 
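+
+    A quick sanity check, using erfc(0) = 1 - erf(0) = 1:
+
+        >>> erfc(0.0)
+        1.0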
+ """ + x = float(x) + if x != x: + return x + if x < 0.0: + if x < -6.0: + return 2.0 + return 2.0-erfc(-x) + if x > 9.0: + return _erfc_asymp(x) + if x >= 1.0: + return _erfc_mid(x) + return 1.0 - _erf_taylor(x) + +gauss42 = [\ +(0.99839961899006235, 0.0041059986046490839), +(-0.99839961899006235, 0.0041059986046490839), +(0.9915772883408609, 0.009536220301748501), +(-0.9915772883408609,0.009536220301748501), +(0.97934250806374812, 0.014922443697357493), +(-0.97934250806374812, 0.014922443697357493), +(0.96175936533820439,0.020227869569052644), +(-0.96175936533820439, 0.020227869569052644), +(0.93892355735498811, 0.025422959526113047), +(-0.93892355735498811,0.025422959526113047), +(0.91095972490412735, 0.030479240699603467), +(-0.91095972490412735, 0.030479240699603467), +(0.87802056981217269,0.03536907109759211), +(-0.87802056981217269, 0.03536907109759211), +(0.8402859832618168, 0.040065735180692258), +(-0.8402859832618168,0.040065735180692258), +(0.7979620532554873, 0.044543577771965874), +(-0.7979620532554873, 0.044543577771965874), +(0.75127993568948048,0.048778140792803244), +(-0.75127993568948048, 0.048778140792803244), +(0.70049459055617114, 0.052746295699174064), +(-0.70049459055617114,0.052746295699174064), +(0.64588338886924779, 0.056426369358018376), +(-0.64588338886924779, 0.056426369358018376), +(0.58774459748510932, 0.059798262227586649), +(-0.58774459748510932, 0.059798262227586649), +(0.5263957499311922, 0.062843558045002565), +(-0.5263957499311922, 0.062843558045002565), +(0.46217191207042191, 0.065545624364908975), +(-0.46217191207042191, 0.065545624364908975), +(0.39542385204297503, 0.067889703376521934), +(-0.39542385204297503, 0.067889703376521934), +(0.32651612446541151, 0.069862992492594159), +(-0.32651612446541151, 0.069862992492594159), +(0.25582507934287907, 0.071454714265170971), +(-0.25582507934287907, 0.071454714265170971), +(0.18373680656485453, 0.072656175243804091), +(-0.18373680656485453, 0.072656175243804091), +(0.11064502720851986, 0.073460813453467527), +(-0.11064502720851986, 0.073460813453467527), +(0.036948943165351772, 0.073864234232172879), +(-0.036948943165351772, 0.073864234232172879)] + +EI_ASYMP_CONVERGENCE_RADIUS = 40.0 + +def ei_asymp(z, _e1=False): + r = 1./z + s = t = 1.0 + k = 1 + while 1: + t *= k*r + s += t + if abs(t) < 1e-16: + break + k += 1 + v = s*exp(z)/z + if _e1: + if type(z) is complex: + zreal = z.real + zimag = z.imag + else: + zreal = z + zimag = 0.0 + if zimag == 0.0 and zreal > 0.0: + v += pi*1j + else: + if type(z) is complex: + if z.imag > 0: + v += pi*1j + if z.imag < 0: + v -= pi*1j + return v + +def ei_taylor(z, _e1=False): + s = t = z + k = 2 + while 1: + t = t*z/k + term = t/k + if abs(term) < 1e-17: + break + s += term + k += 1 + s += euler + if _e1: + s += log(-z) + else: + if type(z) is float or z.imag == 0.0: + s += math_log(abs(z)) + else: + s += cmath.log(z) + return s + +def ei(z, _e1=False): + typez = type(z) + if typez not in (float, complex): + try: + z = float(z) + typez = float + except (TypeError, ValueError): + z = complex(z) + typez = complex + if not z: + return -INF + absz = abs(z) + if absz > EI_ASYMP_CONVERGENCE_RADIUS: + return ei_asymp(z, _e1) + elif absz <= 2.0 or (typez is float and z > 0.0): + return ei_taylor(z, _e1) + # Integrate, starting from whichever is smaller of a Taylor + # series value or an asymptotic series value + if typez is complex and z.real > 0.0: + zref = z / absz + ref = ei_taylor(zref, _e1) + else: + zref = EI_ASYMP_CONVERGENCE_RADIUS * z / absz + ref = ei_asymp(zref, 
_e1) + C = (zref-z)*0.5 + D = (zref+z)*0.5 + s = 0.0 + if type(z) is complex: + _exp = cmath.exp + else: + _exp = math.exp + for x,w in gauss42: + t = C*x+D + s += w*_exp(t)/t + ref -= C*s + return ref + +def e1(z): + # hack to get consistent signs if the imaginary part if 0 + # and signed + typez = type(z) + if type(z) not in (float, complex): + try: + z = float(z) + typez = float + except (TypeError, ValueError): + z = complex(z) + typez = complex + if typez is complex and not z.imag: + z = complex(z.real, 0.0) + # end hack + return -ei(-z, _e1=True) + +_zeta_int = [\ +-0.5, +0.0, +1.6449340668482264365,1.2020569031595942854,1.0823232337111381915, +1.0369277551433699263,1.0173430619844491397,1.0083492773819228268, +1.0040773561979443394,1.0020083928260822144,1.0009945751278180853, +1.0004941886041194646,1.0002460865533080483,1.0001227133475784891, +1.0000612481350587048,1.0000305882363070205,1.0000152822594086519, +1.0000076371976378998,1.0000038172932649998,1.0000019082127165539, +1.0000009539620338728,1.0000004769329867878,1.0000002384505027277, +1.0000001192199259653,1.0000000596081890513,1.0000000298035035147, +1.0000000149015548284] + +_zeta_P = [-3.50000000087575873, -0.701274355654678147, +-0.0672313458590012612, -0.00398731457954257841, +-0.000160948723019303141, -4.67633010038383371e-6, +-1.02078104417700585e-7, -1.68030037095896287e-9, +-1.85231868742346722e-11][::-1] + +_zeta_Q = [1.00000000000000000, -0.936552848762465319, +-0.0588835413263763741, -0.00441498861482948666, +-0.000143416758067432622, -5.10691659585090782e-6, +-9.58813053268913799e-8, -1.72963791443181972e-9, +-1.83527919681474132e-11][::-1] + +_zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8, +2.01201845887608893e-7, -1.53917240683468381e-6, +-5.09890411005967954e-7, 0.000122464707271619326, +-0.000905721539353130232, -0.00239315326074843037, +0.084239750013159168, 0.418938517907442414, 0.500000001921884009] + +_zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9, +1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7, +0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713, +0.0842396947501199816, 0.418938533204660256, 0.500000000000000052] + +def zeta(s): + """ + Riemann zeta function, real argument + """ + if not isinstance(s, (float, int)): + try: + s = float(s) + except (ValueError, TypeError): + try: + s = complex(s) + if not s.imag: + return complex(zeta(s.real)) + except (ValueError, TypeError): + pass + raise NotImplementedError + if s == 1: + raise ValueError("zeta(1) pole") + if s >= 27: + return 1.0 + 2.0**(-s) + 3.0**(-s) + n = int(s) + if n == s: + if n >= 0: + return _zeta_int[n] + if not (n % 2): + return 0.0 + if s <= 0.0: + return 2.**s*pi**(s-1)*_sinpi_real(0.5*s)*_gamma_real(1-s)*zeta(1-s) + if s <= 2.0: + if s <= 1.0: + return _polyval(_zeta_0,s)/(s-1) + return _polyval(_zeta_1,s)/(s-1) + z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s) + return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__init__.py b/MLPY/Lib/site-packages/mpmath/matrices/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..293697b9fcf8bd82d58ac4ff45acd73fadac82f9 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/__init__.py @@ -0,0 +1,2 @@ +from . import eigen # to set methods +from . 
import eigen_symmetric # to set methods diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2acae50ef814d6426610efbdb2f1ede856581b23 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e3cbaed1549c03038b91469b329e6a46cfbd10f Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/calculus.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfcd0294695f0d6832e33deb8cc63da53e93b281 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b05b06dd7ad80a31d8a7f7ed3c3fccc6c53c3b4 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4d065a1f5d03a16490b13e5f6462675dcb71a894 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc8e75199785c5ae6bdff69f686fdca1616048e4 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/calculus.py b/MLPY/Lib/site-packages/mpmath/matrices/calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..7fae2a7a9a29898241ed41810331b480ff70798f --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/calculus.py @@ -0,0 +1,531 @@ +from ..libmp.backend import xrange + +# TODO: should use diagonalization-based algorithms + +class MatrixCalculusMethods(object): + + def _exp_pade(ctx, a): + """ + Exponential of a matrix using Pade approximants. + + See G. H. Golub, C. F. 
van Loan 'Matrix Computations', + third Ed., page 572 + + TODO: + - find a good estimate for q + - reduce the number of matrix multiplications to improve + performance + """ + def eps_pade(p): + return ctx.mpf(2)**(3-2*p) * \ + ctx.factorial(p)**2/(ctx.factorial(2*p)**2 * (2*p + 1)) + q = 4 + extraq = 8 + while 1: + if eps_pade(q) < ctx.eps: + break + q += 1 + q += extraq + j = int(max(1, ctx.mag(ctx.mnorm(a,'inf')))) + extra = q + prec = ctx.prec + ctx.dps += extra + 3 + try: + a = a/2**j + na = a.rows + den = ctx.eye(na) + num = ctx.eye(na) + x = ctx.eye(na) + c = ctx.mpf(1) + for k in range(1, q+1): + c *= ctx.mpf(q - k + 1)/((2*q - k + 1) * k) + x = a*x + cx = c*x + num += cx + den += (-1)**k * cx + f = ctx.lu_solve_mat(den, num) + for k in range(j): + f = f*f + finally: + ctx.prec = prec + return f*1 + + def expm(ctx, A, method='taylor'): + r""" + Computes the matrix exponential of a square matrix `A`, which is defined + by the power series + + .. math :: + + \exp(A) = I + A + \frac{A^2}{2!} + \frac{A^3}{3!} + \ldots + + With method='taylor', the matrix exponential is computed + using the Taylor series. With method='pade', Pade approximants + are used instead. + + **Examples** + + Basic examples:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> expm(zeros(3)) + [1.0 0.0 0.0] + [0.0 1.0 0.0] + [0.0 0.0 1.0] + >>> expm(eye(3)) + [2.71828182845905 0.0 0.0] + [ 0.0 2.71828182845905 0.0] + [ 0.0 0.0 2.71828182845905] + >>> expm([[1,1,0],[1,0,1],[0,1,0]]) + [ 3.86814500615414 2.26812870852145 0.841130841230196] + [ 2.26812870852145 2.44114713886289 1.42699786729125] + [0.841130841230196 1.42699786729125 1.6000162976327] + >>> expm([[1,1,0],[1,0,1],[0,1,0]], method='pade') + [ 3.86814500615414 2.26812870852145 0.841130841230196] + [ 2.26812870852145 2.44114713886289 1.42699786729125] + [0.841130841230196 1.42699786729125 1.6000162976327] + >>> expm([[1+j, 0], [1+j,1]]) + [(1.46869393991589 + 2.28735528717884j) 0.0] + [ (1.03776739863568 + 3.536943175722j) (2.71828182845905 + 0.0j)] + + Matrices with large entries are allowed:: + + >>> expm(matrix([[1,2],[2,3]])**25) + [5.65024064048415e+2050488462815550 9.14228140091932e+2050488462815550] + [9.14228140091932e+2050488462815550 1.47925220414035e+2050488462815551] + + The identity `\exp(A+B) = \exp(A) \exp(B)` does not hold for + noncommuting matrices:: + + >>> A = hilbert(3) + >>> B = A + eye(3) + >>> chop(mnorm(A*B - B*A)) + 0.0 + >>> chop(mnorm(expm(A+B) - expm(A)*expm(B))) + 0.0 + >>> B = A + ones(3) + >>> mnorm(A*B - B*A) + 1.8 + >>> mnorm(expm(A+B) - expm(A)*expm(B)) + 42.0927851137247 + + """ + if method == 'pade': + prec = ctx.prec + try: + A = ctx.matrix(A) + ctx.prec += 2*A.rows + res = ctx._exp_pade(A) + finally: + ctx.prec = prec + return res + A = ctx.matrix(A) + prec = ctx.prec + j = int(max(1, ctx.mag(ctx.mnorm(A,'inf')))) + j += int(0.5*prec**0.5) + try: + ctx.prec += 10 + 2*j + tol = +ctx.eps + A = A/2**j + T = A + Y = A**0 + A + k = 2 + while 1: + T *= A * (1/ctx.mpf(k)) + if ctx.mnorm(T, 'inf') < tol: + break + Y += T + k += 1 + for k in xrange(j): + Y = Y*Y + finally: + ctx.prec = prec + Y *= 1 + return Y + + def cosm(ctx, A): + r""" + Gives the cosine of a square matrix `A`, defined in analogy + with the matrix exponential. 
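+
+        As the implementation below shows, it is evaluated via the
+        exponential identity
+
+        .. math ::
+
+            \cos(A) = \frac{1}{2}\left(e^{iA} + e^{-iA}\right),
+
+        with the result projected back to a real matrix when `A` is real.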
+ + Examples:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> X = eye(3) + >>> cosm(X) + [0.54030230586814 0.0 0.0] + [ 0.0 0.54030230586814 0.0] + [ 0.0 0.0 0.54030230586814] + >>> X = hilbert(3) + >>> cosm(X) + [ 0.424403834569555 -0.316643413047167 -0.221474945949293] + [-0.316643413047167 0.820646708837824 -0.127183694770039] + [-0.221474945949293 -0.127183694770039 0.909236687217541] + >>> X = matrix([[1+j,-2],[0,-j]]) + >>> cosm(X) + [(0.833730025131149 - 0.988897705762865j) (1.07485840848393 - 0.17192140544213j)] + [ 0.0 (1.54308063481524 + 0.0j)] + """ + B = 0.5 * (ctx.expm(A*ctx.j) + ctx.expm(A*(-ctx.j))) + if not sum(A.apply(ctx.im).apply(abs)): + B = B.apply(ctx.re) + return B + + def sinm(ctx, A): + r""" + Gives the sine of a square matrix `A`, defined in analogy + with the matrix exponential. + + Examples:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> X = eye(3) + >>> sinm(X) + [0.841470984807897 0.0 0.0] + [ 0.0 0.841470984807897 0.0] + [ 0.0 0.0 0.841470984807897] + >>> X = hilbert(3) + >>> sinm(X) + [0.711608512150994 0.339783913247439 0.220742837314741] + [0.339783913247439 0.244113865695532 0.187231271174372] + [0.220742837314741 0.187231271174372 0.155816730769635] + >>> X = matrix([[1+j,-2],[0,-j]]) + >>> sinm(X) + [(1.29845758141598 + 0.634963914784736j) (-1.96751511930922 + 0.314700021761367j)] + [ 0.0 (0.0 - 1.1752011936438j)] + """ + B = (-0.5j) * (ctx.expm(A*ctx.j) - ctx.expm(A*(-ctx.j))) + if not sum(A.apply(ctx.im).apply(abs)): + B = B.apply(ctx.re) + return B + + def _sqrtm_rot(ctx, A, _may_rotate): + # If the iteration fails to converge, cheat by performing + # a rotation by a complex number + u = ctx.j**0.3 + return ctx.sqrtm(u*A, _may_rotate) / ctx.sqrt(u) + + def sqrtm(ctx, A, _may_rotate=2): + r""" + Computes a square root of the square matrix `A`, i.e. returns + a matrix `B = A^{1/2}` such that `B^2 = A`. The square root + of a matrix, if it exists, is not unique. 
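+
+        Internally, a Denman-Beavers iteration is used:
+
+        .. math ::
+
+            Y_{k+1} = \frac{1}{2}\left(Y_k + Z_k^{-1}\right), \quad
+            Z_{k+1} = \frac{1}{2}\left(Z_k + Y_k^{-1}\right),
+
+        with `Y_0 = A` and `Z_0 = I`; when it converges, `Y_k \to A^{1/2}`
+        and `Z_k \to A^{-1/2}`.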
+ + **Examples** + + Square roots of some simple matrices:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sqrtm([[1,0], [0,1]]) + [1.0 0.0] + [0.0 1.0] + >>> sqrtm([[0,0], [0,0]]) + [0.0 0.0] + [0.0 0.0] + >>> sqrtm([[2,0],[0,1]]) + [1.4142135623731 0.0] + [ 0.0 1.0] + >>> sqrtm([[1,1],[1,0]]) + [ (0.920442065259926 - 0.21728689675164j) (0.568864481005783 + 0.351577584254143j)] + [(0.568864481005783 + 0.351577584254143j) (0.351577584254143 - 0.568864481005783j)] + >>> sqrtm([[1,0],[0,1]]) + [1.0 0.0] + [0.0 1.0] + >>> sqrtm([[-1,0],[0,1]]) + [(0.0 - 1.0j) 0.0] + [ 0.0 (1.0 + 0.0j)] + >>> sqrtm([[j,0],[0,j]]) + [(0.707106781186547 + 0.707106781186547j) 0.0] + [ 0.0 (0.707106781186547 + 0.707106781186547j)] + + A square root of a rotation matrix, giving the corresponding + half-angle rotation matrix:: + + >>> t1 = 0.75 + >>> t2 = t1 * 0.5 + >>> A1 = matrix([[cos(t1), -sin(t1)], [sin(t1), cos(t1)]]) + >>> A2 = matrix([[cos(t2), -sin(t2)], [sin(t2), cos(t2)]]) + >>> sqrtm(A1) + [0.930507621912314 -0.366272529086048] + [0.366272529086048 0.930507621912314] + >>> A2 + [0.930507621912314 -0.366272529086048] + [0.366272529086048 0.930507621912314] + + The identity `(A^2)^{1/2} = A` does not necessarily hold:: + + >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]]) + >>> sqrtm(A**2) + [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + >>> sqrtm(A)**2 + [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + >>> A = matrix([[-4,1,4],[7,-8,9],[10,2,11]]) + >>> sqrtm(A**2) + [ 7.43715112194995 -0.324127569985474 1.8481718827526] + [-0.251549715716942 9.32699765900402 2.48221180985147] + [ 4.11609388833616 0.775751877098258 13.017955697342] + >>> chop(sqrtm(A)**2) + [-4.0 1.0 4.0] + [ 7.0 -8.0 9.0] + [10.0 2.0 11.0] + + For some matrices, a square root does not exist:: + + >>> sqrtm([[0,1], [0,0]]) + Traceback (most recent call last): + ... + ZeroDivisionError: matrix is numerically singular + + Two examples from the documentation for Matlab's ``sqrtm``:: + + >>> mp.dps = 15; mp.pretty = True + >>> sqrtm([[7,10],[15,22]]) + [1.56669890360128 1.74077655955698] + [2.61116483933547 4.17786374293675] + >>> + >>> X = matrix(\ + ... [[5,-4,1,0,0], + ... [-4,6,-4,1,0], + ... [1,-4,6,-4,1], + ... [0,1,-4,6,-4], + ... [0,0,1,-4,5]]) + >>> Y = matrix(\ + ... [[2,-1,-0,-0,-0], + ... [-1,2,-1,0,-0], + ... [0,-1,2,-1,0], + ... [-0,0,-1,2,-1], + ... [-0,-0,-0,-1,2]]) + >>> mnorm(sqrtm(X) - Y) + 4.53155328326114e-19 + + """ + A = ctx.matrix(A) + # Trivial + if A*0 == A: + return A + prec = ctx.prec + if _may_rotate: + d = ctx.det(A) + if abs(ctx.im(d)) < 16*ctx.eps and ctx.re(d) < 0: + return ctx._sqrtm_rot(A, _may_rotate-1) + try: + ctx.prec += 10 + tol = ctx.eps * 128 + Y = A + Z = I = A**0 + k = 0 + # Denman-Beavers iteration + while 1: + Yprev = Y + try: + Y, Z = 0.5*(Y+ctx.inverse(Z)), 0.5*(Z+ctx.inverse(Y)) + except ZeroDivisionError: + if _may_rotate: + Y = ctx._sqrtm_rot(A, _may_rotate-1) + break + else: + raise + mag1 = ctx.mnorm(Y-Yprev, 'inf') + mag2 = ctx.mnorm(Y, 'inf') + if mag1 <= mag2*tol: + break + if _may_rotate and k > 6 and not mag1 < mag2 * 0.001: + return ctx._sqrtm_rot(A, _may_rotate-1) + k += 1 + if k > ctx.prec: + raise ctx.NoConvergence + finally: + ctx.prec = prec + Y *= 1 + return Y + + def logm(ctx, A): + r""" + Computes a logarithm of the square matrix `A`, i.e. returns + a matrix `B = \log(A)` such that `\exp(B) = A`. The logarithm + of a matrix, if it exists, is not unique. 
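+
+        The computation uses inverse scaling and squaring: repeated square
+        roots bring the matrix `B` close to the identity, the series
+
+        .. math ::
+
+            \log(I+X) = X - \frac{X^2}{2} + \frac{X^3}{3} - \ldots
+
+        is summed for `X = B - I`, and the result is finally multiplied by
+        `2^n`, where `n` is the number of square roots that were taken.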
+ + **Examples** + + Logarithms of some simple matrices:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> X = eye(3) + >>> logm(X) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + >>> logm(2*X) + [0.693147180559945 0.0 0.0] + [ 0.0 0.693147180559945 0.0] + [ 0.0 0.0 0.693147180559945] + >>> logm(expm(X)) + [1.0 0.0 0.0] + [0.0 1.0 0.0] + [0.0 0.0 1.0] + + A logarithm of a complex matrix:: + + >>> X = matrix([[2+j, 1, 3], [1-j, 1-2*j, 1], [-4, -5, j]]) + >>> B = logm(X) + >>> nprint(B) + [ (0.808757 + 0.107759j) (2.20752 + 0.202762j) (1.07376 - 0.773874j)] + [ (0.905709 - 0.107795j) (0.0287395 - 0.824993j) (0.111619 + 0.514272j)] + [(-0.930151 + 0.399512j) (-2.06266 - 0.674397j) (0.791552 + 0.519839j)] + >>> chop(expm(B)) + [(2.0 + 1.0j) 1.0 3.0] + [(1.0 - 1.0j) (1.0 - 2.0j) 1.0] + [ -4.0 -5.0 (0.0 + 1.0j)] + + A matrix `X` close to the identity matrix, for which + `\log(\exp(X)) = \exp(\log(X)) = X` holds:: + + >>> X = eye(3) + hilbert(3)/4 + >>> X + [ 1.25 0.125 0.0833333333333333] + [ 0.125 1.08333333333333 0.0625] + [0.0833333333333333 0.0625 1.05] + >>> logm(expm(X)) + [ 1.25 0.125 0.0833333333333333] + [ 0.125 1.08333333333333 0.0625] + [0.0833333333333333 0.0625 1.05] + >>> expm(logm(X)) + [ 1.25 0.125 0.0833333333333333] + [ 0.125 1.08333333333333 0.0625] + [0.0833333333333333 0.0625 1.05] + + A logarithm of a rotation matrix, giving back the angle of + the rotation:: + + >>> t = 3.7 + >>> A = matrix([[cos(t),sin(t)],[-sin(t),cos(t)]]) + >>> chop(logm(A)) + [ 0.0 -2.58318530717959] + [2.58318530717959 0.0] + >>> (2*pi-t) + 2.58318530717959 + + For some matrices, a logarithm does not exist:: + + >>> logm([[1,0], [0,0]]) + Traceback (most recent call last): + ... + ZeroDivisionError: matrix is numerically singular + + Logarithm of a matrix with large entries:: + + >>> logm(hilbert(3) * 10**20).apply(re) + [ 45.5597513593433 1.27721006042799 0.317662687717978] + [ 1.27721006042799 42.5222778973542 2.24003708791604] + [0.317662687717978 2.24003708791604 42.395212822267] + + """ + A = ctx.matrix(A) + prec = ctx.prec + try: + ctx.prec += 10 + tol = ctx.eps * 128 + I = A**0 + B = A + n = 0 + while 1: + B = ctx.sqrtm(B) + n += 1 + if ctx.mnorm(B-I, 'inf') < 0.125: + break + T = X = B-I + L = X*0 + k = 1 + while 1: + if k & 1: + L += T / k + else: + L -= T / k + T *= X + if ctx.mnorm(T, 'inf') < tol: + break + k += 1 + if k > ctx.prec: + raise ctx.NoConvergence + finally: + ctx.prec = prec + L *= 2**n + return L + + def powm(ctx, A, r): + r""" + Computes `A^r = \exp(A \log r)` for a matrix `A` and complex + number `r`. + + **Examples** + + Powers and inverse powers of a matrix:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]]) + >>> powm(A, 2) + [ 63.0 20.0 69.0] + [174.0 89.0 199.0] + [164.0 48.0 179.0] + >>> chop(powm(powm(A, 4), 1/4.)) + [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + >>> powm(extraprec(20)(powm)(A, -4), -1/4.) 
+ [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + >>> chop(powm(powm(A, 1+0.5j), 1/(1+0.5j))) + [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + >>> powm(extraprec(5)(powm)(A, -1.5), -1/(1.5)) + [ 4.0 1.0 4.0] + [ 7.0 8.0 9.0] + [10.0 2.0 11.0] + + A Fibonacci-generating matrix:: + + >>> powm([[1,1],[1,0]], 10) + [89.0 55.0] + [55.0 34.0] + >>> fib(10) + 55.0 + >>> powm([[1,1],[1,0]], 6.5) + [(16.5166626964253 - 0.0121089837381789j) (10.2078589271083 + 0.0195927472575932j)] + [(10.2078589271083 + 0.0195927472575932j) (6.30880376931698 - 0.0317017309957721j)] + >>> (phi**6.5 - (1-phi)**6.5)/sqrt(5) + (10.2078589271083 - 0.0195927472575932j) + >>> powm([[1,1],[1,0]], 6.2) + [ (14.3076953002666 - 0.008222855781077j) (8.81733464837593 + 0.0133048601383712j)] + [(8.81733464837593 + 0.0133048601383712j) (5.49036065189071 - 0.0215277159194482j)] + >>> (phi**6.2 - (1-phi)**6.2)/sqrt(5) + (8.81733464837593 - 0.0133048601383712j) + + """ + A = ctx.matrix(A) + r = ctx.convert(r) + prec = ctx.prec + try: + ctx.prec += 10 + if ctx.isint(r): + v = A ** int(r) + elif ctx.isint(r*2): + y = int(r*2) + v = ctx.sqrtm(A) ** y + else: + v = ctx.expm(r*ctx.logm(A)) + finally: + ctx.prec = prec + v *= 1 + return v diff --git a/MLPY/Lib/site-packages/mpmath/matrices/eigen.py b/MLPY/Lib/site-packages/mpmath/matrices/eigen.py new file mode 100644 index 0000000000000000000000000000000000000000..885d604203195b695183329acc637de91aeaf5ea --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/eigen.py @@ -0,0 +1,877 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +################################################################################################## +# module for the eigenvalue problem +# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) +# +# todo: +# - implement balancing +# - agressive early deflation +# +################################################################################################## + +""" +The eigenvalue problem +---------------------- + +This file contains routines for the eigenvalue problem. + +high level routines: + + hessenberg : reduction of a real or complex square matrix to upper Hessenberg form + schur : reduction of a real or complex square matrix to upper Schur form + eig : eigenvalues and eigenvectors of a real or complex square matrix + +low level routines: + + hessenberg_reduce_0 : reduction of a real or complex square matrix to upper Hessenberg form + hessenberg_reduce_1 : auxiliary routine to hessenberg_reduce_0 + qr_step : a single implicitly shifted QR step for an upper Hessenberg matrix + hessenberg_qr : Schur decomposition of an upper Hessenberg matrix + eig_tr_r : right eigenvectors of an upper triangular matrix + eig_tr_l : left eigenvectors of an upper triangular matrix +""" + +from ..libmp.backend import xrange + +class Eigen(object): + pass + +def defun(f): + setattr(Eigen, f.__name__, f) + return f + +def hessenberg_reduce_0(ctx, A, T): + """ + This routine computes the (upper) Hessenberg decomposition of a square matrix A. + Given A, an unitary matrix Q is calculated such that + + Q' A Q = H and Q' Q = Q Q' = 1 + + where H is an upper Hessenberg matrix, meaning that it only contains zeros + below the first subdiagonal. Here ' denotes the hermitian transpose (i.e. + transposition and conjugation). + + parameters: + A (input/output) On input, A contains the square matrix A of + dimension (n,n). On output, A contains a compressed representation + of Q and H. 
+ T (output) An array of length n containing the first elements of + the Householder reflectors. + """ + + # internally we work with householder reflections from the right. + # let u be a row vector (i.e. u[i]=A[i,:i]). then + # Q is build up by reflectors of the type (1-v'v) where v is a suitable + # modification of u. these reflectors are applyed to A from the right. + # because we work with reflectors from the right we have to start with + # the bottom row of A and work then upwards (this corresponds to + # some kind of RQ decomposition). + # the first part of the vectors v (i.e. A[i,:(i-1)]) are stored as row vectors + # in the lower left part of A (excluding the diagonal and subdiagonal). + # the last entry of v is stored in T. + # the upper right part of A (including diagonal and subdiagonal) becomes H. + + + n = A.rows + if n <= 2: return + + for i in xrange(n-1, 1, -1): + + # scale the vector + + scale = 0 + for k in xrange(0, i): + scale += abs(ctx.re(A[i,k])) + abs(ctx.im(A[i,k])) + + scale_inv = 0 + if scale != 0: + scale_inv = 1 / scale + + if scale == 0 or ctx.isinf(scale_inv): + # sadly there are floating point numbers not equal to zero whose reciprocal is infinity + T[i] = 0 + A[i,i-1] = 0 + continue + + # calculate parameters for housholder transformation + + H = 0 + for k in xrange(0, i): + A[i,k] *= scale_inv + rr = ctx.re(A[i,k]) + ii = ctx.im(A[i,k]) + H += rr * rr + ii * ii + + F = A[i,i-1] + f = abs(F) + G = ctx.sqrt(H) + A[i,i-1] = - G * scale + + if f == 0: + T[i] = G + else: + ff = F / f + T[i] = F + G * ff + A[i,i-1] *= ff + + H += G * f + H = 1 / ctx.sqrt(H) + + T[i] *= H + for k in xrange(0, i - 1): + A[i,k] *= H + + for j in xrange(0, i): + # apply housholder transformation (from right) + + G = ctx.conj(T[i]) * A[j,i-1] + for k in xrange(0, i-1): + G += ctx.conj(A[i,k]) * A[j,k] + + A[j,i-1] -= G * T[i] + for k in xrange(0, i-1): + A[j,k] -= G * A[i,k] + + for j in xrange(0, n): + # apply housholder transformation (from left) + + G = T[i] * A[i-1,j] + for k in xrange(0, i-1): + G += A[i,k] * A[k,j] + + A[i-1,j] -= G * ctx.conj(T[i]) + for k in xrange(0, i-1): + A[k,j] -= G * ctx.conj(A[i,k]) + + + +def hessenberg_reduce_1(ctx, A, T): + """ + This routine forms the unitary matrix Q described in hessenberg_reduce_0. + + parameters: + A (input/output) On input, A is the same matrix as delivered by + hessenberg_reduce_0. On output, A is set to Q. + + T (input) On input, T is the same array as delivered by hessenberg_reduce_0. + """ + + n = A.rows + + if n == 1: + A[0,0] = 1 + return + + A[0,0] = A[1,1] = 1 + A[0,1] = A[1,0] = 0 + + for i in xrange(2, n): + if T[i] != 0: + + for j in xrange(0, i): + G = T[i] * A[i-1,j] + for k in xrange(0, i-1): + G += A[i,k] * A[k,j] + + A[i-1,j] -= G * ctx.conj(T[i]) + for k in xrange(0, i-1): + A[k,j] -= G * ctx.conj(A[i,k]) + + A[i,i] = 1 + for j in xrange(0, i): + A[j,i] = A[i,j] = 0 + + + +@defun +def hessenberg(ctx, A, overwrite_a = False): + """ + This routine computes the Hessenberg decomposition of a square matrix A. + Given A, an unitary matrix Q is determined such that + + Q' A Q = H and Q' Q = Q Q' = 1 + + where H is an upper right Hessenberg matrix. Here ' denotes the hermitian + transpose (i.e. transposition and conjugation). + + input: + A : a real or complex square matrix + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. 
+ + output: + Q : an unitary matrix + H : an upper right Hessenberg matrix + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> Q, H = mp.hessenberg(A) + >>> mp.nprint(H, 3) # doctest:+SKIP + [ 3.15 2.23 4.44] + [-0.769 4.85 3.05] + [ 0.0 3.61 7.0] + >>> print(mp.chop(A - Q * H * Q.transpose_conj())) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + return value: (Q, H) + """ + + n = A.rows + + if n == 1: + return (ctx.matrix([[1]]), A) + + if not overwrite_a: + A = A.copy() + + T = ctx.matrix(n, 1) + + hessenberg_reduce_0(ctx, A, T) + Q = A.copy() + hessenberg_reduce_1(ctx, Q, T) + + for x in xrange(n): + for y in xrange(x+2, n): + A[y,x] = 0 + + return Q, A + + +########################################################################### + + +def qr_step(ctx, n0, n1, A, Q, shift): + """ + This subroutine executes a single implicitly shifted QR step applied to an + upper Hessenberg matrix A. Given A and shift as input, first an QR + decomposition is calculated: + + Q R = A - shift * 1 . + + The output is then following matrix: + + R Q + shift * 1 + + parameters: + n0, n1 (input) Two integers which specify the submatrix A[n0:n1,n0:n1] + on which this subroutine operators. The subdiagonal elements + to the left and below this submatrix must be deflated (i.e. zero). + following restriction is imposed: n1>=n0+2 + A (input/output) On input, A is an upper Hessenberg matrix. + On output, A is replaced by "R Q + shift * 1" + Q (input/output) The parameter Q is multiplied by the unitary matrix + Q arising from the QR decomposition. Q can also be false, in which + case the unitary matrix Q is not computated. + shift (input) a complex number specifying the shift. idealy close to an + eigenvalue of the bottemmost part of the submatrix A[n0:n1,n0:n1]. + + references: + Stoer, Bulirsch - Introduction to Numerical Analysis. + Kresser : Numerical Methods for General and Structured Eigenvalue Problems + """ + + # implicitly shifted and bulge chasing is explained at p.398/399 in "Stoer, Bulirsch - Introduction to Numerical Analysis" + # for bulge chasing see also "Watkins - The Matrix Eigenvalue Problem" sec.4.5,p.173 + + # the Givens rotation we used is determined as follows: let c,s be two complex + # numbers. then we have following relation: + # + # v = sqrt(|c|^2 + |s|^2) + # + # 1/v [ c~ s~] [c] = [v] + # [-s c ] [s] [0] + # + # the matrix on the left is our Givens rotation. 
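+
+    # since |c/v|^2 + |s/v|^2 = 1, the rotation is unitary; applying it
+    # from the left and its hermitian transpose from the right is a
+    # similarity transformation, so the eigenvalues of A are preserved.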
+ + n = A.rows + + # first step + + # calculate givens rotation + c = A[n0 ,n0] - shift + s = A[n0+1,n0] + + v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) + + if v == 0: + v = 1 + c = 1 + s = 0 + else: + c /= v + s /= v + + cc = ctx.conj(c) + cs = ctx.conj(s) + + for k in xrange(n0, n): + # apply givens rotation from the left + x = A[n0 ,k] + y = A[n0+1,k] + A[n0 ,k] = cc * x + cs * y + A[n0+1,k] = c * y - s * x + + for k in xrange(min(n1, n0+3)): + # apply givens rotation from the right + x = A[k,n0 ] + y = A[k,n0+1] + A[k,n0 ] = c * x + s * y + A[k,n0+1] = cc * y - cs * x + + if not isinstance(Q, bool): + for k in xrange(n): + # eigenvectors + x = Q[k,n0 ] + y = Q[k,n0+1] + Q[k,n0 ] = c * x + s * y + Q[k,n0+1] = cc * y - cs * x + + # chase the bulge + + for j in xrange(n0, n1 - 2): + # calculate givens rotation + + c = A[j+1,j] + s = A[j+2,j] + + v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) + + if v == 0: + A[j+1,j] = 0 + v = 1 + c = 1 + s = 0 + else: + A[j+1,j] = v + c /= v + s /= v + + A[j+2,j] = 0 + + cc = ctx.conj(c) + cs = ctx.conj(s) + + for k in xrange(j+1, n): + # apply givens rotation from the left + x = A[j+1,k] + y = A[j+2,k] + A[j+1,k] = cc * x + cs * y + A[j+2,k] = c * y - s * x + + for k in xrange(0, min(n1, j+4)): + # apply givens rotation from the right + x = A[k,j+1] + y = A[k,j+2] + A[k,j+1] = c * x + s * y + A[k,j+2] = cc * y - cs * x + + if not isinstance(Q, bool): + for k in xrange(0, n): + # eigenvectors + x = Q[k,j+1] + y = Q[k,j+2] + Q[k,j+1] = c * x + s * y + Q[k,j+2] = cc * y - cs * x + + + +def hessenberg_qr(ctx, A, Q): + """ + This routine computes the Schur decomposition of an upper Hessenberg matrix A. + Given A, an unitary matrix Q is determined such that + + Q' A Q = R and Q' Q = Q Q' = 1 + + where R is an upper right triangular matrix. Here ' denotes the hermitian + transpose (i.e. transposition and conjugation). + + parameters: + A (input/output) On input, A contains an upper Hessenberg matrix. + On output, A is replace by the upper right triangluar matrix R. + + Q (input/output) The parameter Q is multiplied by the unitary + matrix Q arising from the Schur decomposition. Q can also be + false, in which case the unitary matrix Q is not computated. + """ + + n = A.rows + + norm = 0 + for x in xrange(n): + for y in xrange(min(x+2, n)): + norm += ctx.re(A[y,x]) ** 2 + ctx.im(A[y,x]) ** 2 + norm = ctx.sqrt(norm) / n + + if norm == 0: + return + + n0 = 0 + n1 = n + + eps = ctx.eps / (100 * n) + maxits = ctx.dps * 4 + + its = totalits = 0 + + while 1: + # kressner p.32 algo 3 + # the active submatrix is A[n0:n1,n0:n1] + + k = n0 + + while k + 1 < n1: + s = abs(ctx.re(A[k,k])) + abs(ctx.im(A[k,k])) + abs(ctx.re(A[k+1,k+1])) + abs(ctx.im(A[k+1,k+1])) + if s < eps * norm: + s = norm + if abs(A[k+1,k]) < eps * s: + break + k += 1 + + if k + 1 < n1: + # deflation found at position (k+1, k) + + A[k+1,k] = 0 + n0 = k + 1 + + its = 0 + + if n0 + 1 >= n1: + # block of size at most two has converged + n0 = 0 + n1 = k + 1 + if n1 < 2: + # QR algorithm has converged + return + else: + if (its % 30) == 10: + # exceptional shift + shift = A[n1-1,n1-2] + elif (its % 30) == 20: + # exceptional shift + shift = abs(A[n1-1,n1-2]) + elif (its % 30) == 29: + # exceptional shift + shift = norm + else: + # A = [ a b ] det(x-A)=x*x-x*tr(A)+det(A) + # [ c d ] + # + # eigenvalues bad: (tr(A)+sqrt((tr(A))**2-4*det(A)))/2 + # bad because of cancellation if |c| is small and |a-d| is small, too. 
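+            # rewriting the discriminant via (tr(A))**2 - 4*det(A)
+            # = (a-d)**2 + 4*b*c avoids subtracting nearly equal
+            # quantities: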
+ # + # eigenvalues good: (a+d+sqrt((a-d)**2+4*b*c))/2 + + t = A[n1-2,n1-2] + A[n1-1,n1-1] + s = (A[n1-1,n1-1] - A[n1-2,n1-2]) ** 2 + 4 * A[n1-1,n1-2] * A[n1-2,n1-1] + if ctx.re(s) > 0: + s = ctx.sqrt(s) + else: + s = ctx.sqrt(-s) * 1j + a = (t + s) / 2 + b = (t - s) / 2 + if abs(A[n1-1,n1-1] - a) > abs(A[n1-1,n1-1] - b): + shift = b + else: + shift = a + + its += 1 + totalits += 1 + + qr_step(ctx, n0, n1, A, Q, shift) + + if its > maxits: + raise RuntimeError("qr: failed to converge after %d steps" % its) + + +@defun +def schur(ctx, A, overwrite_a = False): + """ + This routine computes the Schur decomposition of a square matrix A. + Given A, an unitary matrix Q is determined such that + + Q' A Q = R and Q' Q = Q Q' = 1 + + where R is an upper right triangular matrix. Here ' denotes the + hermitian transpose (i.e. transposition and conjugation). + + input: + A : a real or complex square matrix + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + Q : an unitary matrix + R : an upper right triangular matrix + + return value: (Q, R) + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> Q, R = mp.schur(A) + >>> mp.nprint(R, 3) # doctest:+SKIP + [2.0 0.417 -2.53] + [0.0 4.0 -4.74] + [0.0 0.0 9.0] + >>> print(mp.chop(A - Q * R * Q.transpose_conj())) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + warning: The Schur decomposition is not unique. + """ + + n = A.rows + + if n == 1: + return (ctx.matrix([[1]]), A) + + if not overwrite_a: + A = A.copy() + + T = ctx.matrix(n, 1) + + hessenberg_reduce_0(ctx, A, T) + Q = A.copy() + hessenberg_reduce_1(ctx, Q, T) + + for x in xrange(n): + for y in xrange(x + 2, n): + A[y,x] = 0 + + hessenberg_qr(ctx, A, Q) + + return Q, A + + +def eig_tr_r(ctx, A): + """ + This routine calculates the right eigenvectors of an upper right triangular matrix. + + input: + A an upper right triangular matrix + + output: + ER a matrix whose columns form the right eigenvectors of A + + return value: ER + """ + + # this subroutine is inspired by the lapack routines ctrevc.f,clatrs.f + + n = A.rows + + ER = ctx.eye(n) + + eps = ctx.eps + + unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) + # since mpmath effectively has no limits on the exponent, we simply scale doubles up + # original double has prec*20 + + smlnum = unfl * (n / eps) + simin = 1 / ctx.sqrt(eps) + + rmax = 1 + + for i in xrange(1, n): + s = A[i,i] + + smin = max(eps * abs(s), smlnum) + + for j in xrange(i - 1, -1, -1): + + r = 0 + for k in xrange(j + 1, i + 1): + r += A[j,k] * ER[k,i] + + t = A[j,j] - s + if abs(t) < smin: + t = smin + + r = -r / t + ER[j,i] = r + + rmax = max(rmax, abs(r)) + if rmax > simin: + for k in xrange(j, i+1): + ER[k,i] /= rmax + rmax = 1 + + if rmax != 1: + for k in xrange(0, i + 1): + ER[k,i] /= rmax + + return ER + +def eig_tr_l(ctx, A): + """ + This routine calculates the left eigenvectors of an upper right triangular matrix. 
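+
+    A left eigenvector x of A is a row vector satisfying x A = lambda x.
+    Since A is upper triangular, its eigenvalues are the diagonal entries
+    A[i,i], and the i-th left eigenvector has zeros in its first i
+    components.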
+ + input: + A an upper right triangular matrix + + output: + EL a matrix whose rows form the left eigenvectors of A + + return value: EL + """ + + n = A.rows + + EL = ctx.eye(n) + + eps = ctx.eps + + unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) + # since mpmath effectively has no limits on the exponent, we simply scale doubles up + # original double has prec*20 + + smlnum = unfl * (n / eps) + simin = 1 / ctx.sqrt(eps) + + rmax = 1 + + for i in xrange(0, n - 1): + s = A[i,i] + + smin = max(eps * abs(s), smlnum) + + for j in xrange(i + 1, n): + + r = 0 + for k in xrange(i, j): + r += EL[i,k] * A[k,j] + + t = A[j,j] - s + if abs(t) < smin: + t = smin + + r = -r / t + EL[i,j] = r + + rmax = max(rmax, abs(r)) + if rmax > simin: + for k in xrange(i, j + 1): + EL[i,k] /= rmax + rmax = 1 + + if rmax != 1: + for k in xrange(i, n): + EL[i,k] /= rmax + + return EL + +@defun +def eig(ctx, A, left = False, right = True, overwrite_a = False): + """ + This routine computes the eigenvalues and optionally the left and right + eigenvectors of a square matrix A. Given A, a vector E and matrices ER + and EL are calculated such that + + A ER[:,i] = E[i] ER[:,i] + EL[i,:] A = EL[i,:] E[i] + + E contains the eigenvalues of A. The columns of ER contain the right eigenvectors + of A whereas the rows of EL contain the left eigenvectors. + + + input: + A : a real or complex square matrix of shape (n, n) + left : if true, the left eigenvectors are calculated. + right : if true, the right eigenvectors are calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + E : a list of length n containing the eigenvalues of A. + ER : a matrix whose columns contain the right eigenvectors of A. + EL : a matrix whose rows contain the left eigenvectors of A. + + return values: + E if left and right are both false. + (E, ER) if right is true and left is false. + (E, EL) if left is true and right is false. + (E, EL, ER) if left and right are true. + + + examples: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> E, ER = mp.eig(A) + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + + >>> E, EL, ER = mp.eig(A,left = True, right = True) + >>> E, EL, ER = mp.eig_sort(E, EL, ER) + >>> mp.nprint(E) + [2.0, 4.0, 9.0] + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) + [0.0 0.0 0.0] + + warning: + - If there are multiple eigenvalues, the eigenvectors do not necessarily + span the whole vectorspace, i.e. ER and EL may have not full rank. + Furthermore in that case the eigenvectors are numerical ill-conditioned. + - In the general case the eigenvalues have no natural order. + + see also: + - eigh (or eigsy, eighe) for the symmetric eigenvalue problem. 
+ - eig_sort for sorting of eigenvalues and eigenvectors + """ + + n = A.rows + + if n == 1: + if left and (not right): + return ([A[0]], ctx.matrix([[1]])) + + if right and (not left): + return ([A[0]], ctx.matrix([[1]])) + + return ([A[0]], ctx.matrix([[1]]), ctx.matrix([[1]])) + + if not overwrite_a: + A = A.copy() + + T = ctx.zeros(n, 1) + + hessenberg_reduce_0(ctx, A, T) + + if left or right: + Q = A.copy() + hessenberg_reduce_1(ctx, Q, T) + else: + Q = False + + for x in xrange(n): + for y in xrange(x + 2, n): + A[y,x] = 0 + + hessenberg_qr(ctx, A, Q) + + E = [0 for i in xrange(n)] + for i in xrange(n): + E[i] = A[i,i] + + if not (left or right): + return E + + if left: + EL = eig_tr_l(ctx, A) + EL = EL * Q.transpose_conj() + + if right: + ER = eig_tr_r(ctx, A) + ER = Q * ER + + if left and (not right): + return (E, EL) + + if right and (not left): + return (E, ER) + + return (E, EL, ER) + +@defun +def eig_sort(ctx, E, EL = False, ER = False, f = "real"): + """ + This routine sorts the eigenvalues and eigenvectors delivered by ``eig``. + + parameters: + E : the eigenvalues as delivered by eig + EL : the left eigenvectors as delivered by eig, or false + ER : the right eigenvectors as delivered by eig, or false + f : either a string ("real" sort by increasing real part, "imag" sort by + increasing imag part, "abs" sort by absolute value) or a function + mapping complexs to the reals, i.e. ``f = lambda x: -mp.re(x) `` + would sort the eigenvalues by decreasing real part. + + return values: + E if EL and ER are both false. + (E, ER) if ER is not false and left is false. + (E, EL) if EL is not false and right is false. + (E, EL, ER) if EL and ER are not false. + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> E, EL, ER = mp.eig(A,left = True, right = True) + >>> E, EL, ER = mp.eig_sort(E, EL, ER) + >>> mp.nprint(E) + [2.0, 4.0, 9.0] + >>> E, EL, ER = mp.eig_sort(E, EL, ER,f = lambda x: -mp.re(x)) + >>> mp.nprint(E) + [9.0, 4.0, 2.0] + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) + [0.0 0.0 0.0] + """ + + if isinstance(f, str): + if f == "real": + f = ctx.re + elif f == "imag": + f = ctx.im + elif f == "abs": + f = abs + else: + raise RuntimeError("unknown function %s" % f) + + n = len(E) + + # Sort eigenvalues (bubble-sort) + + for i in xrange(n): + imax = i + s = f(E[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = f(E[j]) + if c < s: + s = c + imax = j + + if imax != i: + # swap eigenvalues + + z = E[i] + E[i] = E[imax] + E[imax] = z + + if not isinstance(EL, bool): + for j in xrange(n): + z = EL[i,j] + EL[i,j] = EL[imax,j] + EL[imax,j] = z + + if not isinstance(ER, bool): + for j in xrange(n): + z = ER[j,i] + ER[j,i] = ER[j,imax] + ER[j,imax] = z + + if isinstance(EL, bool) and isinstance(ER, bool): + return E + + if isinstance(EL, bool) and not(isinstance(ER, bool)): + return (E, ER) + + if isinstance(ER, bool) and not(isinstance(EL, bool)): + return (E, EL) + + return (E, EL, ER) diff --git a/MLPY/Lib/site-packages/mpmath/matrices/eigen_symmetric.py b/MLPY/Lib/site-packages/mpmath/matrices/eigen_symmetric.py new file mode 100644 index 0000000000000000000000000000000000000000..c82c0bb061d22c37a89f82a0b9bdab3e9ba7ddde --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/eigen_symmetric.py @@ -0,0 +1,1807 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + 
+################################################################################################## +# module for the symmetric eigenvalue problem +# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) +# +# todo: +# - implement balancing +# +################################################################################################## + +""" +The symmetric eigenvalue problem. +--------------------------------- + +This file contains routines for the symmetric eigenvalue problem. + +high level routines: + + eigsy : real symmetric (ordinary) eigenvalue problem + eighe : complex hermitian (ordinary) eigenvalue problem + eigh : unified interface for eigsy and eighe + svd_r : singular value decomposition for real matrices + svd_c : singular value decomposition for complex matrices + svd : unified interface for svd_r and svd_c + + +low level routines: + + r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix + c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix + c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0 + c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0 + tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem + svd_r_raw : raw singular value decomposition for real matrices + svd_c_raw : raw singular value decomposition for complex matrices +""" + +from ..libmp.backend import xrange +from .eigen import defun + + +def r_sy_tridiag(ctx, A, D, E, calc_ev = True): + """ + This routine transforms a real symmetric matrix A to a real symmetric + tridiagonal matrix T using an orthogonal similarity transformation: + Q' * A * Q = T (here ' denotes the matrix transpose). + The orthogonal matrix Q is build up from Householder reflectors. + + parameters: + A (input/output) On input, A contains the real symmetric matrix of + dimension (n,n). On output, if calc_ev is true, A contains the + orthogonal matrix Q, otherwise A is destroyed. + + D (output) real array of length n, contains the diagonal elements + of the tridiagonal matrix + + E (output) real array of length n, contains the offdiagonal elements + of the tridiagonal matrix in E[0:(n-1)] where is the dimension of + the matrix A. E[n-1] is undefined. + + calc_ev (input) If calc_ev is true, this routine explicitly calculates the + orthogonal matrix Q which is then returned in A. If calc_ev is + false, Q is not explicitly calculated resulting in a shorter run time. + + This routine is a python translation of the fortran routine tred2.f in the + software library EISPACK (see netlib.org) which itself is based on the algol + procedure tred2 described in: + - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson + - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) + + For a good introduction to Householder reflections, see also + Stoer, Bulirsch - Introduction to Numerical Analysis. 
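+
+    A single Householder reflector has the form P = 1 - 2*v*v'/(v'*v).
+    It is orthogonal and symmetric, and n-2 such reflectors, each zeroing
+    one column below the subdiagonal, are accumulated to form Q.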
+ """ + + # note : the vector v of the i-th houshoulder reflector is stored in a[(i+1):,i] + # whereas v/ is stored in a[i,(i+1):] + + n = A.rows + for i in xrange(n - 1, 0, -1): + # scale the vector + + scale = 0 + for k in xrange(0, i): + scale += abs(A[k,i]) + + scale_inv = 0 + if scale != 0: + scale_inv = 1/scale + + # sadly there are floating point numbers not equal to zero whose reciprocal is infinity + + if i == 1 or scale == 0 or ctx.isinf(scale_inv): + E[i] = A[i-1,i] # nothing to do + D[i] = 0 + continue + + # calculate parameters for housholder transformation + + H = 0 + for k in xrange(0, i): + A[k,i] *= scale_inv + H += A[k,i] * A[k,i] + + F = A[i-1,i] + G = ctx.sqrt(H) + if F > 0: + G = -G + E[i] = scale * G + H -= F * G + A[i-1,i] = F - G + F = 0 + + # apply housholder transformation + + for j in xrange(0, i): + if calc_ev: + A[i,j] = A[j,i] / H + + G = 0 # calculate A*U + for k in xrange(0, j + 1): + G += A[k,j] * A[k,i] + for k in xrange(j + 1, i): + G += A[j,k] * A[k,i] + + E[j] = G / H # calculate P + F += E[j] * A[j,i] + + HH = F / (2 * H) + + for j in xrange(0, i): # calculate reduced A + F = A[j,i] + G = E[j] - HH * F # calculate Q + E[j] = G + + for k in xrange(0, j + 1): + A[k,j] -= F * E[k] + G * A[k,i] + + D[i] = H + + for i in xrange(1, n): # better for compatibility + E[i-1] = E[i] + E[n-1] = 0 + + if calc_ev: + D[0] = 0 + for i in xrange(0, n): + if D[i] != 0: + for j in xrange(0, i): # accumulate transformation matrices + G = 0 + for k in xrange(0, i): + G += A[i,k] * A[k,j] + for k in xrange(0, i): + A[k,j] -= G * A[k,i] + + D[i] = A[i,i] + A[i,i] = 1 + + for j in xrange(0, i): + A[j,i] = A[i,j] = 0 + else: + for i in xrange(0, n): + D[i] = A[i,i] + + + + + +def c_he_tridiag_0(ctx, A, D, E, T): + """ + This routine transforms a complex hermitian matrix A to a real symmetric + tridiagonal matrix T using an unitary similarity transformation: + Q' * A * Q = T (here ' denotes the hermitian matrix transpose, + i.e. transposition und conjugation). + The unitary matrix Q is build up from Householder reflectors and + an unitary diagonal matrix. + + parameters: + A (input/output) On input, A contains the complex hermitian matrix + of dimension (n,n). On output, A contains the unitary matrix Q + in compressed form. + + D (output) real array of length n, contains the diagonal elements + of the tridiagonal matrix. + + E (output) real array of length n, contains the offdiagonal elements + of the tridiagonal matrix in E[0:(n-1)] where is the dimension of + the matrix A. E[n-1] is undefined. + + T (output) complex array of length n, contains a unitary diagonal + matrix. + + This routine is a python translation (in slightly modified form) of the fortran + routine htridi.f in the software library EISPACK (see netlib.org) which itself + is a complex version of the algol procedure tred1 described in: + - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson + - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) + + For a good introduction to Householder reflections, see also + Stoer, Bulirsch - Introduction to Numerical Analysis. 
+ """ + + n = A.rows + T[n-1] = 1 + for i in xrange(n - 1, 0, -1): + + # scale the vector + + scale = 0 + for k in xrange(0, i): + scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i])) + + scale_inv = 0 + if scale != 0: + scale_inv = 1 / scale + + # sadly there are floating point numbers not equal to zero whose reciprocal is infinity + + if scale == 0 or ctx.isinf(scale_inv): + E[i] = 0 + D[i] = 0 + T[i-1] = 1 + continue + + if i == 1: + F = A[i-1,i] + f = abs(F) + E[i] = f + D[i] = 0 + if f != 0: + T[i-1] = T[i] * F / f + else: + T[i-1] = T[i] + continue + + # calculate parameters for housholder transformation + + H = 0 + for k in xrange(0, i): + A[k,i] *= scale_inv + rr = ctx.re(A[k,i]) + ii = ctx.im(A[k,i]) + H += rr * rr + ii * ii + + F = A[i-1,i] + f = abs(F) + G = ctx.sqrt(H) + H += G * f + E[i] = scale * G + if f != 0: + F = F / f + TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage + G *= F + else: + TZ = -T[i] # T[i-1]=-T[i] + A[i-1,i] += G + F = 0 + + # apply housholder transformation + + for j in xrange(0, i): + A[i,j] = A[j,i] / H + + G = 0 # calculate A*U + for k in xrange(0, j + 1): + G += ctx.conj(A[k,j]) * A[k,i] + for k in xrange(j + 1, i): + G += A[j,k] * A[k,i] + + T[j] = G / H # calculate P + F += ctx.conj(T[j]) * A[j,i] + + HH = F / (2 * H) + + for j in xrange(0, i): # calculate reduced A + F = A[j,i] + G = T[j] - HH * F # calculate Q + T[j] = G + + for k in xrange(0, j + 1): + A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i] + # as we use the lower left part for storage + # we have to use the transpose of the normal formula + + T[i-1] = TZ + D[i] = H + + for i in xrange(1, n): # better for compatibility + E[i-1] = E[i] + E[n-1] = 0 + + D[0] = 0 + for i in xrange(0, n): + zw = D[i] + D[i] = ctx.re(A[i,i]) + A[i,i] = zw + + + + + + + +def c_he_tridiag_1(ctx, A, T): + """ + This routine forms the unitary matrix Q described in c_he_tridiag_0. + + parameters: + A (input/output) On input, A is the same matrix as delivered by + c_he_tridiag_0. On output, A is set to Q. + + T (input) On input, T is the same array as delivered by c_he_tridiag_0. + + """ + + n = A.rows + + for i in xrange(0, n): + if A[i,i] != 0: + for j in xrange(0, i): + G = 0 + for k in xrange(0, i): + G += ctx.conj(A[i,k]) * A[k,j] + for k in xrange(0, i): + A[k,j] -= G * A[k,i] + + A[i,i] = 1 + + for j in xrange(0, i): + A[j,i] = A[i,j] = 0 + + for i in xrange(0, n): + for k in xrange(0, n): + A[i,k] *= T[k] + + + + +def c_he_tridiag_2(ctx, A, T, B): + """ + This routine applied the unitary matrix Q described in c_he_tridiag_0 + onto the the matrix B, i.e. it forms Q*B. + + parameters: + A (input) On input, A is the same matrix as delivered by c_he_tridiag_0. + + T (input) On input, T is the same array as delivered by c_he_tridiag_0. + + B (input/output) On input, B is a complex matrix. On output B is replaced + by Q*B. + + This routine is a python translation of the fortran routine htribk.f in the + software library EISPACK (see netlib.org). See c_he_tridiag_0 for more + references. + """ + + n = A.rows + + for i in xrange(0, n): + for k in xrange(0, n): + B[k,i] *= T[k] + + for i in xrange(0, n): + if A[i,i] != 0: + for j in xrange(0, n): + G = 0 + for k in xrange(0, i): + G += ctx.conj(A[i,k]) * B[k,j] + for k in xrange(0, i): + B[k,j] -= G * A[k,i] + + + + + +def tridiag_eigen(ctx, d, e, z = False): + """ + This subroutine find the eigenvalues and the first components of the + eigenvectors of a real symmetric tridiagonal matrix using the implicit + QL method. 
+ + parameters: + + d (input/output) real array of length n. on input, d contains the diagonal + elements of the input matrix. on output, d contains the eigenvalues in + ascending order. + + e (input) real array of length n. on input, e contains the offdiagonal + elements of the input matrix in e[0:(n-1)]. On output, e has been + destroyed. + + z (input/output) If z is equal to False, no eigenvectors will be computed. + Otherwise on input z should have the format z[0:m,0:n] (i.e. a real or + complex matrix of dimension (m,n) ). On output this matrix will be + multiplied by the matrix of the eigenvectors (i.e. the columns of this + matrix are the eigenvectors): z --> z*EV + That means if z[i,j]={1 if j==j; 0 otherwise} on input, then on output + z will contain the first m components of the eigenvectors. That means + if m is equal to n, the i-th eigenvector will be z[:,i]. + + This routine is a python translation (in slightly modified form) of the + fortran routine imtql2.f in the software library EISPACK (see netlib.org) + which itself is based on the algol procudure imtql2 desribed in: + - num. math. 12, p. 377-383(1968) by matrin and wilkinson + - modified in num. math. 15, p. 450(1970) by dubrulle + - handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971) + See also the routine gaussq.f in netlog.org or acm algorithm 726. + """ + + n = len(d) + e[n-1] = 0 + iterlim = 2 * ctx.dps + + for l in xrange(n): + j = 0 + while 1: + m = l + while 1: + # look for a small subdiagonal element + if m + 1 == n: + break + if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])): + break + m = m + 1 + if m == l: + break + + if j >= iterlim: + raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim) + + j += 1 + + # form shift + + p = d[l] + g = (d[l + 1] - p) / (2 * e[l]) + r = ctx.hypot(g, 1) + + if g < 0: + s = g - r + else: + s = g + r + + g = d[m] - p + e[l] / s + + s, c, p = 1, 1, 0 + + for i in xrange(m - 1, l - 1, -1): + f = s * e[i] + b = c * e[i] + if abs(f) > abs(g): # this here is a slight improvement also used in gaussq.f or acm algorithm 726. + c = g / f + r = ctx.hypot(c, 1) + e[i + 1] = f * r + s = 1 / r + c = c * s + else: + s = f / g + r = ctx.hypot(s, 1) + e[i + 1] = g * r + c = 1 / r + s = s * c + g = d[i + 1] - p + r = (d[i] - g) * s + 2 * c * b + p = s * r + d[i + 1] = g + p + g = c * r - b + + if not isinstance(z, bool): + # calculate eigenvectors + for w in xrange(z.rows): + f = z[w,i+1] + z[w,i+1] = s * z[w,i] + c * f + z[w,i ] = c * z[w,i] - s * f + + d[l] = d[l] - p + e[l] = g + e[m] = 0 + + for ii in xrange(1, n): + # sort eigenvalues and eigenvectors (bubble-sort) + i = ii - 1 + k = i + p = d[i] + for j in xrange(ii, n): + if d[j] >= p: + continue + k = j + p = d[k] + if k == i: + continue + d[k] = d[i] + d[i] = p + + if not isinstance(z, bool): + for w in xrange(z.rows): + p = z[w,i] + z[w,i] = z[w,k] + z[w,k] = p + +######################################################################################## + +@defun +def eigsy(ctx, A, eigvals_only = False, overwrite_a = False): + """ + This routine solves the (ordinary) eigenvalue problem for a real symmetric + square matrix A. Given A, an orthogonal matrix Q is calculated which + diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) is a diagonal matrix whose diagonal is E. + ' denotes the transpose. 
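+
+    Equivalently, A = Q diag(E) Q', the spectral decomposition of A.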
+ + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + + input: + + A: real matrix of format (n,n) which is symmetric + (i.e. A=A' or A[i,j]=A[j,i]) + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. + + Q: orthogonal matrix of format (n,n). contains the eigenvectors + of A as columns. + + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, 2], [2, 0]]) + >>> E = mp.eigsy(A, eigvals_only = True) + >>> print(E) + [-1.0] + [ 4.0] + + >>> A = mp.matrix([[1, 2], [2, 3]]) + >>> E, Q = mp.eigsy(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eighe, eigh, eig + """ + + if not overwrite_a: + A = A.copy() + + d = ctx.zeros(A.rows, 1) + e = ctx.zeros(A.rows, 1) + + if eigvals_only: + r_sy_tridiag(ctx, A, d, e, calc_ev = False) + tridiag_eigen(ctx, d, e, False) + return d + else: + r_sy_tridiag(ctx, A, d, e, calc_ev = True) + tridiag_eigen(ctx, d, e, A) + return (d, A) + + +@defun +def eighe(ctx, A, eigvals_only = False, overwrite_a = False): + """ + This routine solves the (ordinary) eigenvalue problem for a complex + hermitian square matrix A. Given A, an unitary matrix Q is calculated which + diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) a is diagonal matrix whose diagonal is E. + ' denotes the hermitian transpose (i.e. ordinary transposition and + complex conjugation). + + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + + input: + + A: complex matrix of format (n,n) which is hermitian + (i.e. A=A' or A[i,j]=conj(A[j,i])) + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. + + Q: unitary matrix of format (n,n). contains the eigenvectors + of A as columns. + + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]]) + >>> E = mp.eighe(A, eigvals_only = True) + >>> print(E) + [-4.0] + [ 3.0] + + >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) + >>> E, Q = mp.eighe(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eigsy, eigh, eig + """ + + if not overwrite_a: + A = A.copy() + + d = ctx.zeros(A.rows, 1) + e = ctx.zeros(A.rows, 1) + t = ctx.zeros(A.rows, 1) + + if eigvals_only: + c_he_tridiag_0(ctx, A, d, e, t) + tridiag_eigen(ctx, d, e, False) + return d + else: + c_he_tridiag_0(ctx, A, d, e, t) + B = ctx.eye(A.rows) + tridiag_eigen(ctx, d, e, B) + c_he_tridiag_2(ctx, A, t, B) + return (d, B) + +@defun +def eigh(ctx, A, eigvals_only = False, overwrite_a = False): + """ + "eigh" is a unified interface for "eigsy" and "eighe". Depending on + whether A is real or complex the appropriate function is called. + + This routine solves the (ordinary) eigenvalue problem for a real symmetric + or complex hermitian square matrix A. 
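+    (Whether A counts as complex is decided by inspecting its entries:
+    if any entry is an mpc instance, eighe is used, otherwise eigsy.)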
Given A, an orthogonal (A real) or + unitary (A complex) matrix Q is calculated which diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) a is diagonal matrix whose diagonal is E. + ' denotes the hermitian transpose (i.e. ordinary transposition and + complex conjugation). + + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + input: + + A: a real or complex square matrix of format (n,n) which is symmetric + (i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])). + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. + + Q: an orthogonal or unitary matrix of format (n,n). contains the + eigenvectors of A as columns. + + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, 2], [2, 0]]) + >>> E = mp.eigh(A, eigvals_only = True) + >>> print(E) + [-1.0] + [ 4.0] + + >>> A = mp.matrix([[1, 2], [2, 3]]) + >>> E, Q = mp.eigh(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) + >>> E, Q = mp.eigh(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eigsy, eighe, eig + """ + + iscomplex = any(type(x) is ctx.mpc for x in A) + + if iscomplex: + return ctx.eighe(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) + else: + return ctx.eigsy(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) + + +@defun +def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0): + """ + This routine calulates gaussian quadrature rules for different + families of orthogonal polynomials. Let (a, b) be an interval, + W(x) a positive weight function and n a positive integer. + Then the purpose of this routine is to calculate pairs (x_k, w_k) + for k=0, 1, 2, ... (n-1) which give + + int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1)) + + exact for all polynomials F(x) of degree (strictly) less than 2*n. For all + integrable functions F(x) the sum is a (more or less) good approximation to + the integral. The x_k are called nodes (which are the zeros of the + related orthogonal polynomials) and the w_k are called the weights. + + parameters + n (input) The degree of the quadrature rule, i.e. its number of + nodes. + + qtype (input) The family of orthogonal polynmomials for which to + compute the quadrature rule. See the list below. + + alpha (input) real number, used as parameter for some orthogonal + polynomials + + beta (input) real number, used as parameter for some orthogonal + polynomials. + + return value + + (X, W) a pair of two real arrays where x_k = X[k] and w_k = W[k]. 
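+
+    Following Golub and Welsch, the nodes x_k are computed as the
+    eigenvalues of the symmetric tridiagonal Jacobi matrix built from the
+    three-term recurrence coefficients of the chosen polynomial family,
+    and each weight is w_k = mu_0 * z_k**2, where z_k is the first
+    component of the corresponding normalized eigenvector and mu_0 is
+    the zeroth moment int(W(x), x = a..b).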
+ + + orthogonal polynomials: + + qtype polynomial + ----- ---------- + + "legendre" Legendre polynomials, W(x)=1 on the interval (-1, +1) + "legendre01" shifted Legendre polynomials, W(x)=1 on the interval (0, +1) + "hermite" Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity) + "laguerre" Laguerre polynomials, W(x)=exp(-x) on (0,+infinity) + "glaguerre" generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha + on (0, +infinity) + "chebyshev1" Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x) + on (-1, +1) + "chebyshev2" Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x) + on (-1, +1) + "jacobi" Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1) + with alpha>-1 and beta>-1 + + examples: + >>> from mpmath import mp + >>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7 + >>> X, W = mp.gauss_quadrature(5, "hermite") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> B = mp.sqrt(mp.pi) * 57 / 16 + >>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf]) + >>> mp.nprint((mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))) + (0.0, 0.0) + + >>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11 + >>> X, W = mp.gauss_quadrature(3, "laguerre") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> B = 76 + >>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf]) + >>> mp.nprint(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10)) + .0 + + # orthogonality of the chebyshev polynomials: + >>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x) + >>> X, W = mp.gauss_quadrature(3, "chebyshev1") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> print(mp.chop(A, tol = 1e-10)) + 0.0 + + references: + - golub and welsch, "calculations of gaussian quadrature rules", mathematics of + computation 23, p. 221-230 (1969) + - golub, "some modified matrix eigenvalue problems", siam review 15, p. 318-334 (1973) + - stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966) + + See also the routine gaussq.f in netlog.org or ACM Transactions on + Mathematical Software algorithm 726. + """ + + d = ctx.zeros(n, 1) + e = ctx.zeros(n, 1) + z = ctx.zeros(1, n) + + z[0,0] = 1 + + if qtype == "legendre": + # legendre on the range -1 +1 , abramowitz, table 25.4, p.916 + w = 2 + for i in xrange(n): + j = i + 1 + e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1))) + elif qtype == "legendre01": + # legendre shifted to 0 1 , abramowitz, table 25.8, p.921 + w = 1 + for i in xrange(n): + d[i] = 1 / ctx.mpf(2) + j = i + 1 + e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4))) + elif qtype == "hermite": + # hermite on the range -inf +inf , abramowitz, table 25.10,p.924 + w = ctx.sqrt(ctx.pi) + for i in xrange(n): + j = i + 1 + e[i] = ctx.sqrt(j / ctx.mpf(2)) + elif qtype == "laguerre": + # laguerre on the range 0 +inf , abramowitz, table 25.9, p. 
923 + w = 1 + for i in xrange(n): + j = i + 1 + d[i] = 2 * j - 1 + e[i] = j + elif qtype=="chebyshev1": + # chebyshev polynimials of the first kind + w = ctx.pi + for i in xrange(n): + e[i] = 1 / ctx.mpf(2) + e[0] = ctx.sqrt(1 / ctx.mpf(2)) + elif qtype == "chebyshev2": + # chebyshev polynimials of the second kind + w = ctx.pi / 2 + for i in xrange(n): + e[i] = 1 / ctx.mpf(2) + elif qtype == "glaguerre": + # generalized laguerre on the range 0 +inf + w = ctx.gamma(1 + alpha) + for i in xrange(n): + j = i + 1 + d[i] = 2 * j - 1 + alpha + e[i] = ctx.sqrt(j * (j + alpha)) + elif qtype == "jacobi": + # jacobi polynomials + alpha = ctx.mpf(alpha) + beta = ctx.mpf(beta) + ab = alpha + beta + abi = ab + 2 + w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi) + d[0] = (beta - alpha) / abi + e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi))) + a2b2 = beta * beta - alpha * alpha + for i in xrange(1, n): + j = i + 1 + abi = 2 * j + ab + d[i] = a2b2 / ((abi - 2) * abi) + e[i] = ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi)) + elif isinstance(qtype, str): + raise ValueError("unknown quadrature rule \"%s\"" % qtype) + elif not isinstance(qtype, str): + w = qtype(d, e) + else: + assert 0 + + tridiag_eigen(ctx, d, e, z) + + for i in xrange(len(z)): + z[i] *= z[i] + + z = z.transpose() + return (d, w * z) + +################################################################################################## +################################################################################################## +################################################################################################## + +def svd_r_raw(ctx, A, V = False, calc_u = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal matrices U and V are calculated such that + + A = U S V + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + The diagonal elements of S are the singular values of A, i.e. the + squareroots of the eigenvalues of A' A or A A'. Here ' denotes the transpose. + Householder bidiagonalization and a variant of the QR algorithm is used. + + overview of the matrices : + + A : m*n A gets replaced by U + U : m*n U replaces A. If n>m then only the first m*m block of U is + non-zero. column-orthogonal: U' U = B + here B is a n*n matrix whose first min(m,n) diagonal + elements are 1 and all other elements are zero. + S : n*n diagonal matrix, only the diagonal elements are stored in + the array S. only the first min(m,n) diagonal elements are non-zero. + V : n*n orthogonal: V V' = V' V = 1 + + parameters: + A (input/output) On input, A contains a real matrix of shape m*n. + On output, if calc_u is true A contains the column-orthogonal + matrix U; otherwise A is simply used as workspace and thus destroyed. + + V (input/output) if false, the matrix V is not calculated. otherwise + V must be a matrix of shape n*n. + + calc_u (input) If true, the matrix U is calculated and replaces A. + if false, U is not calculated and A is simply destroyed + + return value: + S an array of length n containing the singular values of A sorted by + decreasing magnitude. only the first min(m,n) elements are non-zero. + + This routine is a python translation of the fortran routine svd.f in the + software library EISPACK (see netlib.org) which itself is based on the + algol procedure svd described in: + - num. math. 14, 403-420(1970) by golub and reinsch. 
+ - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). + + """ + + m, n = A.rows, A.cols + + S = ctx.zeros(n, 1) + + # work is a temporary array of size n + work = ctx.zeros(n, 1) + + g = scale = anorm = 0 + maxits = 3 * ctx.dps + + for i in xrange(n): # householder reduction to bidiagonal form + work[i] = scale*g + g = s = scale = 0 + if i < m: + for k in xrange(i, m): + scale += ctx.fabs(A[k,i]) + if scale != 0: + for k in xrange(i, m): + A[k,i] /= scale + s += A[k,i] * A[k,i] + f = A[i,i] + g = -ctx.sqrt(s) + if f < 0: + g = -g + h = f * g - s + A[i,i] = f - g + for j in xrange(i+1, n): + s = 0 + for k in xrange(i, m): + s += A[k,i] * A[k,j] + f = s / h + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for k in xrange(i,m): + A[k,i] *= scale + + S[i] = scale * g + g = s = scale = 0 + + if i < m and i != n - 1: + for k in xrange(i+1, n): + scale += ctx.fabs(A[i,k]) + if scale: + for k in xrange(i+1, n): + A[i,k] /= scale + s += A[i,k] * A[i,k] + f = A[i,i+1] + g = -ctx.sqrt(s) + if f < 0: + g = -g + h = f * g - s + A[i,i+1] = f - g + + for k in xrange(i+1, n): + work[k] = A[i,k] / h + + for j in xrange(i+1, m): + s = 0 + for k in xrange(i+1, n): + s += A[j,k] * A[i,k] + for k in xrange(i+1, n): + A[j,k] += s * work[k] + + for k in xrange(i+1, n): + A[i,k] *= scale + + anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i])) + + if not isinstance(V, bool): + for i in xrange(n-2, -1, -1): # accumulation of right hand transformations + V[i+1,i+1] = 1 + + if work[i+1] != 0: + for j in xrange(i+1, n): + V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1] + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, n): + s += A[i,k] * V[j,k] + for k in xrange(i+1, n): + V[j,k] += s * V[i,k] + + for j in xrange(i+1, n): + V[j,i] = V[i,j] = 0 + + V[0,0] = 1 + + if m= maxits: + raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) + + x = S[l] # shift from bottom 2 by 2 minor + nm = k-1 + y = S[nm] + g = work[nm] + h = work[k] + f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y) + g = ctx.hypot(f, 1) + if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x + else: f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x + + c = s = 1 # next qt transformation + + for j in xrange(l, nm + 1): + g = work[j+1] + y = S[j+1] + h = s * g + g = c * g + z = ctx.hypot(f, h) + work[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + if not isinstance(V, bool): + for jj in xrange(n): + x = V[j ,jj] + z = V[j+1,jj] + V[j ,jj]= x * c + z * s + V[j+1 ,jj]= z * c - x * s + z = ctx.hypot(f, h) + S[j] = z + if z != 0: # rotation can be arbitray if z=0 + z = 1 / z + c = f * z + s = h * z + f = c * g + s * y + x = c * y - s * g + + if calc_u: + for jj in xrange(m): + y = A[jj,j ] + z = A[jj,j+1] + A[jj,j ] = y * c + z * s + A[jj,j+1 ] = z * c - y * s + + work[l] = 0 + work[k] = f + S[k] = x + + ########################## + + # Sort singular values into decreasing order (bubble-sort) + + for i in xrange(n): + imax = i + s = ctx.fabs(S[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = ctx.fabs(S[j]) + if c > s: + s = c + imax = j + + if imax != i: + # swap singular values + + z = S[i] + S[i] = S[imax] + S[imax] = z + + if calc_u: + for j in xrange(m): + z = A[j,i] + A[j,i] = A[j,imax] + A[j,imax] = z + + if not isinstance(V, bool): + for j in xrange(n): + z = V[i,j] + V[i,j] = V[imax,j] + V[imax,j] = z + + return S + +####################### + +def svd_c_raw(ctx, A, V = False, calc_u = False): 
+ """ + This routine computes the singular value decomposition of a matrix A. + Given A, two unitary matrices U and V are calculated such that + + A = U S V + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + The diagonal elements of S are the singular values of A, i.e. the + squareroots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian + transpose (i.e. transposition and conjugation). Householder bidiagonalization + and a variant of the QR algorithm is used. + + overview of the matrices : + + A : m*n A gets replaced by U + U : m*n U replaces A. If n>m then only the first m*m block of U is + non-zero. column-unitary: U' U = B + here B is a n*n matrix whose first min(m,n) diagonal + elements are 1 and all other elements are zero. + S : n*n diagonal matrix, only the diagonal elements are stored in + the array S. only the first min(m,n) diagonal elements are non-zero. + V : n*n unitary: V V' = V' V = 1 + + parameters: + A (input/output) On input, A contains a complex matrix of shape m*n. + On output, if calc_u is true A contains the column-unitary + matrix U; otherwise A is simply used as workspace and thus destroyed. + + V (input/output) if false, the matrix V is not calculated. otherwise + V must be a matrix of shape n*n. + + calc_u (input) If true, the matrix U is calculated and replaces A. + if false, U is not calculated and A is simply destroyed + + return value: + S an array of length n containing the singular values of A sorted by + decreasing magnitude. only the first min(m,n) elements are non-zero. + + This routine is a python translation of the fortran routine svd.f in the + software library EISPACK (see netlib.org) which itself is based on the + algol procedure svd described in: + - num. math. 14, 403-420(1970) by golub and reinsch. + - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). 
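+
+    usage sketch (illustrative only, hence the skip markers; the call path is
+    assumed here, since in practice this raw routine is reached through the
+    high-level ``svd_c`` wrapper defined further below, which copies A and
+    slices the results):
+
+    >>> from mpmath import mp                      # doctest:+SKIP
+    >>> A = mp.matrix([[1+2j, 0], [0, 3-1j]])      # doctest:+SKIP
+    >>> V = mp.zeros(A.cols, A.cols)               # doctest:+SKIP
+    >>> S = svd_c_raw(mp, A, V, calc_u = True)     # doctest:+SKIP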
+ + """ + + m, n = A.rows, A.cols + + S = ctx.zeros(n, 1) + + # work is a temporary array of size n + work = ctx.zeros(n, 1) + lbeta = ctx.zeros(n, 1) + rbeta = ctx.zeros(n, 1) + dwork = ctx.zeros(n, 1) + + g = scale = anorm = 0 + maxits = 3 * ctx.dps + + for i in xrange(n): # householder reduction to bidiagonal form + dwork[i] = scale * g # dwork are the side-diagonal elements + g = s = scale = 0 + if i < m: + for k in xrange(i, m): + scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i])) + if scale != 0: + for k in xrange(i, m): + A[k,i] /= scale + ar = ctx.re(A[k,i]) + ai = ctx.im(A[k,i]) + s += ar * ar + ai * ai + f = A[i,i] + g = -ctx.sqrt(s) + if ctx.re(f) < 0: + beta = -g - ctx.conj(f) + g = -g + else: + beta = -g + ctx.conj(f) + beta /= ctx.conj(beta) + beta += 1 + h = 2 * (ctx.re(f) * g - s) + A[i,i] = f - g + beta /= h + lbeta[i] = (beta / scale) / scale + for j in xrange(i+1, n): + s = 0 + for k in xrange(i, m): + s += ctx.conj(A[k,i]) * A[k,j] + f = beta * s + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for k in xrange(i, m): + A[k,i] *= scale + + S[i] = scale * g # S are the diagonal elements + g = s = scale = 0 + + if i < m and i != n - 1: + for k in xrange(i+1, n): + scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k])) + if scale: + for k in xrange(i+1, n): + A[i,k] /= scale + ar = ctx.re(A[i,k]) + ai = ctx.im(A[i,k]) + s += ar * ar + ai * ai + f = A[i,i+1] + g = -ctx.sqrt(s) + if ctx.re(f) < 0: + beta = -g - ctx.conj(f) + g = -g + else: + beta = -g + ctx.conj(f) + + beta /= ctx.conj(beta) + beta += 1 + + h = 2 * (ctx.re(f) * g - s) + A[i,i+1] = f - g + + beta /= h + rbeta[i] = (beta / scale) / scale + + for k in xrange(i+1, n): + work[k] = A[i, k] + + for j in xrange(i+1, m): + s = 0 + for k in xrange(i+1, n): + s += ctx.conj(A[i,k]) * A[j,k] + f = s * beta + for k in xrange(i+1,n): + A[j,k] += f * work[k] + + for k in xrange(i+1, n): + A[i,k] *= scale + + anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i])) + + if not isinstance(V, bool): + for i in xrange(n-2, -1, -1): # accumulation of right hand transformations + V[i+1,i+1] = 1 + + if dwork[i+1] != 0: + f = ctx.conj(rbeta[i]) + for j in xrange(i+1, n): + V[i,j] = A[i,j] * f + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, n): + s += ctx.conj(A[i,k]) * V[j,k] + for k in xrange(i+1, n): + V[j,k] += s * V[i,k] + + for j in xrange(i+1,n): + V[j,i] = V[i,j] = 0 + + V[0,0] = 1 + + if m < n : minnm = m + else : minnm = n + + if calc_u: + for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations + g = S[i] + for j in xrange(i+1, n): + A[i,j] = 0 + if g != 0: + g = 1 / g + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, m): + s += ctx.conj(A[k,i]) * A[k,j] + f = s * ctx.conj(lbeta[i]) + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for j in xrange(i, m): + A[j,i] *= g + else: + for j in xrange(i, m): + A[j,i] = 0 + A[i,i] += 1 + + for k in xrange(n-1, -1, -1): + # diagonalization of the bidiagonal form: + # loop over singular values, and over allowed itations + + its = 0 + while 1: + its += 1 + flag = True + + for l in xrange(k, -1, -1): + nm = l - 1 + + if ctx.fabs(dwork[l]) + anorm == anorm: + flag = False + break + + if ctx.fabs(S[nm]) + anorm == anorm: + break + + if flag: + c = 0 + s = 1 + for i in xrange(l, k+1): + f = s * dwork[i] + dwork[i] *= c + if ctx.fabs(f) + anorm == anorm: + break + g = S[i] + h = ctx.hypot(f, g) + S[i] = h + h = 1 / h + c = g * h + s = -f * h + + if calc_u: + for j in xrange(m): + y = A[j,nm] + z = A[j,i] + A[j,nm]= y * c + z * s + A[j,i] 
= z * c - y * s + + z = S[k] + + if l == k: # convergence + if z < 0: # singular value is made nonnegative + S[k] = -z + if not isinstance(V, bool): + for j in xrange(n): + V[k,j] = -V[k,j] + break + + if its >= maxits: + raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) + + x = S[l] # shift from bottom 2 by 2 minor + nm = k-1 + y = S[nm] + g = dwork[nm] + h = dwork[k] + f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2 * h * y) + g = ctx.hypot(f, 1) + if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x + else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x + + c = s = 1 # next qt transformation + + for j in xrange(l, nm + 1): + g = dwork[j+1] + y = S[j+1] + h = s * g + g = c * g + z = ctx.hypot(f, h) + dwork[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + if not isinstance(V, bool): + for jj in xrange(n): + x = V[j ,jj] + z = V[j+1,jj] + V[j ,jj]= x * c + z * s + V[j+1,jj ]= z * c - x * s + z = ctx.hypot(f, h) + S[j] = z + if z != 0: # rotation can be arbitray if z=0 + z = 1 / z + c = f * z + s = h * z + f = c * g + s * y + x = c * y - s * g + if calc_u: + for jj in xrange(m): + y = A[jj,j ] + z = A[jj,j+1] + A[jj,j ]= y * c + z * s + A[jj,j+1 ]= z * c - y * s + + dwork[l] = 0 + dwork[k] = f + S[k] = x + + ########################## + + # Sort singular values into decreasing order (bubble-sort) + + for i in xrange(n): + imax = i + s = ctx.fabs(S[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = ctx.fabs(S[j]) + if c > s: + s = c + imax = j + + if imax != i: + # swap singular values + + z = S[i] + S[i] = S[imax] + S[imax] = z + + if calc_u: + for j in xrange(m): + z = A[j,i] + A[j,i] = A[j,imax] + A[j,imax] = z + + if not isinstance(V, bool): + for j in xrange(n): + z = V[i,j] + V[i,j] = V[imax,j] + V[imax,j] = z + + return S + +################################################################################################## + +@defun +def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal matrices U and V are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the transpose. The diagonal elements of S are the singular + values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a real matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). 
+ + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + examples: + + >>> from mpmath import mp + >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) + >>> S = mp.svd_r(A, compute_uv = False) + >>> print(S) + [6.0] + [3.0] + [1.0] + + >>> U, S, V = mp.svd_r(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + + see also: svd, svd_c + """ + + m, n = A.rows, A.cols + + if not compute_uv: + if not overwrite_a: + A = A.copy() + S = svd_r_raw(ctx, A, V = False, calc_u = False) + S = S[:min(m,n)] + return S + + if full_matrices and n < m: + V = ctx.zeros(m, m) + A0 = ctx.zeros(m, m) + A0[:,:n] = A + S = svd_r_raw(ctx, A0, V, calc_u = True) + + S = S[:n] + V = V[:n,:n] + + return (A0, S, V) + else: + if not overwrite_a: + A = A.copy() + V = ctx.zeros(n, n) + S = svd_r_raw(ctx, A, V, calc_u = True) + + if n > m: + if full_matrices == False: + V = V[:m,:] + + S = S[:m] + A = A[:,:m] + + return (A, S, V) + +############################## + +@defun +def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two unitary matrices U and V are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the hermitian transpose (i.e. transposition and complex + conjugation). The diagonal elements of S are the singular values of A, + i.e. the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a complex matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an unitary matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an unitary matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). 
+ + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]]) + >>> S = mp.svd_c(A, compute_uv = False) + >>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)]))) + [0.0] + [0.0] + [0.0] + + >>> U, S, V = mp.svd_c(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + see also: svd, svd_r + """ + + m, n = A.rows, A.cols + + if not compute_uv: + if not overwrite_a: + A = A.copy() + S = svd_c_raw(ctx, A, V = False, calc_u = False) + S = S[:min(m,n)] + return S + + if full_matrices and n < m: + V = ctx.zeros(m, m) + A0 = ctx.zeros(m, m) + A0[:,:n] = A + S = svd_c_raw(ctx, A0, V, calc_u = True) + + S = S[:n] + V = V[:n,:n] + + return (A0, S, V) + else: + if not overwrite_a: + A = A.copy() + V = ctx.zeros(n, n) + S = svd_c_raw(ctx, A, V, calc_u = True) + + if n > m: + if full_matrices == False: + V = V[:m,:] + + S = S[:m] + A = A[:,:m] + + return (A, S, V) + +@defun +def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + "svd" is a unified interface for "svd_r" and "svd_c". Depending on + whether A is real or complex the appropriate function is called. + + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal (A real) or unitary (A complex) matrices U and V + are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the hermitian transpose (i.e. transposition and complex + conjugation). The diagonal elements of S are the singular values of A, + i.e. the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a real or complex matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). 
+ + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + examples: + + >>> from mpmath import mp + >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) + >>> S = mp.svd(A, compute_uv = False) + >>> print(S) + [6.0] + [3.0] + [1.0] + + >>> U, S, V = mp.svd(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + see also: svd_r, svd_c + """ + + iscomplex = any(type(x) is ctx.mpc for x in A) + + if iscomplex: + return ctx.svd_c(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a) + else: + return ctx.svd_r(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a) diff --git a/MLPY/Lib/site-packages/mpmath/matrices/linalg.py b/MLPY/Lib/site-packages/mpmath/matrices/linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..e2fe643e809822e3d05a52b73c965edb622f9af9 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/linalg.py @@ -0,0 +1,790 @@ +""" +Linear algebra +-------------- + +Linear equations +................ + +Basic linear algebra is implemented; you can for example solve the linear +equation system:: + + x + 2*y = -10 + 3*x + 4*y = 10 + +using ``lu_solve``:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> A = matrix([[1, 2], [3, 4]]) + >>> b = matrix([-10, 10]) + >>> x = lu_solve(A, b) + >>> x + matrix( + [['30.0'], + ['-20.0']]) + +If you don't trust the result, use ``residual`` to calculate the residual ||A*x-b||:: + + >>> residual(A, x, b) + matrix( + [['3.46944695195361e-18'], + ['3.46944695195361e-18']]) + >>> str(eps) + '2.22044604925031e-16' + +As you can see, the solution is quite accurate. The error is caused by the +inaccuracy of the internal floating point arithmetic. Though, it's even smaller +than the current machine epsilon, which basically means you can trust the +result. + +If you need more speed, use NumPy, or ``fp.lu_solve`` for a floating-point computation. + + >>> fp.lu_solve(A, b) # doctest: +ELLIPSIS + matrix(...) + +``lu_solve`` accepts overdetermined systems. It is usually not possible to solve +such systems, so the residual is minimized instead. Internally this is done +using Cholesky decomposition to compute a least squares approximation. This means +that that ``lu_solve`` will square the errors. If you can't afford this, use +``qr_solve`` instead. It is twice as slow but more accurate, and it calculates +the residual automatically. + + +Matrix factorization +.................... + +The function ``lu`` computes an explicit LU factorization of a matrix:: + + >>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]])) + >>> print(P) + [0.0 0.0 1.0] + [1.0 0.0 0.0] + [0.0 1.0 0.0] + >>> print(L) + [ 1.0 0.0 0.0] + [ 0.0 1.0 0.0] + [0.571428571428571 0.214285714285714 1.0] + >>> print(U) + [7.0 8.0 9.0] + [0.0 2.0 3.0] + [0.0 0.0 0.214285714285714] + >>> print(P.T*L*U) + [0.0 2.0 3.0] + [4.0 5.0 6.0] + [7.0 8.0 9.0] + +Interval matrices +----------------- + +Matrices may contain interval elements. This allows one to perform +basic linear algebra operations such as matrix multiplication +and equation solving with rigorous error bounds:: + + >>> a = iv.matrix([['0.1','0.3','1.0'], + ... ['7.1','5.5','4.8'], + ... 
['3.2','4.4','5.6']])
+    >>>
+    >>> b = iv.matrix(['4','0.6','0.5'])
+    >>> c = iv.lu_solve(a, b)
+    >>> print(c)
+    [  [5.2582327113062568605927528666,   5.25823271130625686059275702219]]
+    [[-13.1550493962678375411635581388, -13.1550493962678375411635540152]]
+    [  [7.42069154774972557628979076189,  7.42069154774972557628979190734]]
+    >>> print(a*c)
+    [   [3.99999999999999999999999844904,    4.00000000000000000000000155096]]
+    [[0.599999999999999999999968898009, 0.600000000000000000000031763736]]
+    [[0.499999999999999999999979320485, 0.500000000000000000000020679515]]
+"""
+
+# TODO:
+# *implement high-level qr()
+# *test unitvector
+# *iterative solving
+
+from copy import copy
+
+from ..libmp.backend import xrange
+
+class LinearAlgebraMethods(object):
+
+    def LU_decomp(ctx, A, overwrite=False, use_cache=True):
+        """
+        LU-factorization of a n*n matrix using the Gauss algorithm.
+        Returns L and U in one matrix and the pivot indices.
+
+        Use overwrite to specify whether A will be overwritten with L and U.
+        """
+        if not A.rows == A.cols:
+            raise ValueError('need n*n matrix')
+        # get from cache if possible
+        if use_cache and isinstance(A, ctx.matrix) and A._LU:
+            return A._LU
+        if not overwrite:
+            orig = A
+            A = A.copy()
+        tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger than this tolerance
+        n = A.rows
+        p = [None]*(n - 1)
+        for j in xrange(n - 1):
+            # pivoting, choose max(abs(reciprocal row sum)*abs(pivot element))
+            biggest = 0
+            for k in xrange(j, n):
+                s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)])
+                if ctx.absmin(s) <= tol:
+                    raise ZeroDivisionError('matrix is numerically singular')
+                current = 1/s * ctx.absmin(A[k,j])
+                if current > biggest: # TODO: what if equal?
+                    biggest = current
+                    p[j] = k
+            # swap rows according to p
+            ctx.swap_row(A, j, p[j])
+            if ctx.absmin(A[j,j]) <= tol:
+                raise ZeroDivisionError('matrix is numerically singular')
+            # calculate elimination factors and add rows
+            for i in xrange(j + 1, n):
+                A[i,j] /= A[j,j]
+                for k in xrange(j + 1, n):
+                    A[i,k] -= A[i,j]*A[j,k]
+        if ctx.absmin(A[n - 1,n - 1]) <= tol:
+            raise ZeroDivisionError('matrix is numerically singular')
+        # cache decomposition
+        if not overwrite and isinstance(orig, ctx.matrix):
+            orig._LU = (A, p)
+        return A, p
+
+    def L_solve(ctx, L, b, p=None):
+        """
+        Solve the lower part of a LU factorized matrix for y.
+        """
+        if L.rows != L.cols:
+            raise RuntimeError("need n*n matrix")
+        n = L.rows
+        if len(b) != n:
+            raise ValueError("Value should be equal to n")
+        b = copy(b)
+        if p: # swap b according to p
+            for k in xrange(0, len(p)):
+                ctx.swap_row(b, k, p[k])
+        # solve
+        for i in xrange(1, n):
+            for j in xrange(i):
+                b[i] -= L[i,j] * b[j]
+        return b
+
+    def U_solve(ctx, U, y):
+        """
+        Solve the upper part of a LU factorized matrix for x.
+        """
+        if U.rows != U.cols:
+            raise RuntimeError("need n*n matrix")
+        n = U.rows
+        if len(y) != n:
+            raise ValueError("Value should be equal to n")
+        x = copy(y)
+        for i in xrange(n - 1, -1, -1):
+            for j in xrange(i + 1, n):
+                x[i] -= U[i,j] * x[j]
+            x[i] /= U[i,i]
+        return x
+
+    def lu_solve(ctx, A, b, **kwargs):
+        """
+        Ax = b => x
+
+        Solve a determined or overdetermined linear equations system.
+        Fast LU decomposition is used, which is less accurate than QR decomposition
+        (especially for overdetermined systems), but it's twice as efficient.
+        Use qr_solve if you want more precision or have to solve a very
+        ill-conditioned system.
+
+        If you specify real=True, it does not check for overdetermined complex
+        systems.
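+
+        Example (the same 2x2 system as in the module docstring above):
+
+        >>> from mpmath import mp, matrix, lu_solve
+        >>> mp.pretty = False
+        >>> lu_solve(matrix([[1, 2], [3, 4]]), matrix([-10, 10]))
+        matrix(
+        [['30.0'],
+         ['-20.0']])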
+ """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A nor b + A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() + if A.rows < A.cols: + raise ValueError('cannot solve underdetermined system') + if A.rows > A.cols: + # use least-squares method if overdetermined + # (this increases errors) + AH = A.H + A = AH * A + b = AH * b + if (kwargs.get('real', False) or + not sum(type(i) is ctx.mpc for i in A)): + # TODO: necessary to check also b? + x = ctx.cholesky_solve(A, b) + else: + x = ctx.lu_solve(A, b) + else: + # LU factorization + A, p = ctx.LU_decomp(A) + b = ctx.L_solve(A, b, p) + x = ctx.U_solve(A, b) + finally: + ctx.prec = prec + return x + + def improve_solution(ctx, A, x, b, maxsteps=1): + """ + Improve a solution to a linear equation system iteratively. + + This re-uses the LU decomposition and is thus cheap. + Usually 3 up to 4 iterations are giving the maximal improvement. + """ + if A.rows != A.cols: + raise RuntimeError("need n*n matrix") # TODO: really? + for _ in xrange(maxsteps): + r = ctx.residual(A, x, b) + if ctx.norm(r, 2) < 10*ctx.eps: + break + # this uses cached LU decomposition and is thus cheap + dx = ctx.lu_solve(A, -r) + x += dx + return x + + def lu(ctx, A): + """ + A -> P, L, U + + LU factorisation of a square matrix A. L is the lower, U the upper part. + P is the permutation matrix indicating the row swaps. + + P*A = L*U + + If you need efficiency, use the low-level method LU_decomp instead, it's + much more memory efficient. + """ + # get factorization + A, p = ctx.LU_decomp(A) + n = A.rows + L = ctx.matrix(n) + U = ctx.matrix(n) + for i in xrange(n): + for j in xrange(n): + if i > j: + L[i,j] = A[i,j] + elif i == j: + L[i,j] = 1 + U[i,j] = A[i,j] + else: + U[i,j] = A[i,j] + # calculate permutation matrix + P = ctx.eye(n) + for k in xrange(len(p)): + ctx.swap_row(P, k, p[k]) + return P, L, U + + def unitvector(ctx, n, i): + """ + Return the i-th n-dimensional unit vector. + """ + assert 0 < i <= n, 'this unit vector does not exist' + return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i) + + def inverse(ctx, A, **kwargs): + """ + Calculate the inverse of a matrix. + + If you want to solve an equation system Ax = b, it's recommended to use + solve(A, b) instead, it's about 3 times more efficient. + """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A + A = ctx.matrix(A, **kwargs).copy() + n = A.rows + # get LU factorisation + A, p = ctx.LU_decomp(A) + cols = [] + # calculate unit vectors and solve corresponding system to get columns + for i in xrange(1, n + 1): + e = ctx.unitvector(n, i) + y = ctx.L_solve(A, e, p) + cols.append(ctx.U_solve(A, y)) + # convert columns to matrix + inv = [] + for i in xrange(n): + row = [] + for j in xrange(n): + row.append(cols[j][i]) + inv.append(row) + result = ctx.matrix(inv, **kwargs) + finally: + ctx.prec = prec + return result + + def householder(ctx, A): + """ + (A|b) -> H, p, x, res + + (A|b) is the coefficient matrix with left hand side of an optionally + overdetermined linear equation system. + H and p contain all information about the transformation matrices. + x is the solution, res the residual. 
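+
+        Sketch of the intended call pattern (this mirrors how ``qr_solve``
+        below uses this routine; ``extend`` appends b to A as an extra
+        column, and the matrix values here are merely illustrative):
+
+        >>> from mpmath import mp                           # doctest:+SKIP
+        >>> A = mp.matrix([[1, 2], [3, 4], [1, 1]])         # doctest:+SKIP
+        >>> b = mp.matrix([1, 2, 3])                        # doctest:+SKIP
+        >>> H, p, x, r = mp.householder(mp.extend(A, b))    # doctest:+SKIP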
+
+        """
+        if not isinstance(A, ctx.matrix):
+            raise TypeError("A should be a type of ctx.matrix")
+        m = A.rows
+        n = A.cols
+        if m < n - 1:
+            raise RuntimeError("Columns should not be less than rows")
+        # calculate Householder matrix
+        p = []
+        for j in xrange(0, n - 1):
+            s = ctx.fsum(abs(A[i,j])**2 for i in xrange(j, m))
+            if not abs(s) > ctx.eps:
+                raise ValueError('matrix is numerically singular')
+            p.append(-ctx.sign(ctx.re(A[j,j])) * ctx.sqrt(s))
+            kappa = ctx.one / (s - p[j] * A[j,j])
+            A[j,j] -= p[j]
+            for k in xrange(j+1, n):
+                y = ctx.fsum(ctx.conj(A[i,j]) * A[i,k] for i in xrange(j, m)) * kappa
+                for i in xrange(j, m):
+                    A[i,k] -= A[i,j] * y
+        # solve Rx = c1
+        x = [A[i,n - 1] for i in xrange(n - 1)]
+        for i in xrange(n - 2, -1, -1):
+            x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1))
+            x[i] /= p[i]
+        # calculate residual
+        if not m == n - 1:
+            r = [A[m-1-i, n-1] for i in xrange(m - n + 1)]
+        else:
+            # determined system, residual should be 0
+            r = [0]*m # maybe a bad idea, changing r[i] will change all elements
+        return A, p, x, r
+
+    #def qr(ctx, A):
+    #    """
+    #    A -> Q, R
+    #
+    #    QR factorisation of a square matrix A using Householder decomposition.
+    #    Q is orthogonal, this leads to very few numerical errors.
+    #
+    #    A = Q*R
+    #    """
+    #    H, p, x, res = householder(A)
+    # TODO: implement this
+
+    def residual(ctx, A, x, b, **kwargs):
+        """
+        Calculate the residual of a solution to a linear equation system.
+
+        r = A*x - b for A*x = b
+        """
+        oldprec = ctx.prec
+        try:
+            ctx.prec *= 2
+            A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs)
+            return A*x - b
+        finally:
+            ctx.prec = oldprec
+
+    def qr_solve(ctx, A, b, norm=None, **kwargs):
+        """
+        Ax = b => x, ||Ax - b||
+
+        Solve a determined or overdetermined linear equations system and
+        calculate the norm of the residual (error).
+        QR decomposition using Householder factorization is applied, which gives
+        very accurate results even for ill-conditioned matrices. qr_solve is
+        about twice as slow as lu_solve, but more accurate.
+        """
+        if norm is None:
+            norm = ctx.norm
+        prec = ctx.prec
+        try:
+            ctx.prec += 10
+            # do not overwrite A nor b
+            A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
+            if A.rows < A.cols:
+                raise ValueError('cannot solve underdetermined system')
+            H, p, x, r = ctx.householder(ctx.extend(A, b))
+            res = norm(r)
+            # calculate residual "manually" for determined systems
+            if res == 0:
+                res = norm(ctx.residual(A, x, b))
+            return ctx.matrix(x, **kwargs), res
+        finally:
+            ctx.prec = prec
+
+    def cholesky(ctx, A, tol=None):
+        r"""
+        Cholesky decomposition of a symmetric positive-definite matrix `A`.
+        Returns a lower triangular matrix `L` such that `A = L \times L^T`.
+        More generally, for a complex Hermitian positive-definite matrix,
+        a Cholesky decomposition satisfying `A = L \times L^H` is returned.
+
+        The Cholesky decomposition can be used to solve linear equation
+        systems twice as efficiently as LU decomposition, or to
+        test whether `A` is positive-definite.
+
+        The optional parameter ``tol`` determines the tolerance for
+        verifying positive-definiteness.
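+
+        Written out, the column-by-column recurrence implemented below is
+
+        .. math ::
+
+            L_{jj} = \sqrt{A_{jj} - \sum_{k<j} |L_{jk}|^2}, \qquad
+            L_{ij} = \frac{A_{ij} - \sum_{k<j} L_{ik} \overline{L_{jk}}}{L_{jj}}
+            \quad (i > j)
+
+        where the overline denotes complex conjugation (a no-op in the
+        real case).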
+ + **Examples** + + Cholesky decomposition of a positive-definite symmetric matrix:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> A = eye(3) + hilbert(3) + >>> nprint(A) + [ 2.0 0.5 0.333333] + [ 0.5 1.33333 0.25] + [0.333333 0.25 1.2] + >>> L = cholesky(A) + >>> nprint(L) + [ 1.41421 0.0 0.0] + [0.353553 1.09924 0.0] + [0.235702 0.15162 1.05899] + >>> chop(A - L*L.T) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + Cholesky decomposition of a Hermitian matrix:: + + >>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]]) + >>> L = cholesky(A) + >>> nprint(L) + [ 1.0 0.0 0.0] + [(0.0 - 0.25j) (0.968246 + 0.0j) 0.0] + [ (0.0 + 0.5j) (0.129099 + 0.0j) (0.856349 + 0.0j)] + >>> chop(A - L*L.H) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + Attempted Cholesky decomposition of a matrix that is not positive + definite:: + + >>> A = -eye(3) + hilbert(3) + >>> L = cholesky(A) + Traceback (most recent call last): + ... + ValueError: matrix is not positive-definite + + **References** + + 1. [Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition + + """ + if not isinstance(A, ctx.matrix): + raise RuntimeError("A should be a type of ctx.matrix") + if not A.rows == A.cols: + raise ValueError('need n*n matrix') + if tol is None: + tol = +ctx.eps + n = A.rows + L = ctx.matrix(n) + for j in xrange(n): + c = ctx.re(A[j,j]) + if abs(c-A[j,j]) > tol: + raise ValueError('matrix is not Hermitian') + s = c - ctx.fsum((L[j,k] for k in xrange(j)), + absolute=True, squared=True) + if s < tol: + raise ValueError('matrix is not positive-definite') + L[j,j] = ctx.sqrt(s) + for i in xrange(j, n): + it1 = (L[i,k] for k in xrange(j)) + it2 = (L[j,k] for k in xrange(j)) + t = ctx.fdot(it1, it2, conjugate=True) + L[i,j] = (A[i,j] - t) / L[j,j] + return L + + def cholesky_solve(ctx, A, b, **kwargs): + """ + Ax = b => x + + Solve a symmetric positive-definite linear equation system. + This is twice as efficient as lu_solve. + + Typical use cases: + * A.T*A + * Hessian matrix + * differential equations + """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A nor b + A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() + if A.rows != A.cols: + raise ValueError('can only solve determined system') + # Cholesky factorization + L = ctx.cholesky(A) + # solve + n = L.rows + if len(b) != n: + raise ValueError("Value should be equal to n") + for i in xrange(n): + b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i)) + b[i] /= L[i,i] + x = ctx.U_solve(L.T, b) + return x + finally: + ctx.prec = prec + + def det(ctx, A): + """ + Calculate the determinant of a matrix. + """ + prec = ctx.prec + try: + # do not overwrite A + A = ctx.matrix(A).copy() + # use LU factorization to calculate determinant + try: + R, p = ctx.LU_decomp(A) + except ZeroDivisionError: + return 0 + z = 1 + for i, e in enumerate(p): + if i != e: + z *= -1 + for i in xrange(A.rows): + z *= R[i,i] + return z + finally: + ctx.prec = prec + + def cond(ctx, A, norm=None): + """ + Calculate the condition number of a matrix using a specified matrix norm. + + The condition number estimates the sensitivity of a matrix to errors. + Example: small input errors for ill-conditioned coefficient matrices + alter the solution of the system dramatically. + + For ill-conditioned matrices it's recommended to use qr_solve() instead + of lu_solve(). This does not help with input errors however, it just avoids + to add additional errors. 
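+
+        For example (an illustrative session, using the definition given
+        below; the exact trailing digits depend on the working precision):
+
+        >>> from mpmath import mp                  # doctest:+SKIP
+        >>> mp.cond(mp.hilbert(3))                 # doctest:+SKIP
+        mpf('748.0000000000003')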
+ + Definition: cond(A) = ||A|| * ||A**-1|| + """ + if norm is None: + norm = lambda x: ctx.mnorm(x,1) + return norm(A) * norm(ctx.inverse(A)) + + def lu_solve_mat(ctx, a, b): + """Solve a * x = b where a and b are matrices.""" + r = ctx.matrix(a.rows, b.cols) + for i in range(b.cols): + c = ctx.lu_solve(a, b.column(i)) + for j in range(len(c)): + r[j, i] = c[j] + return r + + def qr(ctx, A, mode = 'full', edps = 10): + """ + Compute a QR factorization $A = QR$ where + A is an m x n matrix of real or complex numbers where m >= n + + mode has following meanings: + (1) mode = 'raw' returns two matrixes (A, tau) in the + internal format used by LAPACK + (2) mode = 'skinny' returns the leading n columns of Q + and n rows of R + (3) Any other value returns the leading m columns of Q + and m rows of R + + edps is the increase in mp precision used for calculations + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15 + >>> mp.pretty = True + >>> A = matrix([[1, 2], [3, 4], [1, 1]]) + >>> Q, R = qr(A) + >>> Q + [-0.301511344577764 0.861640436855329 0.408248290463863] + [-0.904534033733291 -0.123091490979333 -0.408248290463863] + [-0.301511344577764 -0.492365963917331 0.816496580927726] + >>> R + [-3.3166247903554 -4.52267016866645] + [ 0.0 0.738548945875996] + [ 0.0 0.0] + >>> Q * R + [1.0 2.0] + [3.0 4.0] + [1.0 1.0] + >>> chop(Q.T * Q) + [1.0 0.0 0.0] + [0.0 1.0 0.0] + [0.0 0.0 1.0] + >>> B = matrix([[1+0j, 2-3j], [3+j, 4+5j]]) + >>> Q, R = qr(B) + >>> nprint(Q) + [ (-0.301511 + 0.0j) (0.0695795 - 0.95092j)] + [(-0.904534 - 0.301511j) (-0.115966 + 0.278318j)] + >>> nprint(R) + [(-3.31662 + 0.0j) (-5.72872 - 2.41209j)] + [ 0.0 (3.91965 + 0.0j)] + >>> Q * R + [(1.0 + 0.0j) (2.0 - 3.0j)] + [(3.0 + 1.0j) (4.0 + 5.0j)] + >>> chop(Q.T * Q.conjugate()) + [1.0 0.0] + [0.0 1.0] + + """ + + # check values before continuing + assert isinstance(A, ctx.matrix) + m = A.rows + n = A.cols + assert n >= 0 + assert m >= n + assert edps >= 0 + + # check for complex data type + cmplx = any(type(x) is ctx.mpc for x in A) + + # temporarily increase the precision and initialize + with ctx.extradps(edps): + tau = ctx.matrix(n,1) + A = A.copy() + + # --------------- + # FACTOR MATRIX A + # --------------- + if cmplx: + one = ctx.mpc('1.0', '0.0') + zero = ctx.mpc('0.0', '0.0') + rzero = ctx.mpf('0.0') + + # main loop to factor A (complex) + for j in xrange(0, n): + alpha = A[j,j] + alphr = ctx.re(alpha) + alphi = ctx.im(alpha) + + if (m-j) >= 2: + xnorm = ctx.fsum( A[i,j]*ctx.conj(A[i,j]) for i in xrange(j+1, m) ) + xnorm = ctx.re( ctx.sqrt(xnorm) ) + else: + xnorm = rzero + + if (xnorm == rzero) and (alphi == rzero): + tau[j] = zero + continue + + if alphr < rzero: + beta = ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) + else: + beta = -ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) + + tau[j] = ctx.mpc( (beta - alphr) / beta, -alphi / beta ) + t = -ctx.conj(tau[j]) + za = one / (alpha - beta) + + for i in xrange(j+1, m): + A[i,j] *= za + + A[j,j] = one + for k in xrange(j+1, n): + y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j, m)) + temp = t * ctx.conj(y) + for i in xrange(j, m): + A[i,k] += A[i,j] * temp + + A[j,j] = ctx.mpc(beta, '0.0') + else: + one = ctx.mpf('1.0') + zero = ctx.mpf('0.0') + + # main loop to factor A (real) + for j in xrange(0, n): + alpha = A[j,j] + + if (m-j) > 2: + xnorm = ctx.fsum( (A[i,j])**2 for i in xrange(j+1, m) ) + xnorm = ctx.sqrt(xnorm) + elif (m-j) == 2: + xnorm = abs( A[m-1,j] ) + else: + xnorm = zero + + if xnorm == zero: + tau[j] = zero + continue + + if alpha < zero: + 
beta = ctx.sqrt(alpha**2 + xnorm**2) + else: + beta = -ctx.sqrt(alpha**2 + xnorm**2) + + tau[j] = (beta - alpha) / beta + t = -tau[j] + da = one / (alpha - beta) + + for i in xrange(j+1, m): + A[i,j] *= da + + A[j,j] = one + for k in xrange(j+1, n): + y = ctx.fsum( A[i,j] * A[i,k] for i in xrange(j, m) ) + temp = t * y + for i in xrange(j,m): + A[i,k] += A[i,j] * temp + + A[j,j] = beta + + # return factorization in same internal format as LAPACK + if (mode == 'raw') or (mode == 'RAW'): + return A, tau + + # ---------------------------------- + # FORM Q USING BACKWARD ACCUMULATION + # ---------------------------------- + + # form R before the values are overwritten + R = A.copy() + for j in xrange(0, n): + for i in xrange(j+1, m): + R[i,j] = zero + + # set the value of p (number of columns of Q to return) + p = m + if (mode == 'skinny') or (mode == 'SKINNY'): + p = n + + # add columns to A if needed and initialize + A.cols += (p-n) + for j in xrange(0, p): + A[j,j] = one + for i in xrange(0, j): + A[i,j] = zero + + # main loop to form Q + for j in xrange(n-1, -1, -1): + t = -tau[j] + A[j,j] += t + + for k in xrange(j+1, p): + if cmplx: + y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j+1, m)) + temp = t * ctx.conj(y) + else: + y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j+1, m)) + temp = t * y + A[j,k] = temp + for i in xrange(j+1, m): + A[i,k] += A[i,j] * temp + + for i in xrange(j+1, m): + A[i, j] *= t + + return A, R[0:p,0:n] + + # ------------------ + # END OF FUNCTION QR + # ------------------ diff --git a/MLPY/Lib/site-packages/mpmath/matrices/matrices.py b/MLPY/Lib/site-packages/mpmath/matrices/matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..a97d5a9ca7e173195386dc7cb60860a826ab6a97 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/matrices/matrices.py @@ -0,0 +1,1005 @@ +from ..libmp.backend import xrange +import warnings + +# TODO: interpret list as vectors (for multiplication) + +rowsep = '\n' +colsep = ' ' + +class _matrix(object): + """ + Numerical matrix. + + Specify the dimensions or the data as a nested list. + Elements default to zero. + Use a flat list to create a column vector easily. + + The datatype of the context (mpf for mp, mpi for iv, and float for fp) is used to store the data. + + Creating matrices + ----------------- + + Matrices in mpmath are implemented using dictionaries. Only non-zero values + are stored, so it is cheap to represent sparse matrices. + + The most basic way to create one is to use the ``matrix`` class directly. + You can create an empty matrix specifying the dimensions: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> matrix(2) + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + >>> matrix(2, 3) + matrix( + [['0.0', '0.0', '0.0'], + ['0.0', '0.0', '0.0']]) + + Calling ``matrix`` with one dimension will create a square matrix. + + To access the dimensions of a matrix, use the ``rows`` or ``cols`` keyword: + + >>> A = matrix(3, 2) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0'], + ['0.0', '0.0']]) + >>> A.rows + 3 + >>> A.cols + 2 + + You can also change the dimension of an existing matrix. This will set the + new elements to 0. If the new dimension is smaller than before, the + concerning elements are discarded: + + >>> A.rows = 2 + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + + Internally ``mpmathify`` is used every time an element is set. 
This + is done using the syntax A[row,column], counting from 0: + + >>> A = matrix(2) + >>> A[1,1] = 1 + 1j + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', mpc(real='1.0', imag='1.0')]]) + + A more comfortable way to create a matrix lets you use nested lists: + + >>> matrix([[1, 2], [3, 4]]) + matrix( + [['1.0', '2.0'], + ['3.0', '4.0']]) + + Convenient advanced functions are available for creating various standard + matrices, see ``zeros``, ``ones``, ``diag``, ``eye``, ``randmatrix`` and + ``hilbert``. + + Vectors + ....... + + Vectors may also be represented by the ``matrix`` class (with rows = 1 or cols = 1). + For vectors there are some things which make life easier. A column vector can + be created using a flat list, a row vectors using an almost flat nested list:: + + >>> matrix([1, 2, 3]) + matrix( + [['1.0'], + ['2.0'], + ['3.0']]) + >>> matrix([[1, 2, 3]]) + matrix( + [['1.0', '2.0', '3.0']]) + + Optionally vectors can be accessed like lists, using only a single index:: + + >>> x = matrix([1, 2, 3]) + >>> x[1] + mpf('2.0') + >>> x[1,0] + mpf('2.0') + + Other + ..... + + Like you probably expected, matrices can be printed:: + + >>> print randmatrix(3) # doctest:+SKIP + [ 0.782963853573023 0.802057689719883 0.427895717335467] + [0.0541876859348597 0.708243266653103 0.615134039977379] + [ 0.856151514955773 0.544759264818486 0.686210904770947] + + Use ``nstr`` or ``nprint`` to specify the number of digits to print:: + + >>> nprint(randmatrix(5), 3) # doctest:+SKIP + [2.07e-1 1.66e-1 5.06e-1 1.89e-1 8.29e-1] + [6.62e-1 6.55e-1 4.47e-1 4.82e-1 2.06e-2] + [4.33e-1 7.75e-1 6.93e-2 2.86e-1 5.71e-1] + [1.01e-1 2.53e-1 6.13e-1 3.32e-1 2.59e-1] + [1.56e-1 7.27e-2 6.05e-1 6.67e-2 2.79e-1] + + As matrices are mutable, you will need to copy them sometimes:: + + >>> A = matrix(2) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + >>> B = A.copy() + >>> B[0,0] = 1 + >>> B + matrix( + [['1.0', '0.0'], + ['0.0', '0.0']]) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + + Finally, it is possible to convert a matrix to a nested list. This is very useful, + as most Python libraries involving matrices or arrays (namely NumPy or SymPy) + support this format:: + + >>> B.tolist() + [[mpf('1.0'), mpf('0.0')], [mpf('0.0'), mpf('0.0')]] + + + Matrix operations + ----------------- + + You can add and subtract matrices of compatible dimensions:: + + >>> A = matrix([[1, 2], [3, 4]]) + >>> B = matrix([[-2, 4], [5, 9]]) + >>> A + B + matrix( + [['-1.0', '6.0'], + ['8.0', '13.0']]) + >>> A - B + matrix( + [['3.0', '-2.0'], + ['-2.0', '-5.0']]) + >>> A + ones(3) # doctest:+ELLIPSIS + Traceback (most recent call last): + ... + ValueError: incompatible dimensions for addition + + It is possible to multiply or add matrices and scalars. In the latter case the + operation will be done element-wise:: + + >>> A * 2 + matrix( + [['2.0', '4.0'], + ['6.0', '8.0']]) + >>> A / 4 + matrix( + [['0.25', '0.5'], + ['0.75', '1.0']]) + >>> A - 1 + matrix( + [['0.0', '1.0'], + ['2.0', '3.0']]) + + Of course you can perform matrix multiplication, if the dimensions are + compatible, using ``@`` (for Python >= 3.5) or ``*``. For clarity, ``@`` is + recommended (`PEP 465 `), because + the meaning of ``*`` is different in many other Python libraries such as NumPy. + + >>> A @ B # doctest:+SKIP + matrix( + [['8.0', '22.0'], + ['14.0', '48.0']]) + >>> A * B # same as A @ B + matrix( + [['8.0', '22.0'], + ['14.0', '48.0']]) + >>> matrix([[1, 2, 3]]) * matrix([[-6], [7], [-2]]) + matrix( + [['2.0']]) + + .. 
+ COMMENT: TODO: the above "doctest:+SKIP" may be removed as soon as we + have dropped support for Python 3.5 and below. + + You can raise powers of square matrices:: + + >>> A**2 + matrix( + [['7.0', '10.0'], + ['15.0', '22.0']]) + + Negative powers will calculate the inverse:: + + >>> A**-1 + matrix( + [['-2.0', '1.0'], + ['1.5', '-0.5']]) + >>> A * A**-1 + matrix( + [['1.0', '1.0842021724855e-19'], + ['-2.16840434497101e-19', '1.0']]) + + + + Matrix transposition is straightforward:: + + >>> A = ones(2, 3) + >>> A + matrix( + [['1.0', '1.0', '1.0'], + ['1.0', '1.0', '1.0']]) + >>> A.T + matrix( + [['1.0', '1.0'], + ['1.0', '1.0'], + ['1.0', '1.0']]) + + Norms + ..... + + Sometimes you need to know how "large" a matrix or vector is. Due to their + multidimensional nature it's not possible to compare them, but there are + several functions to map a matrix or a vector to a positive real number, the + so called norms. + + For vectors the p-norm is intended, usually the 1-, the 2- and the oo-norm are + used. + + >>> x = matrix([-10, 2, 100]) + >>> norm(x, 1) + mpf('112.0') + >>> norm(x, 2) + mpf('100.5186549850325') + >>> norm(x, inf) + mpf('100.0') + + Please note that the 2-norm is the most used one, though it is more expensive + to calculate than the 1- or oo-norm. + + It is possible to generalize some vector norms to matrix norm:: + + >>> A = matrix([[1, -1000], [100, 50]]) + >>> mnorm(A, 1) + mpf('1050.0') + >>> mnorm(A, inf) + mpf('1001.0') + >>> mnorm(A, 'F') + mpf('1006.2310867787777') + + The last norm (the "Frobenius-norm") is an approximation for the 2-norm, which + is hard to calculate and not available. The Frobenius-norm lacks some + mathematical properties you might expect from a norm. + """ + + def __init__(self, *args, **kwargs): + self.__data = {} + # LU decompostion cache, this is useful when solving the same system + # multiple times, when calculating the inverse and when calculating the + # determinant + self._LU = None + if "force_type" in kwargs: + warnings.warn("The force_type argument was removed, it did not work" + " properly anyway. If you want to force floating-point or" + " interval computations, use the respective methods from `fp`" + " or `mp` instead, e.g., `fp.matrix()` or `iv.matrix()`." + " If you want to truncate values to integer, use .apply(int) instead.") + if isinstance(args[0], (list, tuple)): + if isinstance(args[0][0], (list, tuple)): + # interpret nested list as matrix + A = args[0] + self.__rows = len(A) + self.__cols = len(A[0]) + for i, row in enumerate(A): + for j, a in enumerate(row): + # note: this will call __setitem__ which will call self.ctx.convert() to convert the datatype. 
+ self[i, j] = a + else: + # interpret list as row vector + v = args[0] + self.__rows = len(v) + self.__cols = 1 + for i, e in enumerate(v): + self[i, 0] = e + elif isinstance(args[0], int): + # create empty matrix of given dimensions + if len(args) == 1: + self.__rows = self.__cols = args[0] + else: + if not isinstance(args[1], int): + raise TypeError("expected int") + self.__rows = args[0] + self.__cols = args[1] + elif isinstance(args[0], _matrix): + A = args[0] + self.__rows = A._matrix__rows + self.__cols = A._matrix__cols + for i in xrange(A.__rows): + for j in xrange(A.__cols): + self[i, j] = A[i, j] + elif hasattr(args[0], 'tolist'): + A = self.ctx.matrix(args[0].tolist()) + self.__data = A._matrix__data + self.__rows = A._matrix__rows + self.__cols = A._matrix__cols + else: + raise TypeError('could not interpret given arguments') + + def apply(self, f): + """ + Return a copy of self with the function `f` applied elementwise. + """ + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] = f(self[i,j]) + return new + + def __nstr__(self, n=None, **kwargs): + # Build table of string representations of the elements + res = [] + # Track per-column max lengths for pretty alignment + maxlen = [0] * self.cols + for i in range(self.rows): + res.append([]) + for j in range(self.cols): + if n: + string = self.ctx.nstr(self[i,j], n, **kwargs) + else: + string = str(self[i,j]) + res[-1].append(string) + maxlen[j] = max(len(string), maxlen[j]) + # Patch strings together + for i, row in enumerate(res): + for j, elem in enumerate(row): + # Pad each element up to maxlen so the columns line up + row[j] = elem.rjust(maxlen[j]) + res[i] = "[" + colsep.join(row) + "]" + return rowsep.join(res) + + def __str__(self): + return self.__nstr__() + + def _toliststr(self, avoid_type=False): + """ + Create a list string from a matrix. + + If avoid_type: avoid multiple 'mpf's. + """ + # XXX: should be something like self.ctx._types + typ = self.ctx.mpf + s = '[' + for i in xrange(self.__rows): + s += '[' + for j in xrange(self.__cols): + if not avoid_type or not isinstance(self[i,j], typ): + a = repr(self[i,j]) + else: + a = "'" + str(self[i,j]) + "'" + s += a + ', ' + s = s[:-2] + s += '],\n ' + s = s[:-3] + s += ']' + return s + + def tolist(self): + """ + Convert the matrix to a nested list. + """ + return [[self[i,j] for j in range(self.__cols)] for i in range(self.__rows)] + + def __repr__(self): + if self.ctx.pretty: + return self.__str__() + s = 'matrix(\n' + s += self._toliststr(avoid_type=True) + ')' + return s + + def __get_element(self, key): + ''' + Fast extraction of the i,j element from the matrix + This function is for private use only because is unsafe: + 1. Does not check on the value of key it expects key to be a integer tuple (i,j) + 2. Does not check bounds + ''' + if key in self.__data: + return self.__data[key] + else: + return self.ctx.zero + + def __set_element(self, key, value): + ''' + Fast assignment of the i,j element in the matrix + This function is unsafe: + 1. Does not check on the value of key it expects key to be a integer tuple (i,j) + 2. Does not check bounds + 3. Does not check the value type + 4. 
Does not reset the LU cache + ''' + if value: # only store non-zeros + self.__data[key] = value + elif key in self.__data: + del self.__data[key] + + + def __getitem__(self, key): + ''' + Getitem function for mp matrix class with slice index enabled + it allows the following assingments + scalar to a slice of the matrix + B = A[:,2:6] + ''' + # Convert vector to matrix indexing + if isinstance(key, int) or isinstance(key,slice): + # only sufficent for vectors + if self.__rows == 1: + key = (0, key) + elif self.__cols == 1: + key = (key, 0) + else: + raise IndexError('insufficient indices for matrix') + + if isinstance(key[0],slice) or isinstance(key[1],slice): + + #Rows + if isinstance(key[0],slice): + #Check bounds + if (key[0].start is None or key[0].start >= 0) and \ + (key[0].stop is None or key[0].stop <= self.__rows+1): + # Generate indices + rows = xrange(*key[0].indices(self.__rows)) + else: + raise IndexError('Row index out of bounds') + else: + # Single row + rows = [key[0]] + + # Columns + if isinstance(key[1],slice): + # Check bounds + if (key[1].start is None or key[1].start >= 0) and \ + (key[1].stop is None or key[1].stop <= self.__cols+1): + # Generate indices + columns = xrange(*key[1].indices(self.__cols)) + else: + raise IndexError('Column index out of bounds') + + else: + # Single column + columns = [key[1]] + + # Create matrix slice + m = self.ctx.matrix(len(rows),len(columns)) + + # Assign elements to the output matrix + for i,x in enumerate(rows): + for j,y in enumerate(columns): + m.__set_element((i,j),self.__get_element((x,y))) + + return m + + else: + # single element extraction + if key[0] >= self.__rows or key[1] >= self.__cols: + raise IndexError('matrix index out of range') + if key in self.__data: + return self.__data[key] + else: + return self.ctx.zero + + def __setitem__(self, key, value): + # setitem function for mp matrix class with slice index enabled + # it allows the following assingments + # scalar to a slice of the matrix + # A[:,2:6] = 2.5 + # submatrix to matrix (the value matrix should be the same size as the slice size) + # A[3,:] = B where A is n x m and B is n x 1 + # Convert vector to matrix indexing + if isinstance(key, int) or isinstance(key,slice): + # only sufficent for vectors + if self.__rows == 1: + key = (0, key) + elif self.__cols == 1: + key = (key, 0) + else: + raise IndexError('insufficient indices for matrix') + # Slice indexing + if isinstance(key[0],slice) or isinstance(key[1],slice): + # Rows + if isinstance(key[0],slice): + # Check bounds + if (key[0].start is None or key[0].start >= 0) and \ + (key[0].stop is None or key[0].stop <= self.__rows+1): + # generate row indices + rows = xrange(*key[0].indices(self.__rows)) + else: + raise IndexError('Row index out of bounds') + else: + # Single row + rows = [key[0]] + # Columns + if isinstance(key[1],slice): + # Check bounds + if (key[1].start is None or key[1].start >= 0) and \ + (key[1].stop is None or key[1].stop <= self.__cols+1): + # Generate column indices + columns = xrange(*key[1].indices(self.__cols)) + else: + raise IndexError('Column index out of bounds') + else: + # Single column + columns = [key[1]] + # Assign slice with a scalar + if isinstance(value,self.ctx.matrix): + # Assign elements to matrix if input and output dimensions match + if len(rows) == value.rows and len(columns) == value.cols: + for i,x in enumerate(rows): + for j,y in enumerate(columns): + self.__set_element((x,y), value.__get_element((i,j))) + else: + raise ValueError('Dimensions do not match') + 
else: + # Assign slice with scalars + value = self.ctx.convert(value) + for i in rows: + for j in columns: + self.__set_element((i,j), value) + else: + # Single element assingment + # Check bounds + if key[0] >= self.__rows or key[1] >= self.__cols: + raise IndexError('matrix index out of range') + # Convert and store value + value = self.ctx.convert(value) + if value: # only store non-zeros + self.__data[key] = value + elif key in self.__data: + del self.__data[key] + + if self._LU: + self._LU = None + return + + def __iter__(self): + for i in xrange(self.__rows): + for j in xrange(self.__cols): + yield self[i,j] + + def __mul__(self, other): + if isinstance(other, self.ctx.matrix): + # dot multiplication + if self.__cols != other.__rows: + raise ValueError('dimensions not compatible for multiplication') + new = self.ctx.matrix(self.__rows, other.__cols) + self_zero = self.ctx.zero + self_get = self.__data.get + other_zero = other.ctx.zero + other_get = other.__data.get + for i in xrange(self.__rows): + for j in xrange(other.__cols): + new[i, j] = self.ctx.fdot((self_get((i,k), self_zero), other_get((k,j), other_zero)) + for k in xrange(other.__rows)) + return new + else: + # try scalar multiplication + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i, j] = other * self[i, j] + return new + + def __matmul__(self, other): + return self.__mul__(other) + + def __rmul__(self, other): + # assume other is scalar and thus commutative + if isinstance(other, self.ctx.matrix): + raise TypeError("other should not be type of ctx.matrix") + return self.__mul__(other) + + def __pow__(self, other): + # avoid cyclic import problems + #from linalg import inverse + if not isinstance(other, int): + raise ValueError('only integer exponents are supported') + if not self.__rows == self.__cols: + raise ValueError('only powers of square matrices are defined') + n = other + if n == 0: + return self.ctx.eye(self.__rows) + if n < 0: + n = -n + neg = True + else: + neg = False + i = n + y = 1 + z = self.copy() + while i != 0: + if i % 2 == 1: + y = y * z + z = z*z + i = i // 2 + if neg: + y = self.ctx.inverse(y) + return y + + def __div__(self, other): + # assume other is scalar and do element-wise divison + assert not isinstance(other, self.ctx.matrix) + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] = self[i,j] / other + return new + + __truediv__ = __div__ + + def __add__(self, other): + if isinstance(other, self.ctx.matrix): + if not (self.__rows == other.__rows and self.__cols == other.__cols): + raise ValueError('incompatible dimensions for addition') + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] = self[i,j] + other[i,j] + return new + else: + # assume other is scalar and add element-wise + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] += self[i,j] + other + return new + + def __radd__(self, other): + return self.__add__(other) + + def __sub__(self, other): + if isinstance(other, self.ctx.matrix) and not (self.__rows == other.__rows + and self.__cols == other.__cols): + raise ValueError('incompatible dimensions for subtraction') + return self.__add__(other * (-1)) + + def __pos__(self): + """ + +M returns a copy of M, rounded to current working precision. 
+ """ + return (+1) * self + + def __neg__(self): + return (-1) * self + + def __rsub__(self, other): + return -self + other + + def __eq__(self, other): + return self.__rows == other.__rows and self.__cols == other.__cols \ + and self.__data == other.__data + + def __len__(self): + if self.rows == 1: + return self.cols + elif self.cols == 1: + return self.rows + else: + return self.rows # do it like numpy + + def __getrows(self): + return self.__rows + + def __setrows(self, value): + for key in self.__data.copy(): + if key[0] >= value: + del self.__data[key] + self.__rows = value + + rows = property(__getrows, __setrows, doc='number of rows') + + def __getcols(self): + return self.__cols + + def __setcols(self, value): + for key in self.__data.copy(): + if key[1] >= value: + del self.__data[key] + self.__cols = value + + cols = property(__getcols, __setcols, doc='number of columns') + + def transpose(self): + new = self.ctx.matrix(self.__cols, self.__rows) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[j,i] = self[i,j] + return new + + T = property(transpose) + + def conjugate(self): + return self.apply(self.ctx.conj) + + def transpose_conj(self): + return self.conjugate().transpose() + + H = property(transpose_conj) + + def copy(self): + new = self.ctx.matrix(self.__rows, self.__cols) + new.__data = self.__data.copy() + return new + + __copy__ = copy + + def column(self, n): + m = self.ctx.matrix(self.rows, 1) + for i in range(self.rows): + m[i] = self[i,n] + return m + +class MatrixMethods(object): + + def __init__(ctx): + # XXX: subclass + ctx.matrix = type('matrix', (_matrix,), {}) + ctx.matrix.ctx = ctx + ctx.matrix.convert = ctx.convert + + def eye(ctx, n, **kwargs): + """ + Create square identity matrix n x n. + """ + A = ctx.matrix(n, **kwargs) + for i in xrange(n): + A[i,i] = 1 + return A + + def diag(ctx, diagonal, **kwargs): + """ + Create square diagonal matrix using given list. + + Example: + >>> from mpmath import diag, mp + >>> mp.pretty = False + >>> diag([1, 2, 3]) + matrix( + [['1.0', '0.0', '0.0'], + ['0.0', '2.0', '0.0'], + ['0.0', '0.0', '3.0']]) + """ + A = ctx.matrix(len(diagonal), **kwargs) + for i in xrange(len(diagonal)): + A[i,i] = diagonal[i] + return A + + def zeros(ctx, *args, **kwargs): + """ + Create matrix m x n filled with zeros. + One given dimension will create square matrix n x n. + + Example: + >>> from mpmath import zeros, mp + >>> mp.pretty = False + >>> zeros(2) + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + """ + if len(args) == 1: + m = n = args[0] + elif len(args) == 2: + m = args[0] + n = args[1] + else: + raise TypeError('zeros expected at most 2 arguments, got %i' % len(args)) + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = 0 + return A + + def ones(ctx, *args, **kwargs): + """ + Create matrix m x n filled with ones. + One given dimension will create square matrix n x n. + + Example: + >>> from mpmath import ones, mp + >>> mp.pretty = False + >>> ones(2) + matrix( + [['1.0', '1.0'], + ['1.0', '1.0']]) + """ + if len(args) == 1: + m = n = args[0] + elif len(args) == 2: + m = args[0] + n = args[1] + else: + raise TypeError('ones expected at most 2 arguments, got %i' % len(args)) + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = 1 + return A + + def hilbert(ctx, m, n=None): + """ + Create (pseudo) hilbert matrix m x n. + One given dimension will create hilbert matrix n x n. 
+ + The matrix is very ill-conditioned and symmetric, positive definite if + square. + """ + if n is None: + n = m + A = ctx.matrix(m, n) + for i in xrange(m): + for j in xrange(n): + A[i,j] = ctx.one / (i + j + 1) + return A + + def randmatrix(ctx, m, n=None, min=0, max=1, **kwargs): + """ + Create a random m x n matrix. + + All values are >= min and < max. + + n defaults to m. + + Example: + >>> from mpmath import randmatrix + >>> randmatrix(2) # doctest:+SKIP + matrix( + [['0.53491598236191806', '0.57195669543302752'], + ['0.85589992269513615', '0.82444367501382143']]) + """ + if not n: + n = m + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = ctx.rand() * (max - min) + min + return A + + def swap_row(ctx, A, i, j): + """ + Swap row i with row j. + """ + if i == j: + return + if isinstance(A, ctx.matrix): + for k in xrange(A.cols): + A[i,k], A[j,k] = A[j,k], A[i,k] + elif isinstance(A, list): + A[i], A[j] = A[j], A[i] + else: + raise TypeError('could not interpret type') + + def extend(ctx, A, b): + """ + Extend matrix A with column b and return result. + """ + if not isinstance(A, ctx.matrix): + raise TypeError("A should be an instance of ctx.matrix") + if A.rows != len(b): + raise ValueError("A.rows should be equal to len(b)") + A = A.copy() + A.cols += 1 + for i in xrange(A.rows): + A[i, A.cols-1] = b[i] + return A + + def norm(ctx, x, p=2): + r""" + Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm + `\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`. + + Special cases: + + If *x* is not iterable, this just returns ``absmax(x)``. + + ``p=1`` gives the sum of absolute values. + + ``p=2`` is the standard Euclidean vector norm. + + ``p=inf`` gives the magnitude of the largest element. + + For *x* a matrix, ``p=2`` is the Frobenius norm. + For operator matrix norms, use :func:`~mpmath.mnorm` instead. + + You can use the string 'inf' as well as float('inf') or mpf('inf') + to specify the infinity norm. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> x = matrix([-10, 2, 100]) + >>> norm(x, 1) + mpf('112.0') + >>> norm(x, 2) + mpf('100.5186549850325') + >>> norm(x, inf) + mpf('100.0') + + """ + try: + iter(x) + except TypeError: + return ctx.absmax(x) + if type(p) is not int: + p = ctx.convert(p) + if p == ctx.inf: + return max(ctx.absmax(i) for i in x) + elif p == 1: + return ctx.fsum(x, absolute=1) + elif p == 2: + return ctx.sqrt(ctx.fsum(x, absolute=1, squared=1)) + elif p > 1: + return ctx.nthroot(ctx.fsum(abs(i)**p for i in x), p) + else: + raise ValueError('p has to be >= 1') + + def mnorm(ctx, A, p=1): + r""" + Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf`` + are supported: + + ``p=1`` gives the 1-norm (maximal column sum) + + ``p=inf`` gives the `\infty`-norm (maximal row sum). + You can use the string 'inf' as well as float('inf') or mpf('inf'). + + ``p=2`` (not implemented) for a square matrix is the usual spectral + matrix norm, i.e. the largest singular value. + + ``p='f'`` (or 'F', 'fro', 'Frobenius', 'frobenius') gives the + Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an + approximation of the spectral norm and satisfies + + .. math :: + + \frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F + + The Frobenius norm lacks some mathematical properties that might + be expected of a norm. + + For general elementwise `p`-norms, use :func:`~mpmath.norm` instead.
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> A = matrix([[1, -1000], [100, 50]]) + >>> mnorm(A, 1) + mpf('1050.0') + >>> mnorm(A, inf) + mpf('1001.0') + >>> mnorm(A, 'F') + mpf('1006.2310867787777') + + """ + A = ctx.matrix(A) + if type(p) is not int: + if type(p) is str and 'frobenius'.startswith(p.lower()): + return ctx.norm(A, 2) + p = ctx.convert(p) + m, n = A.rows, A.cols + if p == 1: + return max(ctx.fsum((A[i,j] for i in xrange(m)), absolute=1) for j in xrange(n)) + elif p == ctx.inf: + return max(ctx.fsum((A[i,j] for j in xrange(n)), absolute=1) for i in xrange(m)) + else: + raise NotImplementedError("matrix p-norm for arbitrary p") + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/MLPY/Lib/site-packages/mpmath/rational.py b/MLPY/Lib/site-packages/mpmath/rational.py new file mode 100644 index 0000000000000000000000000000000000000000..58745205319ac3548ad5feb49371d2d154b2d3c8 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/rational.py @@ -0,0 +1,240 @@ +import operator +import sys +from .libmp import int_types, mpf_hash, bitcount, from_man_exp, HASH_MODULUS + +new = object.__new__ + +def create_reduced(p, q, _cache={}): + key = p, q + if key in _cache: + return _cache[key] + x, y = p, q + while y: + x, y = y, x % y + if x != 1: + p //= x + q //= x + v = new(mpq) + v._mpq_ = p, q + # Speedup integers, half-integers and other small fractions + if q <= 4 and abs(key[0]) < 100: + _cache[key] = v + return v + +class mpq(object): + """ + Exact rational type, currently only intended for internal use. + """ + + __slots__ = ["_mpq_"] + + def __new__(cls, p, q=1): + if type(p) is tuple: + p, q = p + elif hasattr(p, '_mpq_'): + p, q = p._mpq_ + return create_reduced(p, q) + + def __repr__(s): + return "mpq(%s,%s)" % s._mpq_ + + def __str__(s): + return "(%s/%s)" % s._mpq_ + + def __int__(s): + a, b = s._mpq_ + return a // b + + def __nonzero__(s): + return bool(s._mpq_[0]) + + __bool__ = __nonzero__ + + def __hash__(s): + a, b = s._mpq_ + if sys.version_info >= (3, 2): + inverse = pow(b, HASH_MODULUS-2, HASH_MODULUS) + if not inverse: + h = sys.hash_info.inf + else: + h = (abs(a) * inverse) % HASH_MODULUS + if a < 0: h = -h + if h == -1: h = -2 + return h + else: + if b == 1: + return hash(a) + # Power of two: mpf compatible hash + if not (b & (b-1)): + return mpf_hash(from_man_exp(a, 1-bitcount(b))) + return hash((a,b)) + + def __eq__(s, t): + ttype = type(t) + if ttype is mpq: + return s._mpq_ == t._mpq_ + if ttype in int_types: + a, b = s._mpq_ + if b != 1: + return False + return a == t + return NotImplemented + + def __ne__(s, t): + ttype = type(t) + if ttype is mpq: + return s._mpq_ != t._mpq_ + if ttype in int_types: + a, b = s._mpq_ + if b != 1: + return True + return a != t + return NotImplemented + + def _cmp(s, t, op): + ttype = type(t) + if ttype in int_types: + a, b = s._mpq_ + return op(a, t*b) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return op(a*d, b*c) + return NotImplemented + + def __lt__(s, t): return s._cmp(t, operator.lt) + def __le__(s, t): return s._cmp(t, operator.le) + def __gt__(s, t): return s._cmp(t, operator.gt) + def __ge__(s, t): return s._cmp(t, operator.ge) + + def __abs__(s): + a, b = s._mpq_ + if a >= 0: + return s + v = new(mpq) + v._mpq_ = -a, b + return v + + def __neg__(s): + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = -a, b + return v + + def __pos__(s): + return s + + def __add__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + 
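# (a/b) + (c/d) = (a*d + b*c)/(b*d); create_reduced cancels the common gcd +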
return create_reduced(a*d+b*c, b*d) + if ttype in int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = a+b*t, b + return v + return NotImplemented + + __radd__ = __add__ + + def __sub__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*d-b*c, b*d) + if ttype in int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = a-b*t, b + return v + return NotImplemented + + def __rsub__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(b*c-a*d, b*d) + if ttype in int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = b*t-a, b + return v + return NotImplemented + + def __mul__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*c, b*d) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(a*t, b) + return NotImplemented + + __rmul__ = __mul__ + + def __div__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*d, b*c) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(a, b*t) + return NotImplemented + + __truediv__ = __div__ + + def __rdiv__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(b*c, a*d) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(b*t, a) + return NotImplemented + + __rtruediv__ = __rdiv__ + + def __pow__(s, t): + ttype = type(t) + if ttype in int_types: + a, b = s._mpq_ + if not t: + # x**0 == 1, following Python's convention (including 0**0) + v = new(mpq) + v._mpq_ = 1, 1 + return v + if t < 0: + if not a: + raise ZeroDivisionError('0 cannot be raised to a negative power') + a, b, t = b, a, -t + if b < 0: + a, b = -a, -b + v = new(mpq) + v._mpq_ = a**t, b**t + return v + return NotImplemented + + +mpq_1 = mpq((1,1)) +mpq_0 = mpq((0,1)) +mpq_1_2 = mpq((1,2)) +mpq_3_2 = mpq((3,2)) +mpq_1_4 = mpq((1,4)) +mpq_1_16 = mpq((1,16)) +mpq_3_16 = mpq((3,16)) +mpq_5_2 = mpq((5,2)) +mpq_3_4 = mpq((3,4)) +mpq_7_4 = mpq((7,4)) +mpq_5_4 = mpq((5,4)) + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods.
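+ +# Illustrative sketch (not part of the original module): basic mpq behavior. +# Results are always reduced to lowest terms by create_reduced: +# >>> mpq(1,2) + mpq(1,3) +# mpq(5,6) +# >>> mpq(3,4) * 2 +# mpq(3,2) +# >>> mpq(1,2) == mpq(2,4) +# True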
+try: + import numbers + numbers.Rational.register(mpq) +except ImportError: + pass diff --git a/MLPY/Lib/site-packages/mpmath/tests/__init__.py b/MLPY/Lib/site-packages/mpmath/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..237f560a43614ae1791493fd5b0a1c4a5d708006 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..996826601acbcc4bec8ca903a610ff00186c4119 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4043a4d07ff09052a796e5fa598f744b8c2f85b0 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..111cef1bb8b24dd2c6c4a3a46322e8aec40b47af Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/runtests.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fad7cf1dfdca2e7ead291f5e722ad69c8d85ee93 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a0205c170d4b15681f919b262fe6b1cdb5e2c9c Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b4200419ab7d43e7ab2b46d39c5d1cd69cbeb59 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e32374cbd51138f36dc503f74133a7c787e97cd Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc 
b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c38ccf180dccb5dc37472b463030ad7a4478c316 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_convert.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd53e684e313f1442bedd9b07cd02caa6e22bcba Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_diff.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b638cd4d04f9fd769eee70adf0487fb14fee2f90 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_division.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7227a4e058426f0c2d384ddc4b7d16afd69736c2 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b354c4bdb2bb03e1003b130d8f0c302261a2298b Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14b40302b8c2a5fa1e4305afe14c09bfc6ae0bc3 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b1e3884fde2d5700cbfd78d303fda54e22d8cc4 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_fp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80527a3abcd3a48a21589f476cf92ad42244662f Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14c0e1a0d98edb9db7fe47dbf0a2d778bd23b0b4 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_functions2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc 
b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a274160f9826fa866ef921089915e9f8e225a8d Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..818042932d97b00c37588d4fd01531130c549a71 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_hp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a21212de43dee73fde7f04332a8e1798f631fee Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_identify.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cfc0508eae847eaf067bc29da6092a531ba7b8c Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_interval.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34624e311a6ed8bbd8cbb3ae7b75250b421749a6 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4d03a37c05ccd6974ef58930f23135c5e31a435 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83bca329525d4f495eb3cfef6b5f8f584bd7fee9 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a39932b2537f53954376a259dcc94b15e381ea96 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cfce8a3e4b32b452e65c6a43de6ecf41853751a Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..5e43adb73a4f677266c2070d9ef23d7ced91e7f6 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b15b3c70f934eead4c31dfa5357d0094d1daea13 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c57c57c0d57daf05d5dbd8dd1df687753ca29697 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bacfda5dcb62d260fecb31fce94d4c2e6ecb1115 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5184450c7936e5a3e1b347f473fa923be0bc1ef Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85bd12fd26c6765ab641d55e5cf76a538519f05d Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12ac3964abb3d7e32d881125e5766d3d51f80361 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3bed735abe783ea9a38095abe6161b0e955fe2c Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1517af6b6c55077a88e64c0afdfa0b6efb1f2e49 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..14f000f0b896ea60bf5ea18eea36b12eca7a7ee2 Binary files /dev/null and b/MLPY/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/mpmath/tests/extratest_gamma.py b/MLPY/Lib/site-packages/mpmath/tests/extratest_gamma.py new file mode 100644 index 0000000000000000000000000000000000000000..5a27b61b19aba0abf6bdb8adc16fc1ec7689b67a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/extratest_gamma.py @@ -0,0 +1,215 @@ +from mpmath import * +from mpmath.libmp import ifac + +import sys +if "-dps" in sys.argv: + maxdps = int(sys.argv[sys.argv.index("-dps")+1]) +else: + maxdps = 1000 + +raise_ = "-raise" in sys.argv + +errcount = 0 + +def check(name, func, z, y): + global errcount + try: + x = func(z) + except: + errcount += 1 + if raise_: + raise + print() + print(name) + print("EXCEPTION") + import traceback + traceback.print_tb(sys.exc_info()[2]) + print() + return + xre = x.real + xim = x.imag + yre = y.real + yim = y.imag + tol = eps*8 + err = 0 + if abs(xre-yre) > abs(yre)*tol: + err = 1 + print() + print("Error! %s (re = %s, wanted %s, err=%s)" % (name, nstr(xre,10), nstr(yre,10), nstr(abs(xre-yre)))) + errcount += 1 + if raise_: + raise SystemExit + if abs(xim-yim) > abs(yim)*tol: + err = 1 + print() + print("Error! %s (im = %s, wanted %s, err=%s)" % (name, nstr(xim,10), nstr(yim,10), nstr(abs(xim-yim)))) + errcount += 1 + if raise_: + raise SystemExit + if not err: + sys.stdout.write("%s ok; " % name) + +def testcase(case): + z, result = case + print("Testing z =", z) + mp.dps = 1010 + z = eval(z) + mp.dps = maxdps + 50 + if result is None: + gamma_val = gamma(z) + loggamma_val = loggamma(z) + factorial_val = factorial(z) + rgamma_val = rgamma(z) + else: + loggamma_val = eval(result) + gamma_val = exp(loggamma_val) + factorial_val = z * gamma_val + rgamma_val = 1/gamma_val + for dps in [5, 10, 15, 25, 40, 60, 90, 120, 250, 600, 1000, 1800, 3600]: + if dps > maxdps: + break + mp.dps = dps + print("dps = %s" % dps) + check("gamma", gamma, z, gamma_val) + check("rgamma", rgamma, z, rgamma_val) + check("loggamma", loggamma, z, loggamma_val) + check("factorial", factorial, z, factorial_val) + print() + mp.dps = 15 + +testcases = [] + +# Basic values +for n in list(range(1,200)) + list(range(201,2000,17)): + testcases.append(["%s" % n, None]) +for n in range(-200,200): + testcases.append(["%s+0.5" % n, None]) + testcases.append(["%s+0.37" % n, None]) + +testcases += [\ +["(0.1+1j)", None], +["(-0.1+1j)", None], +["(0.1-1j)", None], +["(-0.1-1j)", None], +["10j", None], +["-10j", None], +["100j", None], +["10000j", None], +["-10000000j", None], +["(10**100)*j", None], +["125+(10**100)*j", None], +["-125+(10**100)*j", None], +["(10**10)*(1+j)", None], +["(10**10)*(-1+j)", None], +["(10**100)*(1+j)", None], +["(10**100)*(-1+j)", None], +["(1.5-1j)", None], +["(6+4j)", None], +["(4+1j)", None], +["(3.5+2j)", None], +["(1.5-1j)", None], +["(-6-4j)", None], +["(-2-3j)", None], +["(-2.5-2j)", None], +["(4+1j)", None], +["(3+3j)", None], +["(2-2j)", None], +["1", "0"], +["2", "0"], +["3", "log(2)"], +["4", "log(6)"], +["5", "log(24)"], +["0.5", "log(pi)/2"], +["1.5", "log(sqrt(pi)/2)"], +["2.5", "log(3*sqrt(pi)/4)"], +["mpf('0.37')", None], +["0.25", "log(sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))))"], +["-0.4", None], +["mpf('-1.9')", None], +["mpf('12.8')", None], +["mpf('33.7')", None], +["mpf('95.2')", None], +["mpf('160.3')", None], +["mpf('2057.8')", None], +["25", "log(ifac(24))"], +["80", 
"log(ifac(79))"], +["500", "log(ifac(500-1))"], +["8000", "log(ifac(8000-1))"], +["8000.5", None], +["mpf('8000.1')", None], +["mpf('1.37e10')", None], +["mpf('1.37e10')*(1+j)", None], +["mpf('1.37e10')*(-1+j)", None], +["mpf('1.37e10')*(-1-j)", None], +["mpf('1.37e10')*(-1+j)", None], +["mpf('1.37e100')", None], +["mpf('1.37e100')*(1+j)", None], +["mpf('1.37e100')*(-1+j)", None], +["mpf('1.37e100')*(-1-j)", None], +["mpf('1.37e100')*(-1+j)", None], +["3+4j", +"mpc('" +"-1.7566267846037841105306041816232757851567066070613445016197619371316057169" +"4723618263960834804618463052988607348289672535780644470689771115236512106002" +"5970873471563240537307638968509556191696167970488390423963867031934333890838" +"8009531786948197210025029725361069435208930363494971027388382086721660805397" +"9163230643216054580167976201709951509519218635460317367338612500626714783631" +"7498317478048447525674016344322545858832610325861086336204591943822302971823" +"5161814175530618223688296232894588415495615809337292518431903058265147109853" +"1710568942184987827643886816200452860853873815413367529829631430146227470517" +"6579967222200868632179482214312673161276976117132204633283806161971389519137" +"1243359764435612951384238091232760634271570950240717650166551484551654327989" +"9360285030081716934130446150245110557038117075172576825490035434069388648124" +"6678152254554001586736120762641422590778766100376515737713938521275749049949" +"1284143906816424244705094759339932733567910991920631339597278805393743140853" +"391550313363278558195609260225928','" +"4.74266443803465792819488940755002274088830335171164611359052405215840070271" +"5906813009373171139767051863542508136875688550817670379002790304870822775498" +"2809996675877564504192565392367259119610438951593128982646945990372179860613" +"4294436498090428077839141927485901735557543641049637962003652638924845391650" +"9546290137755550107224907606529385248390667634297183361902055842228798984200" +"9591180450211798341715874477629099687609819466457990642030707080894518168924" +"6805549314043258530272479246115112769957368212585759640878745385160943755234" +"9398036774908108204370323896757543121853650025529763655312360354244898913463" +"7115955702828838923393113618205074162812089732064414530813087483533203244056" +"0546577484241423134079056537777170351934430586103623577814746004431994179990" +"5318522939077992613855205801498201930221975721246498720895122345420698451980" +"0051215797310305885845964334761831751370672996984756815410977750799748813563" +"8784405288158432214886648743541773208808731479748217023665577802702269468013" +"673719173759245720489020315779001')"], +] + +for z in [4, 14, 34, 64]: + testcases.append(["(2+j)*%s/3" % z, None]) + testcases.append(["(-2+j)*%s/3" % z, None]) + testcases.append(["(1+2*j)*%s/3" % z, None]) + testcases.append(["(2-j)*%s/3" % z, None]) + testcases.append(["(20+j)*%s/3" % z, None]) + testcases.append(["(-20+j)*%s/3" % z, None]) + testcases.append(["(1+20*j)*%s/3" % z, None]) + testcases.append(["(20-j)*%s/3" % z, None]) + testcases.append(["(200+j)*%s/3" % z, None]) + testcases.append(["(-200+j)*%s/3" % z, None]) + testcases.append(["(1+200*j)*%s/3" % z, None]) + testcases.append(["(200-j)*%s/3" % z, None]) + +# Poles +for n in [0,1,2,3,4,25,-1,-2,-3,-4,-20,-21,-50,-51,-200,-201,-20000,-20001]: + for t in ['1e-5', '1e-20', '1e-100', '1e-10000']: + testcases.append(["fadd(%s,'%s',exact=True)" % (n, t), None]) + testcases.append(["fsub(%s,'%s',exact=True)" % (n, t), None]) + testcases.append(["fadd(%s,'%sj',exact=True)" % (n, t), 
None]) + testcases.append(["fsub(%s,'%sj',exact=True)" % (n, t), None]) + +if __name__ == "__main__": + from timeit import default_timer as clock + tot_time = 0.0 + for case in testcases: + t1 = clock() + testcase(case) + t2 = clock() + print("Test time:", t2-t1) + print() + tot_time += (t2-t1) + print("Total time:", tot_time) + print("Errors:", errcount) diff --git a/MLPY/Lib/site-packages/mpmath/tests/extratest_zeta.py b/MLPY/Lib/site-packages/mpmath/tests/extratest_zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..582b3d9cbd956b9cdf94309e0e718371fe716101 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/extratest_zeta.py @@ -0,0 +1,30 @@ +from mpmath import zetazero +from timeit import default_timer as clock + +def test_zetazero(): + cases = [\ + (399999999, 156762524.6750591511), + (241389216, 97490234.2276711795), + (526196239, 202950727.691229534), + (542964976, 209039046.578535272), + (1048449112, 388858885.231056486), + (1048449113, 388858885.384337406), + (1048449114, 388858886.002285122), + (1048449115, 388858886.00239369), + (1048449116, 388858886.690745053) + ] + for n, v in cases: + print(n, v) + t1 = clock() + ok = zetazero(n).ae(complex(0.5,v)) + t2 = clock() + print("ok =", ok, ("(time = %s)" % round(t2-t1,3))) + print("Now computing two huge zeros (this may take hours)") + print("Computing zetazero(8637740722917)") + ok = zetazero(8637740722917).ae(complex(0.5,2124447368584.39296466152)) + print("ok =", ok) + ok = zetazero(8637740722918).ae(complex(0.5,2124447368584.39298170604)) + print("ok =", ok) + +if __name__ == "__main__": + test_zetazero() diff --git a/MLPY/Lib/site-packages/mpmath/tests/runtests.py b/MLPY/Lib/site-packages/mpmath/tests/runtests.py new file mode 100644 index 0000000000000000000000000000000000000000..70fde272fdc0e05e3d8951edddca380bd36139ab --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/runtests.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python + +""" +python runtests.py -py + Use py.test to run tests (more useful for debugging) + +python runtests.py -coverage + Generate test coverage report. Statistics are written to /tmp + +python runtests.py -profile + Generate profile stats (this is much slower) + +python runtests.py -nogmpy + Run tests without using GMPY even if it exists + +python runtests.py -strict + Enforce extra tests in normalize() + +python runtests.py -local + Insert '../..' at the beginning of sys.path to use local mpmath + +python runtests.py -skip ... + Skip tests from the listed modules + +Additional arguments are used to filter the tests to run. Only files that have +one of the arguments in their name are executed. 
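+ +python runtests.py -local test_basic_ops + Example (illustrative): use the local mpmath tree and run only the test + modules whose filename contains "test_basic_ops"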
+ +""" + +import sys, os, traceback + +profile = False +if "-profile" in sys.argv: + sys.argv.remove('-profile') + profile = True + +coverage = False +if "-coverage" in sys.argv: + sys.argv.remove('-coverage') + coverage = True + +if "-nogmpy" in sys.argv: + sys.argv.remove('-nogmpy') + os.environ['MPMATH_NOGMPY'] = 'Y' + +if "-strict" in sys.argv: + sys.argv.remove('-strict') + os.environ['MPMATH_STRICT'] = 'Y' + +if "-local" in sys.argv: + sys.argv.remove('-local') + importdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), + '../..')) +else: + importdir = '' + +# TODO: add a flag for this +testdir = '' + +def testit(importdir='', testdir='', exit_on_fail=False): + """Run all tests in testdir while importing from importdir.""" + if importdir: + sys.path.insert(1, importdir) + if testdir: + sys.path.insert(1, testdir) + import os.path + import mpmath + print("mpmath imported from %s" % os.path.dirname(mpmath.__file__)) + print("mpmath backend: %s" % mpmath.libmp.backend.BACKEND) + print("mpmath mp class: %s" % repr(mpmath.mp)) + print("mpmath version: %s" % mpmath.__version__) + print("Python version: %s" % sys.version) + print("") + if "-py" in sys.argv: + sys.argv.remove('-py') + import py + py.test.cmdline.main() + else: + import glob + from timeit import default_timer as clock + modules = [] + args = sys.argv[1:] + excluded = [] + if '-skip' in args: + excluded = args[args.index('-skip')+1:] + args = args[:args.index('-skip')] + # search for tests in directory of this file if not otherwise specified + if not testdir: + pattern = os.path.dirname(sys.argv[0]) + else: + pattern = testdir + if pattern: + pattern += '/' + pattern += 'test*.py' + # look for tests (respecting specified filter) + for f in glob.glob(pattern): + name = os.path.splitext(os.path.basename(f))[0] + # If run as a script, only run tests given as args, if any are given + if args and __name__ == "__main__": + ok = False + for arg in args: + if arg in name: + ok = True + break + if not ok: + continue + elif name in excluded: + continue + module = __import__(name) + priority = module.__dict__.get('priority', 100) + if priority == 666: + modules = [[priority, name, module]] + break + modules.append([priority, name, module]) + # execute tests + modules.sort() + tstart = clock() + for priority, name, module in modules: + print(name) + for f in sorted(module.__dict__.keys()): + if f.startswith('test_'): + if coverage and ('numpy' in f): + continue + sys.stdout.write(" " + f[5:].ljust(25) + " ") + t1 = clock() + try: + module.__dict__[f]() + except: + etype, evalue, trb = sys.exc_info() + if etype in (KeyboardInterrupt, SystemExit): + raise + print("") + print("TEST FAILED!") + print("") + traceback.print_exc() + if exit_on_fail: + return + t2 = clock() + print("ok " + " " + ("%.7f" % (t2-t1)) + " s") + tend = clock() + print("") + print("finished tests in " + ("%.2f" % (tend-tstart)) + " seconds") + # clean sys.path + if importdir: + sys.path.remove(importdir) + if testdir: + sys.path.remove(testdir) + +if __name__ == '__main__': + if profile: + import cProfile + cProfile.run("testit('%s', '%s')" % (importdir, testdir), sort=1) + elif coverage: + import trace + tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], + trace=0, count=1) + tracer.run('testit(importdir, testdir)') + r = tracer.results() + r.write_results(show_missing=True, summary=True, coverdir="/tmp") + else: + testit(importdir, testdir) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_basic_ops.py 
b/MLPY/Lib/site-packages/mpmath/tests/test_basic_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f577c7fa9f9734876b6767f6cc21144df305d82f --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_basic_ops.py @@ -0,0 +1,451 @@ +import mpmath +from mpmath import * +from mpmath.libmp import * +import random +import sys + +try: + long = long +except NameError: + long = int + +def test_type_compare(): + assert mpf(2) == mpc(2,0) + assert mpf(0) == mpc(0) + assert mpf(2) != mpc(2, 0.00001) + assert mpf(2) == 2.0 + assert mpf(2) != 3.0 + assert mpf(2) == 2 + assert mpf(2) != '2.0' + assert mpc(2) != '2.0' + +def test_add(): + assert mpf(2.5) + mpf(3) == 5.5 + assert mpf(2.5) + 3 == 5.5 + assert mpf(2.5) + 3.0 == 5.5 + assert 3 + mpf(2.5) == 5.5 + assert 3.0 + mpf(2.5) == 5.5 + assert (3+0j) + mpf(2.5) == 5.5 + assert mpc(2.5) + mpf(3) == 5.5 + assert mpc(2.5) + 3 == 5.5 + assert mpc(2.5) + 3.0 == 5.5 + assert mpc(2.5) + (3+0j) == 5.5 + assert 3 + mpc(2.5) == 5.5 + assert 3.0 + mpc(2.5) == 5.5 + assert (3+0j) + mpc(2.5) == 5.5 + +def test_sub(): + assert mpf(2.5) - mpf(3) == -0.5 + assert mpf(2.5) - 3 == -0.5 + assert mpf(2.5) - 3.0 == -0.5 + assert 3 - mpf(2.5) == 0.5 + assert 3.0 - mpf(2.5) == 0.5 + assert (3+0j) - mpf(2.5) == 0.5 + assert mpc(2.5) - mpf(3) == -0.5 + assert mpc(2.5) - 3 == -0.5 + assert mpc(2.5) - 3.0 == -0.5 + assert mpc(2.5) - (3+0j) == -0.5 + assert 3 - mpc(2.5) == 0.5 + assert 3.0 - mpc(2.5) == 0.5 + assert (3+0j) - mpc(2.5) == 0.5 + +def test_mul(): + assert mpf(2.5) * mpf(3) == 7.5 + assert mpf(2.5) * 3 == 7.5 + assert mpf(2.5) * 3.0 == 7.5 + assert 3 * mpf(2.5) == 7.5 + assert 3.0 * mpf(2.5) == 7.5 + assert (3+0j) * mpf(2.5) == 7.5 + assert mpc(2.5) * mpf(3) == 7.5 + assert mpc(2.5) * 3 == 7.5 + assert mpc(2.5) * 3.0 == 7.5 + assert mpc(2.5) * (3+0j) == 7.5 + assert 3 * mpc(2.5) == 7.5 + assert 3.0 * mpc(2.5) == 7.5 + assert (3+0j) * mpc(2.5) == 7.5 + +def test_div(): + assert mpf(6) / mpf(3) == 2.0 + assert mpf(6) / 3 == 2.0 + assert mpf(6) / 3.0 == 2.0 + assert 6 / mpf(3) == 2.0 + assert 6.0 / mpf(3) == 2.0 + assert (6+0j) / mpf(3.0) == 2.0 + assert mpc(6) / mpf(3) == 2.0 + assert mpc(6) / 3 == 2.0 + assert mpc(6) / 3.0 == 2.0 + assert mpc(6) / (3+0j) == 2.0 + assert 6 / mpc(3) == 2.0 + assert 6.0 / mpc(3) == 2.0 + assert (6+0j) / mpc(3) == 2.0 + +def test_pow(): + assert mpf(6) ** mpf(3) == 216.0 + assert mpf(6) ** 3 == 216.0 + assert mpf(6) ** 3.0 == 216.0 + assert 6 ** mpf(3) == 216.0 + assert 6.0 ** mpf(3) == 216.0 + assert (6+0j) ** mpf(3.0) == 216.0 + assert mpc(6) ** mpf(3) == 216.0 + assert mpc(6) ** 3 == 216.0 + assert mpc(6) ** 3.0 == 216.0 + assert mpc(6) ** (3+0j) == 216.0 + assert 6 ** mpc(3) == 216.0 + assert 6.0 ** mpc(3) == 216.0 + assert (6+0j) ** mpc(3) == 216.0 + +def test_mixed_misc(): + assert 1 + mpf(3) == mpf(3) + 1 == 4 + assert 1 - mpf(3) == -(mpf(3) - 1) == -2 + assert 3 * mpf(2) == mpf(2) * 3 == 6 + assert 6 / mpf(2) == mpf(6) / 2 == 3 + assert 1.0 + mpf(3) == mpf(3) + 1.0 == 4 + assert 1.0 - mpf(3) == -(mpf(3) - 1.0) == -2 + assert 3.0 * mpf(2) == mpf(2) * 3.0 == 6 + assert 6.0 / mpf(2) == mpf(6) / 2.0 == 3 + +def test_add_misc(): + mp.dps = 15 + assert mpf(4) + mpf(-70) == -66 + assert mpf(1) + mpf(1.1)/80 == 1 + 1.1/80 + assert mpf((1, 10000000000)) + mpf(3) == mpf((1, 10000000000)) + assert mpf(3) + mpf((1, 10000000000)) == mpf((1, 10000000000)) + assert mpf((1, -10000000000)) + mpf(3) == mpf(3) + assert mpf(3) + mpf((1, -10000000000)) == mpf(3) + assert mpf(1) + 1e-15 != 1 + assert mpf(1) + 1e-20 == 1 + 
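# At the default 53-bit precision, eps is about 2.2e-16: adding 1e-15 to 1 + # is still representable, while 1e-20 falls below half an ulp of 1 and is + # rounded away; the 1.07e-22 cases below check that tiny values survive + # addition with an exact zero. +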
assert mpf(1.07e-22) + 0 == mpf(1.07e-22) + assert mpf(0) + mpf(1.07e-22) == mpf(1.07e-22) + +def test_complex_misc(): + # many more tests needed + assert 1 + mpc(2) == 3 + assert not mpc(2).ae(2 + 1e-13) + assert mpc(2+1e-15j).ae(2) + +def test_complex_zeros(): + for a in [0,2]: + for b in [0,3]: + for c in [0,4]: + for d in [0,5]: + assert mpc(a,b)*mpc(c,d) == complex(a,b)*complex(c,d) + +def test_hash(): + for i in range(-256, 256): + assert hash(mpf(i)) == hash(i) + assert hash(mpf(0.5)) == hash(0.5) + assert hash(mpc(2,3)) == hash(2+3j) + # Check that this doesn't fail + assert hash(inf) + # Check that overflow doesn't assign equal hashes to large numbers + assert hash(mpf('1e1000')) != hash('1e10000') + assert hash(mpc(100,'1e1000')) != hash(mpc(200,'1e1000')) + from mpmath.rational import mpq + assert hash(mp.mpq(1,3)) + assert hash(mp.mpq(0,1)) == 0 + assert hash(mp.mpq(-1,1)) == hash(-1) + assert hash(mp.mpq(1,1)) == hash(1) + assert hash(mp.mpq(5,1)) == hash(5) + assert hash(mp.mpq(1,2)) == hash(0.5) + if sys.version_info >= (3, 2): + assert hash(mpf(1)*2**2000) == hash(2**2000) + assert hash(mpf(1)/2**2000) == hash(mpq(1,2**2000)) + +# Advanced rounding test +def test_add_rounding(): + mp.dps = 15 + a = from_float(1e-50) + assert mpf_sub(mpf_add(fone, a, 53, round_up), fone, 53, round_up) == from_float(2.2204460492503131e-16) + assert mpf_sub(fone, a, 53, round_up) == fone + assert mpf_sub(fone, mpf_sub(fone, a, 53, round_down), 53, round_down) == from_float(1.1102230246251565e-16) + assert mpf_add(fone, a, 53, round_down) == fone + +def test_almost_equal(): + assert mpf(1.2).ae(mpf(1.20000001), 1e-7) + assert not mpf(1.2).ae(mpf(1.20000001), 1e-9) + assert not mpf(-0.7818314824680298).ae(mpf(-0.774695868667929)) + +def test_arithmetic_functions(): + import operator + ops = [(operator.add, fadd), (operator.sub, fsub), (operator.mul, fmul), + (operator.truediv, fdiv)] + a = mpf(0.27) + b = mpf(1.13) + c = mpc(0.51+2.16j) + d = mpc(1.08-0.99j) + for x in [a,b,c,d]: + for y in [a,b,c,d]: + for op, fop in ops: + if fop is not fdiv: + mp.prec = 200 + z0 = op(x,y) + mp.prec = 60 + z1 = op(x,y) + mp.prec = 53 + z2 = op(x,y) + assert fop(x, y, prec=60) == z1 + assert fop(x, y) == z2 + if fop is not fdiv: + assert fop(x, y, prec=inf) == z0 + assert fop(x, y, dps=inf) == z0 + assert fop(x, y, exact=True) == z0 + assert fneg(fneg(z1, exact=True), prec=inf) == z1 + assert fneg(z1) == -(+z1) + mp.dps = 15 + +def test_exact_integer_arithmetic(): + # XXX: re-fix this so that all operations are tested with all rounding modes + random.seed(0) + for prec in [6, 10, 25, 40, 100, 250, 725]: + for rounding in ['d', 'u', 'f', 'c', 'n']: + mp.dps = prec + M = 10**(prec-2) + M2 = 10**(prec//2-2) + for i in range(10): + a = random.randint(-M, M) + b = random.randint(-M, M) + assert mpf(a, rounding=rounding) == a + assert int(mpf(a, rounding=rounding)) == a + assert int(mpf(str(a), rounding=rounding)) == a + assert mpf(a) + mpf(b) == a + b + assert mpf(a) - mpf(b) == a - b + assert -mpf(a) == -a + a = random.randint(-M2, M2) + b = random.randint(-M2, M2) + assert mpf(a) * mpf(b) == a*b + assert mpf_mul(from_int(a), from_int(b), mp.prec, rounding) == from_int(a*b) + mp.dps = 15 + +def test_odd_int_bug(): + assert to_int(from_int(3), round_nearest) == 3 + +def test_str_1000_digits(): + mp.dps = 1001 + # last digit may be wrong + assert str(mpf(2)**0.5)[-10:-1] == '9518488472'[:9] + assert str(pi)[-10:-1] == '2164201989'[:9] + mp.dps = 15 + +def test_str_10000_digits(): + mp.dps = 10001 + # last digit may 
be wrong + assert str(mpf(2)**0.5)[-10:-1] == '5873258351'[:9] + assert str(pi)[-10:-1] == '5256375678'[:9] + mp.dps = 15 + +def test_monitor(): + f = lambda x: x**2 + a = [] + b = [] + g = monitor(f, a.append, b.append) + assert g(3) == 9 + assert g(4) == 16 + assert a[0] == ((3,), {}) + assert b[0] == 9 + +def test_nint_distance(): + assert nint_distance(mpf(-3)) == (-3, -inf) + assert nint_distance(mpc(-3)) == (-3, -inf) + assert nint_distance(mpf(-3.1)) == (-3, -3) + assert nint_distance(mpf(-3.01)) == (-3, -6) + assert nint_distance(mpf(-3.001)) == (-3, -9) + assert nint_distance(mpf(-3.0001)) == (-3, -13) + assert nint_distance(mpf(-2.9)) == (-3, -3) + assert nint_distance(mpf(-2.99)) == (-3, -6) + assert nint_distance(mpf(-2.999)) == (-3, -9) + assert nint_distance(mpf(-2.9999)) == (-3, -13) + assert nint_distance(mpc(-3+0.1j)) == (-3, -3) + assert nint_distance(mpc(-3+0.01j)) == (-3, -6) + assert nint_distance(mpc(-3.1+0.1j)) == (-3, -3) + assert nint_distance(mpc(-3.01+0.01j)) == (-3, -6) + assert nint_distance(mpc(-3.001+0.001j)) == (-3, -9) + assert nint_distance(mpf(0)) == (0, -inf) + assert nint_distance(mpf(0.01)) == (0, -6) + assert nint_distance(mpf('1e-100')) == (0, -332) + +def test_floor_ceil_nint_frac(): + mp.dps = 15 + for n in range(-10,10): + assert floor(n) == n + assert floor(n+0.5) == n + assert ceil(n) == n + assert ceil(n+0.5) == n+1 + assert nint(n) == n + # nint rounds to even + if n % 2 == 1: + assert nint(n+0.5) == n+1 + else: + assert nint(n+0.5) == n + assert floor(inf) == inf + assert floor(ninf) == ninf + assert isnan(floor(nan)) + assert ceil(inf) == inf + assert ceil(ninf) == ninf + assert isnan(ceil(nan)) + assert nint(inf) == inf + assert nint(ninf) == ninf + assert isnan(nint(nan)) + assert floor(0.1) == 0 + assert floor(0.9) == 0 + assert floor(-0.1) == -1 + assert floor(-0.9) == -1 + assert floor(10000000000.1) == 10000000000 + assert floor(10000000000.9) == 10000000000 + assert floor(-10000000000.1) == -10000000000-1 + assert floor(-10000000000.9) == -10000000000-1 + assert floor(1e-100) == 0 + assert floor(-1e-100) == -1 + assert floor(1e100) == 1e100 + assert floor(-1e100) == -1e100 + assert ceil(0.1) == 1 + assert ceil(0.9) == 1 + assert ceil(-0.1) == 0 + assert ceil(-0.9) == 0 + assert ceil(10000000000.1) == 10000000000+1 + assert ceil(10000000000.9) == 10000000000+1 + assert ceil(-10000000000.1) == -10000000000 + assert ceil(-10000000000.9) == -10000000000 + assert ceil(1e-100) == 1 + assert ceil(-1e-100) == 0 + assert ceil(1e100) == 1e100 + assert ceil(-1e100) == -1e100 + assert nint(0.1) == 0 + assert nint(0.9) == 1 + assert nint(-0.1) == 0 + assert nint(-0.9) == -1 + assert nint(10000000000.1) == 10000000000 + assert nint(10000000000.9) == 10000000000+1 + assert nint(-10000000000.1) == -10000000000 + assert nint(-10000000000.9) == -10000000000-1 + assert nint(1e-100) == 0 + assert nint(-1e-100) == 0 + assert nint(1e100) == 1e100 + assert nint(-1e100) == -1e100 + assert floor(3.2+4.6j) == 3+4j + assert ceil(3.2+4.6j) == 4+5j + assert nint(3.2+4.6j) == 3+5j + for n in range(-10,10): + assert frac(n) == 0 + assert frac(0.25) == 0.25 + assert frac(1.25) == 0.25 + assert frac(2.25) == 0.25 + assert frac(-0.25) == 0.75 + assert frac(-1.25) == 0.75 + assert frac(-2.25) == 0.75 + assert frac('1e100000000000000') == 0 + u = mpf('1e-100000000000000') + assert frac(u) == u + assert frac(-u) == 1 # rounding! 
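+ # For tiny u, frac(-u) equals 1 - u mathematically; at 15-digit working + # precision this rounds to exactly 1, whereas prec=0 below requests the + # exact result (as the assertion checks).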
+ u = mpf('1e-400') + assert frac(-u, prec=0) == fsub(1, u, exact=True) + assert frac(3.25+4.75j) == 0.25+0.75j + +def test_isnan_etc(): + from mpmath.rational import mpq + assert isnan(nan) == True + assert isnan(3) == False + assert isnan(mpf(3)) == False + assert isnan(inf) == False + assert isnan(mpc(2,nan)) == True + assert isnan(mpc(2,nan)) == True + assert isnan(mpc(nan,nan)) == True + assert isnan(mpc(2,2)) == False + assert isnan(mpc(nan,inf)) == True + assert isnan(mpc(inf,inf)) == False + assert isnan(mpq((3,2))) == False + assert isnan(mpq((0,1))) == False + assert isinf(inf) == True + assert isinf(-inf) == True + assert isinf(3) == False + assert isinf(nan) == False + assert isinf(3+4j) == False + assert isinf(mpc(inf)) == True + assert isinf(mpc(3,inf)) == True + assert isinf(mpc(inf,3)) == True + assert isinf(mpc(inf,inf)) == True + assert isinf(mpc(nan,inf)) == True + assert isinf(mpc(inf,nan)) == True + assert isinf(mpc(nan,nan)) == False + assert isinf(mpq((3,2))) == False + assert isinf(mpq((0,1))) == False + assert isnormal(3) == True + assert isnormal(3.5) == True + assert isnormal(mpf(3.5)) == True + assert isnormal(0) == False + assert isnormal(mpf(0)) == False + assert isnormal(0.0) == False + assert isnormal(inf) == False + assert isnormal(-inf) == False + assert isnormal(nan) == False + assert isnormal(float(inf)) == False + assert isnormal(mpc(0,0)) == False + assert isnormal(mpc(3,0)) == True + assert isnormal(mpc(0,3)) == True + assert isnormal(mpc(3,3)) == True + assert isnormal(mpc(0,nan)) == False + assert isnormal(mpc(0,inf)) == False + assert isnormal(mpc(3,nan)) == False + assert isnormal(mpc(3,inf)) == False + assert isnormal(mpc(3,-inf)) == False + assert isnormal(mpc(nan,0)) == False + assert isnormal(mpc(inf,0)) == False + assert isnormal(mpc(nan,3)) == False + assert isnormal(mpc(inf,3)) == False + assert isnormal(mpc(inf,nan)) == False + assert isnormal(mpc(nan,inf)) == False + assert isnormal(mpc(nan,nan)) == False + assert isnormal(mpc(inf,inf)) == False + assert isnormal(mpq((3,2))) == True + assert isnormal(mpq((0,1))) == False + assert isint(3) == True + assert isint(0) == True + assert isint(long(3)) == True + assert isint(long(0)) == True + assert isint(mpf(3)) == True + assert isint(mpf(0)) == True + assert isint(mpf(-3)) == True + assert isint(mpf(3.2)) == False + assert isint(3.2) == False + assert isint(nan) == False + assert isint(inf) == False + assert isint(-inf) == False + assert isint(mpc(0)) == True + assert isint(mpc(3)) == True + assert isint(mpc(3.2)) == False + assert isint(mpc(3,inf)) == False + assert isint(mpc(inf)) == False + assert isint(mpc(3,2)) == False + assert isint(mpc(0,2)) == False + assert isint(mpc(3,2),gaussian=True) == True + assert isint(mpc(3,0),gaussian=True) == True + assert isint(mpc(0,3),gaussian=True) == True + assert isint(3+4j) == False + assert isint(3+4j, gaussian=True) == True + assert isint(3+0j) == True + assert isint(mpq((3,2))) == False + assert isint(mpq((3,9))) == False + assert isint(mpq((9,3))) == True + assert isint(mpq((0,4))) == True + assert isint(mpq((1,1))) == True + assert isint(mpq((-1,1))) == True + assert mp.isnpint(0) == True + assert mp.isnpint(1) == False + assert mp.isnpint(-1) == True + assert mp.isnpint(-1.1) == False + assert mp.isnpint(-1.0) == True + assert mp.isnpint(mp.mpq(1,2)) == False + assert mp.isnpint(mp.mpq(-1,2)) == False + assert mp.isnpint(mp.mpq(-3,1)) == True + assert mp.isnpint(mp.mpq(0,1)) == True + assert mp.isnpint(mp.mpq(1,1)) == False + assert 
mp.isnpint(0+0j) == True + assert mp.isnpint(-1+0j) == True + assert mp.isnpint(-1.1+0j) == False + assert mp.isnpint(-1+0.1j) == False + assert mp.isnpint(0+0.1j) == False + + +def test_issue_438(): + assert mpf(finf) == mpf('inf') + assert mpf(fninf) == mpf('-inf') + assert mpf(fnan)._mpf_ == mpf('nan')._mpf_ diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_bitwise.py b/MLPY/Lib/site-packages/mpmath/tests/test_bitwise.py new file mode 100644 index 0000000000000000000000000000000000000000..4f61b69fc8819cf275abaedd98847c58c3b5924a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_bitwise.py @@ -0,0 +1,188 @@ +""" +Test bit-level integer and mpf operations +""" + +from mpmath import * +from mpmath.libmp import * + +def test_bitcount(): + assert bitcount(0) == 0 + assert bitcount(1) == 1 + assert bitcount(7) == 3 + assert bitcount(8) == 4 + assert bitcount(2**100) == 101 + assert bitcount(2**100-1) == 100 + +def test_trailing(): + assert trailing(0) == 0 + assert trailing(1) == 0 + assert trailing(2) == 1 + assert trailing(7) == 0 + assert trailing(8) == 3 + assert trailing(2**100) == 100 + assert trailing(2**100-1) == 0 + +def test_round_down(): + assert from_man_exp(0, -4, 4, round_down)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(0xff, -4, 4, round_down)[:3] == (0, 15, 0) + assert from_man_exp(-0xf0, -4, 4, round_down)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_down)[:3] == (1, 15, 0) + assert from_man_exp(-0xff, -4, 4, round_down)[:3] == (1, 15, 0) + +def test_round_up(): + assert from_man_exp(0, -4, 4, round_up)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_up)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_up)[:3] == (0, 1, 4) + assert from_man_exp(0xff, -4, 4, round_up)[:3] == (0, 1, 4) + assert from_man_exp(-0xf0, -4, 4, round_up)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_up)[:3] == (1, 1, 4) + assert from_man_exp(-0xff, -4, 4, round_up)[:3] == (1, 1, 4) + +def test_round_floor(): + assert from_man_exp(0, -4, 4, round_floor)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(0xff, -4, 4, round_floor)[:3] == (0, 15, 0) + assert from_man_exp(-0xf0, -4, 4, round_floor)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_floor)[:3] == (1, 1, 4) + assert from_man_exp(-0xff, -4, 4, round_floor)[:3] == (1, 1, 4) + +def test_round_ceiling(): + assert from_man_exp(0, -4, 4, round_ceiling)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_ceiling)[:3] == (0, 15, 0) + assert from_man_exp(0xf1, -4, 4, round_ceiling)[:3] == (0, 1, 4) + assert from_man_exp(0xff, -4, 4, round_ceiling)[:3] == (0, 1, 4) + assert from_man_exp(-0xf0, -4, 4, round_ceiling)[:3] == (1, 15, 0) + assert from_man_exp(-0xf1, -4, 4, round_ceiling)[:3] == (1, 15, 0) + assert from_man_exp(-0xff, -4, 4, round_ceiling)[:3] == (1, 15, 0) + +def test_round_nearest(): + assert from_man_exp(0, -4, 4, round_nearest)[:3] == (0, 0, 0) + assert from_man_exp(0xf0, -4, 4, round_nearest)[:3] == (0, 15, 0) + assert from_man_exp(0xf7, -4, 4, round_nearest)[:3] == (0, 15, 0) + assert from_man_exp(0xf8, -4, 4, round_nearest)[:3] == (0, 1, 4) # 1111.1000 -> 10000.0 + assert from_man_exp(0xf9, -4, 4, round_nearest)[:3] == (0, 1, 4) # 1111.1001 -> 10000.0 + assert from_man_exp(0xe8, -4, 4, round_nearest)[:3] == (0, 7, 
1) # 1110.1000 -> 1110.0 + assert from_man_exp(0xe9, -4, 4, round_nearest)[:3] == (0, 15, 0) # 1110.1001 -> 1111.0 + assert from_man_exp(-0xf0, -4, 4, round_nearest)[:3] == (1, 15, 0) + assert from_man_exp(-0xf7, -4, 4, round_nearest)[:3] == (1, 15, 0) + assert from_man_exp(-0xf8, -4, 4, round_nearest)[:3] == (1, 1, 4) + assert from_man_exp(-0xf9, -4, 4, round_nearest)[:3] == (1, 1, 4) + assert from_man_exp(-0xe8, -4, 4, round_nearest)[:3] == (1, 7, 1) + assert from_man_exp(-0xe9, -4, 4, round_nearest)[:3] == (1, 15, 0) + +def test_rounding_bugs(): + # 1 less than power-of-two cases + assert from_man_exp(72057594037927935, -56, 53, round_up) == (0, 1, 0, 1) + assert from_man_exp(73786976294838205979, -65, 53, round_nearest) == (0, 1, 1, 1) + assert from_man_exp(31, 0, 4, round_up) == (0, 1, 5, 1) + assert from_man_exp(-31, 0, 4, round_floor) == (1, 1, 5, 1) + assert from_man_exp(255, 0, 7, round_up) == (0, 1, 8, 1) + assert from_man_exp(-255, 0, 7, round_floor) == (1, 1, 8, 1) + +def test_rounding_issue_200(): + a = from_man_exp(9867,-100) + b = from_man_exp(9867,-200) + c = from_man_exp(-1,0) + z = (1, 1023, -10, 10) + assert mpf_add(a, c, 10, 'd') == z + assert mpf_add(b, c, 10, 'd') == z + assert mpf_add(c, a, 10, 'd') == z + assert mpf_add(c, b, 10, 'd') == z + +def test_perturb(): + a = fone + b = from_float(0.99999999999999989) + c = from_float(1.0000000000000002) + assert mpf_perturb(a, 0, 53, round_nearest) == a + assert mpf_perturb(a, 1, 53, round_nearest) == a + assert mpf_perturb(a, 0, 53, round_up) == c + assert mpf_perturb(a, 0, 53, round_ceiling) == c + assert mpf_perturb(a, 0, 53, round_down) == a + assert mpf_perturb(a, 0, 53, round_floor) == a + assert mpf_perturb(a, 1, 53, round_up) == a + assert mpf_perturb(a, 1, 53, round_ceiling) == a + assert mpf_perturb(a, 1, 53, round_down) == b + assert mpf_perturb(a, 1, 53, round_floor) == b + a = mpf_neg(a) + b = mpf_neg(b) + c = mpf_neg(c) + assert mpf_perturb(a, 0, 53, round_nearest) == a + assert mpf_perturb(a, 1, 53, round_nearest) == a + assert mpf_perturb(a, 0, 53, round_up) == a + assert mpf_perturb(a, 0, 53, round_floor) == a + assert mpf_perturb(a, 0, 53, round_down) == b + assert mpf_perturb(a, 0, 53, round_ceiling) == b + assert mpf_perturb(a, 1, 53, round_up) == c + assert mpf_perturb(a, 1, 53, round_floor) == c + assert mpf_perturb(a, 1, 53, round_down) == a + assert mpf_perturb(a, 1, 53, round_ceiling) == a + +def test_add_exact(): + ff = from_float + assert mpf_add(ff(3.0), ff(2.5)) == ff(5.5) + assert mpf_add(ff(3.0), ff(-2.5)) == ff(0.5) + assert mpf_add(ff(-3.0), ff(2.5)) == ff(-0.5) + assert mpf_add(ff(-3.0), ff(-2.5)) == ff(-5.5) + assert mpf_sub(mpf_add(fone, ff(1e-100)), fone) == ff(1e-100) + assert mpf_sub(mpf_add(ff(1e-100), fone), fone) == ff(1e-100) + assert mpf_sub(mpf_add(fone, ff(-1e-100)), fone) == ff(-1e-100) + assert mpf_sub(mpf_add(ff(-1e-100), fone), fone) == ff(-1e-100) + assert mpf_add(fone, fzero) == fone + assert mpf_add(fzero, fone) == fone + assert mpf_add(fzero, fzero) == fzero + +def test_long_exponent_shifts(): + mp.dps = 15 + # Check for possible bugs due to exponent arithmetic overflow + # in a C implementation + x = mpf(1) + for p in [32, 64]: + a = ldexp(1,2**(p-1)) + b = ldexp(1,2**p) + c = ldexp(1,2**(p+1)) + d = ldexp(1,-2**(p-1)) + e = ldexp(1,-2**p) + f = ldexp(1,-2**(p+1)) + assert (x+a) == a + assert (x+b) == b + assert (x+c) == c + assert (x+d) == x + assert (x+e) == x + assert (x+f) == x + assert (a+x) == a + assert (b+x) == b + assert (c+x) == c + assert (d+x) == x + assert 
(e+x) == x + assert (f+x) == x + assert (x-a) == -a + assert (x-b) == -b + assert (x-c) == -c + assert (x-d) == x + assert (x-e) == x + assert (x-f) == x + assert (a-x) == a + assert (b-x) == b + assert (c-x) == c + assert (d-x) == -x + assert (e-x) == -x + assert (f-x) == -x + +def test_float_rounding(): + mp.prec = 64 + for x in [mpf(1), mpf(1)+eps, mpf(1)-eps, -mpf(1)+eps, -mpf(1)-eps]: + fa = float(x) + fb = float(fadd(x,0,prec=53,rounding='n')) + assert fa == fb + z = mpc(x,x) + ca = complex(z) + cb = complex(fadd(z,0,prec=53,rounding='n')) + assert ca == cb + for rnd in ['n', 'd', 'u', 'f', 'c']: + fa = to_float(x._mpf_, rnd=rnd) + fb = to_float(fadd(x,0,prec=53,rounding=rnd)._mpf_, rnd=rnd) + assert fa == fb + mp.prec = 53 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_calculus.py b/MLPY/Lib/site-packages/mpmath/tests/test_calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..f0a59773d672f0db20bb5072773472a5a3cc1d1f --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_calculus.py @@ -0,0 +1,216 @@ +import pytest +from mpmath import * + +def test_approximation(): + mp.dps = 15 + f = lambda x: cos(2-2*x)/x + p, err = chebyfit(f, [2, 4], 8, error=True) + assert err < 1e-5 + for i in range(10): + x = 2 + i/5. + assert abs(polyval(p, x) - f(x)) < err + +def test_limits(): + mp.dps = 15 + assert limit(lambda x: (x-sin(x))/x**3, 0).ae(mpf(1)/6) + assert limit(lambda n: (1+1/n)**n, inf).ae(e) + +def test_polyval(): + assert polyval([], 3) == 0 + assert polyval([0], 3) == 0 + assert polyval([5], 3) == 5 + # 4x^3 - 2x + 5 + p = [4, 0, -2, 5] + assert polyval(p,4) == 253 + assert polyval(p,4,derivative=True) == (253, 190) + +def test_polyroots(): + p = polyroots([1,-4]) + assert p[0].ae(4) + p, q = polyroots([1,2,3]) + assert p.ae(-1 - sqrt(2)*j) + assert q.ae(-1 + sqrt(2)*j) + #this is not a real test, it only tests a specific case + assert polyroots([1]) == [] + pytest.raises(ValueError, lambda: polyroots([0])) + +def test_polyroots_legendre(): + n = 64 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + with mp.workdps(3): + with pytest.raises(mp.NoConvergence): + 
polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=n*10) + + roots = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=n*10) + roots = [str(r) for r in roots] + assert roots == \ + ['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', '-0.961', + '-0.946', '-0.93', '-0.911', '-0.889', '-0.866', '-0.841', + '-0.813', '-0.784', '-0.753', '-0.72', '-0.685', '-0.649', + '-0.611', '-0.572', '-0.531', '-0.489', '-0.446', '-0.402', + '-0.357', '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', '0.17', '0.217', + '0.265', '0.311', '0.357', '0.402', '0.446', '0.489', '0.531', + '0.572', '0.611', '0.649', '0.685', '0.72', '0.753', '0.784', + '0.813', '0.841', '0.866', '0.889', '0.911', '0.93', '0.946', + '0.961', '0.973', '0.983', '0.991', '0.996', '0.999'] + +def test_polyroots_legendre_init(): + extra_prec = 100 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + roots_init = matrix(['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', + '-0.961', '-0.946', '-0.93', '-0.911', '-0.889', + '-0.866', '-0.841', '-0.813', '-0.784', '-0.753', + '-0.72', '-0.685', '-0.649', '-0.611', '-0.572', + '-0.531', '-0.489', '-0.446', '-0.402', '-0.357', + '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', + '0.17', '0.217', '0.265', ' 0.311', '0.357', + '0.402', '0.446', '0.489', '0.531', '0.572', + '0.611', '0.649', '0.685', '0.72', '0.753', + '0.784', '0.813', '0.841', '0.866', '0.889', + '0.911', '0.93', '0.946', '0.961', '0.973', + '0.983', '0.991', '0.996', '0.999', '1.0']) + with mp.workdps(2*mp.dps): + roots_exact = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=2*extra_prec) + with pytest.raises(mp.NoConvergence): + polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=extra_prec) + roots,err = polyroots(coeffs, maxsteps=5, cleanup=True, error=True, + extraprec=extra_prec,roots_init=roots_init) + assert max(matrix(roots_exact)-matrix(roots).apply(abs)) < err + roots1,err1 = polyroots(coeffs, maxsteps=25, cleanup=True, error=True, + 
extraprec=extra_prec,roots_init=roots_init[:60]) + assert max(matrix(roots_exact)-matrix(roots1).apply(abs)) < err1 + +def test_pade(): + one = mpf(1) + mp.dps = 20 + N = 10 + a = [one] + k = 1 + for i in range(1, N+1): + k *= i + a.append(one/k) + p, q = pade(a, N//2, N//2) + for x in arange(0, 1, 0.1): + r = polyval(p[::-1], x)/polyval(q[::-1], x) + assert(r.ae(exp(x), 1.0e-10)) + mp.dps = 15 + +def test_fourier(): + mp.dps = 15 + c, s = fourier(lambda x: x+1, [-1, 2], 2) + #plot([lambda x: x+1, lambda x: fourierval((c, s), [-1, 2], x)], [-1, 2]) + assert c[0].ae(1.5) + assert c[1].ae(-3*sqrt(3)/(2*pi)) + assert c[2].ae(3*sqrt(3)/(4*pi)) + assert s[0] == 0 + assert s[1].ae(3/(2*pi)) + assert s[2].ae(3/(4*pi)) + assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442) + +def test_differint(): + mp.dps = 15 + assert differint(lambda t: t, 2, -0.5).ae(8*sqrt(2/pi)/3) + +def test_invlap(): + mp.dps = 15 + t = 0.01 + fp = lambda p: 1/(p+1)**2 + ft = lambda t: t*exp(-t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + + t = 0.01 + fp = lambda p: log(p)/p + ft = lambda t: -euler-log(t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_compatibility.py b/MLPY/Lib/site-packages/mpmath/tests/test_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..f26d6044b521306b6d1eaeadc5c7839be226dc54 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_compatibility.py @@ -0,0 +1,77 @@ +from mpmath import * +from random import seed, randint, random +import math + +# Test compatibility with Python floats, which are +# IEEE doubles (53-bit) + +N = 5000 +seed(1) + +# Choosing exponents between roughly -140, 140 ensures that +# the Python floats don't overflow or underflow +xs = [(random()-1) * 10**randint(-140, 140) for x in range(N)] +ys = [(random()-1) * 10**randint(-140, 140) for x in range(N)] + +# include some equal values +ys[int(N*0.8):] = xs[int(N*0.8):] + +# Detect whether Python is compiled to use 80-bit floating-point +# instructions, in which case the double compatibility test breaks +uses_x87 = -4.1974624032366689e+117 / -8.4657370748010221e-47 \ + == 4.9581771393902231e+163 + +def test_double_compatibility(): + mp.prec = 53 + for x, y in zip(xs, ys): + mpx = mpf(x) + mpy = mpf(y) + assert mpf(x) == x + assert (mpx < mpy) == (x < y) + assert (mpx > mpy) == (x > y) + assert (mpx == mpy) == (x == y) + assert (mpx != mpy) == (x != y) + assert (mpx <= mpy) == (x <= y) + assert (mpx >= mpy) == (x >= y) + assert mpx == mpx + if uses_x87: + mp.prec = 64 + a = mpx + mpy + b = mpx * mpy + c = mpx / mpy + d = mpx % mpy + mp.prec = 53 + assert +a == x + y + assert +b == x * y + assert +c == x / y + 
assert +d == x % y + else: + assert mpx + mpy == x + y + assert mpx * mpy == x * y + assert mpx / mpy == x / y + assert mpx % mpy == x % y + assert abs(mpx) == abs(x) + assert mpf(repr(x)) == x + assert ceil(mpx) == math.ceil(x) + assert floor(mpx) == math.floor(x) + +def test_sqrt(): + # this fails quite often; it appears to be float + # that rounds the wrong way, not mpf + fail = 0 + mp.prec = 53 + for x in xs: + x = abs(x) + mp.prec = 100 + mp_high = mpf(x)**0.5 + mp.prec = 53 + mp_low = mpf(x)**0.5 + fp = x**0.5 + assert abs(mp_low-mp_high) <= abs(fp-mp_high) + fail += mp_low != fp + assert fail < N/10 + +def test_bugs(): + # particular bugs + assert mpf(4.4408920985006262E-16) < mpf(1.7763568394002505E-15) + assert mpf(-4.4408920985006262E-16) > mpf(-1.7763568394002505E-15) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_convert.py b/MLPY/Lib/site-packages/mpmath/tests/test_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1db5b55c89e980e08fc3fa43cc9715ad68cac9 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_convert.py @@ -0,0 +1,233 @@ +import random +from mpmath import * +from mpmath.libmp import * + + +def test_basic_string(): + """ + Test basic string conversion + """ + mp.dps = 15 + assert mpf('3') == mpf('3.0') == mpf('0003.') == mpf('0.03e2') == mpf(3.0) + assert mpf('30') == mpf('30.0') == mpf('00030.') == mpf(30.0) + for i in range(10): + for j in range(10): + assert mpf('%ie%i' % (i,j)) == i * 10**j + assert str(mpf('25000.0')) == '25000.0' + assert str(mpf('2500.0')) == '2500.0' + assert str(mpf('250.0')) == '250.0' + assert str(mpf('25.0')) == '25.0' + assert str(mpf('2.5')) == '2.5' + assert str(mpf('0.25')) == '0.25' + assert str(mpf('0.025')) == '0.025' + assert str(mpf('0.0025')) == '0.0025' + assert str(mpf('0.00025')) == '0.00025' + assert str(mpf('0.000025')) == '2.5e-5' + assert str(mpf(0)) == '0.0' + assert str(mpf('2.5e1000000000000000000000')) == '2.5e+1000000000000000000000' + assert str(mpf('2.6e-1000000000000000000000')) == '2.6e-1000000000000000000000' + assert str(mpf(1.23402834e-15)) == '1.23402834e-15' + assert str(mpf(-1.23402834e-15)) == '-1.23402834e-15' + assert str(mpf(-1.2344e-15)) == '-1.2344e-15' + assert repr(mpf(-1.2344e-15)) == "mpf('-1.2343999999999999e-15')" + assert str(mpf("2163048125L")) == '2163048125.0' + assert str(mpf("-2163048125l")) == '-2163048125.0' + assert str(mpf("-2163048125L/1088391168")) == '-1.98738118113799' + assert str(mpf("2163048125/1088391168l")) == '1.98738118113799' + +def test_pretty(): + mp.pretty = True + assert repr(mpf(2.5)) == '2.5' + assert repr(mpc(2.5,3.5)) == '(2.5 + 3.5j)' + mp.pretty = False + iv.pretty = True + assert repr(mpi(2.5,3.5)) == '[2.5, 3.5]' + iv.pretty = False + +def test_str_whitespace(): + assert mpf('1.26 ') == 1.26 + +def test_unicode(): + mp.dps = 15 + try: + unicode = unicode + except NameError: + unicode = str + assert mpf(unicode('2.76')) == 2.76 + assert mpf(unicode('inf')) == inf + +def test_str_format(): + assert to_str(from_float(0.1),15,strip_zeros=False) == '0.100000000000000' + assert to_str(from_float(0.0),15,show_zero_exponent=True) == '0.0e+0' + assert to_str(from_float(0.0),0,show_zero_exponent=True) == '.0e+0' + assert to_str(from_float(0.0),0,show_zero_exponent=False) == '.0' + assert to_str(from_float(0.0),1,show_zero_exponent=True) == '0.0e+0' + assert to_str(from_float(0.0),1,show_zero_exponent=False) == '0.0' + assert to_str(from_float(1.23),3,show_zero_exponent=True) == '1.23e+0' + assert 
to_str(from_float(1.23456789000000e-2),15,strip_zeros=False,min_fixed=0,max_fixed=0) == '1.23456789000000e-2' + assert to_str(from_float(1.23456789000000e+2),15,strip_zeros=False,min_fixed=0,max_fixed=0) == '1.23456789000000e+2' + assert to_str(from_float(2.1287e14), 15, max_fixed=1000) == '212870000000000.0' + assert to_str(from_float(2.1287e15), 15, max_fixed=1000) == '2128700000000000.0' + assert to_str(from_float(2.1287e16), 15, max_fixed=1000) == '21287000000000000.0' + assert to_str(from_float(2.1287e30), 15, max_fixed=1000) == '2128700000000000000000000000000.0' + +def test_tight_string_conversion(): + mp.dps = 15 + # In an old version, '0.5' wasn't recognized as representing + # an exact binary number and was erroneously rounded up or down + assert from_str('0.5', 10, round_floor) == fhalf + assert from_str('0.5', 10, round_ceiling) == fhalf + +def test_eval_repr_invariant(): + """Test that eval(repr(x)) == x""" + random.seed(123) + for dps in [10, 15, 20, 50, 100]: + mp.dps = dps + for i in range(1000): + a = mpf(random.random())**0.5 * 10**random.randint(-100, 100) + assert eval(repr(a)) == a + mp.dps = 15 + +def test_str_bugs(): + mp.dps = 15 + # Decimal rounding used to give the wrong exponent in some cases + assert str(mpf('1e600')) == '1.0e+600' + assert str(mpf('1e10000')) == '1.0e+10000' + +def test_str_prec0(): + assert to_str(from_float(1.234), 0) == '.0e+0' + assert to_str(from_float(1e-15), 0) == '.0e-15' + assert to_str(from_float(1e+15), 0) == '.0e+15' + assert to_str(from_float(-1e-15), 0) == '-.0e-15' + assert to_str(from_float(-1e+15), 0) == '-.0e+15' + +def test_convert_rational(): + mp.dps = 15 + assert from_rational(30, 5, 53, round_nearest) == (0, 3, 1, 2) + assert from_rational(-7, 4, 53, round_nearest) == (1, 7, -2, 3) + assert to_rational((0, 1, -1, 1)) == (1, 2) + +def test_custom_class(): + class mympf: + @property + def _mpf_(self): + return mpf(3.5)._mpf_ + class mympc: + @property + def _mpc_(self): + return mpf(3.5)._mpf_, mpf(2.5)._mpf_ + assert mpf(2) + mympf() == 5.5 + assert mympf() + mpf(2) == 5.5 + assert mpf(mympf()) == 3.5 + assert mympc() + mpc(2) == mpc(5.5, 2.5) + assert mpc(2) + mympc() == mpc(5.5, 2.5) + assert mpc(mympc()) == (3.5+2.5j) + +def test_conversion_methods(): + class SomethingRandom: + pass + class SomethingReal: + def _mpmath_(self, prec, rounding): + return mp.make_mpf(from_str('1.3', prec, rounding)) + class SomethingComplex: + def _mpmath_(self, prec, rounding): + return mp.make_mpc((from_str('1.3', prec, rounding), \ + from_str('1.7', prec, rounding))) + x = mpf(3) + z = mpc(3) + a = SomethingRandom() + y = SomethingReal() + w = SomethingComplex() + for d in [15, 45]: + mp.dps = d + assert (x+y).ae(mpf('4.3')) + assert (y+x).ae(mpf('4.3')) + assert (x+w).ae(mpc('4.3', '1.7')) + assert (w+x).ae(mpc('4.3', '1.7')) + assert (z+y).ae(mpc('4.3')) + assert (y+z).ae(mpc('4.3')) + assert (z+w).ae(mpc('4.3', '1.7')) + assert (w+z).ae(mpc('4.3', '1.7')) + x-y; y-x; x-w; w-x; z-y; y-z; z-w; w-z + x*y; y*x; x*w; w*x; z*y; y*z; z*w; w*z + x/y; y/x; x/w; w/x; z/y; y/z; z/w; w/z + x**y; y**x; x**w; w**x; z**y; y**z; z**w; w**z + x==y; y==x; x==w; w==x; z==y; y==z; z==w; w==z + mp.dps = 15 + assert x.__add__(a) is NotImplemented + assert x.__radd__(a) is NotImplemented + assert x.__lt__(a) is NotImplemented + assert x.__gt__(a) is NotImplemented + assert x.__le__(a) is NotImplemented + assert x.__ge__(a) is NotImplemented + assert x.__eq__(a) is NotImplemented + assert x.__ne__(a) is NotImplemented + # implementation detail + if 
hasattr(x, "__cmp__"): + assert x.__cmp__(a) is NotImplemented + assert x.__sub__(a) is NotImplemented + assert x.__rsub__(a) is NotImplemented + assert x.__mul__(a) is NotImplemented + assert x.__rmul__(a) is NotImplemented + assert x.__div__(a) is NotImplemented + assert x.__rdiv__(a) is NotImplemented + assert x.__mod__(a) is NotImplemented + assert x.__rmod__(a) is NotImplemented + assert x.__pow__(a) is NotImplemented + assert x.__rpow__(a) is NotImplemented + assert z.__add__(a) is NotImplemented + assert z.__radd__(a) is NotImplemented + assert z.__eq__(a) is NotImplemented + assert z.__ne__(a) is NotImplemented + assert z.__sub__(a) is NotImplemented + assert z.__rsub__(a) is NotImplemented + assert z.__mul__(a) is NotImplemented + assert z.__rmul__(a) is NotImplemented + assert z.__div__(a) is NotImplemented + assert z.__rdiv__(a) is NotImplemented + assert z.__pow__(a) is NotImplemented + assert z.__rpow__(a) is NotImplemented + +def test_mpmathify(): + assert mpmathify('1/2') == 0.5 + assert mpmathify('(1.0+1.0j)') == mpc(1, 1) + assert mpmathify('(1.2e-10 - 3.4e5j)') == mpc('1.2e-10', '-3.4e5') + assert mpmathify('1j') == mpc(1j) + +def test_issue548(): + try: + # This expression is invalid, but may trigger the ReDOS vulnerability + # in the regular expression for parsing complex numbers. + mpmathify('(' + '1' * 5000 + '!j') + except: + return + # The expression is invalid and should raise an exception. + assert False + +def test_compatibility(): + try: + import numpy as np + from fractions import Fraction + from decimal import Decimal + import decimal + except ImportError: + return + # numpy types + for nptype in np.core.numerictypes.typeDict.values(): + if issubclass(nptype, np.complexfloating): + x = nptype(complex(0.5, -0.5)) + elif issubclass(nptype, np.floating): + x = nptype(0.5) + elif issubclass(nptype, np.integer): + x = nptype(2) + # Handle the weird types + try: diff = np.abs(type(np.sqrt(x))(sqrt(x)) - np.sqrt(x)) + except: continue + assert diff < 2.0**-53 + #Fraction and Decimal + oldprec = mp.prec + mp.prec = 1000 + decimal.getcontext().prec = mp.dps + assert sqrt(Fraction(2, 3)).ae(sqrt(mpf('2/3'))) + assert sqrt(Decimal(2)/Decimal(3)).ae(sqrt(mpf('2/3'))) + mp.prec = oldprec diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_diff.py b/MLPY/Lib/site-packages/mpmath/tests/test_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..f5711609da38862eb4fd62c88d35f1704c9425a4 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_diff.py @@ -0,0 +1,61 @@ +from mpmath import * + +def test_diff(): + mp.dps = 15 + assert diff(log, 2.0, n=0).ae(log(2)) + assert diff(cos, 1.0).ae(-sin(1)) + assert diff(abs, 0.0) == 0 + assert diff(abs, 0.0, direction=1) == 1 + assert diff(abs, 0.0, direction=-1) == -1 + assert diff(exp, 1.0).ae(e) + assert diff(exp, 1.0, n=5).ae(e) + assert diff(exp, 2.0, n=5, direction=3*j).ae(e**2) + assert diff(lambda x: x**2, 3.0, method='quad').ae(6) + assert diff(lambda x: 3+x**5, 3.0, n=2, method='quad').ae(540) + assert diff(lambda x: 3+x**5, 3.0, n=2, method='step').ae(540) + assert diffun(sin)(2).ae(cos(2)) + assert diffun(sin, n=2)(2).ae(-sin(2)) + +def test_diffs(): + mp.dps = 15 + assert [chop(d) for d in diffs(sin, 0, 1)] == [0, 1] + assert [chop(d) for d in diffs(sin, 0, 1, method='quad')] == [0, 1] + assert [chop(d) for d in diffs(sin, 0, 2)] == [0, 1, 0] + assert [chop(d) for d in diffs(sin, 0, 2, method='quad')] == [0, 1, 0] + +def test_taylor(): + mp.dps = 15 + # Easy to test since the coefficients are 
exact in floating-point + assert taylor(sqrt, 1, 4) == [1, 0.5, -0.125, 0.0625, -0.0390625] + +def test_diff_partial(): + mp.dps = 15 + x,y,z = xyz = 2,3,7 + f = lambda x,y,z: 3*x**2 * (y+2)**3 * z**5 + assert diff(f, xyz, (0,0,0)).ae(25210500) + assert diff(f, xyz, (0,0,1)).ae(18007500) + assert diff(f, xyz, (0,0,2)).ae(10290000) + assert diff(f, xyz, (0,1,0)).ae(15126300) + assert diff(f, xyz, (0,1,1)).ae(10804500) + assert diff(f, xyz, (0,1,2)).ae(6174000) + assert diff(f, xyz, (0,2,0)).ae(6050520) + assert diff(f, xyz, (0,2,1)).ae(4321800) + assert diff(f, xyz, (0,2,2)).ae(2469600) + assert diff(f, xyz, (1,0,0)).ae(25210500) + assert diff(f, xyz, (1,0,1)).ae(18007500) + assert diff(f, xyz, (1,0,2)).ae(10290000) + assert diff(f, xyz, (1,1,0)).ae(15126300) + assert diff(f, xyz, (1,1,1)).ae(10804500) + assert diff(f, xyz, (1,1,2)).ae(6174000) + assert diff(f, xyz, (1,2,0)).ae(6050520) + assert diff(f, xyz, (1,2,1)).ae(4321800) + assert diff(f, xyz, (1,2,2)).ae(2469600) + assert diff(f, xyz, (2,0,0)).ae(12605250) + assert diff(f, xyz, (2,0,1)).ae(9003750) + assert diff(f, xyz, (2,0,2)).ae(5145000) + assert diff(f, xyz, (2,1,0)).ae(7563150) + assert diff(f, xyz, (2,1,1)).ae(5402250) + assert diff(f, xyz, (2,1,2)).ae(3087000) + assert diff(f, xyz, (2,2,0)).ae(3025260) + assert diff(f, xyz, (2,2,1)).ae(2160900) + assert diff(f, xyz, (2,2,2)).ae(1234800) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_division.py b/MLPY/Lib/site-packages/mpmath/tests/test_division.py new file mode 100644 index 0000000000000000000000000000000000000000..c704cadeb953793ac0a887aa09c4278cf68a2824 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_division.py @@ -0,0 +1,143 @@ +from mpmath.libmp import * +from mpmath import mpf, mp + +from random import randint, choice, seed + +all_modes = [round_floor, round_ceiling, round_down, round_up, round_nearest] + +fb = from_bstr +fi = from_int +ff = from_float + + +def test_div_1_3(): + a = fi(1) + b = fi(3) + c = fi(-1) + + # floor rounds down, ceiling rounds up + assert mpf_div(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_div(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_div(a, b, 7, round_down) == fb('0.01010101') + assert mpf_div(a, b, 7, round_up) == fb('0.01010110') + assert mpf_div(a, b, 7, round_nearest) == fb('0.01010101') + + # floor rounds up, ceiling rounds down + assert mpf_div(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_nearest) == fb('-0.01010101') + +def test_mpf_divi_1_3(): + a = 1 + b = fi(3) + c = -1 + assert mpf_rdiv_int(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_down) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_up) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_nearest) == fb('0.01010101') + assert mpf_rdiv_int(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_nearest) == fb('-0.01010101') + + +def test_div_300(): + + q = fi(1000000) + a = fi(300499999) # a/q is a little less than a half-integer + b = fi(300500000) # b/q exactly a half-integer + c = fi(300500001) # c/q 
is a little more than a half-integer + + # Check nearest integer rounding (prec=9 as 2**8 < 300 < 2**9) + + assert mpf_div(a, q, 9, round_down) == fi(300) + assert mpf_div(b, q, 9, round_down) == fi(300) + assert mpf_div(c, q, 9, round_down) == fi(300) + assert mpf_div(a, q, 9, round_up) == fi(301) + assert mpf_div(b, q, 9, round_up) == fi(301) + assert mpf_div(c, q, 9, round_up) == fi(301) + + # Nearest even integer is down + assert mpf_div(a, q, 9, round_nearest) == fi(300) + assert mpf_div(b, q, 9, round_nearest) == fi(300) + assert mpf_div(c, q, 9, round_nearest) == fi(301) + + # Nearest even integer is up + a = fi(301499999) + b = fi(301500000) + c = fi(301500001) + assert mpf_div(a, q, 9, round_nearest) == fi(301) + assert mpf_div(b, q, 9, round_nearest) == fi(302) + assert mpf_div(c, q, 9, round_nearest) == fi(302) + + +def test_tight_integer_division(): + # Test that integer division at tightest possible precision is exact + N = 100 + seed(1) + for i in range(N): + a = choice([1, -1]) * randint(1, 1< 1: + print("original matrix (hessenberg):\n", A) + + n = A.rows + + Q, H = mp.hessenberg(A) + + if verbose > 1: + print("Q:\n",Q) + print("H:\n",H) + + B = Q * H * Q.transpose_conj() + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for x in xrange(n): + for y in xrange(n): + err0 += abs(A[y,x] - B[y,x]) + err0 /= n * n + + err1 = 0 + for x in xrange(n): + for y in xrange(x + 2, n): + err1 += abs(H[y,x]) + + if verbose > 0: + print("difference (H):", err0, err1) + + if verbose > 1: + print("B:\n", B) + + assert err0 < eps + assert err1 == 0 + + +def run_schur(A, verbose = 0): + if verbose > 1: + print("original matrix (schur):\n", A) + + n = A.rows + + Q, R = mp.schur(A) + + if verbose > 1: + print("Q:\n", Q) + print("R:\n", R) + + B = Q * R * Q.transpose_conj() + C = Q * Q.transpose_conj() + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for x in xrange(n): + for y in xrange(n): + err0 += abs(A[y,x] - B[y,x]) + err0 /= n * n + + err1 = 0 + for x in xrange(n): + for y in xrange(n): + if x == y: + C[y,x] -= 1 + err1 += abs(C[y,x]) + err1 /= n * n + + err2 = 0 + for x in xrange(n): + for y in xrange(x + 1, n): + err2 += abs(R[y,x]) + + if verbose > 0: + print("difference (S):", err0, err1, err2) + + if verbose > 1: + print("B:\n", B) + + assert err0 < eps + assert err1 < eps + assert err2 == 0 + +def run_eig(A, verbose = 0): + if verbose > 1: + print("original matrix (eig):\n", A) + + n = A.rows + + E, EL, ER = mp.eig(A, left = True, right = True) + + if verbose > 1: + print("E:\n", E) + print("EL:\n", EL) + print("ER:\n", ER) + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + err0 = 0 + for i in xrange(n): + B = A * ER[:,i] - E[i] * ER[:,i] + err0 = max(err0, mp.mnorm(B)) + + B = EL[i,:] * A - EL[i,:] * E[i] + err0 = max(err0, mp.mnorm(B)) + + err0 /= n * n + + if verbose > 0: + print("difference (E):", err0) + + assert err0 < eps + +##################### + +def test_eig_dyn(): + v = 0 + for i in xrange(5): + n = 1 + int(mp.rand() * 5) + if mp.rand() > 0.5: + # real + A = 2 * mp.randmatrix(n, n) - 1 + if mp.rand() > 0.5: + A *= 10 + for x in xrange(n): + for y in xrange(n): + A[x,y] = int(A[x,y]) + else: + A = (2 * mp.randmatrix(n, n) - 1) + 1j * (2 * mp.randmatrix(n, n) - 1) + if mp.rand() > 0.5: + A *= 10 + for x in xrange(n): + for y in xrange(n): + A[x,y] = int(mp.re(A[x,y])) + 1j * int(mp.im(A[x,y])) + + run_hessenberg(A, verbose = v) + run_schur(A, verbose = v) + run_eig(A, verbose = v) + +def test_eig(): + v = 0 + AS = [] + + A = mp.matrix([[2, 1, 0], # jordan block of size 3 + 
[0, 2, 1], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A = mp.matrix([[2, 0, 0], # jordan block of size 2 + [0, 2, 1], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A = mp.matrix([[2, 0, 1], # jordan block of size 2 + [0, 2, 0], + [0, 0, 2]]) + AS.append(A) + AS.append(A.transpose()) + + A= mp.matrix([[0, 0, 1], # cyclic + [1, 0, 0], + [0, 1, 0]]) + AS.append(A) + AS.append(A.transpose()) + + for A in AS: + run_hessenberg(A, verbose = v) + run_schur(A, verbose = v) + run_eig(A, verbose = v) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py b/MLPY/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py new file mode 100644 index 0000000000000000000000000000000000000000..aab3d8ea3142aada6e14ad6d3ea25a7e8293554d --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_eigen_symmetric.py @@ -0,0 +1,357 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from mpmath import mp +from mpmath import libmp + +xrange = libmp.backend.xrange + +def run_eigsy(A, verbose = False): + if verbose: + print("original matrix:\n", str(A)) + + D, Q = mp.eigsy(A) + B = Q * mp.diag(D) * Q.transpose() + C = A - B + E = Q * Q.transpose() - mp.eye(A.rows) + + if verbose: + print("eigenvalues:\n", D) + print("eigenvectors:\n", Q) + + NC = mp.mnorm(C) + NE = mp.mnorm(E) + + if verbose: + print("difference:", NC, "\n", C, "\n") + print("difference:", NE, "\n", E, "\n") + + eps = mp.exp( 0.8 * mp.log(mp.eps)) + + assert NC < eps + assert NE < eps + + return NC + +def run_eighe(A, verbose = False): + if verbose: + print("original matrix:\n", str(A)) + + D, Q = mp.eighe(A) + B = Q * mp.diag(D) * Q.transpose_conj() + C = A - B + E = Q * Q.transpose_conj() - mp.eye(A.rows) + + if verbose: + print("eigenvalues:\n", D) + print("eigenvectors:\n", Q) + + NC = mp.mnorm(C) + NE = mp.mnorm(E) + + if verbose: + print("difference:", NC, "\n", C, "\n") + print("difference:", NE, "\n", E, "\n") + + eps = mp.exp( 0.8 * mp.log(mp.eps)) + + assert NC < eps + assert NE < eps + + return NC + +def run_svd_r(A, full_matrices = False, verbose = True): + + m, n = A.rows, A.cols + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + if verbose: + print("original matrix:\n", str(A)) + print("full", full_matrices) + + U, S0, V = mp.svd_r(A, full_matrices = full_matrices) + + S = mp.zeros(U.cols, V.rows) + for j in xrange(min(m, n)): + S[j,j] = S0[j] + + if verbose: + print("U:\n", str(U)) + print("S:\n", str(S0)) + print("V:\n", str(V)) + + C = U * S * V - A + err = mp.mnorm(C) + if verbose: + print("C\n", str(C), "\n", err) + assert err < eps + + D = V * V.transpose() - mp.eye(V.rows) + err = mp.mnorm(D) + if verbose: + print("D:\n", str(D), "\n", err) + assert err < eps + + E = U.transpose() * U - mp.eye(U.cols) + err = mp.mnorm(E) + if verbose: + print("E:\n", str(E), "\n", err) + assert err < eps + +def run_svd_c(A, full_matrices = False, verbose = True): + + m, n = A.rows, A.cols + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + if verbose: + print("original matrix:\n", str(A)) + print("full", full_matrices) + + U, S0, V = mp.svd_c(A, full_matrices = full_matrices) + + S = mp.zeros(U.cols, V.rows) + for j in xrange(min(m, n)): + S[j,j] = S0[j] + + if verbose: + print("U:\n", str(U)) + print("S:\n", str(S0)) + print("V:\n", str(V)) + + C = U * S * V - A + err = mp.mnorm(C) + if verbose: + print("C\n", str(C), "\n", err) + assert err < eps + + D = V * V.transpose_conj() - mp.eye(V.rows) + err = mp.mnorm(D) + if verbose: + print("D:\n", str(D), "\n", err) + assert err < eps + + E = U.transpose_conj() * U - 
mp.eye(U.cols) + err = mp.mnorm(E) + if verbose: + print("E:\n", str(E), "\n", err) + assert err < eps + +def run_gauss(qtype, a, b): + eps = 1e-5 + + d, e = mp.gauss_quadrature(len(a), qtype) + d -= mp.matrix(a) + e -= mp.matrix(b) + + assert mp.mnorm(d) < eps + assert mp.mnorm(e) < eps + +def irandmatrix(n, range = 10): + """ + random matrix with integer entries + """ + A = mp.matrix(n, n) + for i in xrange(n): + for j in xrange(n): + A[i,j]=int( (2 * mp.rand() - 1) * range) + return A + +####################### + +def test_eighe_fixed_matrix(): + A = mp.matrix([[2, 3], [3, 5]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[7, -11], [-11, 13]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[2, 11, 7], [11, 3, 13], [7, 13, 5]]) + run_eigsy(A) + run_eighe(A) + + A = mp.matrix([[2, 0, 7], [0, 3, 1], [7, 1, 5]]) + run_eigsy(A) + run_eighe(A) + + # + + A = mp.matrix([[2, 3+7j], [3-7j, 5]]) + run_eighe(A) + + A = mp.matrix([[2, -11j, 0], [+11j, 3, 29j], [0, -29j, 5]]) + run_eighe(A) + + A = mp.matrix([[2, 11 + 17j, 7 + 19j], [11 - 17j, 3, -13 + 23j], [7 - 19j, -13 - 23j, 5]]) + run_eighe(A) + +def test_eigsy_randmatrix(): + N = 5 + + for a in xrange(10): + A = 2 * mp.randmatrix(N, N) - 1 + + for i in xrange(0, N): + for j in xrange(i + 1, N): + A[j,i] = A[i,j] + + run_eigsy(A) + +def test_eighe_randmatrix(): + N = 5 + + for a in xrange(10): + A = (2 * mp.randmatrix(N, N) - 1) + 1j * (2 * mp.randmatrix(N, N) - 1) + + for i in xrange(0, N): + A[i,i] = mp.re(A[i,i]) + for j in xrange(i + 1, N): + A[j,i] = mp.conj(A[i,j]) + + run_eighe(A) + +def test_eigsy_irandmatrix(): + N = 4 + R = 4 + + for a in xrange(10): + A=irandmatrix(N, R) + + for i in xrange(0, N): + for j in xrange(i + 1, N): + A[j,i] = A[i,j] + + run_eigsy(A) + +def test_eighe_irandmatrix(): + N = 4 + R = 4 + + for a in xrange(10): + A=irandmatrix(N, R) + 1j * irandmatrix(N, R) + + for i in xrange(0, N): + A[i,i] = mp.re(A[i,i]) + for j in xrange(i + 1, N): + A[j,i] = mp.conj(A[i,j]) + + run_eighe(A) + +def test_svd_r_rand(): + for i in xrange(5): + full = mp.rand() > 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = 2 * mp.randmatrix(m, n) - 1 + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(A[x,y]) + + run_svd_r(A, full_matrices = full, verbose = False) + +def test_svd_c_rand(): + for i in xrange(5): + full = mp.rand() > 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = (2 * mp.randmatrix(m, n) - 1) + 1j * (2 * mp.randmatrix(m, n) - 1) + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(mp.re(A[x,y])) + 1j * int(mp.im(A[x,y])) + + run_svd_c(A, full_matrices=full, verbose=False) + +def test_svd_test_case(): + # a test case from Golub and Reinsch + # (see wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).) 
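These helpers all test against the same loosened tolerance, eps = exp(0.8*log(mp.eps)), i.e. mp.eps**0.8, so that rounding accumulated inside the decompositions does not trip the asserts. A minimal standalone version of the svd_r reconstruction check, written as a sketch against mpmath's public API (svd_r, mnorm, randmatrix) rather than the shipped suite:

```python
# Sketch: reconstruct A from its thin SVD and bound the error by eps**0.8.
from mpmath import mp

mp.dps = 30
A = mp.randmatrix(4, 3)                 # random real 4x3 test matrix
U, S0, V = mp.svd_r(A)                  # thin SVD: A ~ U * diag(S0) * V
S = mp.zeros(U.cols, V.rows)
for jj in range(min(A.rows, A.cols)):
    S[jj, jj] = S0[jj]                  # place singular values on the diagonal
eps = mp.exp(0.8 * mp.log(mp.eps))      # loosened tolerance, eps_machine**0.8
assert mp.mnorm(U * S * V - A) < eps    # reconstruction error stays tiny
```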
+ + eps = mp.exp(0.8 * mp.log(mp.eps)) + + a = [[22, 10, 2, 3, 7], + [14, 7, 10, 0, 8], + [-1, 13, -1, -11, 3], + [-3, -2, 13, -2, 4], + [ 9, 8, 1, -2, 4], + [ 9, 1, -7, 5, -1], + [ 2, -6, 6, 5, 1], + [ 4, 5, 0, -2, 2]] + + a = mp.matrix(a) + b = mp.matrix([mp.sqrt(1248), 20, mp.sqrt(384), 0, 0]) + + S = mp.svd_r(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + S = mp.svd_c(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + +def test_gauss_quadrature_static(): + a = [-0.57735027, 0.57735027] + b = [ 1, 1] + run_gauss("legendre", a , b) + + a = [ -0.906179846, -0.538469310, 0, 0.538469310, 0.906179846] + b = [ 0.23692689, 0.47862867, 0.56888889, 0.47862867, 0.23692689] + run_gauss("legendre", a , b) + + a = [ 0.06943184, 0.33000948, 0.66999052, 0.93056816] + b = [ 0.17392742, 0.32607258, 0.32607258, 0.17392742] + run_gauss("legendre01", a , b) + + a = [-0.70710678, 0.70710678] + b = [ 0.88622693, 0.88622693] + run_gauss("hermite", a , b) + + a = [ -2.02018287, -0.958572465, 0, 0.958572465, 2.02018287] + b = [ 0.01995324, 0.39361932, 0.94530872, 0.39361932, 0.01995324] + run_gauss("hermite", a , b) + + a = [ 0.41577456, 2.29428036, 6.28994508] + b = [ 0.71109301, 0.27851773, 0.01038926] + run_gauss("laguerre", a , b) + +def test_gauss_quadrature_dynamic(verbose = False): + n = 5 + + A = mp.randmatrix(2 * n, 1) + + def F(x): + r = 0 + for i in xrange(len(A) - 1, -1, -1): + r = r * x + A[i] + return r + + def run(qtype, FW, R, alpha = 0, beta = 0): + X, W = mp.gauss_quadrature(n, qtype, alpha = alpha, beta = beta) + + a = 0 + for i in xrange(len(X)): + a += W[i] * F(X[i]) + + b = mp.quad(lambda x: FW(x) * F(x), R) + + c = mp.fabs(a - b) + + if verbose: + print(qtype, c, a, b) + + assert c < 1e-5 + + run("legendre", lambda x: 1, [-1, 1]) + run("legendre01", lambda x: 1, [0, 1]) + run("hermite", lambda x: mp.exp(-x*x), [-mp.inf, mp.inf]) + run("laguerre", lambda x: mp.exp(-x), [0, mp.inf]) + run("glaguerre", lambda x: mp.sqrt(x)*mp.exp(-x), [0, mp.inf], alpha = 1 / mp.mpf(2)) + run("chebyshev1", lambda x: 1/mp.sqrt(1-x*x), [-1, 1]) + run("chebyshev2", lambda x: mp.sqrt(1-x*x), [-1, 1]) + run("jacobi", lambda x: (1-x)**(1/mp.mpf(3)) * (1+x)**(1/mp.mpf(5)), [-1, 1], alpha = 1 / mp.mpf(3), beta = 1 / mp.mpf(5) ) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_elliptic.py b/MLPY/Lib/site-packages/mpmath/tests/test_elliptic.py new file mode 100644 index 0000000000000000000000000000000000000000..4dddc2df34b8d2fa7f2028b3501e5b7f140d8912 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_elliptic.py @@ -0,0 +1,670 @@ +""" +Limited tests of the elliptic functions module. A full suite of +extensive testing can be found in elliptic_torture_tests.py + +Author of the first version: M.T. Taschuk + +References: + +[1] Abramowitz & Stegun. 
'Handbook of Mathematical Functions, 9th Ed.', + (Dover duplicate of 1972 edition) +[2] Whittaker 'A Course of Modern Analysis, 4th Ed.', 1946, + Cambridge University Press + +""" + +import mpmath +import random +import pytest + +from mpmath import * + +def mpc_ae(a, b, eps=eps): + res = True + res = res and a.real.ae(b.real, eps) + res = res and a.imag.ae(b.imag, eps) + return res + +zero = mpf(0) +one = mpf(1) + +jsn = ellipfun('sn') +jcn = ellipfun('cn') +jdn = ellipfun('dn') + +calculate_nome = lambda k: qfrom(k=k) + +def test_ellipfun(): + mp.dps = 15 + assert ellipfun('ss', 0, 0) == 1 + assert ellipfun('cc', 0, 0) == 1 + assert ellipfun('dd', 0, 0) == 1 + assert ellipfun('nn', 0, 0) == 1 + assert ellipfun('sn', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cn', 0.25, 0).ae(cos(0.25)) + assert ellipfun('dn', 0.25, 0).ae(1) + assert ellipfun('ns', 0.25, 0).ae(csc(0.25)) + assert ellipfun('nc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('nd', 0.25, 0).ae(1) + assert ellipfun('sc', 0.25, 0).ae(tan(0.25)) + assert ellipfun('sd', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cd', 0.25, 0).ae(cos(0.25)) + assert ellipfun('cs', 0.25, 0).ae(cot(0.25)) + assert ellipfun('dc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('ds', 0.25, 0).ae(csc(0.25)) + assert ellipfun('sn', 0.25, 1).ae(tanh(0.25)) + assert ellipfun('cn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('dn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('ns', 0.25, 1).ae(coth(0.25)) + assert ellipfun('nc', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('nd', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('sc', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('sd', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('cd', 0.25, 1).ae(1) + assert ellipfun('cs', 0.25, 1).ae(csch(0.25)) + assert ellipfun('dc', 0.25, 1).ae(1) + assert ellipfun('ds', 0.25, 1).ae(csch(0.25)) + assert ellipfun('sn', 0.25, 0.5).ae(0.24615967096986145833) + assert ellipfun('cn', 0.25, 0.5).ae(0.96922928989378439337) + assert ellipfun('dn', 0.25, 0.5).ae(0.98473484156599474563) + assert ellipfun('ns', 0.25, 0.5).ae(4.0624038700573130369) + assert ellipfun('nc', 0.25, 0.5).ae(1.0317476065024692949) + assert ellipfun('nd', 0.25, 0.5).ae(1.0155017958029488665) + assert ellipfun('sc', 0.25, 0.5).ae(0.25397465134058993408) + assert ellipfun('sd', 0.25, 0.5).ae(0.24997558792415733063) + assert ellipfun('cd', 0.25, 0.5).ae(0.98425408443195497052) + assert ellipfun('cs', 0.25, 0.5).ae(3.9374008182374110826) + assert ellipfun('dc', 0.25, 0.5).ae(1.0159978158253033913) + assert ellipfun('ds', 0.25, 0.5).ae(4.0003906313579720593) + + + + +def test_calculate_nome(): + mp.dps = 100 + + q = calculate_nome(zero) + assert(q == zero) + + mp.dps = 25 + # used Mathematica's EllipticNomeQ[m] + math1 = [(mpf(1)/10, mpf('0.006584651553858370274473060')), + (mpf(2)/10, mpf('0.01394285727531826872146409')), + (mpf(3)/10, mpf('0.02227743615715350822901627')), + (mpf(4)/10, mpf('0.03188334731336317755064299')), + (mpf(5)/10, mpf('0.04321391826377224977441774')), + (mpf(6)/10, mpf('0.05702025781460967637754953')), + (mpf(7)/10, mpf('0.07468994353717944761143751')), + (mpf(8)/10, mpf('0.09927369733882489703607378')), + (mpf(9)/10, mpf('0.1401731269542615524091055')), + (mpf(9)/10, mpf('0.1401731269542615524091055'))] + + for i in math1: + m = i[0] + q = calculate_nome(sqrt(m)) + assert q.ae(i[1]) + + mp.dps = 15 + +def test_jtheta(): + mp.dps = 25 + + z = q = zero + for n in range(1,5): + value = jtheta(n, z, q) + assert(value == (n-1)//2) + + for q in [one, mpf(2)]: + for n in range(1,5): + pytest.raises(ValueError, lambda: jtheta(n, 
z, q)) + + z = one/10 + q = one/11 + + # Mathematica N[EllipticTheta[1, 1/10, 1/11], 25] + res = mpf('0.1069552990104042681962096') + result = jtheta(1, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[2, 1/10, 1/11], 25] + res = mpf('1.101385760258855791140606') + result = jtheta(2, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[3, 1/10, 1/11], 25] + res = mpf('1.178319743354331061795905') + result = jtheta(3, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[4, 1/10, 1/11], 25] + res = mpf('0.8219318954665153577314573') + result = jtheta(4, z, q) + assert(result.ae(res)) + + # test for sin zeros for jtheta(1, z, q) + # test for cos zeros for jtheta(2, z, q) + z1 = pi + z2 = pi/2 + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + result = jtheta(1, z1, q) + assert(result.ae(0)) + result = jtheta(2, z2, q) + assert(result.ae(0)) + mp.dps = 15 + + +def test_jtheta_issue_79(): + # near the circle of convergence |q| = 1 the convergence slows + # down; for |q| > Q_LIM the theta functions raise ValueError + mp.dps = 30 + mp.dps += 30 + q = mpf(6)/10 - one/10**6 - mpf(8)/10 * j + mp.dps -= 30 + # Mathematica run first + # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 2000] + # then it works: + # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 30] + res = mpf('32.0031009628901652627099524264') + \ + mpf('16.6153027998236087899308935624') * j + result = jtheta(3, 1, q) + # check that for abs(q) > Q_LIM a ValueError exception is raised + mp.dps += 30 + q = mpf(6)/10 - one/10**7 - mpf(8)/10 * j + mp.dps -= 30 + pytest.raises(ValueError, lambda: jtheta(3, 1, q)) + + # bug reported in issue 79 + mp.dps = 100 + z = (1+j)/3 + q = mpf(368983957219251)/10**15 + mpf(636363636363636)/10**15 * j + # Mathematica N[EllipticTheta[1, z, q], 35] + res = mpf('2.4439389177990737589761828991467471') + \ + mpf('0.5446453005688226915290954851851490') *j + mp.dps = 30 + result = jtheta(1, z, q) + assert(result.ae(res)) + mp.dps = 80 + z = 3 + 4*j + q = 0.5 + 0.5*j + r1 = jtheta(1, z, q) + mp.dps = 15 + r2 = jtheta(1, z, q) + assert r1.ae(r2) + mp.dps = 80 + z = 3 + j + q1 = exp(j*3) + # longer test + # for n in range(1, 6) + for n in range(1, 2): + mp.dps = 80 + q = q1*(1 - mpf(1)/10**n) + r1 = jtheta(1, z, q) + mp.dps = 15 + r2 = jtheta(1, z, q) + assert r1.ae(r2) + mp.dps = 15 + # issue 79 about high derivatives + assert jtheta(3, 4.5, 0.25, 9).ae(1359.04892680683) + assert jtheta(3, 4.5, 0.25, 50).ae(-6.14832772630905e+33) + mp.dps = 50 + r = jtheta(3, 4.5, 0.25, 9) + assert r.ae('1359.048926806828939547859396600218966947753213803') + r = jtheta(3, 4.5, 0.25, 50) + assert r.ae('-6148327726309051673317975084654262.4119215720343656')
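The next test works through several theta-function identities from Abramowitz & Stegun, Sec. 16.28. For orientation, here is a compact standalone check of the simplest of them, 16.28.5 (theta2(0,q)**4 + theta4(0,q)**4 == theta3(0,q)**4), written as a sketch against mpmath's public jtheta, not as part of the suite:

```python
# Sketch only: verify Abramowitz & Stegun 16.28.5 numerically with mpmath.
from mpmath import mp, mpf, jtheta

mp.dps = 30
q = mpf(1)/5                                     # any nome with |q| < 1 works
lhs = jtheta(2, 0, q)**4 + jtheta(4, 0, q)**4
rhs = jtheta(3, 0, q)**4
assert lhs.ae(rhs)                               # agrees to working precision
```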
+ """ + mp.dps = 110 + eps1 = ldexp(eps, 30) + + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + + zstring = str(10*random.random()) + z = mpf(zstring) + # Abramowitz 16.28.1 + # v_1(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_2(0, q)**2 + # - v_2(z, q)**2 * v_3(0, q)**2 + term1 = (jtheta(1, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(3, z, q)**2) * (jtheta(2, zero, q)**2) + term3 = (jtheta(2, z, q)**2) * (jtheta(3, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + zstring = str(100*random.random()) + z = mpf(zstring) + # Abramowitz 16.28.2 + # v_2(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_2(0, q)**2 + # - v_1(z, q)**2 * v_3(0, q)**2 + term1 = (jtheta(2, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(4, z, q)**2) * (jtheta(2, zero, q)**2) + term3 = (jtheta(1, z, q)**2) * (jtheta(3, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.3 + # v_3(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_3(0, q)**2 + # - v_1(z, q)**2 * v_2(0, q)**2 + term1 = (jtheta(3, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(4, z, q)**2) * (jtheta(3, zero, q)**2) + term3 = (jtheta(1, z, q)**2) * (jtheta(2, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.4 + # v_4(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_3(0, q)**2 + # - v_2(z, q)**2 * v_2(0, q)**2 + term1 = (jtheta(4, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(3, z, q)**2) * (jtheta(3, zero, q)**2) + term3 = (jtheta(2, z, q)**2) * (jtheta(2, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.5 + # v_2(0, q)**4 + v_4(0, q)**4 == v_3(0, q)**4 + term1 = (jtheta(2, zero, q))**4 + term2 = (jtheta(4, zero, q))**4 + term3 = (jtheta(3, zero, q))**4 + equality = term1 + term2 - term3 + assert(equality.ae(0, eps1)) + mp.dps = 15 + +def test_jtheta_complex(): + mp.dps = 30 + z = mpf(1)/4 + j/8 + q = mpf(1)/3 + j/7 + # Mathematica N[EllipticTheta[1, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.31618034835986160705729105731678285') + \ + mpf('0.07542013825835103435142515194358975') * j + r = jtheta(1, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[2, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('1.6530986428239765928634711417951828') + \ + mpf('0.2015344864707197230526742145361455') * j + r = jtheta(2, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[3, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('1.6520564411784228184326012700348340') + \ + mpf('0.1998129119671271328684690067401823') * j + r = jtheta(3, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[4, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.37619082382228348252047624089973824') - \ + mpf('0.15623022130983652972686227200681074') * j + r = jtheta(4, z, q) + assert(mpc_ae(r, res)) + + # check some theta function identities + mp.dos = 100 + z = mpf(1)/4 + j/8 + q = mpf(1)/3 + j/7 + mp.dps += 10 + a = [0,0, jtheta(2, 0, q), jtheta(3, 0, q), jtheta(4, 0, q)] + t = [0, jtheta(1, z, q), jtheta(2, z, q), jtheta(3, z, q), jtheta(4, z, q)] + r = [(t[2]*a[4])**2 - (t[4]*a[2])**2 + (t[1] *a[3])**2, + (t[3]*a[4])**2 - (t[4]*a[3])**2 + (t[1] *a[2])**2, + (t[1]*a[4])**2 - (t[3]*a[2])**2 + (t[2] *a[3])**2, + (t[4]*a[4])**2 - (t[3]*a[3])**2 + (t[2] *a[2])**2, + a[2]**4 + a[4]**4 - a[3]**4] + mp.dps -= 10 + for x in r: + assert(mpc_ae(x, mpc(0))) + mp.dps = 15 + +def test_djtheta(): + mp.dps = 30 + + z = one/7 + j/3 + q = one/8 + j/5 + # Mathematica 
def test_djtheta(): + mp.dps = 30 + + z = one/7 + j/3 + q = one/8 + j/5 + # Mathematica N[EllipticThetaPrime[1, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('1.5555195883277196036090928995803201') - \ + mpf('0.02439761276895463494054149673076275') * j + result = jtheta(1, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[2, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('0.19825296689470982332701283509685662') - \ + mpf('0.46038135182282106983251742935250009') * j + result = jtheta(2, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[3, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('0.36492498415476212680896699407390026') - \ + mpf('0.57743812698666990209897034525640369') * j + result = jtheta(3, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[4, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('-0.38936892528126996010818803742007352') + \ + mpf('0.66549886179739128256269617407313625') * j + result = jtheta(4, z, q, 1) + assert(mpc_ae(result, res)) + + for i in range(10): + q = (one*random.random() + j*random.random())/2 + # identity in Whittaker & Watson, §21.41 + a = jtheta(1, 0, q, 1) + b = jtheta(2, 0, q)*jtheta(3, 0, q)*jtheta(4, 0, q) + assert(a.ae(b)) + + # test higher derivatives + mp.dps = 20 + for q,z in [(one/3, one/5), (one/3 + j/8, one/5), + (one/3, one/5 + j/8), (one/3 + j/7, one/5 + j/8)]: + for n in [1, 2, 3, 4]: + r = jtheta(n, z, q, 2) + r1 = diff(lambda zz: jtheta(n, zz, q), z, n=2) + assert r.ae(r1) + r = jtheta(n, z, q, 3) + r1 = diff(lambda zz: jtheta(n, zz, q), z, n=3) + assert r.ae(r1) + + # identity in Whittaker & Watson, §21.41 + q = one/3 + z = zero + a = [0]*5 + a[1] = jtheta(1, z, q, 3)/jtheta(1, z, q, 1) + for n in [2,3,4]: + a[n] = jtheta(n, z, q, 2)/jtheta(n, z, q) + equality = a[2] + a[3] + a[4] - a[1] + assert(equality.ae(0)) + mp.dps = 15 + +def test_jsn(): + """ + Test some special cases of the sn(z, q) function. + """ + mp.dps = 100 + + # trivial case + result = jsn(zero, zero) + assert(result == zero) + + # Abramowitz Table 16.5 + # + # sn(0, m) = 0 + + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + + equality = jsn(zero, q) + assert(equality.ae(0)) + + # Abramowitz Table 16.6.1 + # + # sn(z, 0) = sin(z), m == 0 + # + # sn(z, 1) = tanh(z), m == 1 + # + # It would be nice to test these, but I find that they run + # into numerical trouble. I'm currently treating them as + # boundary cases for the sn function. + + mp.dps = 25 + arg = one/10 + #N[JacobiSN[1/10, 2^-100], 25] + res = mpf('0.09983341664682815230681420') + m = ldexp(one, -100) + result = jsn(arg, m) + assert(result.ae(res)) + + # N[JacobiSN[1/10, 1/10], 25] + res = mpf('0.09981686718599080096451168') + result = jsn(arg, arg) + assert(result.ae(res)) + mp.dps = 15
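The m = 2**-100 dodge used in test_jsn above (to stay clear of the degenerate modulus m = 0, where sn reduces to sin) also works in standalone code; a sketch using mpmath's ellipfun:

```python
# Sketch: for m near 0, sn(z, m) ~ sin(z); a tiny nonzero m avoids the boundary.
from mpmath import mp, mpf, ldexp, ellipfun, sin

mp.dps = 25
sn = ellipfun('sn')
z = mpf(1)/10
m = ldexp(mpf(1), -100)      # m = 2**-100, a numerically safe stand-in for 0
assert sn(z, m).ae(sin(z))   # matches sin(z) to working precision
```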
def test_jcn(): + """ + Test some special cases of the cn(z, q) function. + """ + mp.dps = 100 + + # Abramowitz Table 16.5 + # cn(0, q) = 1 + qstring = str(random.random()) + q = mpf(qstring) + cn = jcn(zero, q) + assert(cn.ae(one)) + + # Abramowitz Table 16.6.2 + # + # cn(u, 0) = cos(u), m == 0 + # + # cn(u, 1) = sech(u), m == 1 + # + # It would be nice to test these, but I find that they run + # into numerical trouble. I'm currently treating them as + # boundary cases for the cn function. + + mp.dps = 25 + arg = one/10 + m = ldexp(one, -100) + #N[JacobiCN[1/10, 2^-100], 25] + res = mpf('0.9950041652780257660955620') + result = jcn(arg, m) + assert(result.ae(res)) + + # N[JacobiCN[1/10, 1/10], 25] + res = mpf('0.9950058256237368748520459') + result = jcn(arg, arg) + assert(result.ae(res)) + mp.dps = 15 + +def test_jdn(): + """ + Test some special cases of the dn(z, q) function. + """ + mp.dps = 100 + + # Abramowitz Table 16.5 + # dn(0, q) = 1 + mstring = str(random.random()) + m = mpf(mstring) + + dn = jdn(zero, m) + assert(dn.ae(one)) + + mp.dps = 25 + # N[JacobiDN[1/10, 1/10], 25] + res = mpf('0.9995017055025556219713297') + arg = one/10 + result = jdn(arg, arg) + assert(result.ae(res)) + mp.dps = 15 + + +def test_sn_cn_dn_identities(): + """ + Tests some of the Jacobi elliptic function identities found + on MathWorld. They are not given in Abramowitz. + """ + mp.dps = 100 + N = 5 + for i in range(N): + qstring = str(random.random()) + q = mpf(qstring) + zstring = str(100*random.random()) + z = mpf(zstring) + + # MathWorld + # sn(z, q)**2 + cn(z, q)**2 == 1 + term1 = jsn(z, q)**2 + term2 = jcn(z, q)**2 + equality = one - term1 - term2 + assert(equality.ae(0)) + + # MathWorld + # k**2 * sn(z, m)**2 + dn(z, m)**2 == 1 + for i in range(N): + mstring = str(random.random()) + m = mpf(mstring) + k = m.sqrt() + zstring = str(10*random.random()) + z = mpf(zstring) + term1 = k**2 * jsn(z, m)**2 + term2 = jdn(z, m)**2 + equality = one - term1 - term2 + assert(equality.ae(0)) + + + for i in range(N): + mstring = str(random.random()) + m = mpf(mstring) + k = m.sqrt() + zstring = str(random.random()) + z = mpf(zstring) + + # MathWorld + # k**2 * cn(z, m)**2 + (1 - k**2) = dn(z, m)**2 + term1 = k**2 * jcn(z, m)**2 + term2 = 1 - k**2 + term3 = jdn(z, m)**2 + equality = term3 - term1 - term2 + assert(equality.ae(0)) + + K = ellipk(k**2) + # Abramowitz Table 16.5 + # sn(K, m) = 1; K is K(k), first complete elliptic integral + r = jsn(K, m) + assert(r.ae(one)) + + # Abramowitz Table 16.5 + # cn(K, q) = 0; K is K(k), first complete elliptic integral + equality = jcn(K, m) + assert(equality.ae(0)) + + # Abramowitz Table 16.6.3 + # dn(z, 0) = 1, m == 0 + z = m + value = jdn(z, zero) + assert(value.ae(one)) + + mp.dps = 15 + +def test_sn_cn_dn_complex(): + mp.dps = 30 + # N[JacobiSN[1/4 + I/8, 1/3 + I/7], 35] in Mathematica + res = mpf('0.2495674401066275492326652143537') + \ + mpf('0.12017344422863833381301051702823') * j + u = mpf(1)/4 + j/8 + m = mpf(1)/3 + j/7 + r = jsn(u, m) + assert(mpc_ae(r, res)) + + #N[JacobiCN[1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.9762691700944007312693721148331') - \ + mpf('0.0307203994181623243583169154824')*j + r = jcn(u, m) + #assert r.real.ae(res.real) + #assert r.imag.ae(res.imag) + assert(mpc_ae(r, res)) + + #N[JacobiDN[1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.99639490163039577560547478589753039') - \ + mpf('0.01346296520008176393432491077244994')*j + r = jdn(u, m) + assert(mpc_ae(r, res)) + mp.dps = 15 + +def test_elliptic_integrals(): + # Test cases from Carlson's paper + mp.dps = 15 + assert elliprd(0,2,1).ae(1.7972103521033883112) + assert elliprd(2,3,4).ae(0.16510527294261053349) + assert elliprd(j,-j,2).ae(0.65933854154219768919) + assert elliprd(0,j,-j).ae(1.2708196271909686299 + 2.7811120159520578777j) + assert elliprd(0,j-1,j).ae(-1.8577235439239060056 - 0.96193450888838559989j) + assert elliprd(-2-j,-j,-1+j).ae(1.8249027393703805305 - 1.2218475784827035855j) + # extra test cases + assert elliprg(0,0,0) == 0 + assert elliprg(0,0,16).ae(2) + assert 
elliprg(0,16,0).ae(2) + assert elliprg(16,0,0).ae(2) + assert elliprg(1,4,0).ae(1.2110560275684595248036) + assert elliprg(1,0,4).ae(1.2110560275684595248036) + assert elliprg(0,4,1).ae(1.2110560275684595248036) + # should be symmetric -- fixes a bug present in the paper + x,y,z = 1,1j,-1+1j + assert elliprg(x,y,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(x,z,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,x,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,z,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,x,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,y,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + + for n in [5, 15, 30, 60, 100]: + mp.dps = n + assert elliprf(1,2,0).ae('1.3110287771460599052324197949455597068413774757158115814084108519003952935352071251151477664807145467230678763') + assert elliprf(0.5,1,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j,-j,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j-1,j,0).ae(mpc('0.79612586584233913293056938229563057846592264089185680214929401744498956943287031832657642790719940442165621412', + '-1.2138566698364959864300942567386038975419875860741507618279563735753073152507112254567291141460317931258599889')) + assert elliprf(2,3,4).ae('0.58408284167715170669284916892566789240351359699303216166309375305508295130412919665541330837704050454472379308') + assert elliprf(j,-j,2).ae('1.0441445654064360931078658361850779139591660747973017593275012615517220315993723776182276555339288363064476126') + assert elliprf(j-1,j,1-j).ae(mpc('0.93912050218619371196624617169781141161485651998254431830645241993282941057500174238125105410055253623847335313', + '-0.53296252018635269264859303449447908970360344322834582313172115220559316331271520508208025270300138589669326136')) + assert elliprc(0,0.25).ae(+pi) + assert elliprc(2.25,2).ae(+ln2) + assert elliprc(0,j).ae(mpc('1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532', + '-1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532')) + assert elliprc(-j,j).ae(mpc('1.2260849569072198222319655083097718755633725139745941606203839524036426936825652935738621522906572884239069297', + '-0.34471136988767679699935618332997956653521218571295874986708834375026550946053920574015526038040124556716711353')) + assert elliprc(0.25,-2).ae(ln2/3) + assert elliprc(j,-1).ae(mpc('0.77778596920447389875196055840799837589537035343923012237628610795937014001905822029050288316217145443865649819', + '0.1983248499342877364755170948292130095921681309577950696116251029742793455964385947473103628983664877025779304')) + assert elliprj(0,1,2,3).ae('0.77688623778582332014190282640545501102298064276022952731669118325952563819813258230708177398475643634103990878') + assert elliprj(2,3,4,5).ae('0.14297579667156753833233879421985774801466647854232626336218889885463800128817976132826443904216546421431528308') + assert elliprj(2,3,4,-1+j).ae(mpc('0.13613945827770535203521374457913768360237593025944342652613569368333226052158214183059386307242563164036672709', + '-0.38207561624427164249600936454845112611060375760094156571007648297226090050927156176977091273224510621553615189')) + assert 
elliprj(j,-j,0,2).ae('1.6490011662710884518243257224860232300246792717163891216346170272567376981346412066066050103935109581019055806') + assert elliprj(-1+j,-1-j,1,2).ae('0.94148358841220238083044612133767270187474673547917988681610772381758628963408843935027667916713866133196845063') + assert elliprj(j,-j,0,1-j).ae(mpc('1.8260115229009316249372594065790946657011067182850435297162034335356430755397401849070610280860044610878657501', + '1.2290661908643471500163617732957042849283739403009556715926326841959667290840290081010472716420690899886276961')) + assert elliprj(-1+j,-1-j,1,-3+j).ae(mpc('-0.61127970812028172123588152373622636829986597243716610650831553882054127570542477508023027578037045504958619422', + '-1.0684038390006807880182112972232562745485871763154040245065581157751693730095703406209466903752930797510491155')) + assert elliprj(-1+j,-2-j,-j,-1+j).ae(mpc('1.8249027393703805304622013339009022294368078659619988943515764258335975852685224202567854526307030593012768954', + '-1.2218475784827035854568450371590419833166777535029296025352291308244564398645467465067845461070602841312456831')) + + assert elliprg(0,16,16).ae(+pi) + assert elliprg(2,3,4).ae('1.7255030280692277601061148835701141842692457170470456590515892070736643637303053506944907685301315299153040991') + assert elliprg(0,j,-j).ae('0.42360654239698954330324956174109581824072295516347109253028968632986700241706737986160014699730561497106114281') + assert elliprg(j-1,j,0).ae(mpc('0.44660591677018372656731970402124510811555212083508861036067729944477855594654762496407405328607219895053798354', + '0.70768352357515390073102719507612395221369717586839400605901402910893345301718731499237159587077682267374159282')) + assert elliprg(-j,j-1,j).ae(mpc('0.36023392184473309033675652092928695596803358846377334894215349632203382573844427952830064383286995172598964266', + '0.40348623401722113740956336997761033878615232917480045914551915169013722542827052849476969199578321834819903921')) + assert elliprg(0, mpf('0.0796'), 4).ae('1.0284758090288040009838871385180217366569777284430590125081211090574701293154645750017813190805144572673802094') + mp.dps = 15 + + # more test cases for the branch of ellippi / elliprj + assert elliprj(-1-0.5j, -10-6j, -10-3j, -5+10j).ae(0.128470516743927699 + 0.102175950778504625j, abs_eps=1e-8) + assert elliprj(1.987, 4.463 - 1.614j, 0, -3.965).ae(-0.341575118513811305 - 0.394703757004268486j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.632j, 1.654, -0.9609).ae(-1.14735199581485639 - 0.134450158867472264j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037-0.632j, 1.654, -0.9609).ae(1.758765901861727 - 0.161002343366626892j, abs_eps=1e-5) + assert elliprj(0.3068, -4.037+0.0632j, 1.654, -0.9609).ae(-1.17157627949475577 - 0.069182614173988811j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.00632j, 1.654, -0.9609).ae(-1.17337595670549633 - 0.0623069224526925j, abs_eps=1e-8) + + # these require accurate integration + assert elliprj(0.3068, -4.037-0.0632j, 1.654, -0.9609).ae(1.77940452391261626 + 0.0388711305592447234j) + assert elliprj(0.3068, -4.037-0.00632j, 1.654, -0.9609).ae(1.77806722756403055 + 0.0592749824572262329j) + # issue #571 + assert ellippi(2.1 + 0.94j, 2.3 + 0.98j, 2.5 + 0.01j).ae(-0.40652414240811963438 + 2.1547659461404749309j) + + assert ellippi(2.0-1.0j, 2.0+1.0j).ae(1.8578723151271115 - 1.18642180609983531j) + assert ellippi(2.0-0.5j, 0.5+1.0j).ae(0.936761970766645807 - 1.61876787838890786j) + assert ellippi(2.0, 1.0+1.0j).ae(0.999881420735506708 - 2.4139272867045391j) + assert ellippi(2.0+1.0j, 
2.0-1.0j).ae(1.8578723151271115 + 1.18642180609983531j) + assert ellippi(2.0+1.0j, 2.0).ae(2.78474654927885845 + 2.02204728966993314j) + +def test_issue_238(): + assert isnan(qfrom(m=nan)) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_fp.py b/MLPY/Lib/site-packages/mpmath/tests/test_fp.py new file mode 100644 index 0000000000000000000000000000000000000000..99f3759c3071c4d55e0481472f3d16c1f5df1fef --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_fp.py @@ -0,0 +1,1671 @@ +""" +Easy-to-use test-generating code: + +cases = ''' +exp 2.25 +log 2.25 +''' + +from mpmath import * +mp.dps = 20 +for test in cases.splitlines(): + if not test: + continue + words = test.split() + fname = words[0] + args = words[1:] + argstr = ", ".join(args) + testline = "%s(%s)" % (fname, argstr) + ans = str(eval(testline)) + print(" assert ae(fp.%s, %s)" % (testline, ans)) + +""" + +from mpmath import fp + +def ae(x, y, tol=1e-12): + if x == y: + return True + return abs(x-y) <= tol*abs(y) + +def test_conj(): + assert fp.conj(4) == 4 + assert fp.conj(3+4j) == 3-4j + assert fp.fdot([1,2],[3,2+1j], conjugate=True) == 7-2j + +def test_fp_number_parts(): + assert ae(fp.arg(3), 0.0) + assert ae(fp.arg(-3), 3.1415926535897932385) + assert ae(fp.arg(3j), 1.5707963267948966192) + assert ae(fp.arg(-3j), -1.5707963267948966192) + assert ae(fp.arg(2+3j), 0.98279372324732906799) + assert ae(fp.arg(-1-1j), -2.3561944901923449288) + assert ae(fp.re(2.5), 2.5) + assert ae(fp.re(2.5+3j), 2.5) + assert ae(fp.im(2.5), 0.0) + assert ae(fp.im(2.5+3j), 3.0) + assert ae(fp.floor(2.5), 2.0) + assert ae(fp.floor(2), 2.0) + assert ae(fp.floor(2.0+0j), (2.0 + 0.0j)) + assert ae(fp.floor(-1.5-0.5j), (-2.0 - 1.0j)) + assert ae(fp.ceil(2.5), 3.0) + assert ae(fp.ceil(2), 2.0) + assert ae(fp.ceil(2.0+0j), (2.0 + 0.0j)) + assert ae(fp.ceil(-1.5-0.5j), (-1.0 + 0.0j)) + +def test_fp_cospi_sinpi(): + assert ae(fp.sinpi(0), 0.0) + assert ae(fp.sinpi(0.25), 0.7071067811865475244) + assert ae(fp.sinpi(0.5), 1.0) + assert ae(fp.sinpi(0.75), 0.7071067811865475244) + assert ae(fp.sinpi(1), 0.0) + assert ae(fp.sinpi(1.25), -0.7071067811865475244) + assert ae(fp.sinpi(1.5), -1.0) + assert ae(fp.sinpi(1.75), -0.7071067811865475244) + assert ae(fp.sinpi(2), 0.0) + assert ae(fp.sinpi(2.25), 0.7071067811865475244) + assert ae(fp.sinpi(0+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.sinpi(0.25+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(0.5+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.sinpi(0.75+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.sinpi(1+3j), (0.0 - 6195.8238636085899556j)) + assert ae(fp.sinpi(1.25+3j), (-4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.sinpi(1.5+3j), (-6195.8239443081075259 + 0.0j)) + assert ae(fp.sinpi(1.75+3j), (-4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(2+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.sinpi(2.25+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.sinpi(-0.75), -0.7071067811865475244) + assert ae(fp.sinpi(-1e-10), -3.1415926535897933529e-10) + assert ae(fp.sinpi(1e-10), 3.1415926535897933529e-10) + assert ae(fp.sinpi(1e-10+1e-10j), (3.141592653589793353e-10 + 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(1e-10-1e-10j), (3.141592653589793353e-10 - 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(-1e-10+1e-10j), (-3.141592653589793353e-10 + 3.1415926535897933528e-10j)) + assert ae(fp.sinpi(-1e-10-1e-10j), (-3.141592653589793353e-10 - 3.1415926535897933528e-10j)) 
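+ # NOTE: the next two asserts are an added illustration, not part of the + # original mpmath suite. The cospi cases below mirror the sinpi cases + # above through the shift identity cos(pi*x) == sin(pi*(x + 1/2)), which + # fp.cospi/fp.sinpi should satisfy to double precision; a quick spot-check: + assert ae(fp.cospi(0.25), fp.sinpi(0.75)) + assert ae(fp.cospi(-0.7), fp.sinpi(-0.2)) 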
+ assert ae(fp.cospi(0), 1.0) + assert ae(fp.cospi(0.25), 0.7071067811865475244) + assert ae(fp.cospi(0.5), 0.0) + assert ae(fp.cospi(0.75), -0.7071067811865475244) + assert ae(fp.cospi(1), -1.0) + assert ae(fp.cospi(1.25), -0.7071067811865475244) + assert ae(fp.cospi(1.5), 0.0) + assert ae(fp.cospi(1.75), 0.7071067811865475244) + assert ae(fp.cospi(2), 1.0) + assert ae(fp.cospi(2.25), 0.7071067811865475244) + assert ae(fp.cospi(0+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(0.25+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(0.5+3j), (0.0 - 6195.8238636085899556j)) + assert ae(fp.cospi(0.75+3j), (-4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(1+3j), (-6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(1.25+3j), (-4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.cospi(1.5+3j), (0.0 + 6195.8238636085899556j)) + assert ae(fp.cospi(1.75+3j), (4381.1091260582448033 + 4381.1090689950686908j)) + assert ae(fp.cospi(2+3j), (6195.8239443081075259 + 0.0j)) + assert ae(fp.cospi(2.25+3j), (4381.1091260582448033 - 4381.1090689950686908j)) + assert ae(fp.cospi(-0.75), -0.7071067811865475244) + assert ae(fp.sinpi(-0.7), -0.80901699437494750611) + assert ae(fp.cospi(-0.7), -0.5877852522924730163) + assert ae(fp.cospi(-3+2j), (-267.74676148374822225 + 0.0j)) + assert ae(fp.sinpi(-3+2j), (0.0 - 267.74489404101651426j)) + assert ae(fp.sinpi(-0.7+2j), (-216.6116802292079471 - 157.37650009392034693j)) + assert ae(fp.cospi(-0.7+2j), (-157.37759774921754565 + 216.61016943630197336j)) + +def test_fp_expj(): + assert ae(fp.expj(0), (1.0 + 0.0j)) + assert ae(fp.expj(1), (0.5403023058681397174 + 0.84147098480789650665j)) + assert ae(fp.expj(2), (-0.416146836547142387 + 0.9092974268256816954j)) + assert ae(fp.expj(0.75), (0.73168886887382088631 + 0.68163876002333416673j)) + assert ae(fp.expj(2+3j), (-0.020718731002242879378 + 0.045271253156092975488j)) + assert ae(fp.expjpi(0), (1.0 + 0.0j)) + assert ae(fp.expjpi(1), (-1.0 + 0.0j)) + assert ae(fp.expjpi(2), (1.0 + 0.0j)) + assert ae(fp.expjpi(0.75), (-0.7071067811865475244 + 0.7071067811865475244j)) + assert ae(fp.expjpi(2+3j), (0.000080699517570304599239 + 0.0j)) + +def test_fp_bernoulli(): + assert ae(fp.bernoulli(0), 1.0) + assert ae(fp.bernoulli(1), -0.5) + assert ae(fp.bernoulli(2), 0.16666666666666666667) + assert ae(fp.bernoulli(10), 0.075757575757575757576) + assert ae(fp.bernoulli(11), 0.0) + +def test_fp_gamma(): + assert ae(fp.gamma(1), 1.0) + assert ae(fp.gamma(1.5), 0.88622692545275801365) + assert ae(fp.gamma(10), 362880.0) + assert ae(fp.gamma(-0.5), -3.5449077018110320546) + assert ae(fp.gamma(-7.1), 0.0016478244570263333622) + assert ae(fp.gamma(12.3), 83385367.899970000963) + assert ae(fp.gamma(2+0j), (1.0 + 0.0j)) + assert ae(fp.gamma(-2.5+0j), (-0.94530872048294188123 + 0.0j)) + assert ae(fp.gamma(3+4j), (0.0052255384713692141947 - 0.17254707929430018772j)) + assert ae(fp.gamma(-3-4j), (0.00001460997305874775607 - 0.000020760733311509070396j)) + assert ae(fp.fac(0), 1.0) + assert ae(fp.fac(1), 1.0) + assert ae(fp.fac(20), 2432902008176640000.0) + assert ae(fp.fac(-3.5), -0.94530872048294188123) + assert ae(fp.fac(2+3j), (-0.44011340763700171113 - 0.06363724312631702183j)) + assert ae(fp.loggamma(1.0), 0.0) + assert ae(fp.loggamma(2.0), 0.0) + assert ae(fp.loggamma(3.0), 0.69314718055994530942) + assert ae(fp.loggamma(7.25), 7.0521854507385394449) + assert ae(fp.loggamma(1000.0), 5905.2204232091812118) + assert ae(fp.loggamma(1e50), 1.1412925464970229298e+52) + assert 
ae(fp.loggamma(1e25+1e25j), (5.6125802751733671621e+26 + 5.7696599078528568383e+26j)) + assert ae(fp.loggamma(3+4j), (-1.7566267846037841105 + 4.7426644380346579282j)) + assert ae(fp.loggamma(-0.5), (1.2655121234846453965 - 3.1415926535897932385j)) + assert ae(fp.loggamma(-1.25), (1.3664317612369762346 - 6.2831853071795864769j)) + assert ae(fp.loggamma(-2.75), (0.0044878975359557733115 - 9.4247779607693797154j)) + assert ae(fp.loggamma(-3.5), (-1.3090066849930420464 - 12.566370614359172954j)) + assert ae(fp.loggamma(-4.5), (-2.8130840817693161197 - 15.707963267948966192j)) + assert ae(fp.loggamma(-2+3j), (-6.776523813485657093 - 4.568791367260286402j)) + assert ae(fp.loggamma(-1000.3), (-5912.8440347785205041 - 3144.7342462433830317j)) + assert ae(fp.loggamma(-100-100j), (-632.35117666833135562 - 158.37641469650352462j)) + assert ae(fp.loggamma(1e-10), 23.025850929882735237) + assert ae(fp.loggamma(-1e-10), (23.02585092999817837 - 3.1415926535897932385j)) + assert ae(fp.loggamma(1e-10j), (23.025850929940456804 - 1.5707963268526181857j)) + assert ae(fp.loggamma(1e-10j-1e-10), (22.679277339718205716 - 2.3561944902500664954j)) + +def test_fp_psi(): + assert ae(fp.psi(0, 3.7), 1.1671535393615114409) + assert ae(fp.psi(0, 0.5), -1.9635100260214234794) + assert ae(fp.psi(0, 1), -0.57721566490153286061) + assert ae(fp.psi(0, -2.5), 1.1031566406452431872) + assert ae(fp.psi(0, 12.9), 2.5179671503279156347) + assert ae(fp.psi(0, 100), 4.6001618527380874002) + assert ae(fp.psi(0, 2500.3), 7.8239660143238547877) + assert ae(fp.psi(0, 1e40), 92.103403719761827391) + assert ae(fp.psi(0, 1e200), 460.51701859880913677) + assert ae(fp.psi(0, 3.7+0j), (1.1671535393615114409 + 0.0j)) + assert ae(fp.psi(1, 3), 0.39493406684822643647) + assert ae(fp.psi(3, 2+3j), (-0.05383196209159972116 + 0.0076890935247364805218j)) + assert ae(fp.psi(4, -0.5+1j), (1.2719531355492328195 - 18.211833410936276774j)) + assert ae(fp.harmonic(0), 0.0) + assert ae(fp.harmonic(1), 1.0) + assert ae(fp.harmonic(2), 1.5) + assert ae(fp.harmonic(100), 5.1873775176396202608) + assert ae(fp.harmonic(-2.5), 1.2803723055467760478) + assert ae(fp.harmonic(2+3j), (1.9390425294578375875 + 0.87336044981834544043j)) + assert ae(fp.harmonic(-5-4j), (2.3725754822349437733 - 2.4160904444801621j)) + +def test_fp_zeta(): + assert ae(fp.zeta(1e100), 1.0) + assert ae(fp.zeta(3), 1.2020569031595942854) + assert ae(fp.zeta(2+0j), (1.6449340668482264365 + 0.0j)) + assert ae(fp.zeta(0.93), -13.713619351638164784) + assert ae(fp.zeta(1.74), 1.9796863545771774095) + assert ae(fp.zeta(0.0), -0.5) + assert ae(fp.zeta(-1.0), -0.083333333333333333333) + assert ae(fp.zeta(-2.0), 0.0) + assert ae(fp.zeta(-3.0), 0.0083333333333333333333) + assert ae(fp.zeta(-500.0), 0.0) + assert ae(fp.zeta(-7.4), 0.0036537321227995882447) + assert ae(fp.zeta(2.1), 1.5602165335033620158) + assert ae(fp.zeta(26.9), 1.0000000079854809935) + assert ae(fp.zeta(26), 1.0000000149015548284) + assert ae(fp.zeta(27), 1.0000000074507117898) + assert ae(fp.zeta(28), 1.0000000037253340248) + assert ae(fp.zeta(27.1), 1.000000006951755045) + assert ae(fp.zeta(32.7), 1.0000000001433243232) + assert ae(fp.zeta(100), 1.0) + assert ae(fp.altzeta(3.5), 0.92755357777394803511) + assert ae(fp.altzeta(1), 0.69314718055994530942) + assert ae(fp.altzeta(2), 0.82246703342411321824) + assert ae(fp.altzeta(0), 0.5) + assert ae(fp.zeta(-2+3j, 1), (0.13297115587929864827 + 0.12305330040458776494j)) + assert ae(fp.zeta(-2+3j, 5), (18.384866151867576927 - 11.377015110597711009j)) + assert 
ae(fp.zeta(1.0000000001), 9999999173.1735741337) + assert ae(fp.zeta(0.9999999999), -9999999172.0191428039) + assert ae(fp.zeta(1+0.000000001j), (0.57721566490153286061 - 999999999.99999993765j)) + assert ae(fp.primezeta(2.5+4j), (-0.16922458243438033385 - 0.010847965298387727811j)) + assert ae(fp.primezeta(4), 0.076993139764246844943) + assert ae(fp.riemannr(3.7), 2.3034079839110855717) + assert ae(fp.riemannr(8), 3.9011860449341499474) + assert ae(fp.riemannr(3+4j), (2.2369653314259991796 + 1.6339943856990281694j)) + +def test_fp_hyp2f1(): + assert ae(fp.hyp2f1(1, (3,2), 3.25, 5.0), (-0.46600275923108143059 - 0.74393667908854842325j)) + assert ae(fp.hyp2f1(1+1j, (3,2), 3.25, 5.0), (-5.9208875603806515987 - 2.3813557707889590686j)) + assert ae(fp.hyp2f1(1+1j, (3,2), 3.25, 2+3j), (0.17174552030925080445 + 0.19589781970539389999j)) + +def test_fp_erf(): + assert fp.erf(2) == fp.erf(2.0) == fp.erf(2.0+0.0j) + assert fp.erf(fp.inf) == 1.0 + assert fp.erf(fp.ninf) == -1.0 + assert ae(fp.erf(0), 0.0) + assert ae(fp.erf(-0), -0.0) + assert ae(fp.erf(0.3), 0.32862675945912741619) + assert ae(fp.erf(-0.3), -0.32862675945912741619) + assert ae(fp.erf(0.9), 0.79690821242283213966) + assert ae(fp.erf(-0.9), -0.79690821242283213966) + assert ae(fp.erf(1.0), 0.84270079294971486934) + assert ae(fp.erf(-1.0), -0.84270079294971486934) + assert ae(fp.erf(1.1), 0.88020506957408172966) + assert ae(fp.erf(-1.1), -0.88020506957408172966) + assert ae(fp.erf(8.5), 1.0) + assert ae(fp.erf(-8.5), -1.0) + assert ae(fp.erf(9.1), 1.0) + assert ae(fp.erf(-9.1), -1.0) + assert ae(fp.erf(20.0), 1.0) + assert ae(fp.erf(-20.0), -1.0) + assert ae(fp.erf(10000.0), 1.0) + assert ae(fp.erf(-10000.0), -1.0) + assert ae(fp.erf(1e+50), 1.0) + assert ae(fp.erf(-1e+50), -1.0) + assert ae(fp.erf(1j), 1.650425758797542876j) + assert ae(fp.erf(-1j), -1.650425758797542876j) + assert ae(fp.erf((2+3j)), (-20.829461427614568389 + 8.6873182714701631444j)) + assert ae(fp.erf(-(2+3j)), -(-20.829461427614568389 + 8.6873182714701631444j)) + assert ae(fp.erf((8+9j)), (-1072004.2525062051158 + 364149.91954310255423j)) + assert ae(fp.erf(-(8+9j)), -(-1072004.2525062051158 + 364149.91954310255423j)) + assert fp.erfc(fp.inf) == 0.0 + assert fp.erfc(fp.ninf) == 2.0 + assert fp.erfc(0) == 1 + assert fp.erfc(-0.0) == 1 + assert fp.erfc(0+0j) == 1 + assert ae(fp.erfc(0.3), 0.67137324054087258381) + assert ae(fp.erfc(-0.3), 1.3286267594591274162) + assert ae(fp.erfc(0.9), 0.20309178757716786034) + assert ae(fp.erfc(-0.9), 1.7969082124228321397) + assert ae(fp.erfc(1.0), 0.15729920705028513066) + assert ae(fp.erfc(-1.0), 1.8427007929497148693) + assert ae(fp.erfc(1.1), 0.11979493042591827034) + assert ae(fp.erfc(-1.1), 1.8802050695740817297) + assert ae(fp.erfc(8.5), 2.7623240713337714461e-33) + assert ae(fp.erfc(-8.5), 2.0) + assert ae(fp.erfc(9.1), 6.6969004279886077452e-38) + assert ae(fp.erfc(-9.1), 2.0) + assert ae(fp.erfc(20.0), 5.3958656116079009289e-176) + assert ae(fp.erfc(-20.0), 2.0) + assert ae(fp.erfc(10000.0), 0.0) + assert ae(fp.erfc(-10000.0), 2.0) + assert ae(fp.erfc(1e+50), 0.0) + assert ae(fp.erfc(-1e+50), 2.0) + assert ae(fp.erfc(1j), (1.0 - 1.650425758797542876j)) + assert ae(fp.erfc(-1j), (1.0 + 1.650425758797542876j)) + assert ae(fp.erfc((2+3j)), (21.829461427614568389 - 8.6873182714701631444j), 1e-13) + assert ae(fp.erfc(-(2+3j)), (-19.829461427614568389 + 8.6873182714701631444j), 1e-13) + assert ae(fp.erfc((8+9j)), (1072005.2525062051158 - 364149.91954310255423j)) + assert ae(fp.erfc(-(8+9j)), (-1072003.2525062051158 + 
364149.91954310255423j)) + assert ae(fp.erfc(20+0j), (5.3958656116079009289e-176 + 0.0j)) + +def test_fp_lambertw(): + assert ae(fp.lambertw(0.0), 0.0) + assert ae(fp.lambertw(1.0), 0.567143290409783873) + assert ae(fp.lambertw(7.5), 1.5662309537823875394) + assert ae(fp.lambertw(-0.25), -0.35740295618138890307) + assert ae(fp.lambertw(-10.0), (1.3699809685212708156 + 2.140194527074713196j)) + assert ae(fp.lambertw(0+0j), (0.0 + 0.0j)) + assert ae(fp.lambertw(4+0j), (1.2021678731970429392 + 0.0j)) + assert ae(fp.lambertw(1000.5), 5.2500227450408980127) + assert ae(fp.lambertw(1e100), 224.84310644511850156) + assert ae(fp.lambertw(-1000.0), (5.1501630246362515223 + 2.6641981432905204596j)) + assert ae(fp.lambertw(1e-10), 9.9999999990000003645e-11) + assert ae(fp.lambertw(1e-10j), (1.0000000000000000728e-20 + 1.0000000000000000364e-10j)) + assert ae(fp.lambertw(3+4j), (1.2815618061237758782 + 0.53309522202097107131j)) + assert ae(fp.lambertw(-3-4j), (1.0750730665692549276 - 1.3251023817343588823j)) + assert ae(fp.lambertw(10000+1000j), (7.2361526563371602186 + 0.087567810943839352034j)) + assert ae(fp.lambertw(0.0, -1), -fp.inf) + assert ae(fp.lambertw(1.0, -1), (-1.5339133197935745079 - 4.3751851530618983855j)) + assert ae(fp.lambertw(7.5, -1), (0.44125668415098614999 - 4.8039842008452390179j)) + assert ae(fp.lambertw(-0.25, -1), -2.1532923641103496492) + assert ae(fp.lambertw(-10.0, -1), (1.3699809685212708156 - 2.140194527074713196j)) + assert ae(fp.lambertw(0+0j, -1), -fp.inf) + assert ae(fp.lambertw(4+0j, -1), (-0.15730793189620765317 - 4.6787800704666656212j)) + assert ae(fp.lambertw(1000.5, -1), (4.9153765415404024736 - 5.4465682700815159569j)) + assert ae(fp.lambertw(1e100, -1), (224.84272130101601052 - 6.2553713838167244141j)) + assert ae(fp.lambertw(-1000.0, -1), (5.1501630246362515223 - 2.6641981432905204596j)) + assert ae(fp.lambertw(1e-10, -1), (-26.303186778379041521 - 3.2650939117038283975j)) + assert ae(fp.lambertw(1e-10j, -1), (-26.297238779529035028 - 1.6328071613455765135j)) + assert ae(fp.lambertw(3+4j, -1), (0.25856740686699741676 - 3.8521166861614355895j)) + assert ae(fp.lambertw(-3-4j, -1), (-0.32028750204310768396 - 6.8801677192091972343j)) + assert ae(fp.lambertw(10000+1000j, -1), (7.0255308742285435567 - 5.5177506835734067601j)) + assert ae(fp.lambertw(0.0, 2), -fp.inf) + assert ae(fp.lambertw(1.0, 2), (-2.4015851048680028842 + 10.776299516115070898j)) + assert ae(fp.lambertw(7.5, 2), (-0.38003357962843791529 + 10.960916473368746184j)) + assert ae(fp.lambertw(-0.25, 2), (-4.0558735269061511898 + 13.852334658567271386j)) + assert ae(fp.lambertw(-10.0, 2), (-0.34479123764318858696 + 14.112740596763592363j)) + assert ae(fp.lambertw(0+0j, 2), -fp.inf) + assert ae(fp.lambertw(4+0j, 2), (-1.0070343323804262788 + 10.903476551861683082j)) + assert ae(fp.lambertw(1000.5, 2), (4.4076185165459395295 + 11.365524591091402177j)) + assert ae(fp.lambertw(1e100, 2), (224.84156762724875878 + 12.510785262632255672j)) + assert ae(fp.lambertw(-1000.0, 2), (4.1984245610246530756 + 14.420478573754313845j)) + assert ae(fp.lambertw(1e-10, 2), (-26.362258095445866488 + 9.7800247407031482519j)) + assert ae(fp.lambertw(1e-10j, 2), (-26.384250801683084252 + 11.403535950607739763j)) + assert ae(fp.lambertw(3+4j, 2), (-0.86554679943333993562 + 11.849956798331992027j)) + assert ae(fp.lambertw(-3-4j, 2), (-0.55792273874679112639 + 8.7173627024159324811j)) + assert ae(fp.lambertw(10000+1000j, 2), (6.6223802254585662734 + 11.61348646825020766j)) + +def test_fp_stress_ei_e1(): + # Can be tightened on 
recent Pythons with more accurate math/cmath + ATOL = 1e-13 + PTOL = 1e-12 + v = fp.e1(1.1641532182693481445e-10) + assert ae(v, 22.296641293693077672, tol=ATOL) + assert type(v) is float + v = fp.e1(0.25) + assert ae(v, 1.0442826344437381945, tol=ATOL) + assert type(v) is float + v = fp.e1(1.0) + assert ae(v, 0.21938393439552027368, tol=ATOL) + assert type(v) is float + v = fp.e1(2.0) + assert ae(v, 0.048900510708061119567, tol=ATOL) + assert type(v) is float + v = fp.e1(5.0) + assert ae(v, 0.0011482955912753257973, tol=ATOL) + assert type(v) is float + v = fp.e1(20.0) + assert ae(v, 9.8355252906498816904e-11, tol=ATOL) + assert type(v) is float + v = fp.e1(30.0) + assert ae(v, 3.0215520106888125448e-15, tol=ATOL) + assert type(v) is float + v = fp.e1(40.0) + assert ae(v, 1.0367732614516569722e-19, tol=ATOL) + assert type(v) is float + v = fp.e1(50.0) + assert ae(v, 3.7832640295504590187e-24, tol=ATOL) + assert type(v) is float + v = fp.e1(80.0) + assert ae(v, 2.2285432586884729112e-37, tol=ATOL) + assert type(v) is float + v = fp.e1((1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (22.296641293693077672 + 0.0j), tol=ATOL) + assert ae(v.real, 22.296641293693077672, tol=PTOL) + assert v.imag == 0 + v = fp.e1((0.25 + 0.0j)) + assert ae(v, (1.0442826344437381945 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0442826344437381945, tol=PTOL) + assert v.imag == 0 + v = fp.e1((1.0 + 0.0j)) + assert ae(v, (0.21938393439552027368 + 0.0j), tol=ATOL) + assert ae(v.real, 0.21938393439552027368, tol=PTOL) + assert v.imag == 0 + v = fp.e1((2.0 + 0.0j)) + assert ae(v, (0.048900510708061119567 + 0.0j), tol=ATOL) + assert ae(v.real, 0.048900510708061119567, tol=PTOL) + assert v.imag == 0 + v = fp.e1((5.0 + 0.0j)) + assert ae(v, (0.0011482955912753257973 + 0.0j), tol=ATOL) + assert ae(v.real, 0.0011482955912753257973, tol=PTOL) + assert v.imag == 0 + v = fp.e1((20.0 + 0.0j)) + assert ae(v, (9.8355252906498816904e-11 + 0.0j), tol=ATOL) + assert ae(v.real, 9.8355252906498816904e-11, tol=PTOL) + assert v.imag == 0 + v = fp.e1((30.0 + 0.0j)) + assert ae(v, (3.0215520106888125448e-15 + 0.0j), tol=ATOL) + assert ae(v.real, 3.0215520106888125448e-15, tol=PTOL) + assert v.imag == 0 + v = fp.e1((40.0 + 0.0j)) + assert ae(v, (1.0367732614516569722e-19 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0367732614516569722e-19, tol=PTOL) + assert v.imag == 0 + v = fp.e1((50.0 + 0.0j)) + assert ae(v, (3.7832640295504590187e-24 + 0.0j), tol=ATOL) + assert ae(v.real, 3.7832640295504590187e-24, tol=PTOL) + assert v.imag == 0 + v = fp.e1((80.0 + 0.0j)) + assert ae(v, (2.2285432586884729112e-37 + 0.0j), tol=ATOL) + assert ae(v.real, 2.2285432586884729112e-37, tol=PTOL) + assert v.imag == 0 + v = fp.e1((4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (20.880034622014215597 - 0.24497866301044883237j), tol=ATOL) + assert ae(v.real, 20.880034622014215597, tol=PTOL) + assert ae(v.imag, -0.24497866301044883237, tol=PTOL) + v = fp.e1((1.0 + 0.25j)) + assert ae(v, (0.19731063945004229095 - 0.087366045774299963672j), tol=ATOL) + assert ae(v.real, 0.19731063945004229095, tol=PTOL) + assert ae(v.imag, -0.087366045774299963672, tol=PTOL) + v = fp.e1((4.0 + 1.0j)) + assert ae(v, (0.0013106173980145506944 - 0.0034542480199350626699j), tol=ATOL) + assert ae(v.real, 0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, -0.0034542480199350626699, tol=PTOL) + v = fp.e1((8.0 + 2.0j)) + assert ae(v, (-0.000022278049065270225945 - 0.000029191940456521555288j), tol=ATOL) + assert ae(v.real, -0.000022278049065270225945, tol=PTOL) + 
assert ae(v.imag, -0.000029191940456521555288, tol=PTOL) + v = fp.e1((20.0 + 5.0j)) + assert ae(v, (4.7711374515765346894e-11 + 8.2902652405126947359e-11j), tol=ATOL) + assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, 8.2902652405126947359e-11, tol=PTOL) + v = fp.e1((80.0 + 20.0j)) + assert ae(v, (3.8353473865788235787e-38 - 2.129247592349605139e-37j), tol=ATOL) + assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, -2.129247592349605139e-37, tol=PTOL) + v = fp.e1((120.0 + 30.0j)) + assert ae(v, (2.3836002337480334716e-55 + 5.6704043587126198306e-55j), tol=ATOL) + assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, 5.6704043587126198306e-55, tol=PTOL) + v = fp.e1((160.0 + 40.0j)) + assert ae(v, (-1.6238022898654510661e-72 - 1.104172355572287367e-72j), tol=ATOL) + assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, -1.104172355572287367e-72, tol=PTOL) + v = fp.e1((200.0 + 50.0j)) + assert ae(v, (6.6800061461666228487e-90 + 1.4473816083541016115e-91j), tol=ATOL) + assert ae(v.real, 6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, 1.4473816083541016115e-91, tol=PTOL) + v = fp.e1((320.0 + 80.0j)) + assert ae(v, (4.2737871527778786157e-143 + 3.1789935525785660314e-142j), tol=ATOL) + assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, 3.1789935525785660314e-142, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703413105017 - 0.7853981632810329878j), tol=ATOL) + assert ae(v.real, 21.950067703413105017, tol=PTOL) + assert ae(v.imag, -0.7853981632810329878, tol=PTOL) + v = fp.e1((0.25 + 0.25j)) + assert ae(v, (0.71092525792923287894 - 0.56491812441304194711j), tol=ATOL) + assert ae(v.real, 0.71092525792923287894, tol=PTOL) + assert ae(v.imag, -0.56491812441304194711, tol=PTOL) + v = fp.e1((1.0 + 1.0j)) + assert ae(v, (0.00028162445198141832551 - 0.17932453503935894015j), tol=ATOL) + assert ae(v.real, 0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, -0.17932453503935894015, tol=PTOL) + v = fp.e1((2.0 + 2.0j)) + assert ae(v, (-0.033767089606562004246 - 0.018599414169750541925j), tol=ATOL) + assert ae(v.real, -0.033767089606562004246, tol=PTOL) + assert ae(v.imag, -0.018599414169750541925, tol=PTOL) + v = fp.e1((5.0 + 5.0j)) + assert ae(v, (0.0007266506660356393891 + 0.00047102780163522245054j), tol=ATOL) + assert ae(v.real, 0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, 0.00047102780163522245054, tol=PTOL) + v = fp.e1((20.0 + 20.0j)) + assert ae(v, (-2.3824537449367396579e-11 - 6.6969873156525615158e-11j), tol=ATOL) + assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, -6.6969873156525615158e-11, tol=PTOL) + v = fp.e1((30.0 + 30.0j)) + assert ae(v, (1.7316045841744061617e-15 + 1.3065678019487308689e-15j), tol=ATOL) + assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, 1.3065678019487308689e-15, tol=PTOL) + v = fp.e1((40.0 + 40.0j)) + assert ae(v, (-7.4001043002899232182e-20 - 4.991847855336816304e-21j), tol=ATOL) + assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, -4.991847855336816304e-21, tol=PTOL) + v = fp.e1((50.0 + 50.0j)) + assert ae(v, (2.3566128324644641219e-24 - 1.3188326726201614778e-24j), tol=ATOL) + assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, -1.3188326726201614778e-24, tol=PTOL) + v = fp.e1((80.0 + 80.0j)) + assert ae(v, (9.8279750572186526673e-38 + 1.243952841288868831e-37j), tol=ATOL) + 
assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, 1.243952841288868831e-37, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621664969632 - 1.3258176632023711778j), tol=ATOL) + assert ae(v.real, 20.880034621664969632, tol=PTOL) + assert ae(v.imag, -1.3258176632023711778, tol=PTOL) + v = fp.e1((0.25 + 1.0j)) + assert ae(v, (-0.16868306393667788761 - 0.4858011885947426971j), tol=ATOL) + assert ae(v.real, -0.16868306393667788761, tol=PTOL) + assert ae(v.imag, -0.4858011885947426971, tol=PTOL) + v = fp.e1((1.0 + 4.0j)) + assert ae(v, (0.03373591813926547318 + 0.073523452241083821877j), tol=ATOL) + assert ae(v.real, 0.03373591813926547318, tol=PTOL) + assert ae(v.imag, 0.073523452241083821877, tol=PTOL) + v = fp.e1((2.0 + 8.0j)) + assert ae(v, (-0.015392833434733785143 - 0.0031747121557605415914j), tol=ATOL) + assert ae(v.real, -0.015392833434733785143, tol=PTOL) + assert ae(v.imag, -0.0031747121557605415914, tol=PTOL) + v = fp.e1((5.0 + 20.0j)) + assert ae(v, (-0.00024419662286542966525 - 0.00021008322966152755674j), tol=ATOL) + assert ae(v.real, -0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, -0.00021008322966152755674, tol=PTOL) + v = fp.e1((20.0 + 80.0j)) + assert ae(v, (2.3255552781051330088e-11 + 8.9463918891349438007e-12j), tol=ATOL) + assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, 8.9463918891349438007e-12, tol=PTOL) + v = fp.e1((30.0 + 120.0j)) + assert ae(v, (-2.7068919097124652332e-16 - 7.0477762411705130239e-16j), tol=ATOL) + assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, -7.0477762411705130239e-16, tol=PTOL) + v = fp.e1((40.0 + 160.0j)) + assert ae(v, (-1.1695597827678024687e-20 + 2.2907401455645736661e-20j), tol=ATOL) + assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, 2.2907401455645736661e-20, tol=PTOL) + v = fp.e1((50.0 + 200.0j)) + assert ae(v, (9.0323746914410162531e-25 - 2.3950601790033530935e-25j), tol=ATOL) + assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, -2.3950601790033530935e-25, tol=PTOL) + v = fp.e1((80.0 + 320.0j)) + assert ae(v, (3.4819106748728063576e-38 - 4.215653005615772724e-38j), tol=ATOL) + assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, -4.215653005615772724e-38, tol=PTOL) + v = fp.e1((0.0 + 1.1641532182693481445e-10j)) + assert ae(v, (22.29664129357666235 - 1.5707963266784812974j), tol=ATOL) + assert ae(v.real, 22.29664129357666235, tol=PTOL) + assert ae(v.imag, -1.5707963266784812974, tol=PTOL) + v = fp.e1((0.0 + 0.25j)) + assert ae(v, (0.82466306258094565309 - 1.3216627564751394551j), tol=ATOL) + assert ae(v.real, 0.82466306258094565309, tol=PTOL) + assert ae(v.imag, -1.3216627564751394551, tol=PTOL) + v = fp.e1((0.0 + 1.0j)) + assert ae(v, (-0.33740392290096813466 - 0.62471325642771360429j), tol=ATOL) + assert ae(v.real, -0.33740392290096813466, tol=PTOL) + assert ae(v.imag, -0.62471325642771360429, tol=PTOL) + v = fp.e1((0.0 + 2.0j)) + assert ae(v, (-0.4229808287748649957 + 0.034616650007798229345j), tol=ATOL) + assert ae(v.real, -0.4229808287748649957, tol=PTOL) + assert ae(v.imag, 0.034616650007798229345, tol=PTOL) + v = fp.e1((0.0 + 5.0j)) + assert ae(v, (0.19002974965664387862 - 0.020865081850222481957j), tol=ATOL) + assert ae(v.real, 0.19002974965664387862, tol=PTOL) + assert ae(v.imag, -0.020865081850222481957, tol=PTOL) + v = fp.e1((0.0 + 20.0j)) + assert ae(v, (-0.04441982084535331654 - 0.022554625751456779068j), 
tol=ATOL) + assert ae(v.real, -0.04441982084535331654, tol=PTOL) + assert ae(v.imag, -0.022554625751456779068, tol=PTOL) + v = fp.e1((0.0 + 30.0j)) + assert ae(v, (0.033032417282071143779 - 0.0040397867645455082476j), tol=ATOL) + assert ae(v.real, 0.033032417282071143779, tol=PTOL) + assert ae(v.imag, -0.0040397867645455082476, tol=PTOL) + v = fp.e1((0.0 + 40.0j)) + assert ae(v, (-0.019020007896208766962 + 0.016188792559887887544j), tol=ATOL) + assert ae(v.real, -0.019020007896208766962, tol=PTOL) + assert ae(v.imag, 0.016188792559887887544, tol=PTOL) + v = fp.e1((0.0 + 50.0j)) + assert ae(v, (0.0056283863241163054402 - 0.019179254308960724503j), tol=ATOL) + assert ae(v.real, 0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, -0.019179254308960724503, tol=PTOL) + v = fp.e1((0.0 + 80.0j)) + assert ae(v, (0.012402501155070958192 + 0.0015345601175906961199j), tol=ATOL) + assert ae(v.real, 0.012402501155070958192, tol=PTOL) + assert ae(v.imag, 0.0015345601175906961199, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621432138988 - 1.8157749894560994861j), tol=ATOL) + assert ae(v.real, 20.880034621432138988, tol=PTOL) + assert ae(v.imag, -1.8157749894560994861, tol=PTOL) + v = fp.e1((-0.25 + 1.0j)) + assert ae(v, (-0.59066621214766308594 - 0.74474454765205036972j), tol=ATOL) + assert ae(v.real, -0.59066621214766308594, tol=PTOL) + assert ae(v.imag, -0.74474454765205036972, tol=PTOL) + v = fp.e1((-1.0 + 4.0j)) + assert ae(v, (0.49739047283060471093 + 0.41543605404038863174j), tol=ATOL) + assert ae(v.real, 0.49739047283060471093, tol=PTOL) + assert ae(v.imag, 0.41543605404038863174, tol=PTOL) + v = fp.e1((-2.0 + 8.0j)) + assert ae(v, (-0.8705211147733730969 + 0.24099328498605539667j), tol=ATOL) + assert ae(v.real, -0.8705211147733730969, tol=PTOL) + assert ae(v.imag, 0.24099328498605539667, tol=PTOL) + v = fp.e1((-5.0 + 20.0j)) + assert ae(v, (-7.0789514293925893007 - 1.6102177171960790536j), tol=ATOL) + assert ae(v.real, -7.0789514293925893007, tol=PTOL) + assert ae(v.imag, -1.6102177171960790536, tol=PTOL) + v = fp.e1((-20.0 + 80.0j)) + assert ae(v, (5855431.4907298084434 - 720920.93315409165707j), tol=ATOL) + assert ae(v.real, 5855431.4907298084434, tol=PTOL) + assert ae(v.imag, -720920.93315409165707, tol=PTOL) + v = fp.e1((-30.0 + 120.0j)) + assert ae(v, (-65402491644.703470747 - 56697658399.657460294j), tol=ATOL) + assert ae(v.real, -65402491644.703470747, tol=PTOL) + assert ae(v.imag, -56697658399.657460294, tol=PTOL) + v = fp.e1((-40.0 + 160.0j)) + assert ae(v, (25504929379604.776769 + 1429035198630573.2463j), tol=ATOL) + assert ae(v.real, 25504929379604.776769, tol=PTOL) + assert ae(v.imag, 1429035198630573.2463, tol=PTOL) + v = fp.e1((-50.0 + 200.0j)) + assert ae(v, (18437746526988116954.0 - 17146362239046152345.0j), tol=ATOL) + assert ae(v.real, 18437746526988116954.0, tol=PTOL) + assert ae(v.imag, -17146362239046152345.0, tol=PTOL) + v = fp.e1((-80.0 + 320.0j)) + assert ae(v, (3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL) + v = fp.e1((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (20.880034621082893023 - 2.8966139903465137624j), tol=ATOL) + assert ae(v.real, 20.880034621082893023, tol=PTOL) + assert ae(v.imag, -2.8966139903465137624, tol=PTOL) + v = fp.e1((-1.0 + 0.25j)) + assert ae(v, (-1.8942716983721074932 - 2.4689102827070540799j), tol=ATOL) + assert 
ae(v.real, -1.8942716983721074932, tol=PTOL) + assert ae(v.imag, -2.4689102827070540799, tol=PTOL) + v = fp.e1((-4.0 + 1.0j)) + assert ae(v, (-14.806699492675420438 + 9.1384225230837893776j), tol=ATOL) + assert ae(v.real, -14.806699492675420438, tol=PTOL) + assert ae(v.imag, 9.1384225230837893776, tol=PTOL) + v = fp.e1((-8.0 + 2.0j)) + assert ae(v, (54.633252667426386294 + 413.20318163814670688j), tol=ATOL) + assert ae(v.real, 54.633252667426386294, tol=PTOL) + assert ae(v.imag, 413.20318163814670688, tol=PTOL) + v = fp.e1((-20.0 + 5.0j)) + assert ae(v, (-711836.97165402624643 - 24745250.939695900956j), tol=ATOL) + assert ae(v.real, -711836.97165402624643, tol=PTOL) + assert ae(v.imag, -24745250.939695900956, tol=PTOL) + v = fp.e1((-80.0 + 20.0j)) + assert ae(v, (-4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL) + v = fp.e1((-120.0 + 30.0j)) + assert ae(v, (9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL) + v = fp.e1((-160.0 + 40.0j)) + assert ae(v, (8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL) + v = fp.e1((-200.0 + 50.0j)) + assert ae(v, (-3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, -3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL) + v = fp.e1((-320.0 + 80.0j)) + assert ae(v, (9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL) + v = fp.e1(-1.1641532182693481445e-10) + assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 22.296641293460247028, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-0.25) + assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 0.54254326466191372953, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-1.0) + assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.8951178163559367555, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-2.0) + assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.9542343560018901634, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-5.0) + assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -40.185275355803177455, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-20.0) + assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -25615652.66405658882, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-30.0) + assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -368973209407.27419706, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-40.0) + assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6039718263611241.5784, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-50.0) + assert ae(v, 
(-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1(-80.0) + assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 22.296641293460247028, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-0.25 + 0.0j)) + assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 0.54254326466191372953, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-1.0 + 0.0j)) + assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.8951178163559367555, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-2.0 + 0.0j)) + assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.9542343560018901634, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-5.0 + 0.0j)) + assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -40.185275355803177455, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-20.0 + 0.0j)) + assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -25615652.66405658882, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-30.0 + 0.0j)) + assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -368973209407.27419706, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-40.0 + 0.0j)) + assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6039718263611241.5784, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-50.0 + 0.0j)) + assert ae(v, (-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-80.0 + 0.0j)) + assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.e1((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (20.880034621082893023 + 2.8966139903465137624j), tol=ATOL) + assert ae(v.real, 20.880034621082893023, tol=PTOL) + assert ae(v.imag, 2.8966139903465137624, tol=PTOL) + v = fp.e1((-1.0 - 0.25j)) + assert ae(v, (-1.8942716983721074932 + 2.4689102827070540799j), tol=ATOL) + assert ae(v.real, -1.8942716983721074932, tol=PTOL) + assert ae(v.imag, 2.4689102827070540799, tol=PTOL) + v = fp.e1((-4.0 - 1.0j)) + assert ae(v, (-14.806699492675420438 - 9.1384225230837893776j), tol=ATOL) + assert ae(v.real, -14.806699492675420438, tol=PTOL) + assert ae(v.imag, -9.1384225230837893776, tol=PTOL) + v = fp.e1((-8.0 - 2.0j)) + assert ae(v, (54.633252667426386294 - 413.20318163814670688j), tol=ATOL) + assert ae(v.real, 54.633252667426386294, tol=PTOL) + assert ae(v.imag, -413.20318163814670688, tol=PTOL) + v = fp.e1((-20.0 - 5.0j)) + assert ae(v, (-711836.97165402624643 + 24745250.939695900956j), tol=ATOL) + assert ae(v.real, 
-711836.97165402624643, tol=PTOL) + assert ae(v.imag, 24745250.939695900956, tol=PTOL) + v = fp.e1((-80.0 - 20.0j)) + assert ae(v, (-4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL) + v = fp.e1((-120.0 - 30.0j)) + assert ae(v, (9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL) + v = fp.e1((-160.0 - 40.0j)) + assert ae(v, (8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL) + v = fp.e1((-200.0 - 50.0j)) + assert ae(v, (-3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, -3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL) + v = fp.e1((-320.0 - 80.0j)) + assert ae(v, (9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703180274374 + 2.356194490075929607j), tol=ATOL) + assert ae(v.real, 21.950067703180274374, tol=PTOL) + assert ae(v.imag, 2.356194490075929607, tol=PTOL) + v = fp.e1((-0.25 - 0.25j)) + assert ae(v, (0.21441047326710323254 + 2.0732153554307936389j), tol=ATOL) + assert ae(v.real, 0.21441047326710323254, tol=PTOL) + assert ae(v.imag, 2.0732153554307936389, tol=PTOL) + v = fp.e1((-1.0 - 1.0j)) + assert ae(v, (-1.7646259855638540684 + 0.7538228020792708192j), tol=ATOL) + assert ae(v.real, -1.7646259855638540684, tol=PTOL) + assert ae(v.imag, 0.7538228020792708192, tol=PTOL) + v = fp.e1((-2.0 - 2.0j)) + assert ae(v, (-1.8920781621855474089 - 2.1753697842428647236j), tol=ATOL) + assert ae(v.real, -1.8920781621855474089, tol=PTOL) + assert ae(v.imag, -2.1753697842428647236, tol=PTOL) + v = fp.e1((-5.0 - 5.0j)) + assert ae(v, (13.470936071475245856 + 18.464085049321024206j), tol=ATOL) + assert ae(v.real, 13.470936071475245856, tol=PTOL) + assert ae(v.imag, 18.464085049321024206, tol=PTOL) + v = fp.e1((-20.0 - 20.0j)) + assert ae(v, (-16589317.398788971896 - 5831702.3296441771206j), tol=ATOL) + assert ae(v.real, -16589317.398788971896, tol=PTOL) + assert ae(v.imag, -5831702.3296441771206, tol=PTOL) + v = fp.e1((-30.0 - 30.0j)) + assert ae(v, (154596484273.69322527 + 204179357837.41389696j), tol=ATOL) + assert ae(v.real, 154596484273.69322527, tol=PTOL) + assert ae(v.imag, 204179357837.41389696, tol=PTOL) + v = fp.e1((-40.0 - 40.0j)) + assert ae(v, (-287512180321448.45408 - 4203502407932314.974j), tol=ATOL) + assert ae(v.real, -287512180321448.45408, tol=PTOL) + assert ae(v.imag, -4203502407932314.974, tol=PTOL) + v = fp.e1((-50.0 - 50.0j)) + assert ae(v, (-36128528616649268826.0 + 64648801861338741963.0j), tol=ATOL) + assert ae(v.real, -36128528616649268826.0, tol=PTOL) + assert ae(v.imag, 64648801861338741963.0, tol=PTOL) + v = fp.e1((-80.0 - 80.0j)) + assert ae(v, (3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, 3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL) + v = fp.e1((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621432138988 + 1.8157749894560994861j), tol=ATOL) + assert 
ae(v.real, 20.880034621432138988, tol=PTOL) + assert ae(v.imag, 1.8157749894560994861, tol=PTOL) + v = fp.e1((-0.25 - 1.0j)) + assert ae(v, (-0.59066621214766308594 + 0.74474454765205036972j), tol=ATOL) + assert ae(v.real, -0.59066621214766308594, tol=PTOL) + assert ae(v.imag, 0.74474454765205036972, tol=PTOL) + v = fp.e1((-1.0 - 4.0j)) + assert ae(v, (0.49739047283060471093 - 0.41543605404038863174j), tol=ATOL) + assert ae(v.real, 0.49739047283060471093, tol=PTOL) + assert ae(v.imag, -0.41543605404038863174, tol=PTOL) + v = fp.e1((-2.0 - 8.0j)) + assert ae(v, (-0.8705211147733730969 - 0.24099328498605539667j), tol=ATOL) + assert ae(v.real, -0.8705211147733730969, tol=PTOL) + assert ae(v.imag, -0.24099328498605539667, tol=PTOL) + v = fp.e1((-5.0 - 20.0j)) + assert ae(v, (-7.0789514293925893007 + 1.6102177171960790536j), tol=ATOL) + assert ae(v.real, -7.0789514293925893007, tol=PTOL) + assert ae(v.imag, 1.6102177171960790536, tol=PTOL) + v = fp.e1((-20.0 - 80.0j)) + assert ae(v, (5855431.4907298084434 + 720920.93315409165707j), tol=ATOL) + assert ae(v.real, 5855431.4907298084434, tol=PTOL) + assert ae(v.imag, 720920.93315409165707, tol=PTOL) + v = fp.e1((-30.0 - 120.0j)) + assert ae(v, (-65402491644.703470747 + 56697658399.657460294j), tol=ATOL) + assert ae(v.real, -65402491644.703470747, tol=PTOL) + assert ae(v.imag, 56697658399.657460294, tol=PTOL) + v = fp.e1((-40.0 - 160.0j)) + assert ae(v, (25504929379604.776769 - 1429035198630573.2463j), tol=ATOL) + assert ae(v.real, 25504929379604.776769, tol=PTOL) + assert ae(v.imag, -1429035198630573.2463, tol=PTOL) + v = fp.e1((-50.0 - 200.0j)) + assert ae(v, (18437746526988116954.0 + 17146362239046152345.0j), tol=ATOL) + assert ae(v.real, 18437746526988116954.0, tol=PTOL) + assert ae(v.imag, 17146362239046152345.0, tol=PTOL) + v = fp.e1((-80.0 - 320.0j)) + assert ae(v, (3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL) + v = fp.e1((0.0 - 1.1641532182693481445e-10j)) + assert ae(v, (22.29664129357666235 + 1.5707963266784812974j), tol=ATOL) + assert ae(v.real, 22.29664129357666235, tol=PTOL) + assert ae(v.imag, 1.5707963266784812974, tol=PTOL) + v = fp.e1((0.0 - 0.25j)) + assert ae(v, (0.82466306258094565309 + 1.3216627564751394551j), tol=ATOL) + assert ae(v.real, 0.82466306258094565309, tol=PTOL) + assert ae(v.imag, 1.3216627564751394551, tol=PTOL) + v = fp.e1((0.0 - 1.0j)) + assert ae(v, (-0.33740392290096813466 + 0.62471325642771360429j), tol=ATOL) + assert ae(v.real, -0.33740392290096813466, tol=PTOL) + assert ae(v.imag, 0.62471325642771360429, tol=PTOL) + v = fp.e1((0.0 - 2.0j)) + assert ae(v, (-0.4229808287748649957 - 0.034616650007798229345j), tol=ATOL) + assert ae(v.real, -0.4229808287748649957, tol=PTOL) + assert ae(v.imag, -0.034616650007798229345, tol=PTOL) + v = fp.e1((0.0 - 5.0j)) + assert ae(v, (0.19002974965664387862 + 0.020865081850222481957j), tol=ATOL) + assert ae(v.real, 0.19002974965664387862, tol=PTOL) + assert ae(v.imag, 0.020865081850222481957, tol=PTOL) + v = fp.e1((0.0 - 20.0j)) + assert ae(v, (-0.04441982084535331654 + 0.022554625751456779068j), tol=ATOL) + assert ae(v.real, -0.04441982084535331654, tol=PTOL) + assert ae(v.imag, 0.022554625751456779068, tol=PTOL) + v = fp.e1((0.0 - 30.0j)) + assert ae(v, (0.033032417282071143779 + 0.0040397867645455082476j), tol=ATOL) + assert ae(v.real, 0.033032417282071143779, tol=PTOL) + assert ae(v.imag, 0.0040397867645455082476, tol=PTOL) + v = fp.e1((0.0 
- 40.0j)) + assert ae(v, (-0.019020007896208766962 - 0.016188792559887887544j), tol=ATOL) + assert ae(v.real, -0.019020007896208766962, tol=PTOL) + assert ae(v.imag, -0.016188792559887887544, tol=PTOL) + v = fp.e1((0.0 - 50.0j)) + assert ae(v, (0.0056283863241163054402 + 0.019179254308960724503j), tol=ATOL) + assert ae(v.real, 0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, 0.019179254308960724503, tol=PTOL) + v = fp.e1((0.0 - 80.0j)) + assert ae(v, (0.012402501155070958192 - 0.0015345601175906961199j), tol=ATOL) + assert ae(v.real, 0.012402501155070958192, tol=PTOL) + assert ae(v.imag, -0.0015345601175906961199, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (20.880034621664969632 + 1.3258176632023711778j), tol=ATOL) + assert ae(v.real, 20.880034621664969632, tol=PTOL) + assert ae(v.imag, 1.3258176632023711778, tol=PTOL) + v = fp.e1((0.25 - 1.0j)) + assert ae(v, (-0.16868306393667788761 + 0.4858011885947426971j), tol=ATOL) + assert ae(v.real, -0.16868306393667788761, tol=PTOL) + assert ae(v.imag, 0.4858011885947426971, tol=PTOL) + v = fp.e1((1.0 - 4.0j)) + assert ae(v, (0.03373591813926547318 - 0.073523452241083821877j), tol=ATOL) + assert ae(v.real, 0.03373591813926547318, tol=PTOL) + assert ae(v.imag, -0.073523452241083821877, tol=PTOL) + v = fp.e1((2.0 - 8.0j)) + assert ae(v, (-0.015392833434733785143 + 0.0031747121557605415914j), tol=ATOL) + assert ae(v.real, -0.015392833434733785143, tol=PTOL) + assert ae(v.imag, 0.0031747121557605415914, tol=PTOL) + v = fp.e1((5.0 - 20.0j)) + assert ae(v, (-0.00024419662286542966525 + 0.00021008322966152755674j), tol=ATOL) + assert ae(v.real, -0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, 0.00021008322966152755674, tol=PTOL) + v = fp.e1((20.0 - 80.0j)) + assert ae(v, (2.3255552781051330088e-11 - 8.9463918891349438007e-12j), tol=ATOL) + assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, -8.9463918891349438007e-12, tol=PTOL) + v = fp.e1((30.0 - 120.0j)) + assert ae(v, (-2.7068919097124652332e-16 + 7.0477762411705130239e-16j), tol=ATOL) + assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, 7.0477762411705130239e-16, tol=PTOL) + v = fp.e1((40.0 - 160.0j)) + assert ae(v, (-1.1695597827678024687e-20 - 2.2907401455645736661e-20j), tol=ATOL) + assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, -2.2907401455645736661e-20, tol=PTOL) + v = fp.e1((50.0 - 200.0j)) + assert ae(v, (9.0323746914410162531e-25 + 2.3950601790033530935e-25j), tol=ATOL) + assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, 2.3950601790033530935e-25, tol=PTOL) + v = fp.e1((80.0 - 320.0j)) + assert ae(v, (3.4819106748728063576e-38 + 4.215653005615772724e-38j), tol=ATOL) + assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, 4.215653005615772724e-38, tol=PTOL) + v = fp.e1((1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (21.950067703413105017 + 0.7853981632810329878j), tol=ATOL) + assert ae(v.real, 21.950067703413105017, tol=PTOL) + assert ae(v.imag, 0.7853981632810329878, tol=PTOL) + v = fp.e1((0.25 - 0.25j)) + assert ae(v, (0.71092525792923287894 + 0.56491812441304194711j), tol=ATOL) + assert ae(v.real, 0.71092525792923287894, tol=PTOL) + assert ae(v.imag, 0.56491812441304194711, tol=PTOL) + v = fp.e1((1.0 - 1.0j)) + assert ae(v, (0.00028162445198141832551 + 0.17932453503935894015j), tol=ATOL) + assert ae(v.real, 0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, 
0.17932453503935894015, tol=PTOL) + v = fp.e1((2.0 - 2.0j)) + assert ae(v, (-0.033767089606562004246 + 0.018599414169750541925j), tol=ATOL) + assert ae(v.real, -0.033767089606562004246, tol=PTOL) + assert ae(v.imag, 0.018599414169750541925, tol=PTOL) + v = fp.e1((5.0 - 5.0j)) + assert ae(v, (0.0007266506660356393891 - 0.00047102780163522245054j), tol=ATOL) + assert ae(v.real, 0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, -0.00047102780163522245054, tol=PTOL) + v = fp.e1((20.0 - 20.0j)) + assert ae(v, (-2.3824537449367396579e-11 + 6.6969873156525615158e-11j), tol=ATOL) + assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, 6.6969873156525615158e-11, tol=PTOL) + v = fp.e1((30.0 - 30.0j)) + assert ae(v, (1.7316045841744061617e-15 - 1.3065678019487308689e-15j), tol=ATOL) + assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, -1.3065678019487308689e-15, tol=PTOL) + v = fp.e1((40.0 - 40.0j)) + assert ae(v, (-7.4001043002899232182e-20 + 4.991847855336816304e-21j), tol=ATOL) + assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, 4.991847855336816304e-21, tol=PTOL) + v = fp.e1((50.0 - 50.0j)) + assert ae(v, (2.3566128324644641219e-24 + 1.3188326726201614778e-24j), tol=ATOL) + assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, 1.3188326726201614778e-24, tol=PTOL) + v = fp.e1((80.0 - 80.0j)) + assert ae(v, (9.8279750572186526673e-38 - 1.243952841288868831e-37j), tol=ATOL) + assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, -1.243952841288868831e-37, tol=PTOL) + v = fp.e1((4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (20.880034622014215597 + 0.24497866301044883237j), tol=ATOL) + assert ae(v.real, 20.880034622014215597, tol=PTOL) + assert ae(v.imag, 0.24497866301044883237, tol=PTOL) + v = fp.e1((1.0 - 0.25j)) + assert ae(v, (0.19731063945004229095 + 0.087366045774299963672j), tol=ATOL) + assert ae(v.real, 0.19731063945004229095, tol=PTOL) + assert ae(v.imag, 0.087366045774299963672, tol=PTOL) + v = fp.e1((4.0 - 1.0j)) + assert ae(v, (0.0013106173980145506944 + 0.0034542480199350626699j), tol=ATOL) + assert ae(v.real, 0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, 0.0034542480199350626699, tol=PTOL) + v = fp.e1((8.0 - 2.0j)) + assert ae(v, (-0.000022278049065270225945 + 0.000029191940456521555288j), tol=ATOL) + assert ae(v.real, -0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, 0.000029191940456521555288, tol=PTOL) + v = fp.e1((20.0 - 5.0j)) + assert ae(v, (4.7711374515765346894e-11 - 8.2902652405126947359e-11j), tol=ATOL) + assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, -8.2902652405126947359e-11, tol=PTOL) + v = fp.e1((80.0 - 20.0j)) + assert ae(v, (3.8353473865788235787e-38 + 2.129247592349605139e-37j), tol=ATOL) + assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, 2.129247592349605139e-37, tol=PTOL) + v = fp.e1((120.0 - 30.0j)) + assert ae(v, (2.3836002337480334716e-55 - 5.6704043587126198306e-55j), tol=ATOL) + assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, -5.6704043587126198306e-55, tol=PTOL) + v = fp.e1((160.0 - 40.0j)) + assert ae(v, (-1.6238022898654510661e-72 + 1.104172355572287367e-72j), tol=ATOL) + assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, 1.104172355572287367e-72, tol=PTOL) + v = fp.e1((200.0 - 50.0j)) + assert ae(v, (6.6800061461666228487e-90 - 1.4473816083541016115e-91j), tol=ATOL) + assert ae(v.real, 
6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, -1.4473816083541016115e-91, tol=PTOL) + v = fp.e1((320.0 - 80.0j)) + assert ae(v, (4.2737871527778786157e-143 - 3.1789935525785660314e-142j), tol=ATOL) + assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, -3.1789935525785660314e-142, tol=PTOL) + v = fp.ei(1.1641532182693481445e-10) + assert ae(v, -22.296641293460247028, tol=ATOL) + assert type(v) is float + v = fp.ei(0.25) + assert ae(v, -0.54254326466191372953, tol=ATOL) + assert type(v) is float + v = fp.ei(1.0) + assert ae(v, 1.8951178163559367555, tol=ATOL) + assert type(v) is float + v = fp.ei(2.0) + assert ae(v, 4.9542343560018901634, tol=ATOL) + assert type(v) is float + v = fp.ei(5.0) + assert ae(v, 40.185275355803177455, tol=ATOL) + assert type(v) is float + v = fp.ei(20.0) + assert ae(v, 25615652.66405658882, tol=ATOL) + assert type(v) is float + v = fp.ei(30.0) + assert ae(v, 368973209407.27419706, tol=ATOL) + assert type(v) is float + v = fp.ei(40.0) + assert ae(v, 6039718263611241.5784, tol=ATOL) + assert type(v) is float + v = fp.ei(50.0) + assert ae(v, 1.0585636897131690963e+20, tol=ATOL) + assert type(v) is float + v = fp.ei(80.0) + assert ae(v, 7.0146000049047999696e+32, tol=ATOL) + assert type(v) is float + v = fp.ei((1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (-22.296641293460247028 + 0.0j), tol=ATOL) + assert ae(v.real, -22.296641293460247028, tol=PTOL) + assert v.imag == 0 + v = fp.ei((0.25 + 0.0j)) + assert ae(v, (-0.54254326466191372953 + 0.0j), tol=ATOL) + assert ae(v.real, -0.54254326466191372953, tol=PTOL) + assert v.imag == 0 + v = fp.ei((1.0 + 0.0j)) + assert ae(v, (1.8951178163559367555 + 0.0j), tol=ATOL) + assert ae(v.real, 1.8951178163559367555, tol=PTOL) + assert v.imag == 0 + v = fp.ei((2.0 + 0.0j)) + assert ae(v, (4.9542343560018901634 + 0.0j), tol=ATOL) + assert ae(v.real, 4.9542343560018901634, tol=PTOL) + assert v.imag == 0 + v = fp.ei((5.0 + 0.0j)) + assert ae(v, (40.185275355803177455 + 0.0j), tol=ATOL) + assert ae(v.real, 40.185275355803177455, tol=PTOL) + assert v.imag == 0 + v = fp.ei((20.0 + 0.0j)) + assert ae(v, (25615652.66405658882 + 0.0j), tol=ATOL) + assert ae(v.real, 25615652.66405658882, tol=PTOL) + assert v.imag == 0 + v = fp.ei((30.0 + 0.0j)) + assert ae(v, (368973209407.27419706 + 0.0j), tol=ATOL) + assert ae(v.real, 368973209407.27419706, tol=PTOL) + assert v.imag == 0 + v = fp.ei((40.0 + 0.0j)) + assert ae(v, (6039718263611241.5784 + 0.0j), tol=ATOL) + assert ae(v.real, 6039718263611241.5784, tol=PTOL) + assert v.imag == 0 + v = fp.ei((50.0 + 0.0j)) + assert ae(v, (1.0585636897131690963e+20 + 0.0j), tol=ATOL) + assert ae(v.real, 1.0585636897131690963e+20, tol=PTOL) + assert v.imag == 0 + v = fp.ei((80.0 + 0.0j)) + assert ae(v, (7.0146000049047999696e+32 + 0.0j), tol=ATOL) + assert ae(v.real, 7.0146000049047999696e+32, tol=PTOL) + assert v.imag == 0 + v = fp.ei((4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034621082893023 + 0.24497866324327947603j), tol=ATOL) + assert ae(v.real, -20.880034621082893023, tol=PTOL) + assert ae(v.imag, 0.24497866324327947603, tol=PTOL) + v = fp.ei((1.0 + 0.25j)) + assert ae(v, (1.8942716983721074932 + 0.67268237088273915854j), tol=ATOL) + assert ae(v.real, 1.8942716983721074932, tol=PTOL) + assert ae(v.imag, 0.67268237088273915854, tol=PTOL) + v = fp.ei((4.0 + 1.0j)) + assert ae(v, (14.806699492675420438 + 12.280015176673582616j), tol=ATOL) + assert ae(v.real, 14.806699492675420438, tol=PTOL) + assert ae(v.imag, 12.280015176673582616, 
tol=PTOL) + v = fp.ei((8.0 + 2.0j)) + assert ae(v, (-54.633252667426386294 + 416.34477429173650012j), tol=ATOL) + assert ae(v.real, -54.633252667426386294, tol=PTOL) + assert ae(v.imag, 416.34477429173650012, tol=PTOL) + v = fp.ei((20.0 + 5.0j)) + assert ae(v, (711836.97165402624643 - 24745247.798103247366j), tol=ATOL) + assert ae(v.real, 711836.97165402624643, tol=PTOL) + assert ae(v.imag, -24745247.798103247366, tol=PTOL) + v = fp.ei((80.0 + 20.0j)) + assert ae(v, (4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL) + assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL) + assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL) + v = fp.ei((120.0 + 30.0j)) + assert ae(v, (-9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL) + assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL) + assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL) + v = fp.ei((160.0 + 40.0j)) + assert ae(v, (-8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL) + assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL) + assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL) + v = fp.ei((200.0 + 50.0j)) + assert ae(v, (3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL) + assert ae(v.real, 3.070744996327018106e+84, tol=PTOL) + assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL) + v = fp.ei((320.0 + 80.0j)) + assert ae(v, (-9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL) + assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL) + assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703180274374 + 0.78539816351386363145j), tol=ATOL) + assert ae(v.real, -21.950067703180274374, tol=PTOL) + assert ae(v.imag, 0.78539816351386363145, tol=PTOL) + v = fp.ei((0.25 + 0.25j)) + assert ae(v, (-0.21441047326710323254 + 1.0683772981589995996j), tol=ATOL) + assert ae(v.real, -0.21441047326710323254, tol=PTOL) + assert ae(v.imag, 1.0683772981589995996, tol=PTOL) + v = fp.ei((1.0 + 1.0j)) + assert ae(v, (1.7646259855638540684 + 2.3877698515105224193j), tol=ATOL) + assert ae(v.real, 1.7646259855638540684, tol=PTOL) + assert ae(v.imag, 2.3877698515105224193, tol=PTOL) + v = fp.ei((2.0 + 2.0j)) + assert ae(v, (1.8920781621855474089 + 5.3169624378326579621j), tol=ATOL) + assert ae(v.real, 1.8920781621855474089, tol=PTOL) + assert ae(v.imag, 5.3169624378326579621, tol=PTOL) + v = fp.ei((5.0 + 5.0j)) + assert ae(v, (-13.470936071475245856 - 15.322492395731230968j), tol=ATOL) + assert ae(v.real, -13.470936071475245856, tol=PTOL) + assert ae(v.imag, -15.322492395731230968, tol=PTOL) + v = fp.ei((20.0 + 20.0j)) + assert ae(v, (16589317.398788971896 + 5831705.4712368307104j), tol=ATOL) + assert ae(v.real, 16589317.398788971896, tol=PTOL) + assert ae(v.imag, 5831705.4712368307104, tol=PTOL) + v = fp.ei((30.0 + 30.0j)) + assert ae(v, (-154596484273.69322527 - 204179357834.2723043j), tol=ATOL) + assert ae(v.real, -154596484273.69322527, tol=PTOL) + assert ae(v.imag, -204179357834.2723043, tol=PTOL) + v = fp.ei((40.0 + 40.0j)) + assert ae(v, (287512180321448.45408 + 4203502407932318.1156j), tol=ATOL) + assert ae(v.real, 287512180321448.45408, tol=PTOL) + assert ae(v.imag, 4203502407932318.1156, tol=PTOL) + v = fp.ei((50.0 + 50.0j)) + assert ae(v, (36128528616649268826.0 - 64648801861338741960.0j), tol=ATOL) + assert ae(v.real, 36128528616649268826.0, tol=PTOL) + assert ae(v.imag, -64648801861338741960.0, tol=PTOL) + v = fp.ei((80.0 + 80.0j)) + assert ae(v, 
(-3.8674816337930010217e+32 - 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, -3.0540709639658071041e+32, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621432138988 + 1.3258176641336937524j), tol=ATOL) + assert ae(v.real, -20.880034621432138988, tol=PTOL) + assert ae(v.imag, 1.3258176641336937524, tol=PTOL) + v = fp.ei((0.25 + 1.0j)) + assert ae(v, (0.59066621214766308594 + 2.3968481059377428687j), tol=ATOL) + assert ae(v.real, 0.59066621214766308594, tol=PTOL) + assert ae(v.imag, 2.3968481059377428687, tol=PTOL) + v = fp.ei((1.0 + 4.0j)) + assert ae(v, (-0.49739047283060471093 + 3.5570287076301818702j), tol=ATOL) + assert ae(v.real, -0.49739047283060471093, tol=PTOL) + assert ae(v.imag, 3.5570287076301818702, tol=PTOL) + v = fp.ei((2.0 + 8.0j)) + assert ae(v, (0.8705211147733730969 + 3.3825859385758486351j), tol=ATOL) + assert ae(v.real, 0.8705211147733730969, tol=PTOL) + assert ae(v.imag, 3.3825859385758486351, tol=PTOL) + v = fp.ei((5.0 + 20.0j)) + assert ae(v, (7.0789514293925893007 + 1.5313749363937141849j), tol=ATOL) + assert ae(v.real, 7.0789514293925893007, tol=PTOL) + assert ae(v.imag, 1.5313749363937141849, tol=PTOL) + v = fp.ei((20.0 + 80.0j)) + assert ae(v, (-5855431.4907298084434 - 720917.79156143806727j), tol=ATOL) + assert ae(v.real, -5855431.4907298084434, tol=PTOL) + assert ae(v.imag, -720917.79156143806727, tol=PTOL) + v = fp.ei((30.0 + 120.0j)) + assert ae(v, (65402491644.703470747 - 56697658396.51586764j), tol=ATOL) + assert ae(v.real, 65402491644.703470747, tol=PTOL) + assert ae(v.imag, -56697658396.51586764, tol=PTOL) + v = fp.ei((40.0 + 160.0j)) + assert ae(v, (-25504929379604.776769 + 1429035198630576.3879j), tol=ATOL) + assert ae(v.real, -25504929379604.776769, tol=PTOL) + assert ae(v.imag, 1429035198630576.3879, tol=PTOL) + v = fp.ei((50.0 + 200.0j)) + assert ae(v, (-18437746526988116954.0 - 17146362239046152342.0j), tol=ATOL) + assert ae(v.real, -18437746526988116954.0, tol=PTOL) + assert ae(v.imag, -17146362239046152342.0, tol=PTOL) + v = fp.ei((80.0 + 320.0j)) + assert ae(v, (-3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL) + v = fp.ei((0.0 + 1.1641532182693481445e-10j)) + assert ae(v, (-22.29664129357666235 + 1.5707963269113119411j), tol=ATOL) + assert ae(v.real, -22.29664129357666235, tol=PTOL) + assert ae(v.imag, 1.5707963269113119411, tol=PTOL) + v = fp.ei((0.0 + 0.25j)) + assert ae(v, (-0.82466306258094565309 + 1.8199298971146537833j), tol=ATOL) + assert ae(v.real, -0.82466306258094565309, tol=PTOL) + assert ae(v.imag, 1.8199298971146537833, tol=PTOL) + v = fp.ei((0.0 + 1.0j)) + assert ae(v, (0.33740392290096813466 + 2.5168793971620796342j), tol=ATOL) + assert ae(v.real, 0.33740392290096813466, tol=PTOL) + assert ae(v.imag, 2.5168793971620796342, tol=PTOL) + v = fp.ei((0.0 + 2.0j)) + assert ae(v, (0.4229808287748649957 + 3.1762093035975914678j), tol=ATOL) + assert ae(v.real, 0.4229808287748649957, tol=PTOL) + assert ae(v.imag, 3.1762093035975914678, tol=PTOL) + v = fp.ei((0.0 + 5.0j)) + assert ae(v, (-0.19002974965664387862 + 3.1207275717395707565j), tol=ATOL) + assert ae(v.real, -0.19002974965664387862, tol=PTOL) + assert ae(v.imag, 3.1207275717395707565, tol=PTOL) + v = fp.ei((0.0 + 20.0j)) + assert ae(v, (0.04441982084535331654 + 3.1190380278383364594j), tol=ATOL) + assert ae(v.real, 
0.04441982084535331654, tol=PTOL) + assert ae(v.imag, 3.1190380278383364594, tol=PTOL) + v = fp.ei((0.0 + 30.0j)) + assert ae(v, (-0.033032417282071143779 + 3.1375528668252477302j), tol=ATOL) + assert ae(v.real, -0.033032417282071143779, tol=PTOL) + assert ae(v.imag, 3.1375528668252477302, tol=PTOL) + v = fp.ei((0.0 + 40.0j)) + assert ae(v, (0.019020007896208766962 + 3.157781446149681126j), tol=ATOL) + assert ae(v.real, 0.019020007896208766962, tol=PTOL) + assert ae(v.imag, 3.157781446149681126, tol=PTOL) + v = fp.ei((0.0 + 50.0j)) + assert ae(v, (-0.0056283863241163054402 + 3.122413399280832514j), tol=ATOL) + assert ae(v.real, -0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, 3.122413399280832514, tol=PTOL) + v = fp.ei((0.0 + 80.0j)) + assert ae(v, (-0.012402501155070958192 + 3.1431272137073839346j), tol=ATOL) + assert ae(v.real, -0.012402501155070958192, tol=PTOL) + assert ae(v.imag, 3.1431272137073839346, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621664969632 + 1.8157749903874220607j), tol=ATOL) + assert ae(v.real, -20.880034621664969632, tol=PTOL) + assert ae(v.imag, 1.8157749903874220607, tol=PTOL) + v = fp.ei((-0.25 + 1.0j)) + assert ae(v, (0.16868306393667788761 + 2.6557914649950505414j), tol=ATOL) + assert ae(v.real, 0.16868306393667788761, tol=PTOL) + assert ae(v.imag, 2.6557914649950505414, tol=PTOL) + v = fp.ei((-1.0 + 4.0j)) + assert ae(v, (-0.03373591813926547318 + 3.2151161058308770603j), tol=ATOL) + assert ae(v.real, -0.03373591813926547318, tol=PTOL) + assert ae(v.imag, 3.2151161058308770603, tol=PTOL) + v = fp.ei((-2.0 + 8.0j)) + assert ae(v, (0.015392833434733785143 + 3.1384179414340326969j), tol=ATOL) + assert ae(v.real, 0.015392833434733785143, tol=PTOL) + assert ae(v.imag, 3.1384179414340326969, tol=PTOL) + v = fp.ei((-5.0 + 20.0j)) + assert ae(v, (0.00024419662286542966525 + 3.1413825703601317109j), tol=ATOL) + assert ae(v.real, 0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, 3.1413825703601317109, tol=PTOL) + v = fp.ei((-20.0 + 80.0j)) + assert ae(v, (-2.3255552781051330088e-11 + 3.1415926535987396304j), tol=ATOL) + assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, 3.1415926535987396304, tol=PTOL) + v = fp.ei((-30.0 + 120.0j)) + assert ae(v, (2.7068919097124652332e-16 + 3.1415926535897925337j), tol=ATOL) + assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, 3.1415926535897925337, tol=PTOL) + v = fp.ei((-40.0 + 160.0j)) + assert ae(v, (1.1695597827678024687e-20 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 + 200.0j)) + assert ae(v, (-9.0323746914410162531e-25 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 + 320.0j)) + assert ae(v, (-3.4819106748728063576e-38 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034622014215597 + 2.8966139905793444061j), tol=ATOL) + assert ae(v.real, -20.880034622014215597, tol=PTOL) + assert ae(v.imag, 2.8966139905793444061, tol=PTOL) + v = fp.ei((-1.0 + 0.25j)) + assert ae(v, (-0.19731063945004229095 + 3.0542266078154932748j), tol=ATOL) + assert ae(v.real, -0.19731063945004229095, 
tol=PTOL) + assert ae(v.imag, 3.0542266078154932748, tol=PTOL) + v = fp.ei((-4.0 + 1.0j)) + assert ae(v, (-0.0013106173980145506944 + 3.1381384055698581758j), tol=ATOL) + assert ae(v.real, -0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, 3.1381384055698581758, tol=PTOL) + v = fp.ei((-8.0 + 2.0j)) + assert ae(v, (0.000022278049065270225945 + 3.1415634616493367169j), tol=ATOL) + assert ae(v.real, 0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, 3.1415634616493367169, tol=PTOL) + v = fp.ei((-20.0 + 5.0j)) + assert ae(v, (-4.7711374515765346894e-11 + 3.1415926536726958909j), tol=ATOL) + assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, 3.1415926536726958909, tol=PTOL) + v = fp.ei((-80.0 + 20.0j)) + assert ae(v, (-3.8353473865788235787e-38 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-120.0 + 30.0j)) + assert ae(v, (-2.3836002337480334716e-55 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-160.0 + 40.0j)) + assert ae(v, (1.6238022898654510661e-72 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-200.0 + 50.0j)) + assert ae(v, (-6.6800061461666228487e-90 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei((-320.0 + 80.0j)) + assert ae(v, (-4.2737871527778786157e-143 + 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, 3.1415926535897932385, tol=PTOL) + v = fp.ei(-1.1641532182693481445e-10) + assert ae(v, -22.296641293693077672, tol=ATOL) + assert type(v) is float + v = fp.ei(-0.25) + assert ae(v, -1.0442826344437381945, tol=ATOL) + assert type(v) is float + v = fp.ei(-1.0) + assert ae(v, -0.21938393439552027368, tol=ATOL) + assert type(v) is float + v = fp.ei(-2.0) + assert ae(v, -0.048900510708061119567, tol=ATOL) + assert type(v) is float + v = fp.ei(-5.0) + assert ae(v, -0.0011482955912753257973, tol=ATOL) + assert type(v) is float + v = fp.ei(-20.0) + assert ae(v, -9.8355252906498816904e-11, tol=ATOL) + assert type(v) is float + v = fp.ei(-30.0) + assert ae(v, -3.0215520106888125448e-15, tol=ATOL) + assert type(v) is float + v = fp.ei(-40.0) + assert ae(v, -1.0367732614516569722e-19, tol=ATOL) + assert type(v) is float + v = fp.ei(-50.0) + assert ae(v, -3.7832640295504590187e-24, tol=ATOL) + assert type(v) is float + v = fp.ei(-80.0) + assert ae(v, -2.2285432586884729112e-37, tol=ATOL) + assert type(v) is float + v = fp.ei((-1.1641532182693481445e-10 + 0.0j)) + assert ae(v, (-22.296641293693077672 + 0.0j), tol=ATOL) + assert ae(v.real, -22.296641293693077672, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-0.25 + 0.0j)) + assert ae(v, (-1.0442826344437381945 + 0.0j), tol=ATOL) + assert ae(v.real, -1.0442826344437381945, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-1.0 + 0.0j)) + assert ae(v, (-0.21938393439552027368 + 0.0j), tol=ATOL) + assert ae(v.real, -0.21938393439552027368, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-2.0 + 0.0j)) + assert ae(v, (-0.048900510708061119567 + 0.0j), tol=ATOL) + assert ae(v.real, -0.048900510708061119567, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-5.0 + 0.0j)) + assert ae(v, (-0.0011482955912753257973 + 0.0j), 
tol=ATOL) + assert ae(v.real, -0.0011482955912753257973, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-20.0 + 0.0j)) + assert ae(v, (-9.8355252906498816904e-11 + 0.0j), tol=ATOL) + assert ae(v.real, -9.8355252906498816904e-11, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-30.0 + 0.0j)) + assert ae(v, (-3.0215520106888125448e-15 + 0.0j), tol=ATOL) + assert ae(v.real, -3.0215520106888125448e-15, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-40.0 + 0.0j)) + assert ae(v, (-1.0367732614516569722e-19 + 0.0j), tol=ATOL) + assert ae(v.real, -1.0367732614516569722e-19, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-50.0 + 0.0j)) + assert ae(v, (-3.7832640295504590187e-24 + 0.0j), tol=ATOL) + assert ae(v.real, -3.7832640295504590187e-24, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-80.0 + 0.0j)) + assert ae(v, (-2.2285432586884729112e-37 + 0.0j), tol=ATOL) + assert ae(v.real, -2.2285432586884729112e-37, tol=PTOL) + assert v.imag == 0 + v = fp.ei((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034622014215597 - 2.8966139905793444061j), tol=ATOL) + assert ae(v.real, -20.880034622014215597, tol=PTOL) + assert ae(v.imag, -2.8966139905793444061, tol=PTOL) + v = fp.ei((-1.0 - 0.25j)) + assert ae(v, (-0.19731063945004229095 - 3.0542266078154932748j), tol=ATOL) + assert ae(v.real, -0.19731063945004229095, tol=PTOL) + assert ae(v.imag, -3.0542266078154932748, tol=PTOL) + v = fp.ei((-4.0 - 1.0j)) + assert ae(v, (-0.0013106173980145506944 - 3.1381384055698581758j), tol=ATOL) + assert ae(v.real, -0.0013106173980145506944, tol=PTOL) + assert ae(v.imag, -3.1381384055698581758, tol=PTOL) + v = fp.ei((-8.0 - 2.0j)) + assert ae(v, (0.000022278049065270225945 - 3.1415634616493367169j), tol=ATOL) + assert ae(v.real, 0.000022278049065270225945, tol=PTOL) + assert ae(v.imag, -3.1415634616493367169, tol=PTOL) + v = fp.ei((-20.0 - 5.0j)) + assert ae(v, (-4.7711374515765346894e-11 - 3.1415926536726958909j), tol=ATOL) + assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL) + assert ae(v.imag, -3.1415926536726958909, tol=PTOL) + v = fp.ei((-80.0 - 20.0j)) + assert ae(v, (-3.8353473865788235787e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-120.0 - 30.0j)) + assert ae(v, (-2.3836002337480334716e-55 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-160.0 - 40.0j)) + assert ae(v, (1.6238022898654510661e-72 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-200.0 - 50.0j)) + assert ae(v, (-6.6800061461666228487e-90 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-320.0 - 80.0j)) + assert ae(v, (-4.2737871527778786157e-143 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703413105017 - 2.3561944903087602507j), tol=ATOL) + assert ae(v.real, -21.950067703413105017, tol=PTOL) + assert ae(v.imag, -2.3561944903087602507, tol=PTOL) + v = fp.ei((-0.25 - 0.25j)) + assert ae(v, (-0.71092525792923287894 - 2.5766745291767512913j), tol=ATOL) + assert ae(v.real, 
-0.71092525792923287894, tol=PTOL) + assert ae(v.imag, -2.5766745291767512913, tol=PTOL) + v = fp.ei((-1.0 - 1.0j)) + assert ae(v, (-0.00028162445198141832551 - 2.9622681185504342983j), tol=ATOL) + assert ae(v.real, -0.00028162445198141832551, tol=PTOL) + assert ae(v.imag, -2.9622681185504342983, tol=PTOL) + v = fp.ei((-2.0 - 2.0j)) + assert ae(v, (0.033767089606562004246 - 3.1229932394200426965j), tol=ATOL) + assert ae(v.real, 0.033767089606562004246, tol=PTOL) + assert ae(v.imag, -3.1229932394200426965, tol=PTOL) + v = fp.ei((-5.0 - 5.0j)) + assert ae(v, (-0.0007266506660356393891 - 3.1420636813914284609j), tol=ATOL) + assert ae(v.real, -0.0007266506660356393891, tol=PTOL) + assert ae(v.imag, -3.1420636813914284609, tol=PTOL) + v = fp.ei((-20.0 - 20.0j)) + assert ae(v, (2.3824537449367396579e-11 - 3.1415926535228233653j), tol=ATOL) + assert ae(v.real, 2.3824537449367396579e-11, tol=PTOL) + assert ae(v.imag, -3.1415926535228233653, tol=PTOL) + v = fp.ei((-30.0 - 30.0j)) + assert ae(v, (-1.7316045841744061617e-15 - 3.141592653589794545j), tol=ATOL) + assert ae(v.real, -1.7316045841744061617e-15, tol=PTOL) + assert ae(v.imag, -3.141592653589794545, tol=PTOL) + v = fp.ei((-40.0 - 40.0j)) + assert ae(v, (7.4001043002899232182e-20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 7.4001043002899232182e-20, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 - 50.0j)) + assert ae(v, (-2.3566128324644641219e-24 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -2.3566128324644641219e-24, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 - 80.0j)) + assert ae(v, (-9.8279750572186526673e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.8279750572186526673e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621664969632 - 1.8157749903874220607j), tol=ATOL) + assert ae(v.real, -20.880034621664969632, tol=PTOL) + assert ae(v.imag, -1.8157749903874220607, tol=PTOL) + v = fp.ei((-0.25 - 1.0j)) + assert ae(v, (0.16868306393667788761 - 2.6557914649950505414j), tol=ATOL) + assert ae(v.real, 0.16868306393667788761, tol=PTOL) + assert ae(v.imag, -2.6557914649950505414, tol=PTOL) + v = fp.ei((-1.0 - 4.0j)) + assert ae(v, (-0.03373591813926547318 - 3.2151161058308770603j), tol=ATOL) + assert ae(v.real, -0.03373591813926547318, tol=PTOL) + assert ae(v.imag, -3.2151161058308770603, tol=PTOL) + v = fp.ei((-2.0 - 8.0j)) + assert ae(v, (0.015392833434733785143 - 3.1384179414340326969j), tol=ATOL) + assert ae(v.real, 0.015392833434733785143, tol=PTOL) + assert ae(v.imag, -3.1384179414340326969, tol=PTOL) + v = fp.ei((-5.0 - 20.0j)) + assert ae(v, (0.00024419662286542966525 - 3.1413825703601317109j), tol=ATOL) + assert ae(v.real, 0.00024419662286542966525, tol=PTOL) + assert ae(v.imag, -3.1413825703601317109, tol=PTOL) + v = fp.ei((-20.0 - 80.0j)) + assert ae(v, (-2.3255552781051330088e-11 - 3.1415926535987396304j), tol=ATOL) + assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL) + assert ae(v.imag, -3.1415926535987396304, tol=PTOL) + v = fp.ei((-30.0 - 120.0j)) + assert ae(v, (2.7068919097124652332e-16 - 3.1415926535897925337j), tol=ATOL) + assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL) + assert ae(v.imag, -3.1415926535897925337, tol=PTOL) + v = fp.ei((-40.0 - 160.0j)) + assert ae(v, (1.1695597827678024687e-20 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL) 
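+    # --- Editor's note: illustrative cross-check, not part of the original
+    # mpmath suite. The mirrored +y/-y cases in this block follow from the
+    # Schwarz reflection property ei(conj(z)) == conj(ei(z)), valid away from
+    # the branch cut on the negative real axis, and the real-argument cases
+    # tie fp.ei to fp.e1 through the identity Ei(-x) == -E1(x) for x > 0.
+    _z = complex(-40.0, 160.0)  # off the branch cut
+    assert abs(fp.ei(_z.conjugate()) - fp.ei(_z).conjugate()) < 1e-12
+    assert abs(fp.ei(-5.0) + fp.e1(5.0)) < 1e-15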
+ assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-50.0 - 200.0j)) + assert ae(v, (-9.0323746914410162531e-25 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((-80.0 - 320.0j)) + assert ae(v, (-3.4819106748728063576e-38 - 3.1415926535897932385j), tol=ATOL) + assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL) + assert ae(v.imag, -3.1415926535897932385, tol=PTOL) + v = fp.ei((0.0 - 1.1641532182693481445e-10j)) + assert ae(v, (-22.29664129357666235 - 1.5707963269113119411j), tol=ATOL) + assert ae(v.real, -22.29664129357666235, tol=PTOL) + assert ae(v.imag, -1.5707963269113119411, tol=PTOL) + v = fp.ei((0.0 - 0.25j)) + assert ae(v, (-0.82466306258094565309 - 1.8199298971146537833j), tol=ATOL) + assert ae(v.real, -0.82466306258094565309, tol=PTOL) + assert ae(v.imag, -1.8199298971146537833, tol=PTOL) + v = fp.ei((0.0 - 1.0j)) + assert ae(v, (0.33740392290096813466 - 2.5168793971620796342j), tol=ATOL) + assert ae(v.real, 0.33740392290096813466, tol=PTOL) + assert ae(v.imag, -2.5168793971620796342, tol=PTOL) + v = fp.ei((0.0 - 2.0j)) + assert ae(v, (0.4229808287748649957 - 3.1762093035975914678j), tol=ATOL) + assert ae(v.real, 0.4229808287748649957, tol=PTOL) + assert ae(v.imag, -3.1762093035975914678, tol=PTOL) + v = fp.ei((0.0 - 5.0j)) + assert ae(v, (-0.19002974965664387862 - 3.1207275717395707565j), tol=ATOL) + assert ae(v.real, -0.19002974965664387862, tol=PTOL) + assert ae(v.imag, -3.1207275717395707565, tol=PTOL) + v = fp.ei((0.0 - 20.0j)) + assert ae(v, (0.04441982084535331654 - 3.1190380278383364594j), tol=ATOL) + assert ae(v.real, 0.04441982084535331654, tol=PTOL) + assert ae(v.imag, -3.1190380278383364594, tol=PTOL) + v = fp.ei((0.0 - 30.0j)) + assert ae(v, (-0.033032417282071143779 - 3.1375528668252477302j), tol=ATOL) + assert ae(v.real, -0.033032417282071143779, tol=PTOL) + assert ae(v.imag, -3.1375528668252477302, tol=PTOL) + v = fp.ei((0.0 - 40.0j)) + assert ae(v, (0.019020007896208766962 - 3.157781446149681126j), tol=ATOL) + assert ae(v.real, 0.019020007896208766962, tol=PTOL) + assert ae(v.imag, -3.157781446149681126, tol=PTOL) + v = fp.ei((0.0 - 50.0j)) + assert ae(v, (-0.0056283863241163054402 - 3.122413399280832514j), tol=ATOL) + assert ae(v.real, -0.0056283863241163054402, tol=PTOL) + assert ae(v.imag, -3.122413399280832514, tol=PTOL) + v = fp.ei((0.0 - 80.0j)) + assert ae(v, (-0.012402501155070958192 - 3.1431272137073839346j), tol=ATOL) + assert ae(v.real, -0.012402501155070958192, tol=PTOL) + assert ae(v.imag, -3.1431272137073839346, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 - 4.6566128730773925781e-10j)) + assert ae(v, (-20.880034621432138988 - 1.3258176641336937524j), tol=ATOL) + assert ae(v.real, -20.880034621432138988, tol=PTOL) + assert ae(v.imag, -1.3258176641336937524, tol=PTOL) + v = fp.ei((0.25 - 1.0j)) + assert ae(v, (0.59066621214766308594 - 2.3968481059377428687j), tol=ATOL) + assert ae(v.real, 0.59066621214766308594, tol=PTOL) + assert ae(v.imag, -2.3968481059377428687, tol=PTOL) + v = fp.ei((1.0 - 4.0j)) + assert ae(v, (-0.49739047283060471093 - 3.5570287076301818702j), tol=ATOL) + assert ae(v.real, -0.49739047283060471093, tol=PTOL) + assert ae(v.imag, -3.5570287076301818702, tol=PTOL) + v = fp.ei((2.0 - 8.0j)) + assert ae(v, (0.8705211147733730969 - 3.3825859385758486351j), tol=ATOL) + assert ae(v.real, 0.8705211147733730969, tol=PTOL) + assert ae(v.imag, -3.3825859385758486351, tol=PTOL) + v = fp.ei((5.0 - 20.0j)) + 
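+    # --- Editor's note: illustrative check, not part of the original suite.
+    # Two features of the tabulated values above: for x < 0 the imaginary
+    # part locks onto +/-pi (the branch-cut jump of Ei across the negative
+    # real axis), and for large x > 0 the huge magnitudes follow the
+    # asymptotic series Ei(x) ~ exp(x)/x * (1 + 1/x + 2/x**2 + ...).
+    assert abs(fp.ei(complex(-50.0, 1e-12)).imag - fp.pi) < 1e-9
+    assert abs(fp.ei(complex(-50.0, -1e-12)).imag + fp.pi) < 1e-9
+    assert abs(fp.ei(80.0) / (fp.exp(80.0) / 80.0) - 1.0) < 2.0/80.0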
assert ae(v, (7.0789514293925893007 - 1.5313749363937141849j), tol=ATOL) + assert ae(v.real, 7.0789514293925893007, tol=PTOL) + assert ae(v.imag, -1.5313749363937141849, tol=PTOL) + v = fp.ei((20.0 - 80.0j)) + assert ae(v, (-5855431.4907298084434 + 720917.79156143806727j), tol=ATOL) + assert ae(v.real, -5855431.4907298084434, tol=PTOL) + assert ae(v.imag, 720917.79156143806727, tol=PTOL) + v = fp.ei((30.0 - 120.0j)) + assert ae(v, (65402491644.703470747 + 56697658396.51586764j), tol=ATOL) + assert ae(v.real, 65402491644.703470747, tol=PTOL) + assert ae(v.imag, 56697658396.51586764, tol=PTOL) + v = fp.ei((40.0 - 160.0j)) + assert ae(v, (-25504929379604.776769 - 1429035198630576.3879j), tol=ATOL) + assert ae(v.real, -25504929379604.776769, tol=PTOL) + assert ae(v.imag, -1429035198630576.3879, tol=PTOL) + v = fp.ei((50.0 - 200.0j)) + assert ae(v, (-18437746526988116954.0 + 17146362239046152342.0j), tol=ATOL) + assert ae(v.real, -18437746526988116954.0, tol=PTOL) + assert ae(v.imag, 17146362239046152342.0, tol=PTOL) + v = fp.ei((80.0 - 320.0j)) + assert ae(v, (-3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL) + assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL) + assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL) + v = fp.ei((1.1641532182693481445e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-21.950067703180274374 - 0.78539816351386363145j), tol=ATOL) + assert ae(v.real, -21.950067703180274374, tol=PTOL) + assert ae(v.imag, -0.78539816351386363145, tol=PTOL) + v = fp.ei((0.25 - 0.25j)) + assert ae(v, (-0.21441047326710323254 - 1.0683772981589995996j), tol=ATOL) + assert ae(v.real, -0.21441047326710323254, tol=PTOL) + assert ae(v.imag, -1.0683772981589995996, tol=PTOL) + v = fp.ei((1.0 - 1.0j)) + assert ae(v, (1.7646259855638540684 - 2.3877698515105224193j), tol=ATOL) + assert ae(v.real, 1.7646259855638540684, tol=PTOL) + assert ae(v.imag, -2.3877698515105224193, tol=PTOL) + v = fp.ei((2.0 - 2.0j)) + assert ae(v, (1.8920781621855474089 - 5.3169624378326579621j), tol=ATOL) + assert ae(v.real, 1.8920781621855474089, tol=PTOL) + assert ae(v.imag, -5.3169624378326579621, tol=PTOL) + v = fp.ei((5.0 - 5.0j)) + assert ae(v, (-13.470936071475245856 + 15.322492395731230968j), tol=ATOL) + assert ae(v.real, -13.470936071475245856, tol=PTOL) + assert ae(v.imag, 15.322492395731230968, tol=PTOL) + v = fp.ei((20.0 - 20.0j)) + assert ae(v, (16589317.398788971896 - 5831705.4712368307104j), tol=ATOL) + assert ae(v.real, 16589317.398788971896, tol=PTOL) + assert ae(v.imag, -5831705.4712368307104, tol=PTOL) + v = fp.ei((30.0 - 30.0j)) + assert ae(v, (-154596484273.69322527 + 204179357834.2723043j), tol=ATOL) + assert ae(v.real, -154596484273.69322527, tol=PTOL) + assert ae(v.imag, 204179357834.2723043, tol=PTOL) + v = fp.ei((40.0 - 40.0j)) + assert ae(v, (287512180321448.45408 - 4203502407932318.1156j), tol=ATOL) + assert ae(v.real, 287512180321448.45408, tol=PTOL) + assert ae(v.imag, -4203502407932318.1156, tol=PTOL) + v = fp.ei((50.0 - 50.0j)) + assert ae(v, (36128528616649268826.0 + 64648801861338741960.0j), tol=ATOL) + assert ae(v.real, 36128528616649268826.0, tol=PTOL) + assert ae(v.imag, 64648801861338741960.0, tol=PTOL) + v = fp.ei((80.0 - 80.0j)) + assert ae(v, (-3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL) + assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL) + assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL) + v = fp.ei((4.6566128730773925781e-10 - 1.1641532182693481445e-10j)) + assert ae(v, (-20.880034621082893023 - 
0.24497866324327947603j), tol=ATOL)
+    assert ae(v.real, -20.880034621082893023, tol=PTOL)
+    assert ae(v.imag, -0.24497866324327947603, tol=PTOL)
+    v = fp.ei((1.0 - 0.25j))
+    assert ae(v, (1.8942716983721074932 - 0.67268237088273915854j), tol=ATOL)
+    assert ae(v.real, 1.8942716983721074932, tol=PTOL)
+    assert ae(v.imag, -0.67268237088273915854, tol=PTOL)
+    v = fp.ei((4.0 - 1.0j))
+    assert ae(v, (14.806699492675420438 - 12.280015176673582616j), tol=ATOL)
+    assert ae(v.real, 14.806699492675420438, tol=PTOL)
+    assert ae(v.imag, -12.280015176673582616, tol=PTOL)
+    v = fp.ei((8.0 - 2.0j))
+    assert ae(v, (-54.633252667426386294 - 416.34477429173650012j), tol=ATOL)
+    assert ae(v.real, -54.633252667426386294, tol=PTOL)
+    assert ae(v.imag, -416.34477429173650012, tol=PTOL)
+    v = fp.ei((20.0 - 5.0j))
+    assert ae(v, (711836.97165402624643 + 24745247.798103247366j), tol=ATOL)
+    assert ae(v.real, 711836.97165402624643, tol=PTOL)
+    assert ae(v.imag, 24745247.798103247366, tol=PTOL)
+    v = fp.ei((80.0 - 20.0j))
+    assert ae(v, (4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL)
+    assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL)
+    assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL)
+    v = fp.ei((120.0 - 30.0j))
+    assert ae(v, (-9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL)
+    assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL)
+    assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL)
+    v = fp.ei((160.0 - 40.0j))
+    assert ae(v, (-8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL)
+    assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL)
+    assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL)
+    v = fp.ei((200.0 - 50.0j))
+    assert ae(v, (3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL)
+    assert ae(v.real, 3.070744996327018106e+84, tol=PTOL)
+    assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL)
+    v = fp.ei((320.0 - 80.0j))
+    assert ae(v, (-9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL)
+    assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL)
+    assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL)
diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_functions.py b/MLPY/Lib/site-packages/mpmath/tests/test_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bfe852f008173eb636c147abc83d71dbdd4d23a
--- /dev/null
+++ b/MLPY/Lib/site-packages/mpmath/tests/test_functions.py
@@ -0,0 +1,920 @@
+from mpmath.libmp import *
+from mpmath import *
+import random
+import time
+import math
+import cmath
+
+def mpc_ae(a, b, eps=eps):
+    res = True
+    res = res and a.real.ae(b.real, eps)
+    res = res and a.imag.ae(b.imag, eps)
+    return res
+
+#----------------------------------------------------------------------------
+# Constants and functions
+#
+
+tpi = "3.1415926535897932384626433832795028841971693993751058209749445923078\
+1640628620899862803482534211706798"
+te = "2.71828182845904523536028747135266249775724709369995957496696762772407\
+663035354759457138217852516642743"
+tdegree = "0.017453292519943295769236907684886127134428718885417254560971914\
+4017100911460344944368224156963450948221"
+teuler = "0.5772156649015328606065120900824024310421593359399235988057672348\
+84867726777664670936947063291746749516"
+tln2 = "0.693147180559945309417232121458176568075500134360255254120680009493\
+393621969694715605863326996418687542"
+tln10 = "2.30258509299404568401799145468436420760110148862877297603332790096\
+757260967735248023599720508959829834"
+tcatalan = 
"0.91596559417721901505460351493238411077414937428167213426649811\ +9621763019776254769479356512926115106249" +tkhinchin = "2.6854520010653064453097148354817956938203822939944629530511523\ +4555721885953715200280114117493184769800" +tglaisher = "1.2824271291006226368753425688697917277676889273250011920637400\ +2174040630885882646112973649195820237439420646" +tapery = "1.2020569031595942853997381615114499907649862923404988817922715553\ +4183820578631309018645587360933525815" +tphi = "1.618033988749894848204586834365638117720309179805762862135448622705\ +26046281890244970720720418939113748475" +tmertens = "0.26149721284764278375542683860869585905156664826119920619206421\ +3924924510897368209714142631434246651052" +ttwinprime = "0.660161815846869573927812110014555778432623360284733413319448\ +423335405642304495277143760031413839867912" + +def test_constants(): + for prec in [3, 7, 10, 15, 20, 37, 80, 100, 29]: + mp.dps = prec + assert pi == mpf(tpi) + assert e == mpf(te) + assert degree == mpf(tdegree) + assert euler == mpf(teuler) + assert ln2 == mpf(tln2) + assert ln10 == mpf(tln10) + assert catalan == mpf(tcatalan) + assert khinchin == mpf(tkhinchin) + assert glaisher == mpf(tglaisher) + assert phi == mpf(tphi) + if prec < 50: + assert mertens == mpf(tmertens) + assert twinprime == mpf(ttwinprime) + mp.dps = 15 + assert pi >= -1 + assert pi > 2 + assert pi > 3 + assert pi < 4 + +def test_exact_sqrts(): + for i in range(20000): + assert sqrt(mpf(i*i)) == i + random.seed(1) + for prec in [100, 300, 1000, 10000]: + mp.dps = prec + for i in range(20): + A = random.randint(10**(prec//2-2), 10**(prec//2-1)) + assert sqrt(mpf(A*A)) == A + mp.dps = 15 + for i in range(100): + for a in [1, 8, 25, 112307]: + assert sqrt(mpf((a*a, 2*i))) == mpf((a, i)) + assert sqrt(mpf((a*a, -2*i))) == mpf((a, -i)) + +def test_sqrt_rounding(): + for i in [2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]: + i = from_int(i) + for dps in [7, 15, 83, 106, 2000]: + mp.dps = dps + a = mpf_pow_int(mpf_sqrt(i, mp.prec, round_down), 2, mp.prec, round_down) + b = mpf_pow_int(mpf_sqrt(i, mp.prec, round_up), 2, mp.prec, round_up) + assert mpf_lt(a, i) + assert mpf_gt(b, i) + random.seed(1234) + prec = 100 + for rnd in [round_down, round_nearest, round_ceiling]: + for i in range(100): + a = mpf_rand(prec) + b = mpf_mul(a, a) + assert mpf_sqrt(b, prec, rnd) == a + # Test some extreme cases + mp.dps = 100 + a = mpf(9) + 1e-90 + b = mpf(9) - 1e-90 + mp.dps = 15 + assert sqrt(a, rounding='d') == 3 + assert sqrt(a, rounding='n') == 3 + assert sqrt(a, rounding='u') > 3 + assert sqrt(b, rounding='d') < 3 + assert sqrt(b, rounding='n') == 3 + assert sqrt(b, rounding='u') == 3 + # A worst case, from the MPFR test suite + assert sqrt(mpf('7.0503726185518891')) == mpf('2.655253776675949') + +def test_float_sqrt(): + mp.dps = 15 + # These should round identically + for x in [0, 1e-7, 0.1, 0.5, 1, 2, 3, 4, 5, 0.333, 76.19]: + assert sqrt(mpf(x)) == float(x)**0.5 + assert sqrt(-1) == 1j + assert sqrt(-2).ae(cmath.sqrt(-2)) + assert sqrt(-3).ae(cmath.sqrt(-3)) + assert sqrt(-100).ae(cmath.sqrt(-100)) + assert sqrt(1j).ae(cmath.sqrt(1j)) + assert sqrt(-1j).ae(cmath.sqrt(-1j)) + assert sqrt(math.pi + math.e*1j).ae(cmath.sqrt(math.pi + math.e*1j)) + assert sqrt(math.pi - math.e*1j).ae(cmath.sqrt(math.pi - math.e*1j)) + +def test_hypot(): + assert hypot(0, 0) == 0 + assert hypot(0, 0.33) == mpf(0.33) + assert hypot(0.33, 0) == mpf(0.33) + assert hypot(-0.33, 0) == mpf(0.33) + assert hypot(3, 4) == mpf(5) + +def test_exact_cbrt(): + for i in range(0, 
20000, 200): + assert cbrt(mpf(i*i*i)) == i + random.seed(1) + for prec in [100, 300, 1000, 10000]: + mp.dps = prec + A = random.randint(10**(prec//2-2), 10**(prec//2-1)) + assert cbrt(mpf(A*A*A)) == A + mp.dps = 15 + +def test_exp(): + assert exp(0) == 1 + assert exp(10000).ae(mpf('8.8068182256629215873e4342')) + assert exp(-10000).ae(mpf('1.1354838653147360985e-4343')) + a = exp(mpf((1, 8198646019315405, -53, 53))) + assert(a.bc == bitcount(a.man)) + mp.prec = 67 + a = exp(mpf((1, 1781864658064754565, -60, 61))) + assert(a.bc == bitcount(a.man)) + mp.prec = 53 + assert exp(ln2 * 10).ae(1024) + assert exp(2+2j).ae(cmath.exp(2+2j)) + +def test_issue_73(): + mp.dps = 512 + a = exp(-1) + b = exp(1) + mp.dps = 15 + assert (+a).ae(0.36787944117144233) + assert (+b).ae(2.7182818284590451) + +def test_log(): + mp.dps = 15 + assert log(1) == 0 + for x in [0.5, 1.5, 2.0, 3.0, 100, 10**50, 1e-50]: + assert log(x).ae(math.log(x)) + assert log(x, x) == 1 + assert log(1024, 2) == 10 + assert log(10**1234, 10) == 1234 + assert log(2+2j).ae(cmath.log(2+2j)) + # Accuracy near 1 + assert (log(0.6+0.8j).real*10**17).ae(2.2204460492503131) + assert (log(0.6-0.8j).real*10**17).ae(2.2204460492503131) + assert (log(0.8-0.6j).real*10**17).ae(2.2204460492503131) + assert (log(1+1e-8j).real*10**16).ae(0.5) + assert (log(1-1e-8j).real*10**16).ae(0.5) + assert (log(-1+1e-8j).real*10**16).ae(0.5) + assert (log(-1-1e-8j).real*10**16).ae(0.5) + assert (log(1j+1e-8).real*10**16).ae(0.5) + assert (log(1j-1e-8).real*10**16).ae(0.5) + assert (log(-1j+1e-8).real*10**16).ae(0.5) + assert (log(-1j-1e-8).real*10**16).ae(0.5) + assert (log(1+1e-40j).real*10**80).ae(0.5) + assert (log(1j+1e-40).real*10**80).ae(0.5) + # Huge + assert log(ldexp(1.234,10**20)).ae(log(2)*1e20) + assert log(ldexp(1.234,10**200)).ae(log(2)*1e200) + # Some special values + assert log(mpc(0,0)) == mpc(-inf,0) + assert isnan(log(mpc(nan,0)).real) + assert isnan(log(mpc(nan,0)).imag) + assert isnan(log(mpc(0,nan)).real) + assert isnan(log(mpc(0,nan)).imag) + assert isnan(log(mpc(nan,1)).real) + assert isnan(log(mpc(nan,1)).imag) + assert isnan(log(mpc(1,nan)).real) + assert isnan(log(mpc(1,nan)).imag) + +def test_trig_hyperb_basic(): + for x in (list(range(100)) + list(range(-100,0))): + t = x / 4.1 + assert cos(mpf(t)).ae(math.cos(t)) + assert sin(mpf(t)).ae(math.sin(t)) + assert tan(mpf(t)).ae(math.tan(t)) + assert cosh(mpf(t)).ae(math.cosh(t)) + assert sinh(mpf(t)).ae(math.sinh(t)) + assert tanh(mpf(t)).ae(math.tanh(t)) + assert sin(1+1j).ae(cmath.sin(1+1j)) + assert sin(-4-3.6j).ae(cmath.sin(-4-3.6j)) + assert cos(1+1j).ae(cmath.cos(1+1j)) + assert cos(-4-3.6j).ae(cmath.cos(-4-3.6j)) + +def test_degrees(): + assert cos(0*degree) == 1 + assert cos(90*degree).ae(0) + assert cos(180*degree).ae(-1) + assert cos(270*degree).ae(0) + assert cos(360*degree).ae(1) + assert sin(0*degree) == 0 + assert sin(90*degree).ae(1) + assert sin(180*degree).ae(0) + assert sin(270*degree).ae(-1) + assert sin(360*degree).ae(0) + +def random_complexes(N): + random.seed(1) + a = [] + for i in range(N): + x1 = random.uniform(-10, 10) + y1 = random.uniform(-10, 10) + x2 = random.uniform(-10, 10) + y2 = random.uniform(-10, 10) + z1 = complex(x1, y1) + z2 = complex(x2, y2) + a.append((z1, z2)) + return a + +def test_complex_powers(): + for dps in [15, 30, 100]: + # Check accuracy for complex square root + mp.dps = dps + a = mpc(1j)**0.5 + assert a.real == a.imag == mpf(2)**0.5 / 2 + mp.dps = 15 + random.seed(1) + for (z1, z2) in random_complexes(100): + assert 
(mpc(z1)**mpc(z2)).ae(z1**z2, 1e-12) + assert (e**(-pi*1j)).ae(-1) + mp.dps = 50 + assert (e**(-pi*1j)).ae(-1) + mp.dps = 15 + +def test_complex_sqrt_accuracy(): + def test_mpc_sqrt(lst): + for a, b in lst: + z = mpc(a + j*b) + assert mpc_ae(sqrt(z*z), z) + z = mpc(-a + j*b) + assert mpc_ae(sqrt(z*z), -z) + z = mpc(a - j*b) + assert mpc_ae(sqrt(z*z), z) + z = mpc(-a - j*b) + assert mpc_ae(sqrt(z*z), -z) + random.seed(2) + N = 10 + mp.dps = 30 + dps = mp.dps + test_mpc_sqrt([(random.uniform(0, 10),random.uniform(0, 10)) for i in range(N)]) + test_mpc_sqrt([(i + 0.1, (i + 0.2)*10**i) for i in range(N)]) + mp.dps = 15 + +def test_atan(): + mp.dps = 15 + assert atan(-2.3).ae(math.atan(-2.3)) + assert atan(1e-50) == 1e-50 + assert atan(1e50).ae(pi/2) + assert atan(-1e-50) == -1e-50 + assert atan(-1e50).ae(-pi/2) + assert atan(10**1000).ae(pi/2) + for dps in [25, 70, 100, 300, 1000]: + mp.dps = dps + assert (4*atan(1)).ae(pi) + mp.dps = 15 + pi2 = pi/2 + assert atan(mpc(inf,-1)).ae(pi2) + assert atan(mpc(inf,0)).ae(pi2) + assert atan(mpc(inf,1)).ae(pi2) + assert atan(mpc(1,inf)).ae(pi2) + assert atan(mpc(0,inf)).ae(pi2) + assert atan(mpc(-1,inf)).ae(-pi2) + assert atan(mpc(-inf,1)).ae(-pi2) + assert atan(mpc(-inf,0)).ae(-pi2) + assert atan(mpc(-inf,-1)).ae(-pi2) + assert atan(mpc(-1,-inf)).ae(-pi2) + assert atan(mpc(0,-inf)).ae(-pi2) + assert atan(mpc(1,-inf)).ae(pi2) + +def test_atan2(): + mp.dps = 15 + assert atan2(1,1).ae(pi/4) + assert atan2(1,-1).ae(3*pi/4) + assert atan2(-1,-1).ae(-3*pi/4) + assert atan2(-1,1).ae(-pi/4) + assert atan2(-1,0).ae(-pi/2) + assert atan2(1,0).ae(pi/2) + assert atan2(0,0) == 0 + assert atan2(inf,0).ae(pi/2) + assert atan2(-inf,0).ae(-pi/2) + assert isnan(atan2(inf,inf)) + assert isnan(atan2(-inf,inf)) + assert isnan(atan2(inf,-inf)) + assert isnan(atan2(3,nan)) + assert isnan(atan2(nan,3)) + assert isnan(atan2(0,nan)) + assert isnan(atan2(nan,0)) + assert atan2(0,inf) == 0 + assert atan2(0,-inf).ae(pi) + assert atan2(10,inf) == 0 + assert atan2(-10,inf) == 0 + assert atan2(-10,-inf).ae(-pi) + assert atan2(10,-inf).ae(pi) + assert atan2(inf,10).ae(pi/2) + assert atan2(inf,-10).ae(pi/2) + assert atan2(-inf,10).ae(-pi/2) + assert atan2(-inf,-10).ae(-pi/2) + +def test_areal_inverses(): + assert asin(mpf(0)) == 0 + assert asinh(mpf(0)) == 0 + assert acosh(mpf(1)) == 0 + assert isinstance(asin(mpf(0.5)), mpf) + assert isinstance(asin(mpf(2.0)), mpc) + assert isinstance(acos(mpf(0.5)), mpf) + assert isinstance(acos(mpf(2.0)), mpc) + assert isinstance(atanh(mpf(0.1)), mpf) + assert isinstance(atanh(mpf(1.1)), mpc) + + random.seed(1) + for i in range(50): + x = random.uniform(0, 1) + assert asin(mpf(x)).ae(math.asin(x)) + assert acos(mpf(x)).ae(math.acos(x)) + + x = random.uniform(-10, 10) + assert asinh(mpf(x)).ae(cmath.asinh(x).real) + assert isinstance(asinh(mpf(x)), mpf) + x = random.uniform(1, 10) + assert acosh(mpf(x)).ae(cmath.acosh(x).real) + assert isinstance(acosh(mpf(x)), mpf) + x = random.uniform(-10, 0.999) + assert isinstance(acosh(mpf(x)), mpc) + + x = random.uniform(-1, 1) + assert atanh(mpf(x)).ae(cmath.atanh(x).real) + assert isinstance(atanh(mpf(x)), mpf) + + dps = mp.dps + mp.dps = 300 + assert isinstance(asin(0.5), mpf) + mp.dps = 1000 + assert asin(1).ae(pi/2) + assert asin(-1).ae(-pi/2) + mp.dps = dps + +def test_invhyperb_inaccuracy(): + mp.dps = 15 + assert (asinh(1e-5)*10**5).ae(0.99999999998333333) + assert (asinh(1e-10)*10**10).ae(1) + assert (asinh(1e-50)*10**50).ae(1) + assert (asinh(-1e-5)*10**5).ae(-0.99999999998333333) + assert 
(asinh(-1e-10)*10**10).ae(-1) + assert (asinh(-1e-50)*10**50).ae(-1) + assert asinh(10**20).ae(46.744849040440862) + assert asinh(-10**20).ae(-46.744849040440862) + assert (tanh(1e-10)*10**10).ae(1) + assert (tanh(-1e-10)*10**10).ae(-1) + assert (atanh(1e-10)*10**10).ae(1) + assert (atanh(-1e-10)*10**10).ae(-1) + +def test_complex_functions(): + for x in (list(range(10)) + list(range(-10,0))): + for y in (list(range(10)) + list(range(-10,0))): + z = complex(x, y)/4.3 + 0.01j + assert exp(mpc(z)).ae(cmath.exp(z)) + assert log(mpc(z)).ae(cmath.log(z)) + assert cos(mpc(z)).ae(cmath.cos(z)) + assert sin(mpc(z)).ae(cmath.sin(z)) + assert tan(mpc(z)).ae(cmath.tan(z)) + assert sinh(mpc(z)).ae(cmath.sinh(z)) + assert cosh(mpc(z)).ae(cmath.cosh(z)) + assert tanh(mpc(z)).ae(cmath.tanh(z)) + +def test_complex_inverse_functions(): + mp.dps = 15 + iv.dps = 15 + for (z1, z2) in random_complexes(30): + # apparently cmath uses a different branch, so we + # can't use it for comparison + assert sinh(asinh(z1)).ae(z1) + # + assert acosh(z1).ae(cmath.acosh(z1)) + assert atanh(z1).ae(cmath.atanh(z1)) + assert atan(z1).ae(cmath.atan(z1)) + # the reason we set a big eps here is that the cmath + # functions are inaccurate + assert asin(z1).ae(cmath.asin(z1), rel_eps=1e-12) + assert acos(z1).ae(cmath.acos(z1), rel_eps=1e-12) + one = mpf(1) + for i in range(-9, 10, 3): + for k in range(-9, 10, 3): + a = 0.9*j*10**k + 0.8*one*10**i + b = cos(acos(a)) + assert b.ae(a) + b = sin(asin(a)) + assert b.ae(a) + one = mpf(1) + err = 2*10**-15 + for i in range(-9, 9, 3): + for k in range(-9, 9, 3): + a = -0.9*10**k + j*0.8*one*10**i + b = cosh(acosh(a)) + assert b.ae(a, err) + b = sinh(asinh(a)) + assert b.ae(a, err) + +def test_reciprocal_functions(): + assert sec(3).ae(-1.01010866590799375) + assert csc(3).ae(7.08616739573718592) + assert cot(3).ae(-7.01525255143453347) + assert sech(3).ae(0.0993279274194332078) + assert csch(3).ae(0.0998215696688227329) + assert coth(3).ae(1.00496982331368917) + assert asec(3).ae(1.23095941734077468) + assert acsc(3).ae(0.339836909454121937) + assert acot(3).ae(0.321750554396642193) + assert asech(0.5).ae(1.31695789692481671) + assert acsch(3).ae(0.327450150237258443) + assert acoth(3).ae(0.346573590279972655) + assert acot(0).ae(1.5707963267948966192) + assert acoth(0).ae(1.5707963267948966192j) + +def test_ldexp(): + mp.dps = 15 + assert ldexp(mpf(2.5), 0) == 2.5 + assert ldexp(mpf(2.5), -1) == 1.25 + assert ldexp(mpf(2.5), 2) == 10 + assert ldexp(mpf('inf'), 3) == mpf('inf') + +def test_frexp(): + mp.dps = 15 + assert frexp(0) == (0.0, 0) + assert frexp(9) == (0.5625, 4) + assert frexp(1) == (0.5, 1) + assert frexp(0.2) == (0.8, -2) + assert frexp(1000) == (0.9765625, 10) + +def test_aliases(): + assert ln(7) == log(7) + assert log10(3.75) == log(3.75,10) + assert degrees(5.6) == 5.6 / degree + assert radians(5.6) == 5.6 * degree + assert power(-1,0.5) == j + assert fmod(25,7) == 4.0 and isinstance(fmod(25,7), mpf) + +def test_arg_sign(): + assert arg(3) == 0 + assert arg(-3).ae(pi) + assert arg(j).ae(pi/2) + assert arg(-j).ae(-pi/2) + assert arg(0) == 0 + assert isnan(atan2(3,nan)) + assert isnan(atan2(nan,3)) + assert isnan(atan2(0,nan)) + assert isnan(atan2(nan,0)) + assert isnan(atan2(nan,nan)) + assert arg(inf) == 0 + assert arg(-inf).ae(pi) + assert isnan(arg(nan)) + #assert arg(inf*j).ae(pi/2) + assert sign(0) == 0 + assert sign(3) == 1 + assert sign(-3) == -1 + assert sign(inf) == 1 + assert sign(-inf) == -1 + assert isnan(sign(nan)) + assert sign(j) == j + assert sign(-3*j) == 
-j + assert sign(1+j).ae((1+j)/sqrt(2)) + +def test_misc_bugs(): + # test that this doesn't raise an exception + mp.dps = 1000 + log(1302) + mp.dps = 15 + +def test_arange(): + assert arange(10) == [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0'), + mpf('4.0'), mpf('5.0'), mpf('6.0'), mpf('7.0'), + mpf('8.0'), mpf('9.0')] + assert arange(-5, 5) == [mpf('-5.0'), mpf('-4.0'), mpf('-3.0'), + mpf('-2.0'), mpf('-1.0'), mpf('0.0'), + mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')] + assert arange(0, 1, 0.1) == [mpf('0.0'), mpf('0.10000000000000001'), + mpf('0.20000000000000001'), + mpf('0.30000000000000004'), + mpf('0.40000000000000002'), + mpf('0.5'), mpf('0.60000000000000009'), + mpf('0.70000000000000007'), + mpf('0.80000000000000004'), + mpf('0.90000000000000002')] + assert arange(17, -9, -3) == [mpf('17.0'), mpf('14.0'), mpf('11.0'), + mpf('8.0'), mpf('5.0'), mpf('2.0'), + mpf('-1.0'), mpf('-4.0'), mpf('-7.0')] + assert arange(0.2, 0.1, -0.1) == [mpf('0.20000000000000001')] + assert arange(0) == [] + assert arange(1000, -1) == [] + assert arange(-1.23, 3.21, -0.0000001) == [] + +def test_linspace(): + assert linspace(2, 9, 7) == [mpf('2.0'), mpf('3.166666666666667'), + mpf('4.3333333333333339'), mpf('5.5'), mpf('6.666666666666667'), + mpf('7.8333333333333339'), mpf('9.0')] + assert linspace(2, 9, 7, endpoint=0) == [mpf('2.0'), mpf('3.0'), mpf('4.0'), + mpf('5.0'), mpf('6.0'), mpf('7.0'), mpf('8.0')] + assert linspace(2, 7, 1) == [mpf(2)] + +def test_float_cbrt(): + mp.dps = 30 + for a in arange(0,10,0.1): + assert cbrt(a*a*a).ae(a, eps) + assert cbrt(-1).ae(0.5 + j*sqrt(3)/2) + one_third = mpf(1)/3 + for a in arange(0,10,2.7) + [0.1 + 10**5]: + a = mpc(a + 1.1j) + r1 = cbrt(a) + mp.dps += 10 + r2 = pow(a, one_third) + mp.dps -= 10 + assert r1.ae(r2, eps) + mp.dps = 100 + for n in range(100, 301, 100): + w = 10**n + j*10**-3 + z = w*w*w + r = cbrt(z) + assert mpc_ae(r, w, eps) + mp.dps = 15 + +def test_root(): + mp.dps = 30 + random.seed(1) + a = random.randint(0, 10000) + p = a*a*a + r = nthroot(mpf(p), 3) + assert r == a + for n in range(4, 10): + p = p*a + assert nthroot(mpf(p), n) == a + mp.dps = 40 + for n in range(10, 5000, 100): + for a in [random.random()*10000, random.random()*10**100]: + r = nthroot(a, n) + r1 = pow(a, mpf(1)/n) + assert r.ae(r1) + r = nthroot(a, -n) + r1 = pow(a, -mpf(1)/n) + assert r.ae(r1) + # XXX: this is broken right now + # tests for nthroot rounding + for rnd in ['nearest', 'up', 'down']: + mp.rounding = rnd + for n in [-5, -3, 3, 5]: + prec = 50 + for i in range(10): + mp.prec = prec + a = rand() + mp.prec = 2*prec + b = a**n + mp.prec = prec + r = nthroot(b, n) + assert r == a + mp.dps = 30 + for n in range(3, 21): + a = (random.random() + j*random.random()) + assert nthroot(a, n).ae(pow(a, mpf(1)/n)) + assert mpc_ae(nthroot(a, n), pow(a, mpf(1)/n)) + a = (random.random()*10**100 + j*random.random()) + r = nthroot(a, n) + mp.dps += 4 + r1 = pow(a, mpf(1)/n) + mp.dps -= 4 + assert r.ae(r1) + assert mpc_ae(r, r1, eps) + r = nthroot(a, -n) + mp.dps += 4 + r1 = pow(a, -mpf(1)/n) + mp.dps -= 4 + assert r.ae(r1) + assert mpc_ae(r, r1, eps) + mp.dps = 15 + assert nthroot(4, 1) == 4 + assert nthroot(4, 0) == 1 + assert nthroot(4, -1) == 0.25 + assert nthroot(inf, 1) == inf + assert nthroot(inf, 2) == inf + assert nthroot(inf, 3) == inf + assert nthroot(inf, -1) == 0 + assert nthroot(inf, -2) == 0 + assert nthroot(inf, -3) == 0 + assert nthroot(j, 1) == j + assert nthroot(j, 0) == 1 + assert nthroot(j, -1) == -j + assert isnan(nthroot(nan, 1)) + assert 
isnan(nthroot(nan, 0)) + assert isnan(nthroot(nan, -1)) + assert isnan(nthroot(inf, 0)) + assert root(2,3) == nthroot(2,3) + assert root(16,4,0) == 2 + assert root(16,4,1) == 2j + assert root(16,4,2) == -2 + assert root(16,4,3) == -2j + assert root(16,4,4) == 2 + assert root(-125,3,1) == -5 + +def test_issue_136(): + for dps in [20, 80]: + mp.dps = dps + r = nthroot(mpf('-1e-20'), 4) + assert r.ae(mpf(10)**(-5) * (1 + j) * mpf(2)**(-0.5)) + mp.dps = 80 + assert nthroot('-1e-3', 4).ae(mpf(10)**(-3./4) * (1 + j)/sqrt(2)) + assert nthroot('-1e-6', 4).ae((1 + j)/(10 * sqrt(20))) + # Check that this doesn't take eternity to compute + mp.dps = 20 + assert nthroot('-1e100000000', 4).ae((1+j)*mpf('1e25000000')/sqrt(2)) + mp.dps = 15 + +def test_mpcfun_real_imag(): + mp.dps = 15 + x = mpf(0.3) + y = mpf(0.4) + assert exp(mpc(x,0)) == exp(x) + assert exp(mpc(0,y)) == mpc(cos(y),sin(y)) + assert cos(mpc(x,0)) == cos(x) + assert sin(mpc(x,0)) == sin(x) + assert cos(mpc(0,y)) == cosh(y) + assert sin(mpc(0,y)) == mpc(0,sinh(y)) + assert cospi(mpc(x,0)) == cospi(x) + assert sinpi(mpc(x,0)) == sinpi(x) + assert cospi(mpc(0,y)).ae(cosh(pi*y)) + assert sinpi(mpc(0,y)).ae(mpc(0,sinh(pi*y))) + c, s = cospi_sinpi(mpc(x,0)) + assert c == cospi(x) + assert s == sinpi(x) + c, s = cospi_sinpi(mpc(0,y)) + assert c.ae(cosh(pi*y)) + assert s.ae(mpc(0,sinh(pi*y))) + c, s = cos_sin(mpc(x,0)) + assert c == cos(x) + assert s == sin(x) + c, s = cos_sin(mpc(0,y)) + assert c == cosh(y) + assert s == mpc(0,sinh(y)) + +def test_perturbation_rounding(): + mp.dps = 100 + a = pi/10**50 + b = -pi/10**50 + c = 1 + a + d = 1 + b + mp.dps = 15 + assert exp(a) == 1 + assert exp(a, rounding='c') > 1 + assert exp(b, rounding='c') == 1 + assert exp(a, rounding='f') == 1 + assert exp(b, rounding='f') < 1 + assert cos(a) == 1 + assert cos(a, rounding='c') == 1 + assert cos(b, rounding='c') == 1 + assert cos(a, rounding='f') < 1 + assert cos(b, rounding='f') < 1 + for f in [sin, atan, asinh, tanh]: + assert f(a) == +a + assert f(a, rounding='c') > a + assert f(a, rounding='f') < a + assert f(b) == +b + assert f(b, rounding='c') > b + assert f(b, rounding='f') < b + for f in [asin, tan, sinh, atanh]: + assert f(a) == +a + assert f(b) == +b + assert f(a, rounding='c') > a + assert f(b, rounding='c') > b + assert f(a, rounding='f') < a + assert f(b, rounding='f') < b + assert ln(c) == +a + assert ln(d) == +b + assert ln(c, rounding='c') > a + assert ln(c, rounding='f') < a + assert ln(d, rounding='c') > b + assert ln(d, rounding='f') < b + assert cosh(a) == 1 + assert cosh(b) == 1 + assert cosh(a, rounding='c') > 1 + assert cosh(b, rounding='c') > 1 + assert cosh(a, rounding='f') == 1 + assert cosh(b, rounding='f') == 1 + +def test_integer_parts(): + assert floor(3.2) == 3 + assert ceil(3.2) == 4 + assert floor(3.2+5j) == 3+5j + assert ceil(3.2+5j) == 4+5j + +def test_complex_parts(): + assert fabs('3') == 3 + assert fabs(3+4j) == 5 + assert re(3) == 3 + assert re(1+4j) == 1 + assert im(3) == 0 + assert im(1+4j) == 4 + assert conj(3) == 3 + assert conj(3+4j) == 3-4j + assert mpf(3).conjugate() == 3 + +def test_cospi_sinpi(): + assert sinpi(0) == 0 + assert sinpi(0.5) == 1 + assert sinpi(1) == 0 + assert sinpi(1.5) == -1 + assert sinpi(2) == 0 + assert sinpi(2.5) == 1 + assert sinpi(-0.5) == -1 + assert cospi(0) == 1 + assert cospi(0.5) == 0 + assert cospi(1) == -1 + assert cospi(1.5) == 0 + assert cospi(2) == 1 + assert cospi(2.5) == 0 + assert cospi(-0.5) == 0 + assert cospi(100000000000.25).ae(sqrt(2)/2) + a = cospi(2+3j) + assert 
a.real.ae(cos((2+3j)*pi).real) + assert a.imag == 0 + b = sinpi(2+3j) + assert b.imag.ae(sin((2+3j)*pi).imag) + assert b.real == 0 + mp.dps = 35 + x1 = mpf(10000) - mpf('1e-15') + x2 = mpf(10000) + mpf('1e-15') + x3 = mpf(10000.5) - mpf('1e-15') + x4 = mpf(10000.5) + mpf('1e-15') + x5 = mpf(10001) - mpf('1e-15') + x6 = mpf(10001) + mpf('1e-15') + x7 = mpf(10001.5) - mpf('1e-15') + x8 = mpf(10001.5) + mpf('1e-15') + mp.dps = 15 + M = 10**15 + assert (sinpi(x1)*M).ae(-pi) + assert (sinpi(x2)*M).ae(pi) + assert (cospi(x3)*M).ae(pi) + assert (cospi(x4)*M).ae(-pi) + assert (sinpi(x5)*M).ae(pi) + assert (sinpi(x6)*M).ae(-pi) + assert (cospi(x7)*M).ae(-pi) + assert (cospi(x8)*M).ae(pi) + assert 0.999 < cospi(x1, rounding='d') < 1 + assert 0.999 < cospi(x2, rounding='d') < 1 + assert 0.999 < sinpi(x3, rounding='d') < 1 + assert 0.999 < sinpi(x4, rounding='d') < 1 + assert -1 < cospi(x5, rounding='d') < -0.999 + assert -1 < cospi(x6, rounding='d') < -0.999 + assert -1 < sinpi(x7, rounding='d') < -0.999 + assert -1 < sinpi(x8, rounding='d') < -0.999 + assert (sinpi(1e-15)*M).ae(pi) + assert (sinpi(-1e-15)*M).ae(-pi) + assert cospi(1e-15) == 1 + assert cospi(1e-15, rounding='d') < 1 + +def test_expj(): + assert expj(0) == 1 + assert expj(1).ae(exp(j)) + assert expj(j).ae(exp(-1)) + assert expj(1+j).ae(exp(j*(1+j))) + assert expjpi(0) == 1 + assert expjpi(1).ae(exp(j*pi)) + assert expjpi(j).ae(exp(-pi)) + assert expjpi(1+j).ae(exp(j*pi*(1+j))) + assert expjpi(-10**15 * j).ae('2.22579818340535731e+1364376353841841') + +def test_sinc(): + assert sinc(0) == sincpi(0) == 1 + assert sinc(inf) == sincpi(inf) == 0 + assert sinc(-inf) == sincpi(-inf) == 0 + assert sinc(2).ae(0.45464871341284084770) + assert sinc(2+3j).ae(0.4463290318402435457-2.7539470277436474940j) + assert sincpi(2) == 0 + assert sincpi(1.5).ae(-0.212206590789193781) + +def test_fibonacci(): + mp.dps = 15 + assert [fibonacci(n) for n in range(-5, 10)] == \ + [5, -3, 2, -1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21, 34] + assert fib(2.5).ae(1.4893065462657091) + assert fib(3+4j).ae(-5248.51130728372 - 14195.962288353j) + assert fib(1000).ae(4.3466557686937455e+208) + assert str(fib(10**100)) == '6.24499112864607e+2089876402499787337692720892375554168224592399182109535392875613974104853496745963277658556235103534' + mp.dps = 2100 + a = fib(10000) + assert a % 10**10 == 9947366875 + mp.dps = 15 + assert fibonacci(inf) == inf + assert fib(3+0j) == 2 + +def test_call_with_dps(): + mp.dps = 15 + assert abs(exp(1, dps=30)-e(dps=35)) < 1e-29 + +def test_tanh(): + mp.dps = 15 + assert tanh(0) == 0 + assert tanh(inf) == 1 + assert tanh(-inf) == -1 + assert isnan(tanh(nan)) + assert tanh(mpc('inf', '0')) == 1 + +def test_atanh(): + mp.dps = 15 + assert atanh(0) == 0 + assert atanh(0.5).ae(0.54930614433405484570) + assert atanh(-0.5).ae(-0.54930614433405484570) + assert atanh(1) == inf + assert atanh(-1) == -inf + assert isnan(atanh(nan)) + assert isinstance(atanh(1), mpf) + assert isinstance(atanh(-1), mpf) + # Limits at infinity + jpi2 = j*pi/2 + assert atanh(inf).ae(-jpi2) + assert atanh(-inf).ae(jpi2) + assert atanh(mpc(inf,-1)).ae(-jpi2) + assert atanh(mpc(inf,0)).ae(-jpi2) + assert atanh(mpc(inf,1)).ae(jpi2) + assert atanh(mpc(1,inf)).ae(jpi2) + assert atanh(mpc(0,inf)).ae(jpi2) + assert atanh(mpc(-1,inf)).ae(jpi2) + assert atanh(mpc(-inf,1)).ae(jpi2) + assert atanh(mpc(-inf,0)).ae(jpi2) + assert atanh(mpc(-inf,-1)).ae(-jpi2) + assert atanh(mpc(-1,-inf)).ae(-jpi2) + assert atanh(mpc(0,-inf)).ae(-jpi2) + assert atanh(mpc(1,-inf)).ae(-jpi2) + +def 
test_expm1(): + mp.dps = 15 + assert expm1(0) == 0 + assert expm1(3).ae(exp(3)-1) + assert expm1(inf) == inf + assert expm1(1e-50).ae(1e-50) + assert (expm1(1e-10)*1e10).ae(1.00000000005) + +def test_log1p(): + mp.dps = 15 + assert log1p(0) == 0 + assert log1p(3).ae(log(1+3)) + assert log1p(inf) == inf + assert log1p(1e-50).ae(1e-50) + assert (log1p(1e-10)*1e10).ae(0.99999999995) + +def test_powm1(): + mp.dps = 15 + assert powm1(2,3) == 7 + assert powm1(-1,2) == 0 + assert powm1(-1,0) == 0 + assert powm1(-2,0) == 0 + assert powm1(3+4j,0) == 0 + assert powm1(0,1) == -1 + assert powm1(0,0) == 0 + assert powm1(1,0) == 0 + assert powm1(1,2) == 0 + assert powm1(1,3+4j) == 0 + assert powm1(1,5) == 0 + assert powm1(j,4) == 0 + assert powm1(-j,4) == 0 + assert (powm1(2,1e-100)*1e100).ae(ln2) + assert powm1(2,'1e-100000000000') != 0 + assert (powm1(fadd(1,1e-100,exact=True), 5)*1e100).ae(5) + +def test_unitroots(): + assert unitroots(1) == [1] + assert unitroots(2) == [1, -1] + a, b, c = unitroots(3) + assert a == 1 + assert b.ae(-0.5 + 0.86602540378443864676j) + assert c.ae(-0.5 - 0.86602540378443864676j) + assert unitroots(1, primitive=True) == [1] + assert unitroots(2, primitive=True) == [-1] + assert unitroots(3, primitive=True) == unitroots(3)[1:] + assert unitroots(4, primitive=True) == [j, -j] + assert len(unitroots(17, primitive=True)) == 16 + assert len(unitroots(16, primitive=True)) == 8 + +def test_cyclotomic(): + mp.dps = 15 + assert [cyclotomic(n,1) for n in range(31)] == [1,0,2,3,2,5,1,7,2,3,1,11,1,13,1,1,2,17,1,19,1,1,1,23,1,5,1,3,1,29,1] + assert [cyclotomic(n,-1) for n in range(31)] == [1,-2,0,1,2,1,3,1,2,1,5,1,1,1,7,1,2,1,3,1,1,1,11,1,1,1,13,1,1,1,1] + assert [cyclotomic(n,j) for n in range(21)] == [1,-1+j,1+j,j,0,1,-j,j,2,-j,1,j,3,1,-j,1,2,1,j,j,5] + assert [cyclotomic(n,-j) for n in range(21)] == [1,-1-j,1-j,-j,0,1,j,-j,2,j,1,-j,3,1,j,1,2,1,-j,-j,5] + assert cyclotomic(1624,j) == 1 + assert cyclotomic(33600,j) == 1 + u = sqrt(j, prec=500) + assert cyclotomic(8, u).ae(0) + assert cyclotomic(30, u).ae(5.8284271247461900976) + assert cyclotomic(2040, u).ae(1) + assert cyclotomic(0,2.5) == 1 + assert cyclotomic(1,2.5) == 2.5-1 + assert cyclotomic(2,2.5) == 2.5+1 + assert cyclotomic(3,2.5) == 2.5**2 + 2.5 + 1 + assert cyclotomic(7,2.5) == 406.234375 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_functions2.py b/MLPY/Lib/site-packages/mpmath/tests/test_functions2.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2d57fcec9be0db4d921b013f24fd6a5e0e9930 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_functions2.py @@ -0,0 +1,2384 @@ +import math +import pytest +from mpmath import * + +def test_bessel(): + mp.dps = 15 + assert j0(1).ae(0.765197686557966551) + assert j0(pi).ae(-0.304242177644093864) + assert j0(1000).ae(0.0247866861524201746) + assert j0(-25).ae(0.0962667832759581162) + assert j1(1).ae(0.440050585744933516) + assert j1(pi).ae(0.284615343179752757) + assert j1(1000).ae(0.00472831190708952392) + assert j1(-25).ae(0.125350249580289905) + assert besselj(5,1).ae(0.000249757730211234431) + assert besselj(5+0j,1).ae(0.000249757730211234431) + assert besselj(5,pi).ae(0.0521411843671184747) + assert besselj(5,1000).ae(0.00502540694523318607) + assert besselj(5,-25).ae(0.0660079953984229934) + assert besselj(-3,2).ae(-0.128943249474402051) + assert besselj(-4,2).ae(0.0339957198075684341) + assert besselj(3,3+2j).ae(0.424718794929639595942 + 0.625665327745785804812j) + assert besselj(0.25,4).ae(-0.374760630804249715) + assert 
besselj(1+2j,3+4j).ae(0.319247428741872131 - 0.669557748880365678j) + assert (besselj(3, 10**10) * 10**5).ae(0.76765081748139204023) + assert bessely(-0.5, 0) == 0 + assert bessely(0.5, 0) == -inf + assert bessely(1.5, 0) == -inf + assert bessely(0,0) == -inf + assert bessely(-0.4, 0) == -inf + assert bessely(-0.6, 0) == inf + assert bessely(-1, 0) == inf + assert bessely(-1.4, 0) == inf + assert bessely(-1.6, 0) == -inf + assert bessely(-1, 0) == inf + assert bessely(-2, 0) == -inf + assert bessely(-3, 0) == inf + assert bessely(0.5, 0) == -inf + assert bessely(1, 0) == -inf + assert bessely(1.5, 0) == -inf + assert bessely(2, 0) == -inf + assert bessely(2.5, 0) == -inf + assert bessely(3, 0) == -inf + assert bessely(0,0.5).ae(-0.44451873350670655715) + assert bessely(1,0.5).ae(-1.4714723926702430692) + assert bessely(-1,0.5).ae(1.4714723926702430692) + assert bessely(3.5,0.5).ae(-138.86400867242488443) + assert bessely(0,3+4j).ae(4.6047596915010138655-8.8110771408232264208j) + assert bessely(0,j).ae(-0.26803248203398854876+1.26606587775200833560j) + assert (bessely(3, 10**10) * 10**5).ae(0.21755917537013204058) + assert besseli(0,0) == 1 + assert besseli(1,0) == 0 + assert besseli(2,0) == 0 + assert besseli(-1,0) == 0 + assert besseli(-2,0) == 0 + assert besseli(0,0.5).ae(1.0634833707413235193) + assert besseli(1,0.5).ae(0.25789430539089631636) + assert besseli(-1,0.5).ae(0.25789430539089631636) + assert besseli(3.5,0.5).ae(0.00068103597085793815863) + assert besseli(0,3+4j).ae(-3.3924877882755196097-1.3239458916287264815j) + assert besseli(0,j).ae(besselj(0,1)) + assert (besseli(3, 10**10) * mpf(10)**(-4342944813)).ae(4.2996028505491271875) + assert besselk(0,0) == inf + assert besselk(1,0) == inf + assert besselk(2,0) == inf + assert besselk(-1,0) == inf + assert besselk(-2,0) == inf + assert besselk(0,0.5).ae(0.92441907122766586178) + assert besselk(1,0.5).ae(1.6564411200033008937) + assert besselk(-1,0.5).ae(1.6564411200033008937) + assert besselk(3.5,0.5).ae(207.48418747548460607) + assert besselk(0,3+4j).ae(-0.007239051213570155013+0.026510418350267677215j) + assert besselk(0,j).ae(-0.13863371520405399968-1.20196971531720649914j) + assert (besselk(3, 10**10) * mpf(10)**4342944824).ae(1.1628981033356187851) + # test for issue 331, bug reported by Michael Hartmann + for n in range(10,100,10): + mp.dps = n + assert besseli(91.5,24.7708).ae("4.00830632138673963619656140653537080438462342928377020695738635559218797348548092636896796324190271316137982810144874264e-41") + +def test_bessel_zeros(): + mp.dps = 15 + assert besseljzero(0,1).ae(2.40482555769577276869) + assert besseljzero(2,1).ae(5.1356223018406825563) + assert besseljzero(1,50).ae(157.86265540193029781) + assert besseljzero(10,1).ae(14.475500686554541220) + assert besseljzero(0.5,3).ae(9.4247779607693797153) + assert besseljzero(2,1,1).ae(3.0542369282271403228) + assert besselyzero(0,1).ae(0.89357696627916752158) + assert besselyzero(2,1).ae(3.3842417671495934727) + assert besselyzero(1,50).ae(156.29183520147840108) + assert besselyzero(10,1).ae(12.128927704415439387) + assert besselyzero(0.5,3).ae(7.8539816339744830962) + assert besselyzero(2,1,1).ae(5.0025829314460639452) + +def test_hankel(): + mp.dps = 15 + assert hankel1(0,0.5).ae(0.93846980724081290423-0.44451873350670655715j) + assert hankel1(1,0.5).ae(0.2422684576748738864-1.4714723926702430692j) + assert hankel1(-1,0.5).ae(-0.2422684576748738864+1.4714723926702430692j) + assert hankel1(1.5,0.5).ae(0.0917016996256513026-2.5214655504213378514j) + assert 
hankel1(1.5,3+4j).ae(0.0066806866476728165382-0.0036684231610839127106j) + assert hankel2(0,0.5).ae(0.93846980724081290423+0.44451873350670655715j) + assert hankel2(1,0.5).ae(0.2422684576748738864+1.4714723926702430692j) + assert hankel2(-1,0.5).ae(-0.2422684576748738864-1.4714723926702430692j) + assert hankel2(1.5,0.5).ae(0.0917016996256513026+2.5214655504213378514j) + assert hankel2(1.5,3+4j).ae(14.783528526098567526-7.397390270853446512j) + +def test_struve(): + mp.dps = 15 + assert struveh(2,3).ae(0.74238666967748318564) + assert struveh(-2.5,3).ae(0.41271003220971599344) + assert struvel(2,3).ae(1.7476573277362782744) + assert struvel(-2.5,3).ae(1.5153394466819651377) + +def test_whittaker(): + mp.dps = 15 + assert whitm(2,3,4).ae(49.753745589025246591) + assert whitw(2,3,4).ae(14.111656223052932215) + +def test_kelvin(): + mp.dps = 15 + assert ber(2,3).ae(0.80836846563726819091) + assert ber(3,4).ae(-0.28262680167242600233) + assert ber(-3,2).ae(-0.085611448496796363669) + assert bei(2,3).ae(-0.89102236377977331571) + assert bei(-3,2).ae(-0.14420994155731828415) + assert ker(2,3).ae(0.12839126695733458928) + assert ker(-3,2).ae(-0.29802153400559142783) + assert ker(0.5,3).ae(-0.085662378535217097524) + assert kei(2,3).ae(0.036804426134164634000) + assert kei(-3,2).ae(0.88682069845786731114) + assert kei(0.5,3).ae(0.013633041571314302948) + +def test_hyper_misc(): + mp.dps = 15 + assert hyp0f1(1,0) == 1 + assert hyp1f1(1,2,0) == 1 + assert hyp1f2(1,2,3,0) == 1 + assert hyp2f1(1,2,3,0) == 1 + assert hyp2f2(1,2,3,4,0) == 1 + assert hyp2f3(1,2,3,4,5,0) == 1 + # Degenerate case: 0F0 + assert hyper([],[],0) == 1 + assert hyper([],[],-2).ae(exp(-2)) + # Degenerate case: 1F0 + assert hyper([2],[],1.5) == 4 + # + assert hyp2f1((1,3),(2,3),(5,6),mpf(27)/32).ae(1.6) + assert hyp2f1((1,4),(1,2),(3,4),mpf(80)/81).ae(1.8) + assert hyp2f1((2,3),(1,1),(3,2),(2+j)/3).ae(1.327531603558679093+0.439585080092769253j) + mp.dps = 25 + v = mpc('1.2282306665029814734863026', '-0.1225033830118305184672133') + assert hyper([(3,4),2+j,1],[1,5,j/3],mpf(1)/5+j/8).ae(v) + mp.dps = 15 + +def test_elliptic_integrals(): + mp.dps = 15 + assert ellipk(0).ae(pi/2) + assert ellipk(0.5).ae(gamma(0.25)**2/(4*sqrt(pi))) + assert ellipk(1) == inf + assert ellipk(1+0j) == inf + assert ellipk(-1).ae('1.3110287771460599052') + assert ellipk(-2).ae('1.1714200841467698589') + assert isinstance(ellipk(-2), mpf) + assert isinstance(ellipe(-2), mpf) + assert ellipk(-50).ae('0.47103424540873331679') + mp.dps = 30 + n1 = +fraction(99999,100000) + n2 = +fraction(100001,100000) + mp.dps = 15 + assert ellipk(n1).ae('7.1427724505817781901') + assert ellipk(n2).ae(mpc('7.1427417367963090109', '-1.5707923998261688019')) + assert ellipe(n1).ae('1.0000332138990829170') + v = ellipe(n2) + assert v.real.ae('0.999966786328145474069137') + assert (v.imag*10**6).ae('7.853952181727432') + assert ellipk(2).ae(mpc('1.3110287771460599052', '-1.3110287771460599052')) + assert ellipk(50).ae(mpc('0.22326753950210985451', '-0.47434723226254522087')) + assert ellipk(3+4j).ae(mpc('0.91119556380496500866', '0.63133428324134524388')) + assert ellipk(3-4j).ae(mpc('0.91119556380496500866', '-0.63133428324134524388')) + assert ellipk(-3+4j).ae(mpc('0.95357894880405122483', '0.23093044503746114444')) + assert ellipk(-3-4j).ae(mpc('0.95357894880405122483', '-0.23093044503746114444')) + assert isnan(ellipk(nan)) + assert isnan(ellipe(nan)) + assert ellipk(inf) == 0 + assert isinstance(ellipk(inf), mpc) + assert ellipk(-inf) == 0 + assert ellipk(1+0j) == inf + 
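# Convention note: ellipk and ellipe take the parameter m = k**2 rather than the modulus k, so m = 1 is the singular point and real m > 1 produces complex values, as exercised above for ellipk and below for ellipe. +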
assert ellipe(0).ae(pi/2) + assert ellipe(0.5).ae(pi**(mpf(3)/2)/gamma(0.25)**2 +gamma(0.25)**2/(8*sqrt(pi))) + assert ellipe(1) == 1 + assert ellipe(1+0j) == 1 + assert ellipe(inf) == mpc(0,inf) + assert ellipe(-inf) == inf + assert ellipe(3+4j).ae(1.4995535209333469543-1.5778790079127582745j) + assert ellipe(3-4j).ae(1.4995535209333469543+1.5778790079127582745j) + assert ellipe(-3+4j).ae(2.5804237855343377803-0.8306096791000413778j) + assert ellipe(-3-4j).ae(2.5804237855343377803+0.8306096791000413778j) + assert ellipe(2).ae(0.59907011736779610372+0.59907011736779610372j) + assert ellipe('1e-1000000000').ae(pi/2) + assert ellipk('1e-1000000000').ae(pi/2) + assert ellipe(-pi).ae(2.4535865983838923) + mp.dps = 50 + assert ellipk(1/pi).ae('1.724756270009501831744438120951614673874904182624739673') + assert ellipe(1/pi).ae('1.437129808135123030101542922290970050337425479058225712') + assert ellipk(-10*pi).ae('0.5519067523886233967683646782286965823151896970015484512') + assert ellipe(-10*pi).ae('5.926192483740483797854383268707108012328213431657645509') + v = ellipk(pi) + assert v.real.ae('0.973089521698042334840454592642137667227167622330325225') + assert v.imag.ae('-1.156151296372835303836814390793087600271609993858798016') + v = ellipe(pi) + assert v.real.ae('0.4632848917264710404078033487934663562998345622611263332') + assert v.imag.ae('1.0637961621753130852473300451583414489944099504180510966') + mp.dps = 15 + +def test_exp_integrals(): + mp.dps = 15 + x = +e + z = e + sqrt(3)*j + assert ei(x).ae(8.21168165538361560) + assert li(x).ae(1.89511781635593676) + assert si(x).ae(1.82104026914756705) + assert ci(x).ae(0.213958001340379779) + assert shi(x).ae(4.11520706247846193) + assert chi(x).ae(4.09647459290515367) + assert fresnels(x).ae(0.437189718149787643) + assert fresnelc(x).ae(0.401777759590243012) + assert airyai(x).ae(0.0108502401568586681) + assert airybi(x).ae(8.98245748585468627) + assert ei(z).ae(3.72597969491314951 + 7.34213212314224421j) + assert li(z).ae(2.28662658112562502 + 1.50427225297269364j) + assert si(z).ae(2.48122029237669054 + 0.12684703275254834j) + assert ci(z).ae(0.169255590269456633 - 0.892020751420780353j) + assert shi(z).ae(1.85810366559344468 + 3.66435842914920263j) + assert chi(z).ae(1.86787602931970484 + 3.67777369399304159j) + assert fresnels(z/3).ae(0.034534397197008182 + 0.754859844188218737j) + assert fresnelc(z/3).ae(1.261581645990027372 + 0.417949198775061893j) + assert airyai(z).ae(-0.0162552579839056062 - 0.0018045715700210556j) + assert airybi(z).ae(-4.98856113282883371 + 2.08558537872180623j) + assert li(0) == 0.0 + assert li(1) == -inf + assert li(inf) == inf + assert isinstance(li(0.7), mpf) + assert si(inf).ae(pi/2) + assert si(-inf).ae(-pi/2) + assert ci(inf) == 0 + assert ci(0) == -inf + assert isinstance(ei(-0.7), mpf) + assert airyai(inf) == 0 + assert airybi(inf) == inf + assert airyai(-inf) == 0 + assert airybi(-inf) == 0 + assert fresnels(inf) == 0.5 + assert fresnelc(inf) == 0.5 + assert fresnels(-inf) == -0.5 + assert fresnelc(-inf) == -0.5 + assert shi(0) == 0 + assert shi(inf) == inf + assert shi(-inf) == -inf + assert chi(0) == -inf + assert chi(inf) == inf + +def test_ei(): + mp.dps = 15 + assert ei(0) == -inf + assert ei(inf) == inf + assert ei(-inf) == -0.0 + assert ei(20+70j).ae(6.1041351911152984397e6 - 2.7324109310519928872e6j) + # tests for the asymptotic expansion + # values checked with Mathematica ExpIntegralEi + mp.dps = 50 + r = ei(20000) + s = '3.8781962825045010930273870085501819470698476975019e+8681' + assert str(r) 
== s + r = ei(-200) + s = '-6.8852261063076355977108174824557929738368086933303e-90' + assert str(r) == s + r = ei(20000 + 10*j) + sre = '-3.255138234032069402493850638874410725961401274106e+8681' + sim = '-2.1081929993474403520785942429469187647767369645423e+8681' + assert str(r.real) == sre and str(r.imag) == sim + mp.dps = 15 + # More asymptotic expansions + assert chi(-10**6+100j).ae('1.3077239389562548386e+434288 + 7.6808956999707408158e+434287j') + assert shi(-10**6+100j).ae('-1.3077239389562548386e+434288 - 7.6808956999707408158e+434287j') + mp.dps = 15 + assert ei(10j).ae(-0.0454564330044553726+3.2291439210137706686j) + assert ei(100j).ae(-0.0051488251426104921+3.1330217936839529126j) + u = ei(fmul(10**20, j, exact=True)) + assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(pi) + assert ei(-10j).ae(-0.0454564330044553726-3.2291439210137706686j) + assert ei(-100j).ae(-0.0051488251426104921-3.1330217936839529126j) + u = ei(fmul(-10**20, j, exact=True)) + assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(-pi) + assert ei(10+10j).ae(-1576.1504265768517448+436.9192317011328140j) + u = ei(-10+10j) + assert u.real.ae(7.6698978415553488362543e-7, abs_eps=0, rel_eps=8*eps) + assert u.imag.ae(3.141595611735621062025) + +def test_e1(): + mp.dps = 15 + assert e1(0) == inf + assert e1(inf) == 0 + assert e1(-inf) == mpc(-inf, -pi) + assert e1(10j).ae(0.045456433004455372635 + 0.087551267423977430100j) + assert e1(100j).ae(0.0051488251426104921444 - 0.0085708599058403258790j) + assert e1(fmul(10**20, j, exact=True)).ae(6.4525128526578084421e-21 - 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps) + assert e1(-10j).ae(0.045456433004455372635 - 0.087551267423977430100j) + assert e1(-100j).ae(0.0051488251426104921444 + 0.0085708599058403258790j) + assert e1(fmul(-10**20, j, exact=True)).ae(6.4525128526578084421e-21 + 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps) + +def test_expint(): + mp.dps = 15 + assert expint(0,0) == inf + assert expint(0,1).ae(1/e) + assert expint(0,1.5).ae(2/exp(1.5)/3) + assert expint(1,1).ae(-ei(-1)) + assert expint(2,0).ae(1) + assert expint(3,0).ae(1/2.) + assert expint(4,0).ae(1/3.) + assert expint(-2, 0.5).ae(26/sqrt(e)) + assert expint(-1,-1) == 0 + assert expint(-2,-1).ae(-e) + assert expint(5.5, 0).ae(2/9.)
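+ # The z = 0 checks above follow the closed form E_n(0) = 1/(n-1), valid for re(n) > 1: e.g. expint(5.5, 0) = 1/4.5 = 2/9, and the next check below is 1/(2.00000001 - 1). + assert expint(7, 0).ae(1/6.) # illustrative: one more instance of the same closed form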
+ assert expint(2.00000001,0).ae(100000000./100000001) + assert expint(2+3j,4-j).ae(0.0023461179581675065414+0.0020395540604713669262j) + assert expint('1.01', '1e-1000').ae(99.9999999899412802) + assert expint('1.000000000001', 3.5).ae(0.00697013985754701819446) + assert expint(2,3).ae(3*ei(-3)+exp(-3)) + assert (expint(10,20)*10**10).ae(0.694439055541231353) + assert expint(3,inf) == 0 + assert expint(3.2,inf) == 0 + assert expint(3.2+2j,inf) == 0 + assert expint(1,3j).ae(-0.11962978600800032763 + 0.27785620120457163717j) + assert expint(1,3).ae(0.013048381094197037413) + assert expint(1,-3).ae(-ei(3)-pi*j) + #assert expint(3) == expint(1,3) + assert expint(1,-20).ae(-25615652.66405658882 - 3.1415926535897932385j) + assert expint(1000000,0).ae(1./999999) + assert expint(0,2+3j).ae(-0.025019798357114678171 + 0.027980439405104419040j) + assert expint(-1,2+3j).ae(-0.022411973626262070419 + 0.038058922011377716932j) + assert expint(-1.5,0) == inf + +def test_trig_integrals(): + mp.dps = 30 + assert si(mpf(1)/1000000).ae('0.000000999999999999944444444444446111') + assert ci(mpf(1)/1000000).ae('-13.2382948930629912435014366276') + assert si(10**10).ae('1.5707963267075846569685111517747537') + assert ci(10**10).ae('-4.87506025174822653785729773959e-11') + assert si(10**100).ae(pi/2) + assert (ci(10**100)*10**100).ae('-0.372376123661276688262086695553') + assert si(-3) == -si(3) + assert ci(-3).ae(ci(3) + pi*j) + # Test complex structure + mp.dps = 15 + assert mp.ci(50).ae(-0.0056283863241163054402) + assert mp.ci(50+2j).ae(-0.018378282946133067149+0.070352808023688336193j) + assert mp.ci(20j).ae(1.28078263320282943611e7+1.5707963267949j) + assert mp.ci(-2+20j).ae(-4.050116856873293505e6+1.207476188206989909e7j) + assert mp.ci(-50+2j).ae(-0.0183782829461330671+3.0712398455661049023j) + assert mp.ci(-50).ae(-0.0056283863241163054+3.1415926535897932385j) + assert mp.ci(-50-2j).ae(-0.0183782829461330671-3.0712398455661049023j) + assert mp.ci(-2-20j).ae(-4.050116856873293505e6-1.207476188206989909e7j) + assert mp.ci(-20j).ae(1.28078263320282943611e7-1.5707963267949j) + assert mp.ci(50-2j).ae(-0.018378282946133067149-0.070352808023688336193j) + assert mp.si(50).ae(1.5516170724859358947) + assert mp.si(50+2j).ae(1.497884414277228461-0.017515007378437448j) + assert mp.si(20j).ae(1.2807826332028294459e7j) + assert mp.si(-2+20j).ae(-1.20747603112735722103e7-4.050116856873293554e6j) + assert mp.si(-50+2j).ae(-1.497884414277228461-0.017515007378437448j) + assert mp.si(-50).ae(-1.5516170724859358947) + assert mp.si(-50-2j).ae(-1.497884414277228461+0.017515007378437448j) + assert mp.si(-2-20j).ae(-1.20747603112735722103e7+4.050116856873293554e6j) + assert mp.si(-20j).ae(-1.2807826332028294459e7j) + assert mp.si(50-2j).ae(1.497884414277228461+0.017515007378437448j) + assert mp.chi(50j).ae(-0.0056283863241163054+1.5707963267948966192j) + assert mp.chi(-2+50j).ae(-0.0183782829461330671+1.6411491348185849554j) + assert mp.chi(-20).ae(1.28078263320282943611e7+3.1415926535898j) + assert mp.chi(-20-2j).ae(-4.050116856873293505e6+1.20747571696809187053e7j) + assert mp.chi(-2-50j).ae(-0.0183782829461330671-1.6411491348185849554j) + assert mp.chi(-50j).ae(-0.0056283863241163054-1.5707963267948966192j) + assert mp.chi(2-50j).ae(-0.0183782829461330671-1.500443518771208283j) + assert mp.chi(20-2j).ae(-4.050116856873293505e6-1.20747603112735722951e7j) + assert mp.chi(20).ae(1.2807826332028294361e7) + assert mp.chi(2+50j).ae(-0.0183782829461330671+1.500443518771208283j) + assert mp.shi(50j).ae(1.5516170724859358947j) + 
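# The shi sign patterns below follow from shi being odd with real Taylor coefficients (shi(-z) = -shi(z), shi(conj(z)) = conj(shi(z))), together with shi(j*x) = j*si(x), which ties the shi(50j) value just above to si(50). +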
assert mp.shi(-2+50j).ae(0.017515007378437448+1.497884414277228461j) + assert mp.shi(-20).ae(-1.2807826332028294459e7) + assert mp.shi(-20-2j).ae(4.050116856873293554e6-1.20747603112735722103e7j) + assert mp.shi(-2-50j).ae(0.017515007378437448-1.497884414277228461j) + assert mp.shi(-50j).ae(-1.5516170724859358947j) + assert mp.shi(2-50j).ae(-0.017515007378437448-1.497884414277228461j) + assert mp.shi(20-2j).ae(-4.050116856873293554e6-1.20747603112735722103e7j) + assert mp.shi(20).ae(1.2807826332028294459e7) + assert mp.shi(2+50j).ae(-0.017515007378437448+1.497884414277228461j) + def ae(x,y,tol=1e-12): + return abs(x-y) <= abs(y)*tol + assert fp.ci(fp.inf) == 0 + assert ae(fp.ci(fp.ninf), fp.pi*1j) + assert ae(fp.si(fp.inf), fp.pi/2) + assert ae(fp.si(fp.ninf), -fp.pi/2) + assert fp.si(0) == 0 + assert ae(fp.ci(50), -0.0056283863241163054402) + assert ae(fp.ci(50+2j), -0.018378282946133067149+0.070352808023688336193j) + assert ae(fp.ci(20j), 1.28078263320282943611e7+1.5707963267949j) + assert ae(fp.ci(-2+20j), -4.050116856873293505e6+1.207476188206989909e7j) + assert ae(fp.ci(-50+2j), -0.0183782829461330671+3.0712398455661049023j) + assert ae(fp.ci(-50), -0.0056283863241163054+3.1415926535897932385j) + assert ae(fp.ci(-50-2j), -0.0183782829461330671-3.0712398455661049023j) + assert ae(fp.ci(-2-20j), -4.050116856873293505e6-1.207476188206989909e7j) + assert ae(fp.ci(-20j), 1.28078263320282943611e7-1.5707963267949j) + assert ae(fp.ci(50-2j), -0.018378282946133067149-0.070352808023688336193j) + assert ae(fp.si(50), 1.5516170724859358947) + assert ae(fp.si(50+2j), 1.497884414277228461-0.017515007378437448j) + assert ae(fp.si(20j), 1.2807826332028294459e7j) + assert ae(fp.si(-2+20j), -1.20747603112735722103e7-4.050116856873293554e6j) + assert ae(fp.si(-50+2j), -1.497884414277228461-0.017515007378437448j) + assert ae(fp.si(-50), -1.5516170724859358947) + assert ae(fp.si(-50-2j), -1.497884414277228461+0.017515007378437448j) + assert ae(fp.si(-2-20j), -1.20747603112735722103e7+4.050116856873293554e6j) + assert ae(fp.si(-20j), -1.2807826332028294459e7j) + assert ae(fp.si(50-2j), 1.497884414277228461+0.017515007378437448j) + assert ae(fp.chi(50j), -0.0056283863241163054+1.5707963267948966192j) + assert ae(fp.chi(-2+50j), -0.0183782829461330671+1.6411491348185849554j) + assert ae(fp.chi(-20), 1.28078263320282943611e7+3.1415926535898j) + assert ae(fp.chi(-20-2j), -4.050116856873293505e6+1.20747571696809187053e7j) + assert ae(fp.chi(-2-50j), -0.0183782829461330671-1.6411491348185849554j) + assert ae(fp.chi(-50j), -0.0056283863241163054-1.5707963267948966192j) + assert ae(fp.chi(2-50j), -0.0183782829461330671-1.500443518771208283j) + assert ae(fp.chi(20-2j), -4.050116856873293505e6-1.20747603112735722951e7j) + assert ae(fp.chi(20), 1.2807826332028294361e7) + assert ae(fp.chi(2+50j), -0.0183782829461330671+1.500443518771208283j) + assert ae(fp.shi(50j), 1.5516170724859358947j) + assert ae(fp.shi(-2+50j), 0.017515007378437448+1.497884414277228461j) + assert ae(fp.shi(-20), -1.2807826332028294459e7) + assert ae(fp.shi(-20-2j), 4.050116856873293554e6-1.20747603112735722103e7j) + assert ae(fp.shi(-2-50j), 0.017515007378437448-1.497884414277228461j) + assert ae(fp.shi(-50j), -1.5516170724859358947j) + assert ae(fp.shi(2-50j), -0.017515007378437448-1.497884414277228461j) + assert ae(fp.shi(20-2j), -4.050116856873293554e6-1.20747603112735722103e7j) + assert ae(fp.shi(20), 1.2807826332028294459e7) + assert ae(fp.shi(2+50j), -0.017515007378437448+1.497884414277228461j) + +def test_airy(): + mp.dps = 15 + assert 
(airyai(10)*10**10).ae(1.1047532552898687) + assert (airybi(10)/10**9).ae(0.45564115354822515) + assert (airyai(1000)*10**9158).ae(9.306933063179556004) + assert (airybi(1000)/10**9154).ae(5.4077118391949465477) + assert airyai(-1000).ae(0.055971895773019918842) + assert airybi(-1000).ae(-0.083264574117080633012) + assert (airyai(100+100j)*10**188).ae(2.9099582462207032076 + 2.353013591706178756j) + assert (airybi(100+100j)/10**185).ae(1.7086751714463652039 - 3.1416590020830804578j) + +def test_hyper_0f1(): + mp.dps = 15 + v = 8.63911136507950465 + assert hyper([],[(1,3)],1.5).ae(v) + assert hyper([],[1/3.],1.5).ae(v) + assert hyp0f1(1/3.,1.5).ae(v) + assert hyp0f1((1,3),1.5).ae(v) + # Asymptotic expansion + assert hyp0f1(3,1e9).ae('4.9679055380347771271e+27455') + assert hyp0f1(3,1e9j).ae('-2.1222788784457702157e+19410 + 5.0840597555401854116e+19410j') + +def test_hyper_1f1(): + mp.dps = 15 + v = 1.2917526488617656673 + assert hyper([(1,2)],[(3,2)],0.7).ae(v) + assert hyper([(1,2)],[(3,2)],0.7+0j).ae(v) + assert hyper([0.5],[(3,2)],0.7).ae(v) + assert hyper([0.5],[1.5],0.7).ae(v) + assert hyper([0.5],[(3,2)],0.7+0j).ae(v) + assert hyper([0.5],[1.5],0.7+0j).ae(v) + assert hyper([(1,2)],[1.5+0j],0.7).ae(v) + assert hyper([0.5+0j],[1.5],0.7).ae(v) + assert hyper([0.5+0j],[1.5+0j],0.7+0j).ae(v) + assert hyp1f1(0.5,1.5,0.7).ae(v) + assert hyp1f1((1,2),1.5,0.7).ae(v) + # Asymptotic expansion + assert hyp1f1(2,3,1e10).ae('2.1555012157015796988e+4342944809') + assert (hyp1f1(2,3,1e10j)*10**10).ae(-0.97501205020039745852 - 1.7462392454512132074j) + # Shouldn't use asymptotic expansion + assert hyp1f1(-2, 1, 10000).ae(49980001) + # Bug + assert hyp1f1(1j,fraction(1,3),0.415-69.739j).ae(25.857588206024346592 + 15.738060264515292063j) + +def test_hyper_2f1(): + mp.dps = 15 + v = 1.0652207633823291032 + assert hyper([(1,2), (3,4)], [2], 0.3).ae(v) + assert hyper([(1,2), 0.75], [2], 0.3).ae(v) + assert hyper([0.5, 0.75], [2.0], 0.3).ae(v) + assert hyper([0.5, 0.75], [2.0], 0.3+0j).ae(v) + assert hyper([0.5+0j, (3,4)], [2.0], 0.3+0j).ae(v) + assert hyper([0.5+0j, (3,4)], [2.0], 0.3).ae(v) + assert hyper([0.5, (3,4)], [2.0+0j], 0.3).ae(v) + assert hyper([0.5+0j, 0.75+0j], [2.0+0j], 0.3+0j).ae(v) + v = 1.09234681096223231717 + 0.18104859169479360380j + assert hyper([(1,2),0.75+j], [2], 0.5).ae(v) + assert hyper([0.5,0.75+j], [2.0], 0.5).ae(v) + assert hyper([0.5,0.75+j], [2.0], 0.5+0j).ae(v) + assert hyper([0.5,0.75+j], [2.0+0j], 0.5+0j).ae(v) + v = 0.9625 - 0.125j + assert hyper([(3,2),-1],[4], 0.1+j/3).ae(v) + assert hyper([1.5,-1.0],[4], 0.1+j/3).ae(v) + assert hyper([1.5,-1.0],[4+0j], 0.1+j/3).ae(v) + assert hyper([1.5+0j,-1.0+0j],[4+0j], 0.1+j/3).ae(v) + v = 1.02111069501693445001 - 0.50402252613466859521j + assert hyper([(2,10),(3,10)],[(4,10)],1.5).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],1.5).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],1.5+0j).ae(v) + v = 0.76922501362865848528 + 0.32640579593235886194j + assert hyper([(2,10),(3,10)],[(4,10)],4+2j).ae(v) + assert hyper([0.2,(3,10)],[0.4+0j],4+2j).ae(v) + assert hyper([0.2,(3,10)],[(4,10)],4+2j).ae(v) + +def test_hyper_2f1_hard(): + mp.dps = 15 + # Singular cases + assert hyp2f1(2,-1,-1,3).ae(7) + assert hyp2f1(2,-1,-1,3,eliminate_all=True).ae(0.25) + assert hyp2f1(2,-2,-2,3).ae(34) + assert hyp2f1(2,-2,-2,3,eliminate_all=True).ae(0.25) + assert hyp2f1(2,-2,-3,3) == 14 + assert hyp2f1(2,-3,-2,3) == inf + assert hyp2f1(2,-1.5,-1.5,3) == 0.25 + assert hyp2f1(1,2,3,0) == 1 + assert hyp2f1(0,1,0,0) == 1 + assert hyp2f1(0,0,0,0) == 1 + assert 
isnan(hyp2f1(1,1,0,0)) + assert hyp2f1(2,-1,-5, 0.25+0.25j).ae(1.1+0.1j) + assert hyp2f1(2,-5,-5, 0.25+0.25j, eliminate=False).ae(163./128 + 125./128*j) + assert hyp2f1(0.7235, -1, -5, 0.3).ae(1.04341) + assert hyp2f1(0.7235, -5, -5, 0.3, eliminate=False).ae(1.2939225017815903812) + assert hyp2f1(-1,-2,4,1) == 1.5 + assert hyp2f1(1,2,-3,1) == inf + assert hyp2f1(-2,-2,1,1) == 6 + assert hyp2f1(1,-2,-4,1).ae(5./3) + assert hyp2f1(0,-6,-4,1) == 1 + assert hyp2f1(0,-3,-4,1) == 1 + assert hyp2f1(0,0,0,1) == 1 + assert hyp2f1(1,0,0,1,eliminate=False) == 1 + assert hyp2f1(1,1,0,1) == inf + assert hyp2f1(1,-6,-4,1) == inf + assert hyp2f1(-7.2,-0.5,-4.5,1) == 0 + assert hyp2f1(-7.2,-1,-2,1).ae(-2.6) + assert hyp2f1(1,-0.5,-4.5, 1) == inf + assert hyp2f1(1,0.5,-4.5, 1) == -inf + # Check evaluation on / close to unit circle + z = exp(j*pi/3) + w = (nthroot(2,3)+1)*exp(j*pi/12)/nthroot(3,4)**3 + assert hyp2f1('1/2','1/6','1/3', z).ae(w) + assert hyp2f1('1/2','1/6','1/3', z.conjugate()).ae(w.conjugate()) + assert hyp2f1(0.25, (1,3), 2, '0.999').ae(1.06826449496030635) + assert hyp2f1(0.25, (1,3), 2, '1.001').ae(1.06867299254830309446-0.00001446586793975874j) + assert hyp2f1(0.25, (1,3), 2, -1).ae(0.96656584492524351673) + assert hyp2f1(0.25, (1,3), 2, j).ae(0.99041766248982072266+0.03777135604180735522j) + assert hyp2f1(2,3,5,'0.99').ae(27.699347904322690602) + assert hyp2f1((3,2),-0.5,3,'0.99').ae(0.68403036843911661388) + assert hyp2f1(2,3,5,1j).ae(0.37290667145974386127+0.59210004902748285917j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), 0.95*exp(j*k)) for k in range(1,15)]).ae(52.851400204289452922+6.244285013912953225j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), 1.05*exp(j*k)) for k in range(1,15)]).ae(54.506013786220655330-3.000118813413217097j) + assert fsum([hyp2f1((7,10),(2,3),(-1,2), exp(j*k)) for k in range(1,15)]).ae(55.792077935955314887+1.731986485778500241j) + assert hyp2f1(2,2.5,-3.25,0.999).ae(218373932801217082543180041.33) + # Branches + assert hyp2f1(1,1,2,1.01).ae(4.5595744415723676911-3.1104877758314784539j) + assert hyp2f1(1,1,2,1.01+0.1j).ae(2.4149427480552782484+1.4148224796836938829j) + assert hyp2f1(1,1,2,3+4j).ae(0.14576709331407297807+0.48379185417980360773j) + assert hyp2f1(1,1,2,4).ae(-0.27465307216702742285 - 0.78539816339744830962j) + assert hyp2f1(1,1,2,-4).ae(0.40235947810852509365) + # Other: + # Cancellation with a large parameter involved (bug reported on sage-devel) + assert hyp2f1(112, (51,10), (-9,10), -0.99999).ae(-1.6241361047970862961e-24, abs_eps=0, rel_eps=eps*16) + +def test_hyper_3f2_etc(): + assert hyper([1,2,3],[1.5,8],-1).ae(0.67108992351533333030) + assert hyper([1,2,3,4],[5,6,7], -1).ae(0.90232988035425506008) + assert hyper([1,2,3],[1.25,5], 1).ae(28.924181329701905701) + assert hyper([1,2,3,4],[5,6,7],5).ae(1.5192307344006649499-1.1529845225075537461j) + assert hyper([1,2,3,4,5],[6,7,8,9],-1).ae(0.96288759462882357253) + assert hyper([1,2,3,4,5],[6,7,8,9],1).ae(1.0428697385885855841) + assert hyper([1,2,3,4,5],[6,7,8,9],5).ae(1.33980653631074769423-0.07143405251029226699j) + assert hyper([1,2.79,3.08,4.37],[5.2,6.1,7.3],5).ae(1.0996321464692607231-1.7748052293979985001j) + assert hyper([1,1,1],[1,2],1) == inf + assert hyper([1,1,1],[2,(101,100)],1).ae(100.01621213528313220) + # slow -- covered by doctests + #assert hyper([1,1,1],[2,3],0.9999).ae(1.2897972005319693905) + +def test_hyper_u(): + mp.dps = 15 + assert hyperu(2,-3,0).ae(0.05) + assert hyperu(2,-3.5,0).ae(4./99) + assert hyperu(2,0,0) == 0.5 + assert hyperu(-5,1,0) == -120 + assert 
hyperu(-5,2,0) == inf + assert hyperu(-5,-2,0) == 0 + assert hyperu(7,7,3).ae(0.00014681269365593503986) #exp(3)*gammainc(-6,3) + assert hyperu(2,-3,4).ae(0.011836478100271995559) + assert hyperu(3,4,5).ae(1./125) + assert hyperu(2,3,0.0625) == 256 + assert hyperu(-1,2,0.25+0.5j) == -1.75+0.5j + assert hyperu(0.5,1.5,7.25).ae(2/sqrt(29)) + assert hyperu(2,6,pi).ae(0.55804439825913399130) + assert (hyperu((3,2),8,100+201j)*10**4).ae(-0.3797318333856738798 - 2.9974928453561707782j) + assert (hyperu((5,2),(-1,2),-5000)*10**10).ae(-5.6681877926881664678j) + # XXX: fails because of undetected cancellation in low level series code + # Alternatively: could use asymptotic series here, if convergence test + # tweaked back to recognize this one + #assert (hyperu((5,2),(-1,2),-500)*10**7).ae(-1.82526906001593252847j) + +def test_hyper_2f0(): + mp.dps = 15 + assert hyper([1,2],[],3) == hyp2f0(1,2,3) + assert hyp2f0(2,3,7).ae(0.0116108068639728714668 - 0.0073727413865865802130j) + assert hyp2f0(2,3,0) == 1 + assert hyp2f0(0,0,0) == 1 + assert hyp2f0(-1,-1,1).ae(2) + assert hyp2f0(-4,1,1.5).ae(62.5) + assert hyp2f0(-4,1,50).ae(147029801) + assert hyp2f0(-4,1,0.0001).ae(0.99960011997600240000) + assert hyp2f0(0.5,0.25,0.001).ae(1.0001251174078538115) + assert hyp2f0(0.5,0.25,3+4j).ae(0.85548875824755163518 + 0.21636041283392292973j) + # Important: cancellation check + assert hyp2f0((1,6),(5,6),-0.02371708245126284498).ae(0.996785723120804309) + # Should be exact; polynomial case + assert hyp2f0(-2,1,0.5+0.5j,zeroprec=200) == 0 + assert hyp2f0(1,-2,0.5+0.5j,zeroprec=200) == 0 + # There used to be a bug in thresholds that made one of the following hang + for d in [15, 50, 80]: + mp.dps = d + assert hyp2f0(1.5, 0.5, 0.009).ae('1.006867007239309717945323585695344927904000945829843527398772456281301440034218290443367270629519483 + 1.238277162240704919639384945859073461954721356062919829456053965502443570466701567100438048602352623e-46j') + +def test_hyper_1f2(): + mp.dps = 15 + assert hyper([1],[2,3],4) == hyp1f2(1,2,3,4) + a1,b1,b2 = (1,10),(2,3),1./16 + assert hyp1f2(a1,b1,b2,10).ae(298.7482725554557568) + assert hyp1f2(a1,b1,b2,100).ae(224128961.48602947604) + assert hyp1f2(a1,b1,b2,1000).ae(1.1669528298622675109e+27) + assert hyp1f2(a1,b1,b2,10000).ae(2.4780514622487212192e+86) + assert hyp1f2(a1,b1,b2,100000).ae(1.3885391458871523997e+274) + assert hyp1f2(a1,b1,b2,1000000).ae('9.8851796978960318255e+867') + assert hyp1f2(a1,b1,b2,10**7).ae('1.1505659189516303646e+2746') + assert hyp1f2(a1,b1,b2,10**8).ae('1.4672005404314334081e+8685') + assert hyp1f2(a1,b1,b2,10**20).ae('3.6888217332150976493e+8685889636') + assert hyp1f2(a1,b1,b2,10*j).ae(-16.163252524618572878 - 44.321567896480184312j) + assert hyp1f2(a1,b1,b2,100*j).ae(61938.155294517848171 + 637349.45215942348739j) + assert hyp1f2(a1,b1,b2,1000*j).ae(8455057657257695958.7 + 6261969266997571510.6j) + assert hyp1f2(a1,b1,b2,10000*j).ae(-8.9771211184008593089e+60 + 4.6550528111731631456e+59j) + assert hyp1f2(a1,b1,b2,100000*j).ae(2.6398091437239324225e+193 + 4.1658080666870618332e+193j) + assert hyp1f2(a1,b1,b2,1000000*j).ae('3.5999042951925965458e+613 + 1.5026014707128947992e+613j') + assert hyp1f2(a1,b1,b2,10**7*j).ae('-8.3208715051623234801e+1939 - 3.6752883490851869429e+1941j') + assert hyp1f2(a1,b1,b2,10**8*j).ae('2.0724195707891484454e+6140 - 1.3276619482724266387e+6141j') + assert hyp1f2(a1,b1,b2,10**20*j).ae('-1.1734497974795488504e+6141851462 + 1.1498106965385471542e+6141851462j') + +def test_hyper_2f3(): + mp.dps = 15 + assert 
hyper([1,2],[3,4,5],6) == hyp2f3(1,2,3,4,5,6) + a1,a2,b1,b2,b3 = (1,10),(2,3),(3,10), 2, 1./16 + # Check asymptotic expansion + assert hyp2f3(a1,a2,b1,b2,b3,10).ae(128.98207160698659976) + assert hyp2f3(a1,a2,b1,b2,b3,1000).ae(6.6309632883131273141e25) + assert hyp2f3(a1,a2,b1,b2,b3,10000).ae(4.6863639362713340539e84) + assert hyp2f3(a1,a2,b1,b2,b3,100000).ae(8.6632451236103084119e271) + assert hyp2f3(a1,a2,b1,b2,b3,10**6).ae('2.0291718386574980641e865') + assert hyp2f3(a1,a2,b1,b2,b3,10**7).ae('7.7639836665710030977e2742') + assert hyp2f3(a1,a2,b1,b2,b3,10**8).ae('3.2537462584071268759e8681') + assert hyp2f3(a1,a2,b1,b2,b3,10**20).ae('1.2966030542911614163e+8685889627') + assert hyp2f3(a1,a2,b1,b2,b3,10*j).ae(-18.551602185587547854 - 13.348031097874113552j) + assert hyp2f3(a1,a2,b1,b2,b3,100*j).ae(78634.359124504488695 + 74459.535945281973996j) + assert hyp2f3(a1,a2,b1,b2,b3,1000*j).ae(597682550276527901.59 - 65136194809352613.078j) + assert hyp2f3(a1,a2,b1,b2,b3,10000*j).ae(-1.1779696326238582496e+59 + 1.2297607505213133872e+59j) + assert hyp2f3(a1,a2,b1,b2,b3,100000*j).ae(2.9844228969804380301e+191 + 7.5587163231490273296e+190j) + assert hyp2f3(a1,a2,b1,b2,b3,1000000*j).ae('7.4859161049322370311e+610 - 2.8467477015940090189e+610j') + assert hyp2f3(a1,a2,b1,b2,b3,10**7*j).ae('-1.7477645579418800826e+1938 - 1.7606522995808116405e+1938j') + assert hyp2f3(a1,a2,b1,b2,b3,10**8*j).ae('-1.6932731942958401784e+6137 - 2.4521909113114629368e+6137j') + assert hyp2f3(a1,a2,b1,b2,b3,10**20*j).ae('-2.0988815677627225449e+6141851451 + 5.7708223542739208681e+6141851452j') + +def test_hyper_2f2(): + mp.dps = 15 + assert hyper([1,2],[3,4],5) == hyp2f2(1,2,3,4,5) + a1,a2,b1,b2 = (3,10),4,(1,2),1./16 + assert hyp2f2(a1,a2,b1,b2,10).ae(448225936.3377556696) + assert hyp2f2(a1,a2,b1,b2,10000).ae('1.2012553712966636711e+4358') + assert hyp2f2(a1,a2,b1,b2,-20000).ae(-0.04182343755661214626) + assert hyp2f2(a1,a2,b1,b2,10**20).ae('1.1148680024303263661e+43429448190325182840') + +def test_orthpoly(): + mp.dps = 15 + assert jacobi(-4,2,3,0.7).ae(22800./4913) + assert jacobi(3,2,4,5.5) == 4133.125 + assert jacobi(1.5,5/6.,4,0).ae(-1.0851951434075508417) + assert jacobi(-2, 1, 2, 4).ae(-0.16) + assert jacobi(2, -1, 2.5, 4).ae(34.59375) + #assert jacobi(2, -1, 2, 4) == 28.5 + assert legendre(5, 7) == 129367 + assert legendre(0.5,0).ae(0.53935260118837935667) + assert legendre(-1,-1) == 1 + assert legendre(0,-1) == 1 + assert legendre(0, 1) == 1 + assert legendre(1, -1) == -1 + assert legendre(7, 1) == 1 + assert legendre(7, -1) == -1 + assert legendre(8,1.5).ae(15457523./32768) + assert legendre(j,-j).ae(2.4448182735671431011 + 0.6928881737669934843j) + assert chebyu(5,1) == 6 + assert chebyt(3,2) == 26 + assert legendre(3.5,-1) == inf + assert legendre(4.5,-1) == -inf + assert legendre(3.5+1j,-1) == mpc(inf,inf) + assert legendre(4.5+1j,-1) == mpc(-inf,-inf) + assert laguerre(4, -2, 3).ae(-1.125) + assert laguerre(3, 1+j, 0.5).ae(0.2291666666666666667 + 2.5416666666666666667j) + +def test_hermite(): + mp.dps = 15 + assert hermite(-2, 0).ae(0.5) + assert hermite(-1, 0).ae(0.88622692545275801365) + assert hermite(0, 0).ae(1) + assert hermite(1, 0) == 0 + assert hermite(2, 0).ae(-2) + assert hermite(0, 2).ae(1) + assert hermite(1, 2).ae(4) + assert hermite(1, -2).ae(-4) + assert hermite(2, -2).ae(14) + assert hermite(0.5, 0).ae(0.69136733903629335053) + assert hermite(9, 0) == 0 + assert hermite(4,4).ae(3340) + assert hermite(3,4).ae(464) + assert hermite(-4,4).ae(0.00018623860287512396181) + assert 
hermite(-3,4).ae(0.0016540169879668766270) + assert hermite(9, 2.5j).ae(13638725j) + assert hermite(9, -2.5j).ae(-13638725j) + assert hermite(9, 100).ae(511078883759363024000) + assert hermite(9, -100).ae(-511078883759363024000) + assert hermite(9, 100j).ae(512922083920643024000j) + assert hermite(9, -100j).ae(-512922083920643024000j) + assert hermite(-9.5, 2.5j).ae(-2.9004951258126778174e-6 + 1.7601372934039951100e-6j) + assert hermite(-9.5, -2.5j).ae(-2.9004951258126778174e-6 - 1.7601372934039951100e-6j) + assert hermite(-9.5, 100).ae(1.3776300722767084162e-22, abs_eps=0, rel_eps=eps) + assert hermite(-9.5, -100).ae('1.3106082028470671626e4355') + assert hermite(-9.5, 100j).ae(-9.7900218581864768430e-23 - 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps) + assert hermite(-9.5, -100j).ae(-9.7900218581864768430e-23 + 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps) + assert hermite(2+3j, -1-j).ae(851.3677063883687676 - 1496.4373467871007997j) + +def test_gegenbauer(): + mp.dps = 15 + assert gegenbauer(1,2,3).ae(12) + assert gegenbauer(2,3,4).ae(381) + assert gegenbauer(0,0,0) == 0 + assert gegenbauer(2,-1,3) == 0 + assert gegenbauer(-7, 0.5, 3).ae(8989) + assert gegenbauer(1, -0.5, 3).ae(-3) + assert gegenbauer(1, -1.5, 3).ae(-9) + assert gegenbauer(1, -0.5, 3).ae(-3) + assert gegenbauer(-0.5, -0.5, 3).ae(-2.6383553159023906245) + assert gegenbauer(2+3j, 1-j, 3+4j).ae(14.880536623203696780 + 20.022029711598032898j) + #assert gegenbauer(-2, -0.5, 3).ae(-12) + +def test_legenp(): + mp.dps = 15 + assert legenp(2,0,4) == legendre(2,4) + assert legenp(-2, -1, 0.5).ae(0.43301270189221932338) + assert legenp(-2, -1, 0.5, type=3).ae(0.43301270189221932338j) + assert legenp(-2, 1, 0.5).ae(-0.86602540378443864676) + assert legenp(2+j, 3+4j, -j).ae(134742.98773236786148 + 429782.72924463851745j) + assert legenp(2+j, 3+4j, -j, type=3).ae(802.59463394152268507 - 251.62481308942906447j) + assert legenp(2,4,3).ae(0) + assert legenp(2,4,3,type=3).ae(0) + assert legenp(2,1,0.5).ae(-1.2990381056766579701) + assert legenp(2,1,0.5,type=3).ae(1.2990381056766579701j) + assert legenp(3,2,3).ae(-360) + assert legenp(3,3,3).ae(240j*2**0.5) + assert legenp(3,4,3).ae(0) + assert legenp(0,0.5,2).ae(0.52503756790433198939 - 0.52503756790433198939j) + assert legenp(-1,-0.5,2).ae(0.60626116232846498110 + 0.60626116232846498110j) + assert legenp(-2,0.5,2).ae(1.5751127037129959682 - 1.5751127037129959682j) + assert legenp(-2,0.5,-0.5).ae(-0.85738275810499171286) + +def test_legenq(): + mp.dps = 15 + f = legenq + # Evaluation at poles + assert isnan(f(3,2,1)) + assert isnan(f(3,2,-1)) + assert isnan(f(3,2,1,type=3)) + assert isnan(f(3,2,-1,type=3)) + # Evaluation at 0 + assert f(0,1,0,type=2).ae(-1) + assert f(-2,2,0,type=2,zeroprec=200).ae(0) + assert f(1.5,3,0,type=2).ae(-2.2239343475841951023) + assert f(0,1,0,type=3).ae(j) + assert f(-2,2,0,type=3,zeroprec=200).ae(0) + assert f(1.5,3,0,type=3).ae(2.2239343475841951022*(1-1j)) + # Standard case, degree 0 + assert f(0,0,-1.5).ae(-0.8047189562170501873 + 1.5707963267948966192j) + assert f(0,0,-0.5).ae(-0.54930614433405484570) + assert f(0,0,0,zeroprec=200).ae(0) + assert f(0,0,0.5).ae(0.54930614433405484570) + assert f(0,0,1.5).ae(0.8047189562170501873 - 1.5707963267948966192j) + assert f(0,0,-1.5,type=3).ae(-0.80471895621705018730) + assert f(0,0,-0.5,type=3).ae(-0.5493061443340548457 - 1.5707963267948966192j) + assert f(0,0,0,type=3).ae(-1.5707963267948966192j) + assert f(0,0,0.5,type=3).ae(0.5493061443340548457 - 1.5707963267948966192j) + assert 
f(0,0,1.5,type=3).ae(0.80471895621705018730) + # Standard case, degree 1 + assert f(1,0,-1.5).ae(0.2070784343255752810 - 2.3561944901923449288j) + assert f(1,0,-0.5).ae(-0.72534692783297257715) + assert f(1,0,0).ae(-1) + assert f(1,0,0.5).ae(-0.72534692783297257715) + assert f(1,0,1.5).ae(0.2070784343255752810 - 2.3561944901923449288j) + # Standard case, degree 2 + assert f(2,0,-1.5).ae(-0.0635669991240192885 + 4.5160394395353277803j) + assert f(2,0,-0.5).ae(0.81866326804175685571) + assert f(2,0,0,zeroprec=200).ae(0) + assert f(2,0,0.5).ae(-0.81866326804175685571) + assert f(2,0,1.5).ae(0.0635669991240192885 - 4.5160394395353277803j) + # Misc orders and degrees + assert f(2,3,1.5,type=2).ae(-5.7243340223994616228j) + assert f(2,3,1.5,type=3).ae(-5.7243340223994616228) + assert f(2,3,0.5,type=2).ae(-12.316805742712016310) + assert f(2,3,0.5,type=3).ae(-12.316805742712016310j) + assert f(2,3,-1.5,type=2).ae(-5.7243340223994616228j) + assert f(2,3,-1.5,type=3).ae(5.7243340223994616228) + assert f(2,3,-0.5,type=2).ae(-12.316805742712016310) + assert f(2,3,-0.5,type=3).ae(-12.316805742712016310j) + assert f(2+3j, 3+4j, 0.5, type=3).ae(0.0016119404873235186807 - 0.0005885900510718119836j) + assert f(2+3j, 3+4j, -1.5, type=3).ae(0.008451400254138808670 + 0.020645193304593235298j) + assert f(-2.5,1,-1.5).ae(3.9553395527435335749j) + assert f(-2.5,1,-0.5).ae(1.9290561746445456908) + assert f(-2.5,1,0).ae(1.2708196271909686299) + assert f(-2.5,1,0.5).ae(-0.31584812990742202869) + assert f(-2.5,1,1.5).ae(-3.9553395527435335742 + 0.2993235655044701706j) + assert f(-2.5,1,-1.5,type=3).ae(0.29932356550447017254j) + assert f(-2.5,1,-0.5,type=3).ae(-0.3158481299074220287 - 1.9290561746445456908j) + assert f(-2.5,1,0,type=3).ae(1.2708196271909686292 - 1.2708196271909686299j) + assert f(-2.5,1,0.5,type=3).ae(1.9290561746445456907 + 0.3158481299074220287j) + assert f(-2.5,1,1.5,type=3).ae(-0.29932356550447017254) + +def test_agm(): + mp.dps = 15 + assert agm(0,0) == 0 + assert agm(0,1) == 0 + assert agm(1,1) == 1 + assert agm(7,7) == 7 + assert agm(j,j) == j + assert (1/agm(1,sqrt(2))).ae(0.834626841674073186) + assert agm(1,2).ae(1.4567910310469068692) + assert agm(1,3).ae(1.8636167832448965424) + assert agm(1,j).ae(0.599070117367796104+0.599070117367796104j) + assert agm(2) == agm(1,2) + assert agm(-3,4).ae(0.63468509766550907+1.3443087080896272j) + +def test_gammainc(): + mp.dps = 15 + assert gammainc(2,5).ae(6*exp(-5)) + assert gammainc(2,0,5).ae(1-6*exp(-5)) + assert gammainc(2,3,5).ae(-6*exp(-5)+4*exp(-3)) + assert gammainc(-2.5,-0.5).ae(-0.9453087204829418812-5.3164237738936178621j) + assert gammainc(0,2,4).ae(0.045121158298212213088) + assert gammainc(0,3).ae(0.013048381094197037413) + assert gammainc(0,2+j,1-j).ae(0.00910653685850304839-0.22378752918074432574j) + assert gammainc(0,1-j).ae(0.00028162445198141833+0.17932453503935894015j) + assert gammainc(3,4,5,True).ae(0.11345128607046320253) + assert gammainc(3.5,0,inf).ae(gamma(3.5)) + assert gammainc(-150.5,500).ae('6.9825435345798951153e-627') + assert gammainc(-150.5,800).ae('4.6885137549474089431e-788') + assert gammainc(-3.5, -20.5).ae(0.27008820585226911 - 1310.31447140574997636j) + assert gammainc(-3.5, -200.5).ae(0.27008820585226911 - 5.3264597096208368435e76j) # XXX real part + assert gammainc(0,0,2) == inf + assert gammainc(1,b=1).ae(0.6321205588285576784) + assert gammainc(3,2,2) == 0 + assert gammainc(2,3+j,3-j).ae(-0.28135485191849314194j) + assert gammainc(4+0j,1).ae(5.8860710587430771455) + # GH issue #301 + assert 
gammainc(-1,-1).ae(-0.8231640121031084799 + 3.1415926535897932385j) + assert gammainc(-2,-1).ae(1.7707229202810768576 - 1.5707963267948966192j) + assert gammainc(-3,-1).ae(-1.4963349162467073643 + 0.5235987755982988731j) + assert gammainc(-4,-1).ae(1.05365418617643814992 - 0.13089969389957471827j) + # Regularized upper gamma + assert isnan(gammainc(0, 0, regularized=True)) + assert gammainc(-1, 0, regularized=True) == inf + assert gammainc(1, 0, regularized=True) == 1 + assert gammainc(0, 5, regularized=True) == 0 + assert gammainc(0, 2+3j, regularized=True) == 0 + assert gammainc(0, 5000, regularized=True) == 0 + assert gammainc(0, 10**30, regularized=True) == 0 + assert gammainc(-1, 5, regularized=True) == 0 + assert gammainc(-1, 5000, regularized=True) == 0 + assert gammainc(-1, 10**30, regularized=True) == 0 + assert gammainc(-1, -5, regularized=True) == 0 + assert gammainc(-1, -5000, regularized=True) == 0 + assert gammainc(-1, -10**30, regularized=True) == 0 + assert gammainc(-1, 3+4j, regularized=True) == 0 + assert gammainc(1, 5, regularized=True).ae(exp(-5)) + assert gammainc(1, 5000, regularized=True).ae(exp(-5000)) + assert gammainc(1, 10**30, regularized=True).ae(exp(-10**30)) + assert gammainc(1, 3+4j, regularized=True).ae(exp(-3-4j)) + assert gammainc(-1000000,2).ae('1.3669297209397347754e-301037', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1000000,2,regularized=True) == 0 + assert gammainc(-1000000,3+4j).ae('-1.322575609404222361e-698979 - 4.9274570591854533273e-698978j', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1000000,3+4j,regularized=True) == 0 + assert gammainc(2+3j, 4+5j, regularized=True).ae(0.085422013530993285774-0.052595379150390078503j) + assert gammainc(1000j, 1000j, regularized=True).ae(0.49702647628921131761 + 0.00297355675013575341j) + # Generalized + assert gammainc(3,4,2) == -gammainc(3,2,4) + assert gammainc(4, 2, 3).ae(1.2593494302978947396) + assert gammainc(4, 2, 3, regularized=True).ae(0.20989157171631578993) + assert gammainc(0, 2, 3).ae(0.035852129613864082155) + assert gammainc(0, 2, 3, regularized=True) == 0 + assert gammainc(-1, 2, 3).ae(0.015219822548487616132) + assert gammainc(-1, 2, 3, regularized=True) == 0 + assert gammainc(0, 2, 3).ae(0.035852129613864082155) + assert gammainc(0, 2, 3, regularized=True) == 0 + # Should use upper gammas + assert gammainc(5, 10000, 12000).ae('1.1359381951461801687e-4327', abs_eps=0, rel_eps=8*eps) + # Should use lower gammas + assert gammainc(10000, 2, 3).ae('8.1244514125995785934e4765') + # GH issue 306 + assert gammainc(3,-1-1j) == 0 + assert gammainc(3,-1+1j) == 0 + assert gammainc(2,-1) == 0 + assert gammainc(2,-1+0j) == 0 + assert gammainc(2+0j,-1) == 0 + +def test_gammainc_expint_n(): + # These tests are intended to check all cases of the low-level code + # for upper gamma and expint with small integer index. 
+ # Need to cover positive/negative arguments; small/large/huge arguments + # for both positive and negative indices, as well as indices 0 and 1 + # which may be special-cased + mp.dps = 15 + assert expint(-3,3.5).ae(0.021456366563296693987) + assert expint(-2,3.5).ae(0.014966633183073309405) + assert expint(-1,3.5).ae(0.011092916359219041088) + assert expint(0,3.5).ae(0.0086278238349481430685) + assert expint(1,3.5).ae(0.0069701398575483929193) + assert expint(2,3.5).ae(0.0058018939208991255223) + assert expint(3,3.5).ae(0.0049453773495857807058) + assert expint(-3,-3.5).ae(-4.6618170604073311319) + assert expint(-2,-3.5).ae(-5.5996974157555515963) + assert expint(-1,-3.5).ae(-6.7582555017739415818) + assert expint(0,-3.5).ae(-9.4615577024835182145) + assert expint(1,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j) + assert expint(2,-3.5).ae(-15.62328702434085977 - 10.995574287564276335j) + assert expint(3,-3.5).ae(-10.783026313250347722 - 19.242255003237483586j) + assert expint(-3,350).ae(2.8614825451252838069e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-2,350).ae(2.8532837224504675901e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-1,350).ae(2.8451316155828634555e-155, abs_eps=0, rel_eps=8*eps) + assert expint(0,350).ae(2.8370258275042797989e-155, abs_eps=0, rel_eps=8*eps) + assert expint(1,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps) + assert expint(2,350).ae(2.8209516419468505006e-155, abs_eps=0, rel_eps=8*eps) + assert expint(3,350).ae(2.8129824725501272171e-155, abs_eps=0, rel_eps=8*eps) + assert expint(-3,-350).ae(-2.8528796154044839443e+149) + assert expint(-2,-350).ae(-2.8610072121701264351e+149) + assert expint(-1,-350).ae(-2.8691813842677537647e+149) + assert expint(0,-350).ae(-2.8774025343659421709e+149) + u = expint(1,-350) + assert u.ae(-2.8856710698020863568e+149) + assert u.imag.ae(-3.1415926535897932385) + u = expint(2,-350) + assert u.ae(-2.8939874026504650534e+149) + assert u.imag.ae(-1099.5574287564276335) + u = expint(3,-350) + assert u.ae(-2.9023519497915044349e+149) + assert u.imag.ae(-192422.55003237483586) + assert expint(-3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert expint(-3,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(-2,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(-1,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + assert expint(0,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871') + u = expint(1,-350000000000000000000000) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + assert u.imag.ae(-3.1415926535897932385) + u = 
expint(2,-350000000000000000000000) + assert u.imag.ae(-1.0995574287564276335e+24) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + u = expint(3,-350000000000000000000000) + assert u.imag.ae(-1.9242255003237483586e+47) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + # Small case; no branch cut + assert gammainc(-3,3.5).ae(0.00010020262545203707109) + assert gammainc(-2,3.5).ae(0.00040370427343557393517) + assert gammainc(-1,3.5).ae(0.0016576839773997501492) + assert gammainc(0,3.5).ae(0.0069701398575483929193) + assert gammainc(1,3.5).ae(0.03019738342231850074) + assert gammainc(2,3.5).ae(0.13588822540043325333) + assert gammainc(3,3.5).ae(0.64169439772426814072) + # Small case; with branch cut + assert gammainc(-3,-3.5).ae(0.03595832954467563286 + 0.52359877559829887308j) + assert gammainc(-2,-3.5).ae(-0.88024704597962022221 - 1.5707963267948966192j) + assert gammainc(-1,-3.5).ae(4.4637962926688170771 + 3.1415926535897932385j) + assert gammainc(0,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j) + assert gammainc(1,-3.5).ae(33.115451958692313751) + assert gammainc(2,-3.5).ae(-82.788629896730784377) + assert gammainc(3,-3.5).ae(240.08702670051927469) + # Asymptotic case; no branch cut + assert gammainc(-3,350).ae(6.5424095113340358813e-163, abs_eps=0, rel_eps=8*eps) + assert gammainc(-2,350).ae(2.296312222489899769e-160, abs_eps=0, rel_eps=8*eps) + assert gammainc(-1,350).ae(8.059861834133858573e-158, abs_eps=0, rel_eps=8*eps) + assert gammainc(0,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps) + assert gammainc(1,350).ae(9.9295903962649792963e-153, abs_eps=0, rel_eps=8*eps) + assert gammainc(2,350).ae(3.485286229089007733e-150, abs_eps=0, rel_eps=8*eps) + assert gammainc(3,350).ae(1.2233453960006379793e-147, abs_eps=0, rel_eps=8*eps) + # Asymptotic case; branch cut + u = gammainc(-3,-350) + assert u.ae(6.7889565783842895085e+141) + assert u.imag.ae(0.52359877559829887308) + u = gammainc(-2,-350) + assert u.ae(-2.3692668977889832121e+144) + assert u.imag.ae(-1.5707963267948966192) + u = gammainc(-1,-350) + assert u.ae(8.2685354361441858669e+146) + assert u.imag.ae(3.1415926535897932385) + u = gammainc(0,-350) + assert u.ae(-2.8856710698020863568e+149) + assert u.imag.ae(-3.1415926535897932385) + u = gammainc(1,-350) + assert u.ae(1.0070908870280797598e+152) + assert u.imag == 0 + u = gammainc(2,-350) + assert u.ae(-3.5147471957279983618e+154) + assert u.imag == 0 + u = gammainc(3,-350) + assert u.ae(1.2266568422179417091e+157) + assert u.imag == 0 + # Extreme asymptotic case + assert gammainc(-3,350000000000000000000000).ae('5.0362468738874738859e-152003068666138139677990', abs_eps=0, rel_eps=8*eps) + assert gammainc(-2,350000000000000000000000).ae('1.7626864058606158601e-152003068666138139677966', abs_eps=0, rel_eps=8*eps) + assert gammainc(-1,350000000000000000000000).ae('6.1694024205121555102e-152003068666138139677943', abs_eps=0, rel_eps=8*eps) + assert gammainc(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps) + assert gammainc(1,350000000000000000000000).ae('7.5575179651273905e-152003068666138139677896', abs_eps=0, rel_eps=8*eps) + assert gammainc(2,350000000000000000000000).ae('2.645131287794586675e-152003068666138139677872', abs_eps=0, rel_eps=8*eps) + assert gammainc(3,350000000000000000000000).ae('9.2579595072810533625e-152003068666138139677849', abs_eps=0, rel_eps=8*eps) + u = gammainc(-3,-350000000000000000000000) + assert 
u.ae('8.8175642804468234866e+152003068666138139677800') + assert u.imag.ae(0.52359877559829887308) + u = gammainc(-2,-350000000000000000000000) + assert u.ae('-3.0861474981563882203e+152003068666138139677824') + assert u.imag.ae(-1.5707963267948966192) + u = gammainc(-1,-350000000000000000000000) + assert u.ae('1.0801516243547358771e+152003068666138139677848') + assert u.imag.ae(3.1415926535897932385) + u = gammainc(0,-350000000000000000000000) + assert u.ae('-3.7805306852415755699e+152003068666138139677871') + assert u.imag.ae(-3.1415926535897932385) + assert gammainc(1,-350000000000000000000000).ae('1.3231857398345514495e+152003068666138139677895') + assert gammainc(2,-350000000000000000000000).ae('-4.6311500894209300731e+152003068666138139677918') + assert gammainc(3,-350000000000000000000000).ae('1.6209025312973255256e+152003068666138139677942') + +def test_incomplete_beta(): + mp.dps = 15 + assert betainc(-2,-3,0.5,0.75).ae(63.4305673311255413583969) + assert betainc(4.5,0.5+2j,2.5,6).ae(0.2628801146130621387903065 + 0.5162565234467020592855378j) + assert betainc(4,5,0,6).ae(90747.77142857142857142857) + +def test_erf(): + mp.dps = 15 + assert erf(0) == 0 + assert erf(1).ae(0.84270079294971486934) + assert erf(3+4j).ae(-120.186991395079444098 - 27.750337293623902498j) + assert erf(-4-3j).ae(-0.99991066178539168236 + 0.00004972026054496604j) + assert erf(pi).ae(0.99999112385363235839) + assert erf(1j).ae(1.6504257587975428760j) + assert erf(-1j).ae(-1.6504257587975428760j) + assert isinstance(erf(1), mpf) + assert isinstance(erf(-1), mpf) + assert isinstance(erf(0), mpf) + assert isinstance(erf(0j), mpc) + assert erf(inf) == 1 + assert erf(-inf) == -1 + assert erfi(0) == 0 + assert erfi(1/pi).ae(0.371682698493894314) + assert erfi(inf) == inf + assert erfi(-inf) == -inf + assert erf(1+0j) == erf(1) + assert erfc(1+0j) == erfc(1) + assert erf(0.2+0.5j).ae(1 - erfc(0.2+0.5j)) + assert erfc(0) == 1 + assert erfc(1).ae(1-erf(1)) + assert erfc(-1).ae(1-erf(-1)) + assert erfc(1/pi).ae(1-erf(1/pi)) + assert erfc(-10) == 2 + assert erfc(-1000000) == 2 + assert erfc(-inf) == 2 + assert erfc(inf) == 0 + assert isnan(erfc(nan)) + assert (erfc(10**4)*mpf(10)**43429453).ae('3.63998738656420') + assert erf(8+9j).ae(-1072004.2525062051158 + 364149.91954310255423j) + assert erfc(8+9j).ae(1072005.2525062051158 - 364149.91954310255423j) + assert erfc(-8-9j).ae(-1072003.2525062051158 + 364149.91954310255423j) + mp.dps = 50 + # This one does not use the asymptotic series + assert (erfc(10)*10**45).ae('2.0884875837625447570007862949577886115608181193212') + # This one does + assert (erfc(50)*10**1088).ae('2.0709207788416560484484478751657887929322509209954') + mp.dps = 15 + assert str(erfc(10**50)) == '3.66744826532555e-4342944819032518276511289189166050822943970058036665661144537831658646492088707747292249493384317534' + assert erfinv(0) == 0 + assert erfinv(0.5).ae(0.47693627620446987338) + assert erfinv(-0.5).ae(-0.47693627620446987338) + assert erfinv(1) == inf + assert erfinv(-1) == -inf + assert erf(erfinv(0.95)).ae(0.95) + assert erf(erfinv(0.999999999995)).ae(0.999999999995) + assert erf(erfinv(-0.999999999995)).ae(-0.999999999995) + mp.dps = 50 + assert erf(erfinv('0.99999999999999999999999999999995')).ae('0.99999999999999999999999999999995') + assert erf(erfinv('0.999999999999999999999999999999995')).ae('0.999999999999999999999999999999995') + assert erf(erfinv('-0.999999999999999999999999999999995')).ae('-0.999999999999999999999999999999995') + mp.dps = 15 + # Complex asymptotic expansions + v = 
erfc(50j) + assert v.real == 1 + assert v.imag.ae('-6.1481820666053078736e+1083') + assert erfc(-100+5j).ae(2) + assert (erfc(100+5j)*10**4335).ae(2.3973567853824133572 - 3.9339259530609420597j) + assert erfc(100+100j).ae(0.00065234366376857698698 - 0.0039357263629214118437j) + +def test_pdf(): + mp.dps = 15 + assert npdf(-inf) == 0 + assert npdf(inf) == 0 + assert npdf(5,0,2).ae(npdf(5+4,4,2)) + assert quadts(lambda x: npdf(x,-0.5,0.8), [-inf, inf]) == 1 + assert ncdf(0) == 0.5 + assert ncdf(3,3) == 0.5 + assert ncdf(-inf) == 0 + assert ncdf(inf) == 1 + assert ncdf(10) == 1 + # Verify that this is computed accurately + assert (ncdf(-10)*10**24).ae(7.619853024160526) + +def test_lambertw(): + mp.dps = 15 + assert lambertw(0) == 0 + assert lambertw(0+0j) == 0 + assert lambertw(inf) == inf + assert isnan(lambertw(nan)) + assert lambertw(inf,1).real == inf + assert lambertw(inf,1).imag.ae(2*pi) + assert lambertw(-inf,1).real == inf + assert lambertw(-inf,1).imag.ae(3*pi) + assert lambertw(0,-1) == -inf + assert lambertw(0,1) == -inf + assert lambertw(0,3) == -inf + assert lambertw(e).ae(1) + assert lambertw(1).ae(0.567143290409783873) + assert lambertw(-pi/2).ae(j*pi/2) + assert lambertw(-log(2)/2).ae(-log(2)) + assert lambertw(0.25).ae(0.203888354702240164) + assert lambertw(-0.25).ae(-0.357402956181388903) + assert lambertw(-1./10000,0).ae(-0.000100010001500266719) + assert lambertw(-0.25,-1).ae(-2.15329236411034965) + assert lambertw(0.25,-1).ae(-3.00899800997004620-4.07652978899159763j) + assert lambertw(-0.25,-1).ae(-2.15329236411034965) + assert lambertw(0.25,1).ae(-3.00899800997004620+4.07652978899159763j) + assert lambertw(-0.25,1).ae(-3.48973228422959210+7.41405453009603664j) + assert lambertw(-4).ae(0.67881197132094523+1.91195078174339937j) + assert lambertw(-4,1).ae(-0.66743107129800988+7.76827456802783084j) + assert lambertw(-4,-1).ae(0.67881197132094523-1.91195078174339937j) + assert lambertw(1000).ae(5.24960285240159623) + assert lambertw(1000,1).ae(4.91492239981054535+5.44652615979447070j) + assert lambertw(1000,-1).ae(4.91492239981054535-5.44652615979447070j) + assert lambertw(1000,5).ae(3.5010625305312892+29.9614548941181328j) + assert lambertw(3+4j).ae(1.281561806123775878+0.533095222020971071j) + assert lambertw(-0.4+0.4j).ae(-0.10396515323290657+0.61899273315171632j) + assert lambertw(3+4j,1).ae(-0.11691092896595324+5.61888039871282334j) + assert lambertw(3+4j,-1).ae(0.25856740686699742-3.85211668616143559j) + assert lambertw(-0.5,-1).ae(-0.794023632344689368-0.770111750510379110j) + assert lambertw(-1./10000,1).ae(-11.82350837248724344+6.80546081842002101j) + assert lambertw(-1./10000,-1).ae(-11.6671145325663544) + assert lambertw(-1./10000,-2).ae(-11.82350837248724344-6.80546081842002101j) + assert lambertw(-1./100000,4).ae(-14.9186890769540539+26.1856750178782046j) + assert lambertw(-1./100000,5).ae(-15.0931437726379218666+32.5525721210262290086j) + assert lambertw((2+j)/10).ae(0.173704503762911669+0.071781336752835511j) + assert lambertw((2+j)/10,1).ae(-3.21746028349820063+4.56175438896292539j) + assert lambertw((2+j)/10,-1).ae(-3.03781405002993088-3.53946629633505737j) + assert lambertw((2+j)/10,4).ae(-4.6878509692773249+23.8313630697683291j) + assert lambertw(-(2+j)/10).ae(-0.226933772515757933-0.164986470020154580j) + assert lambertw(-(2+j)/10,1).ae(-2.43569517046110001+0.76974067544756289j) + assert lambertw(-(2+j)/10,-1).ae(-3.54858738151989450-6.91627921869943589j) + assert lambertw(-(2+j)/10,4).ae(-4.5500846928118151+20.6672982215434637j) + mp.dps = 50 + assert 
lambertw(pi).ae('1.073658194796149172092178407024821347547745350410314531') + mp.dps = 15 + # Former bug in generated branch + assert lambertw(-0.5+0.002j).ae(-0.78917138132659918344 + 0.76743539379990327749j) + assert lambertw(-0.5-0.002j).ae(-0.78917138132659918344 - 0.76743539379990327749j) + assert lambertw(-0.448+0.4j).ae(-0.11855133765652382241 + 0.66570534313583423116j) + assert lambertw(-0.448-0.4j).ae(-0.11855133765652382241 - 0.66570534313583423116j) + assert lambertw(-0.65475+0.0001j).ae(-0.61053421111385310898+1.0396534993944097723803j) + # Huge branch index + w = lambertw(1,10**20) + assert w.real.ae(-47.889578926290259164) + assert w.imag.ae(6.2831853071795864769e+20) + +def test_lambertw_hard(): + def check(x,y): + y = convert(y) + type_ok = True + if isinstance(y, mpf): + type_ok = isinstance(x, mpf) + real_ok = abs(x.real-y.real) <= abs(y.real)*8*eps + imag_ok = abs(x.imag-y.imag) <= abs(y.imag)*8*eps + #print x, y, abs(x.real-y.real), abs(x.imag-y.imag) + return real_ok and imag_ok + # Evaluation near 0 + mp.dps = 15 + assert check(lambertw(1e-10), 9.999999999000000000e-11) + assert check(lambertw(-1e-10), -1.000000000100000000e-10) + assert check(lambertw(1e-10j), 9.999999999999999999733e-21 + 9.99999999999999999985e-11j) + assert check(lambertw(-1e-10j), 9.999999999999999999733e-21 - 9.99999999999999999985e-11j) + assert check(lambertw(1e-10,1), -26.303186778379041559 + 3.265093911703828397j) + assert check(lambertw(-1e-10,1), -26.326236166739163892 + 6.526183280686333315j) + assert check(lambertw(1e-10j,1), -26.312931726911421551 + 4.896366881798013421j) + assert check(lambertw(-1e-10j,1), -26.297238779529035066 + 1.632807161345576513j) + assert check(lambertw(1e-10,-1), -26.303186778379041559 - 3.265093911703828397j) + assert check(lambertw(-1e-10,-1), -26.295238819246925694) + assert check(lambertw(1e-10j,-1), -26.297238779529035028 - 1.6328071613455765135j) + assert check(lambertw(-1e-10j,-1), -26.312931726911421551 - 4.896366881798013421j) + # Test evaluation very close to the branch point -1/e + # on the -1, 0, and 1 branches + add = lambda x, y: fadd(x,y,exact=True) + sub = lambda x, y: fsub(x,y,exact=True) + addj = lambda x, y: fadd(x,fmul(y,1j,exact=True),exact=True) + subj = lambda x, y: fadd(x,fmul(y,-1j,exact=True),exact=True) + mp.dps = 1500 + a = -1/e + 10*eps + d3 = mpf('1e-3') + d10 = mpf('1e-10') + d20 = mpf('1e-20') + d40 = mpf('1e-40') + d80 = mpf('1e-80') + d300 = mpf('1e-300') + d1000 = mpf('1e-1000') + mp.dps = 15 + # ---- Branch 0 ---- + # -1/e + eps + assert check(lambertw(add(a,d3)), -0.92802015005456704876) + assert check(lambertw(add(a,d10)), -0.99997668374140088071) + assert check(lambertw(add(a,d20)), -0.99999999976683560186) + assert lambertw(add(a,d40)) == -1 + assert lambertw(add(a,d80)) == -1 + assert lambertw(add(a,d300)) == -1 + assert lambertw(add(a,d1000)) == -1 + # -1/e - eps + assert check(lambertw(sub(a,d3)), -0.99819016149860989001+0.07367191188934638577j) + assert check(lambertw(sub(a,d10)), -0.9999999998187812114595992+0.0000233164398140346109194j) + assert check(lambertw(sub(a,d20)), -0.99999999999999999998187+2.331643981597124203344e-10j) + assert check(lambertw(sub(a,d40)), -1.0+2.33164398159712420336e-20j) + assert check(lambertw(sub(a,d80)), -1.0+2.33164398159712420336e-40j) + assert check(lambertw(sub(a,d300)), -1.0+2.33164398159712420336e-150j) + assert check(lambertw(sub(a,d1000)), mpc(-1,'2.33164398159712420336e-500')) + # -1/e + eps*j + assert check(lambertw(addj(a,d3)), 
-0.94790387486938526634+0.05036819639190132490j) + assert check(lambertw(addj(a,d10)), -0.9999835127872943680999899+0.0000164870314895821225256j) + assert check(lambertw(addj(a,d20)), -0.999999999835127872929987+1.64872127051890935830e-10j) + assert check(lambertw(addj(a,d40)), -0.9999999999999999999835+1.6487212707001281468305e-20j) + assert check(lambertw(addj(a,d80)), -1.0 + 1.64872127070012814684865e-40j) + assert check(lambertw(addj(a,d300)), -1.0 + 1.64872127070012814684865e-150j) + assert check(lambertw(addj(a,d1000)), mpc(-1.0,'1.64872127070012814684865e-500')) + # -1/e - eps*j + assert check(lambertw(subj(a,d3)), -0.94790387486938526634-0.05036819639190132490j) + assert check(lambertw(subj(a,d10)), -0.9999835127872943680999899-0.0000164870314895821225256j) + assert check(lambertw(subj(a,d20)), -0.999999999835127872929987-1.64872127051890935830e-10j) + assert check(lambertw(subj(a,d40)), -0.9999999999999999999835-1.6487212707001281468305e-20j) + assert check(lambertw(subj(a,d80)), -1.0 - 1.64872127070012814684865e-40j) + assert check(lambertw(subj(a,d300)), -1.0 - 1.64872127070012814684865e-150j) + assert check(lambertw(subj(a,d1000)), mpc(-1.0,'-1.64872127070012814684865e-500')) + # ---- Branch 1 ---- + assert check(lambertw(addj(a,d3),1), -3.088501303219933378005990 + 7.458676867597474813950098j) + assert check(lambertw(addj(a,d80),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(addj(a,d300),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(addj(a,d1000),1), -3.088843015613043855957087 + 7.461489285654254556906117j) + assert check(lambertw(subj(a,d3),1), -1.0520914180450129534365906 + 0.0539925638125450525673175j) + assert check(lambertw(subj(a,d10),1), -1.0000164872127056318529390 + 0.000016487393927159250398333077j) + assert check(lambertw(subj(a,d20),1), -1.0000000001648721270700128 + 1.64872127088134693542628e-10j) + assert check(lambertw(subj(a,d40),1), -1.000000000000000000016487 + 1.64872127070012814686677e-20j) + assert check(lambertw(subj(a,d80),1), -1.0 + 1.64872127070012814684865e-40j) + assert check(lambertw(subj(a,d300),1), -1.0 + 1.64872127070012814684865e-150j) + assert check(lambertw(subj(a,d1000),1), mpc(-1.0, '1.64872127070012814684865e-500')) + # ---- Branch -1 ---- + # -1/e + eps + assert check(lambertw(add(a,d3),-1), -1.075608941186624989414945) + assert check(lambertw(add(a,d10),-1), -1.000023316621036696460620) + assert check(lambertw(add(a,d20),-1), -1.000000000233164398177834) + assert lambertw(add(a,d40),-1) == -1 + assert lambertw(add(a,d80),-1) == -1 + assert lambertw(add(a,d300),-1) == -1 + assert lambertw(add(a,d1000),-1) == -1 + # -1/e - eps + assert check(lambertw(sub(a,d3),-1), -0.99819016149860989001-0.07367191188934638577j) + assert check(lambertw(sub(a,d10),-1), -0.9999999998187812114595992-0.0000233164398140346109194j) + assert check(lambertw(sub(a,d20),-1), -0.99999999999999999998187-2.331643981597124203344e-10j) + assert check(lambertw(sub(a,d40),-1), -1.0-2.33164398159712420336e-20j) + assert check(lambertw(sub(a,d80),-1), -1.0-2.33164398159712420336e-40j) + assert check(lambertw(sub(a,d300),-1), -1.0-2.33164398159712420336e-150j) + assert check(lambertw(sub(a,d1000),-1), mpc(-1,'-2.33164398159712420336e-500')) + # -1/e + eps*j + assert check(lambertw(addj(a,d3),-1), -1.0520914180450129534365906 - 0.0539925638125450525673175j) + assert check(lambertw(addj(a,d10),-1), -1.0000164872127056318529390 - 0.0000164873939271592503983j) + assert 
check(lambertw(addj(a,d20),-1), -1.0000000001648721270700 - 1.64872127088134693542628e-10j) + assert check(lambertw(addj(a,d40),-1), -1.00000000000000000001648 - 1.6487212707001281468667726e-20j) + assert check(lambertw(addj(a,d80),-1), -1.0 - 1.64872127070012814684865e-40j) + assert check(lambertw(addj(a,d300),-1), -1.0 - 1.64872127070012814684865e-150j) + assert check(lambertw(addj(a,d1000),-1), mpc(-1.0,'-1.64872127070012814684865e-500')) + # -1/e - eps*j + assert check(lambertw(subj(a,d3),-1), -3.088501303219933378005990-7.458676867597474813950098j) + assert check(lambertw(subj(a,d10),-1), -3.088843015579260686911033-7.461489285372968780020716j) + assert check(lambertw(subj(a,d20),-1), -3.088843015613043855953708-7.461489285654254556877988j) + assert check(lambertw(subj(a,d40),-1), -3.088843015613043855957087-7.461489285654254556906117j) + assert check(lambertw(subj(a,d80),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + assert check(lambertw(subj(a,d300),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + assert check(lambertw(subj(a,d1000),-1), -3.088843015613043855957087 - 7.461489285654254556906117j) + # One more case, testing higher precision + mp.dps = 500 + x = -1/e + mpf('1e-13') + ans = "-0.99999926266961377166355784455394913638782494543377383"\ + "744978844374498153493943725364881490261187530235150668593869563"\ + "168276697689459394902153960200361935311512317183678882" + mp.dps = 15 + assert lambertw(x).ae(ans) + mp.dps = 50 + assert lambertw(x).ae(ans) + mp.dps = 150 + assert lambertw(x).ae(ans) + +def test_meijerg(): + mp.dps = 15 + assert meijerg([[2,3],[1]],[[0.5,2],[3,4]], 2.5).ae(4.2181028074787439386) + assert meijerg([[],[1+j]],[[1],[1]], 3+4j).ae(271.46290321152464592 - 703.03330399954820169j) + assert meijerg([[0.25],[1]],[[0.5],[2]],0) == 0 + assert meijerg([[0],[]],[[0,0,'1/3','2/3'], []], '2/27').ae(2.2019391389653314120) + # Verify 1/z series being used + assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -0.5).ae(-1.338096165935754898687431) + assert meijerg([[1-(-1)],[1-(-2.5)]], [[1-(-3)],[1-(-0.5)]], -2.0).ae(-1.338096165935754898687431) + assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -1).ae(-(pi+4)/(4*pi)) + a = 2.5 + b = 1.25 + for z in [mpf(0.25), mpf(2)]: + x1 = hyp1f1(a,b,z) + x2 = gamma(b)/gamma(a)*meijerg([[1-a],[]],[[0],[1-b]],-z) + x3 = gamma(b)/gamma(a)*meijerg([[1-0],[1-(1-b)]],[[1-(1-a)],[]],-1/z) + assert x1.ae(x2) + assert x1.ae(x3) + +def test_appellf1(): + mp.dps = 15 + assert appellf1(2,-2,1,1,2,3).ae(-1.75) + assert appellf1(2,1,-2,1,2,3).ae(-8) + assert appellf1(2,1,-2,1,0.5,0.25).ae(1.5) + assert appellf1(-2,1,3,2,3,3).ae(19) + assert appellf1(1,2,3,4,0.5,0.125).ae( 1.53843285792549786518) + +def test_coulomb(): + # Note: most tests are doctests + # Test for a bug: + mp.dps = 15 + assert coulombg(mpc(-5,0),2,3).ae(20.087729487721430394) + +def test_hyper_param_accuracy(): + mp.dps = 15 + As = [n+1e-10 for n in range(-5,-1)] + Bs = [n+1e-10 for n in range(-12,-5)] + assert hyper(As,Bs,10).ae(-381757055858.652671927) + assert legenp(0.5, 100, 0.25).ae(-2.4124576567211311755e+144) + assert (hyp1f1(1000,1,-100)*10**24).ae(5.2589445437370169113) + assert (hyp2f1(10, -900, 10.5, 0.99)*10**24).ae(1.9185370579660768203) + assert (hyp2f1(1000,1.5,-3.5,-1.5)*10**385).ae(-2.7367529051334000764) + assert hyp2f1(-5, 10, 3, 0.5, zeroprec=500) == 0 + assert (hyp1f1(-10000, 1000, 100)*10**424).ae(-3.1046080515824859974) + assert (hyp2f1(1000,1.5,-3.5,-0.75,maxterms=100000)*10**231).ae(-4.0534790813913998643) + assert legenp(2, 3, 
0.25) == 0
+    pytest.raises(ValueError, lambda: hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3]))
+    assert hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3], infprec=200) == inf
+    assert meijerg([[],[]],[[0,0,0,0],[]],0.1).ae(1.5680822343832351418)
+    assert (besselk(400,400)*10**94).ae(1.4387057277018550583)
+    mp.dps = 5
+    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
+    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
+    mp.dps = 15
+    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
+    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
+    assert hyp0f1(fadd(-20,'1e-100',exact=True), 0.25).ae(1.85014429040102783e+49)
+    assert hyp0f1((-20*10**100+1, 10**100), 0.25).ae(1.85014429040102783e+49)
+
+def test_hypercomb_zero_pow():
+    # check that 0^0 = 1
+    assert hypercomb(lambda a: (([0],[a],[],[],[],[],0),), [0]) == 1
+    assert meijerg([[-1.5],[]],[[0],[-0.75]],0).ae(1.4464090846320771425)
+
+def test_spherharm():
+    mp.dps = 15
+    t = 0.5; r = 0.25
+    assert spherharm(0,0,t,r).ae(0.28209479177387814347)
+    assert spherharm(1,-1,t,r).ae(0.16048941205971996369 - 0.04097967481096344271j)
+    assert spherharm(1,0,t,r).ae(0.42878904414183579379)
+    assert spherharm(1,1,t,r).ae(-0.16048941205971996369 - 0.04097967481096344271j)
+    assert spherharm(2,-2,t,r).ae(0.077915886919031181734 - 0.042565643022253962264j)
+    assert spherharm(2,-1,t,r).ae(0.31493387233497459884 - 0.08041582001959297689j)
+    assert spherharm(2,0,t,r).ae(0.41330596756220761898)
+    assert spherharm(2,1,t,r).ae(-0.31493387233497459884 - 0.08041582001959297689j)
+    assert spherharm(2,2,t,r).ae(0.077915886919031181734 + 0.042565643022253962264j)
+    assert spherharm(3,-3,t,r).ae(0.033640236589690881646 - 0.031339125318637082197j)
+    assert spherharm(3,-2,t,r).ae(0.18091018743101461963 - 0.09883168583167010241j)
+    assert spherharm(3,-1,t,r).ae(0.42796713930907320351 - 0.10927795157064962317j)
+    assert spherharm(3,0,t,r).ae(0.27861659336351639787)
+    assert spherharm(3,1,t,r).ae(-0.42796713930907320351 - 0.10927795157064962317j)
+    assert spherharm(3,2,t,r).ae(0.18091018743101461963 + 0.09883168583167010241j)
+    assert spherharm(3,3,t,r).ae(-0.033640236589690881646 - 0.031339125318637082197j)
+    assert spherharm(0,-1,t,r) == 0
+    assert spherharm(0,-2,t,r) == 0
+    assert spherharm(0,1,t,r) == 0
+    assert spherharm(0,2,t,r) == 0
+    assert spherharm(1,2,t,r) == 0
+    assert spherharm(1,3,t,r) == 0
+    assert spherharm(1,-2,t,r) == 0
+    assert spherharm(1,-3,t,r) == 0
+    assert spherharm(2,3,t,r) == 0
+    assert spherharm(2,4,t,r) == 0
+    assert spherharm(2,-3,t,r) == 0
+    assert spherharm(2,-4,t,r) == 0
+    assert spherharm(3,4.5,0.5,0.25).ae(-22.831053442240790148 + 10.910526059510013757j)
+    assert spherharm(2+3j, 1-j, 1+j, 3+4j).ae(-2.6582752037810116935 - 1.0909214905642160211j)
+    assert spherharm(-6,2.5,t,r).ae(0.39383644983851448178 + 0.28414687085358299021j)
+    assert spherharm(-3.5, 3, 0.5, 0.25).ae(0.014516852987544698924 - 0.015582769591477628495j)
+    assert spherharm(-3, 3, 0.5, 0.25) == 0
+    assert spherharm(-6, 3, 0.5, 0.25).ae(-0.16544349818782275459 - 0.15412657723253924562j)
+    assert spherharm(-6, 1.5, 0.5, 0.25).ae(0.032208193499767402477 + 0.012678000924063664921j)
+    assert spherharm(3,0,0,1).ae(0.74635266518023078283)
+    assert spherharm(3,-2,0,1) == 0
+    assert spherharm(3,-2,1,1).ae(-0.16270707338254028971 - 0.35552144137546777097j)
+
+def test_qfunctions():
+    mp.dps = 15
+    assert qp(2,3,100).ae('2.7291482267247332183e2391')
+
+def test_issue_239():
+    mp.prec = 150
+    x = ldexp(2476979795053773,-52)
+    
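In `test_issue_239`, `ldexp(2476979795053773, -52)` reconstructs the IEEE-754 double nearest to 0.55, i.e. the exact value that the literal `0.55` in the `betainc` assertion denotes. A minimal sketch of the idiom, assuming only the public mpmath API:

```python
# A sketch, not part of the vendored test file: 2476979795053773/2**52
# is the IEEE-754 double closest to 0.55.
from mpmath import mp, ldexp, mpf

mp.prec = 150
x = ldexp(2476979795053773, -52)
assert x == mpf(0.55)      # mpf converts the Python float 0.55 exactly
assert x != mpf('0.55')    # the true decimal 0.55 differs at 150 bits
```

Whether `x` is consumed later or is a leftover from the original issue reproduction, the value it encodes is exactly `float(0.55)`, pinned bit for bit regardless of the working precision.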
assert betainc(206, 385, 0, 0.55, 1).ae('0.99999999999999999999996570910644857895771110649954') + mp.dps = 15 + pytest.raises(ValueError, lambda: hyp2f1(-5,5,0.5,0.5)) + +# Extra stress testing for Bessel functions +# Reference zeros generated with the aid of scipy.special +# jn_zero, jnp_zero, yn_zero, ynp_zero + +V = 15 +M = 15 + +jn_small_zeros = \ +[[2.4048255576957728, + 5.5200781102863106, + 8.6537279129110122, + 11.791534439014282, + 14.930917708487786, + 18.071063967910923, + 21.211636629879259, + 24.352471530749303, + 27.493479132040255, + 30.634606468431975, + 33.775820213573569, + 36.917098353664044, + 40.058425764628239, + 43.19979171317673, + 46.341188371661814], + [3.8317059702075123, + 7.0155866698156188, + 10.173468135062722, + 13.323691936314223, + 16.470630050877633, + 19.615858510468242, + 22.760084380592772, + 25.903672087618383, + 29.046828534916855, + 32.189679910974404, + 35.332307550083865, + 38.474766234771615, + 41.617094212814451, + 44.759318997652822, + 47.901460887185447], + [5.1356223018406826, + 8.4172441403998649, + 11.619841172149059, + 14.795951782351261, + 17.959819494987826, + 21.116997053021846, + 24.270112313573103, + 27.420573549984557, + 30.569204495516397, + 33.7165195092227, + 36.86285651128381, + 40.008446733478192, + 43.153453778371463, + 46.297996677236919, + 49.442164110416873], + [6.3801618959239835, + 9.7610231299816697, + 13.015200721698434, + 16.223466160318768, + 19.409415226435012, + 22.582729593104442, + 25.748166699294978, + 28.908350780921758, + 32.064852407097709, + 35.218670738610115, + 38.370472434756944, + 41.520719670406776, + 44.669743116617253, + 47.817785691533302, + 50.965029906205183], + [7.5883424345038044, + 11.064709488501185, + 14.37253667161759, + 17.615966049804833, + 20.826932956962388, + 24.01901952477111, + 27.199087765981251, + 30.371007667117247, + 33.537137711819223, + 36.699001128744649, + 39.857627302180889, + 43.01373772335443, + 46.167853512924375, + 49.320360686390272, + 52.471551398458023], + [8.771483815959954, + 12.338604197466944, + 15.700174079711671, + 18.980133875179921, + 22.217799896561268, + 25.430341154222704, + 28.626618307291138, + 31.811716724047763, + 34.988781294559295, + 38.159868561967132, + 41.326383254047406, + 44.489319123219673, + 47.649399806697054, + 50.80716520300633, + 53.963026558378149], + [9.9361095242176849, + 13.589290170541217, + 17.003819667816014, + 20.320789213566506, + 23.58608443558139, + 26.820151983411405, + 30.033722386570469, + 33.233041762847123, + 36.422019668258457, + 39.603239416075404, + 42.778481613199507, + 45.949015998042603, + 49.11577372476426, + 52.279453903601052, + 55.440592068853149], + [11.086370019245084, + 14.821268727013171, + 18.287582832481726, + 21.641541019848401, + 24.934927887673022, + 28.191188459483199, + 31.42279419226558, + 34.637089352069324, + 37.838717382853611, + 41.030773691585537, + 44.21540850526126, + 47.394165755570512, + 50.568184679795566, + 53.738325371963291, + 56.905249991978781], + [12.225092264004655, + 16.037774190887709, + 19.554536430997055, + 22.94517313187462, + 26.266814641176644, + 29.54565967099855, + 32.795800037341462, + 36.025615063869571, + 39.240447995178135, + 42.443887743273558, + 45.638444182199141, + 48.825930381553857, + 52.007691456686903, + 55.184747939289049, + 58.357889025269694], + [13.354300477435331, + 17.241220382489128, + 20.807047789264107, + 24.233885257750552, + 27.583748963573006, + 30.885378967696675, + 34.154377923855096, + 37.400099977156589, + 40.628553718964528, + 43.843801420337347, + 
47.048700737654032, + 50.245326955305383, + 53.435227157042058, + 56.619580266508436, + 59.799301630960228], + [14.475500686554541, + 18.433463666966583, + 22.046985364697802, + 25.509450554182826, + 28.887375063530457, + 32.211856199712731, + 35.499909205373851, + 38.761807017881651, + 42.004190236671805, + 45.231574103535045, + 48.447151387269394, + 51.653251668165858, + 54.851619075963349, + 58.043587928232478, + 61.230197977292681], + [15.589847884455485, + 19.61596690396692, + 23.275853726263409, + 26.773322545509539, + 30.17906117878486, + 33.526364075588624, + 36.833571341894905, + 40.111823270954241, + 43.368360947521711, + 46.608132676274944, + 49.834653510396724, + 53.050498959135054, + 56.257604715114484, + 59.457456908388002, + 62.651217388202912], + [16.698249933848246, + 20.789906360078443, + 24.494885043881354, + 28.026709949973129, + 31.45996003531804, + 34.829986990290238, + 38.156377504681354, + 41.451092307939681, + 44.721943543191147, + 47.974293531269048, + 51.211967004101068, + 54.437776928325074, + 57.653844811906946, + 60.8618046824805, + 64.062937824850136], + [17.801435153282442, + 21.95624406783631, + 25.705103053924724, + 29.270630441874802, + 32.731053310978403, + 36.123657666448762, + 39.469206825243883, + 42.780439265447158, + 46.06571091157561, + 49.330780096443524, + 52.579769064383396, + 55.815719876305778, + 59.040934037249271, + 62.257189393731728, + 65.465883797232125], + [18.899997953174024, + 23.115778347252756, + 26.907368976182104, + 30.505950163896036, + 33.993184984781542, + 37.408185128639695, + 40.772827853501868, + 44.100590565798301, + 47.400347780543231, + 50.678236946479898, + 53.93866620912693, + 57.184898598119301, + 60.419409852130297, + 63.644117508962281, + 66.860533012260103]] + +jnp_small_zeros = \ +[[0.0, + 3.8317059702075123, + 7.0155866698156188, + 10.173468135062722, + 13.323691936314223, + 16.470630050877633, + 19.615858510468242, + 22.760084380592772, + 25.903672087618383, + 29.046828534916855, + 32.189679910974404, + 35.332307550083865, + 38.474766234771615, + 41.617094212814451, + 44.759318997652822], + [1.8411837813406593, + 5.3314427735250326, + 8.5363163663462858, + 11.706004902592064, + 14.863588633909033, + 18.015527862681804, + 21.16436985918879, + 24.311326857210776, + 27.457050571059246, + 30.601922972669094, + 33.746182898667383, + 36.889987409236811, + 40.033444053350675, + 43.176628965448822, + 46.319597561173912], + [3.0542369282271403, + 6.7061331941584591, + 9.9694678230875958, + 13.170370856016123, + 16.347522318321783, + 19.512912782488205, + 22.671581772477426, + 25.826037141785263, + 28.977672772993679, + 32.127327020443474, + 35.275535050674691, + 38.422654817555906, + 41.568934936074314, + 44.714553532819734, + 47.859641607992093], + [4.2011889412105285, + 8.0152365983759522, + 11.345924310743006, + 14.585848286167028, + 17.78874786606647, + 20.9724769365377, + 24.144897432909265, + 27.310057930204349, + 30.470268806290424, + 33.626949182796679, + 36.781020675464386, + 39.933108623659488, + 43.083652662375079, + 46.232971081836478, + 49.381300092370349], + [5.3175531260839944, + 9.2823962852416123, + 12.681908442638891, + 15.964107037731551, + 19.196028800048905, + 22.401032267689004, + 25.589759681386733, + 28.767836217666503, + 31.938539340972783, + 35.103916677346764, + 38.265316987088158, + 41.423666498500732, + 44.579623137359257, + 47.733667523865744, + 50.886159153182682], + [6.4156163757002403, + 10.519860873772308, + 13.9871886301403, + 17.312842487884625, + 20.575514521386888, + 
23.803581476593863, + 27.01030789777772, + 30.20284907898166, + 33.385443901010121, + 36.560777686880356, + 39.730640230067416, + 42.896273163494417, + 46.058566273567043, + 49.218174614666636, + 52.375591529563596], + [7.501266144684147, + 11.734935953042708, + 15.268181461097873, + 18.637443009666202, + 21.931715017802236, + 25.183925599499626, + 28.409776362510085, + 31.617875716105035, + 34.81339298429743, + 37.999640897715301, + 41.178849474321413, + 44.352579199070217, + 47.521956905768113, + 50.687817781723741, + 53.85079463676896], + [8.5778364897140741, + 12.932386237089576, + 16.529365884366944, + 19.941853366527342, + 23.268052926457571, + 26.545032061823576, + 29.790748583196614, + 33.015178641375142, + 36.224380548787162, + 39.422274578939259, + 42.611522172286684, + 45.793999658055002, + 48.971070951900596, + 52.143752969301988, + 55.312820330403446], + [9.6474216519972168, + 14.115518907894618, + 17.774012366915256, + 21.229062622853124, + 24.587197486317681, + 27.889269427955092, + 31.155326556188325, + 34.39662855427218, + 37.620078044197086, + 40.830178681822041, + 44.030010337966153, + 47.221758471887113, + 50.407020967034367, + 53.586995435398319, + 56.762598475105272], + [10.711433970699945, + 15.28673766733295, + 19.004593537946053, + 22.501398726777283, + 25.891277276839136, + 29.218563499936081, + 32.505247352375523, + 35.763792928808799, + 39.001902811514218, + 42.224638430753279, + 45.435483097475542, + 48.636922645305525, + 51.830783925834728, + 55.01844255063594, + 58.200955824859509], + [11.770876674955582, + 16.447852748486498, + 20.223031412681701, + 23.760715860327448, + 27.182021527190532, + 30.534504754007074, + 33.841965775135715, + 37.118000423665604, + 40.371068905333891, + 43.606764901379516, + 46.828959446564562, + 50.040428970943456, + 53.243223214220535, + 56.438892058982552, + 59.628631306921512], + [12.826491228033465, + 17.600266557468326, + 21.430854238060294, + 25.008518704644261, + 28.460857279654847, + 31.838424458616998, + 35.166714427392629, + 38.460388720328256, + 41.728625562624312, + 44.977526250903469, + 48.211333836373288, + 51.433105171422278, + 54.645106240447105, + 57.849056857839799, + 61.046288512821078], + [13.878843069697276, + 18.745090916814406, + 22.629300302835503, + 26.246047773946584, + 29.72897816891134, + 33.131449953571661, + 36.480548302231658, + 39.791940718940855, + 43.075486800191012, + 46.337772104541405, + 49.583396417633095, + 52.815686826850452, + 56.037118687012179, + 59.249577075517968, + 62.454525995970462], + [14.928374492964716, + 19.88322436109951, + 23.81938909003628, + 27.474339750968247, + 30.987394331665278, + 34.414545662167183, + 37.784378506209499, + 41.113512376883377, + 44.412454519229281, + 47.688252845993366, + 50.945849245830813, + 54.188831071035124, + 57.419876154678179, + 60.641030026538746, + 63.853885828967512], + [15.975438807484321, + 21.015404934568315, + 25.001971500138194, + 28.694271223110755, + 32.236969407878118, + 35.688544091185301, + 39.078998185245057, + 42.425854432866141, + 45.740236776624833, + 49.029635055514276, + 52.299319390331728, + 55.553127779547459, + 58.793933759028134, + 62.02393848337554, + 65.244860767043859]] + +yn_small_zeros = \ +[[0.89357696627916752, + 3.9576784193148579, + 7.0860510603017727, + 10.222345043496417, + 13.361097473872763, + 16.500922441528091, + 19.64130970088794, + 22.782028047291559, + 25.922957653180923, + 29.064030252728398, + 32.205204116493281, + 35.346452305214321, + 38.487756653081537, + 41.629104466213808, + 44.770486607221993], + 
[2.197141326031017, + 5.4296810407941351, + 8.5960058683311689, + 11.749154830839881, + 14.897442128336725, + 18.043402276727856, + 21.188068934142213, + 24.331942571356912, + 27.475294980449224, + 30.618286491641115, + 33.761017796109326, + 36.90355531614295, + 40.045944640266876, + 43.188218097393211, + 46.330399250701687], + [3.3842417671495935, + 6.7938075132682675, + 10.023477979360038, + 13.209986710206416, + 16.378966558947457, + 19.539039990286384, + 22.69395593890929, + 25.845613720902269, + 28.995080395650151, + 32.143002257627551, + 35.289793869635804, + 38.435733485446343, + 41.581014867297885, + 44.725777117640461, + 47.870122696676504], + [4.5270246611496439, + 8.0975537628604907, + 11.396466739595867, + 14.623077742393873, + 17.81845523294552, + 20.997284754187761, + 24.166235758581828, + 27.328799850405162, + 30.486989604098659, + 33.642049384702463, + 36.794791029185579, + 39.945767226378749, + 43.095367507846703, + 46.2438744334407, + 49.391498015725107], + [5.6451478942208959, + 9.3616206152445429, + 12.730144474090465, + 15.999627085382479, + 19.22442895931681, + 22.424810599698521, + 25.610267054939328, + 28.785893657666548, + 31.954686680031668, + 35.118529525584828, + 38.278668089521758, + 41.435960629910073, + 44.591018225353424, + 47.744288086361052, + 50.896105199722123], + [6.7471838248710219, + 10.597176726782031, + 14.033804104911233, + 17.347086393228382, + 20.602899017175335, + 23.826536030287532, + 27.030134937138834, + 30.220335654231385, + 33.401105611047908, + 36.574972486670962, + 39.743627733020277, + 42.908248189569535, + 46.069679073215439, + 49.228543693445843, + 52.385312123112282], + [7.8377378223268716, + 11.811037107609447, + 15.313615118517857, + 18.670704965906724, + 21.958290897126571, + 25.206207715021249, + 28.429037095235496, + 31.634879502950644, + 34.828638524084437, + 38.013473399691765, + 41.19151880917741, + 44.364272633271975, + 47.53281875312084, + 50.697961822183806, + 53.860312300118388], + [8.919605734873789, + 13.007711435388313, + 16.573915129085334, + 19.974342312352426, + 23.293972585596648, + 26.5667563757203, + 29.809531451608321, + 33.031769327150685, + 36.239265816598239, + 39.435790312675323, + 42.623910919472727, + 45.805442883111651, + 48.981708325514764, + 52.153694518185572, + 55.322154420959698], + [9.9946283820824834, + 14.190361295800141, + 17.817887841179873, + 21.26093227125945, + 24.612576377421522, + 27.910524883974868, + 31.173701563441602, + 34.412862242025045, + 37.634648706110989, + 40.843415321050884, + 44.04214994542435, + 47.232978012841169, + 50.417456447370186, + 53.596753874948731, + 56.771765754432457], + [11.064090256031013, + 15.361301343575925, + 19.047949646361388, + 22.532765416313869, + 25.91620496332662, + 29.2394205079349, + 32.523270869465881, + 35.779715464475261, + 39.016196664616095, + 42.237627509803703, + 45.4474001519274, + 48.647941127433196, + 51.841036928216499, + 55.028034667184916, + 58.209970905250097], + [12.128927704415439, + 16.522284394784426, + 20.265984501212254, + 23.791669719454272, + 27.206568881574774, + 30.555020011020762, + 33.859683872746356, + 37.133649760307504, + 40.385117593813002, + 43.619533085646856, + 46.840676630553575, + 50.051265851897857, + 53.253310556711732, + 56.448332488918971, + 59.637507005589829], + [13.189846995683845, + 17.674674253171487, + 21.473493977824902, + 25.03913093040942, + 28.485081336558058, + 31.858644293774859, + 35.184165245422787, + 38.475796636190897, + 41.742455848758449, + 44.990096293791186, + 48.222870660068338, + 
51.443777308699826, + 54.655042589416311, + 57.858358441436511, + 61.055036135780528], + [14.247395665073945, + 18.819555894710682, + 22.671697117872794, + 26.276375544903892, + 29.752925495549038, + 33.151412708998983, + 36.497763772987645, + 39.807134090704376, + 43.089121522203808, + 46.350163579538652, + 49.594769786270069, + 52.82620892320143, + 56.046916910756961, + 59.258751140598783, + 62.463155567737854], + [15.30200785858925, + 19.957808654258601, + 23.861599172945054, + 27.504429642227545, + 31.011103429019229, + 34.434283425782942, + 37.801385632318459, + 41.128514139788358, + 44.425913324440663, + 47.700482714581842, + 50.957073905278458, + 54.199216028087261, + 57.429547607017405, + 60.65008661807661, + 63.862406280068586], + [16.354034360047551, + 21.090156519983806, + 25.044040298785627, + 28.724161640881914, + 32.260472459522644, + 35.708083982611664, + 39.095820003878235, + 42.440684315990936, + 45.75353669045622, + 49.041718113283529, + 52.310408280968073, + 55.56338698149062, + 58.803488508906895, + 62.032886550960831, + 65.253280088312461]] + +ynp_small_zeros = \ +[[2.197141326031017, + 5.4296810407941351, + 8.5960058683311689, + 11.749154830839881, + 14.897442128336725, + 18.043402276727856, + 21.188068934142213, + 24.331942571356912, + 27.475294980449224, + 30.618286491641115, + 33.761017796109326, + 36.90355531614295, + 40.045944640266876, + 43.188218097393211, + 46.330399250701687], + [3.6830228565851777, + 6.9414999536541757, + 10.123404655436613, + 13.285758156782854, + 16.440058007293282, + 19.590241756629495, + 22.738034717396327, + 25.884314618788867, + 29.029575819372535, + 32.174118233366201, + 35.318134458192094, + 38.461753870997549, + 41.605066618873108, + 44.74813744908079, + 47.891014070791065], + [5.0025829314460639, + 8.3507247014130795, + 11.574195465217647, + 14.760909306207676, + 17.931285939466855, + 21.092894504412739, + 24.249231678519058, + 27.402145837145258, + 30.552708880564553, + 33.70158627151572, + 36.849213419846257, + 39.995887376143356, + 43.141817835750686, + 46.287157097544201, + 49.432018469138281], + [6.2536332084598136, + 9.6987879841487711, + 12.972409052292216, + 16.19044719506921, + 19.38238844973613, + 22.559791857764261, + 25.728213194724094, + 28.890678419054777, + 32.048984005266337, + 35.204266606440635, + 38.357281675961019, + 41.508551443818436, + 44.658448731963676, + 47.807246956681162, + 50.95515126455207], + [7.4649217367571329, + 11.005169149809189, + 14.3317235192331, + 17.58443601710272, + 20.801062338411128, + 23.997004122902644, + 27.179886689853435, + 30.353960608554323, + 33.521797098666792, + 36.685048382072301, + 39.844826969405863, + 43.001910515625288, + 46.15685955107263, + 49.310088614282257, + 52.461911043685864], + [8.6495562436971983, + 12.280868725807848, + 15.660799304540377, + 18.949739756016503, + 22.192841809428241, + 25.409072788867674, + 28.608039283077593, + 31.795195353138159, + 34.973890634255288, + 38.14630522169358, + 41.313923188794905, + 44.477791768537617, + 47.638672065035628, + 50.797131066967842, + 53.953600129601663], + [9.8147970120105779, + 13.532811875789828, + 16.965526446046053, + 20.291285512443867, + 23.56186260680065, + 26.799499736027237, + 30.015665481543419, + 33.216968050039509, + 36.407516858984748, + 39.590015243560459, + 42.766320595957378, + 45.937754257017323, + 49.105283450953203, + 52.269633324547373, + 55.431358715604255], + [10.965152105242974, + 14.765687379508912, + 18.250123150217555, + 21.612750053384621, + 24.911310600813573, + 28.171051927637585, + 
31.40518108895689, + 34.621401012564177, + 37.824552065973114, + 41.017847386464902, + 44.203512240871601, + 47.3831408366063, + 50.557907466622796, + 53.728697478957026, + 56.896191727313342], + [12.103641941939539, + 15.982840905145284, + 19.517731005559611, + 22.916962141504605, + 26.243700855690533, + 29.525960140695407, + 32.778568197561124, + 36.010261572392516, + 39.226578757802172, + 42.43122493258747, + 45.626783824134354, + 48.815117837929515, + 51.997606404328863, + 55.175294723956816, + 58.348990221754937], + [13.232403808592215, + 17.186756572616758, + 20.770762917490496, + 24.206152448722253, + 27.561059462697153, + 30.866053571250639, + 34.137476603379774, + 37.385039772270268, + 40.614946085165892, + 43.831373184731238, + 47.037251786726299, + 50.234705848765229, + 53.425316228549359, + 56.610286079882087, + 59.790548623216652], + [14.35301374369987, + 18.379337301642568, + 22.011118775283494, + 25.482116178696707, + 28.865046588695164, + 32.192853922166294, + 35.483296655830277, + 38.747005493021857, + 41.990815194320955, + 45.219355876831731, + 48.435892856078888, + 51.642803925173029, + 54.84186659475857, + 58.034439083840155, + 61.221578745109862], + [15.466672066554263, + 19.562077985759503, + 23.240325531101082, + 26.746322986645901, + 30.157042415639891, + 33.507642948240263, + 36.817212798512775, + 40.097251300178642, + 43.355193847719752, + 46.596103410173672, + 49.823567279972794, + 53.040208868780832, + 56.247996968470062, + 59.448441365714251, + 62.642721301357187], + [16.574317035530872, + 20.73617763753932, + 24.459631728238804, + 27.999993668839644, + 31.438208790267783, + 34.811512070805535, + 38.140243708611251, + 41.436725143893739, + 44.708963264433333, + 47.962435051891027, + 51.201037321915983, + 54.427630745992975, + 57.644369734615238, + 60.852911791989989, + 64.054555435720397], + [17.676697936439624, + 21.9026148697762, + 25.670073356263225, + 29.244155124266438, + 32.709534477396028, + 36.105399554497548, + 39.453272918267025, + 42.766255701958017, + 46.052899215578358, + 49.319076602061401, + 52.568982147952547, + 55.805705507386287, + 59.031580956740466, + 62.248409689597653, + 65.457606670836759], + [18.774423978290318, + 23.06220035979272, + 26.872520985976736, + 30.479680663499762, + 33.971869047372436, + 37.390118854896324, + 40.757072537673599, + 44.086572292170345, + 47.387688809191869, + 50.66667461073936, + 53.928009929563275, + 57.175005343085052, + 60.410169281219877, + 63.635442539153021, + 66.85235358587768]] + +@pytest.mark.slow +def test_bessel_zeros_extra(): + mp.dps = 15 + for v in range(V): + for m in range(1,M+1): + print(v, m, "of", V, M) + # Twice to test cache (if used) + assert besseljzero(v,m).ae(jn_small_zeros[v][m-1]) + assert besseljzero(v,m).ae(jn_small_zeros[v][m-1]) + assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1]) + assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1]) + assert besselyzero(v,m).ae(yn_small_zeros[v][m-1]) + assert besselyzero(v,m).ae(yn_small_zeros[v][m-1]) + assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1]) + assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1]) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_gammazeta.py b/MLPY/Lib/site-packages/mpmath/tests/test_gammazeta.py new file mode 100644 index 0000000000000000000000000000000000000000..6a18a7964d746561dfd5f81177cd78ccc46d2a5d --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_gammazeta.py @@ -0,0 +1,698 @@ +from mpmath import * +from mpmath.libmp import round_up, from_float, mpf_zeta_int + +def test_zeta_int_bug(): 
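+    # mpf_zeta_int is the low-level zeta for integer arguments; since
+    # zeta(0) = -1/2, mpf_zeta_int(0, prec) must equal from_float(-0.5).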
+ assert mpf_zeta_int(0, 10) == from_float(-0.5) + +def test_bernoulli(): + assert bernfrac(0) == (1,1) + assert bernfrac(1) == (-1,2) + assert bernfrac(2) == (1,6) + assert bernfrac(3) == (0,1) + assert bernfrac(4) == (-1,30) + assert bernfrac(5) == (0,1) + assert bernfrac(6) == (1,42) + assert bernfrac(8) == (-1,30) + assert bernfrac(10) == (5,66) + assert bernfrac(12) == (-691,2730) + assert bernfrac(18) == (43867,798) + p, q = bernfrac(228) + assert p % 10**10 == 164918161 + assert q == 625170 + p, q = bernfrac(1000) + assert p % 10**10 == 7950421099 + assert q == 342999030 + mp.dps = 15 + assert bernoulli(0) == 1 + assert bernoulli(1) == -0.5 + assert bernoulli(2).ae(1./6) + assert bernoulli(3) == 0 + assert bernoulli(4).ae(-1./30) + assert bernoulli(5) == 0 + assert bernoulli(6).ae(1./42) + assert str(bernoulli(10)) == '0.0757575757575758' + assert str(bernoulli(234)) == '7.62772793964344e+267' + assert str(bernoulli(10**5)) == '-5.82229431461335e+376755' + assert str(bernoulli(10**8+2)) == '1.19570355039953e+676752584' + + mp.dps = 50 + assert str(bernoulli(10)) == '0.075757575757575757575757575757575757575757575757576' + assert str(bernoulli(234)) == '7.6277279396434392486994969020496121553385863373331e+267' + assert str(bernoulli(10**5)) == '-5.8222943146133508236497045360612887555320691004308e+376755' + assert str(bernoulli(10**8+2)) == '1.1957035503995297272263047884604346914602088317782e+676752584' + + mp.dps = 1000 + assert bernoulli(10).ae(mpf(5)/66) + + mp.dps = 50000 + assert bernoulli(10).ae(mpf(5)/66) + + mp.dps = 15 + +def test_bernpoly_eulerpoly(): + mp.dps = 15 + assert bernpoly(0,-1).ae(1) + assert bernpoly(0,0).ae(1) + assert bernpoly(0,'1/2').ae(1) + assert bernpoly(0,'3/4').ae(1) + assert bernpoly(0,1).ae(1) + assert bernpoly(0,2).ae(1) + assert bernpoly(1,-1).ae('-3/2') + assert bernpoly(1,0).ae('-1/2') + assert bernpoly(1,'1/2').ae(0) + assert bernpoly(1,'3/4').ae('1/4') + assert bernpoly(1,1).ae('1/2') + assert bernpoly(1,2).ae('3/2') + assert bernpoly(2,-1).ae('13/6') + assert bernpoly(2,0).ae('1/6') + assert bernpoly(2,'1/2').ae('-1/12') + assert bernpoly(2,'3/4').ae('-1/48') + assert bernpoly(2,1).ae('1/6') + assert bernpoly(2,2).ae('13/6') + assert bernpoly(3,-1).ae(-3) + assert bernpoly(3,0).ae(0) + assert bernpoly(3,'1/2').ae(0) + assert bernpoly(3,'3/4').ae('-3/64') + assert bernpoly(3,1).ae(0) + assert bernpoly(3,2).ae(3) + assert bernpoly(4,-1).ae('119/30') + assert bernpoly(4,0).ae('-1/30') + assert bernpoly(4,'1/2').ae('7/240') + assert bernpoly(4,'3/4').ae('7/3840') + assert bernpoly(4,1).ae('-1/30') + assert bernpoly(4,2).ae('119/30') + assert bernpoly(5,-1).ae(-5) + assert bernpoly(5,0).ae(0) + assert bernpoly(5,'1/2').ae(0) + assert bernpoly(5,'3/4').ae('25/1024') + assert bernpoly(5,1).ae(0) + assert bernpoly(5,2).ae(5) + assert bernpoly(10,-1).ae('665/66') + assert bernpoly(10,0).ae('5/66') + assert bernpoly(10,'1/2').ae('-2555/33792') + assert bernpoly(10,'3/4').ae('-2555/34603008') + assert bernpoly(10,1).ae('5/66') + assert bernpoly(10,2).ae('665/66') + assert bernpoly(11,-1).ae(-11) + assert bernpoly(11,0).ae(0) + assert bernpoly(11,'1/2').ae(0) + assert bernpoly(11,'3/4').ae('-555731/4194304') + assert bernpoly(11,1).ae(0) + assert bernpoly(11,2).ae(11) + assert eulerpoly(0,-1).ae(1) + assert eulerpoly(0,0).ae(1) + assert eulerpoly(0,'1/2').ae(1) + assert eulerpoly(0,'3/4').ae(1) + assert eulerpoly(0,1).ae(1) + assert eulerpoly(0,2).ae(1) + assert eulerpoly(1,-1).ae('-3/2') + assert eulerpoly(1,0).ae('-1/2') + assert 
eulerpoly(1,'1/2').ae(0) + assert eulerpoly(1,'3/4').ae('1/4') + assert eulerpoly(1,1).ae('1/2') + assert eulerpoly(1,2).ae('3/2') + assert eulerpoly(2,-1).ae(2) + assert eulerpoly(2,0).ae(0) + assert eulerpoly(2,'1/2').ae('-1/4') + assert eulerpoly(2,'3/4').ae('-3/16') + assert eulerpoly(2,1).ae(0) + assert eulerpoly(2,2).ae(2) + assert eulerpoly(3,-1).ae('-9/4') + assert eulerpoly(3,0).ae('1/4') + assert eulerpoly(3,'1/2').ae(0) + assert eulerpoly(3,'3/4').ae('-11/64') + assert eulerpoly(3,1).ae('-1/4') + assert eulerpoly(3,2).ae('9/4') + assert eulerpoly(4,-1).ae(2) + assert eulerpoly(4,0).ae(0) + assert eulerpoly(4,'1/2').ae('5/16') + assert eulerpoly(4,'3/4').ae('57/256') + assert eulerpoly(4,1).ae(0) + assert eulerpoly(4,2).ae(2) + assert eulerpoly(5,-1).ae('-3/2') + assert eulerpoly(5,0).ae('-1/2') + assert eulerpoly(5,'1/2').ae(0) + assert eulerpoly(5,'3/4').ae('361/1024') + assert eulerpoly(5,1).ae('1/2') + assert eulerpoly(5,2).ae('3/2') + assert eulerpoly(10,-1).ae(2) + assert eulerpoly(10,0).ae(0) + assert eulerpoly(10,'1/2').ae('-50521/1024') + assert eulerpoly(10,'3/4').ae('-36581523/1048576') + assert eulerpoly(10,1).ae(0) + assert eulerpoly(10,2).ae(2) + assert eulerpoly(11,-1).ae('-699/4') + assert eulerpoly(11,0).ae('691/4') + assert eulerpoly(11,'1/2').ae(0) + assert eulerpoly(11,'3/4').ae('-512343611/4194304') + assert eulerpoly(11,1).ae('-691/4') + assert eulerpoly(11,2).ae('699/4') + # Potential accuracy issues + assert bernpoly(10000,10000).ae('5.8196915936323387117e+39999') + assert bernpoly(200,17.5).ae(3.8048418524583064909e244) + assert eulerpoly(200,17.5).ae(-3.7309911582655785929e275) + +def test_gamma(): + mp.dps = 15 + assert gamma(0.25).ae(3.6256099082219083119) + assert gamma(0.0001).ae(9999.4228832316241908) + assert gamma(300).ae('1.0201917073881354535e612') + assert gamma(-0.5).ae(-3.5449077018110320546) + assert gamma(-7.43).ae(0.00026524416464197007186) + #assert gamma(Rational(1,2)) == gamma(0.5) + #assert gamma(Rational(-7,3)).ae(gamma(mpf(-7)/3)) + assert gamma(1+1j).ae(0.49801566811835604271 - 0.15494982830181068512j) + assert gamma(-1+0.01j).ae(-0.422733904013474115 + 99.985883082635367436j) + assert gamma(20+30j).ae(-1453876687.5534810 + 1163777777.8031573j) + # Should always give exact factorials when they can + # be represented as mpfs under the current working precision + fact = 1 + for i in range(1, 18): + assert gamma(i) == fact + fact *= i + for dps in [170, 600]: + fact = 1 + mp.dps = dps + for i in range(1, 105): + assert gamma(i) == fact + fact *= i + mp.dps = 100 + assert gamma(0.5).ae(sqrt(pi)) + mp.dps = 15 + assert factorial(0) == fac(0) == 1 + assert factorial(3) == 6 + assert isnan(gamma(nan)) + assert gamma(1100).ae('4.8579168073569433667e2866') + assert rgamma(0) == 0 + assert rgamma(-1) == 0 + assert rgamma(2) == 1.0 + assert rgamma(3) == 0.5 + assert loggamma(2+8j).ae(-8.5205176753667636926 + 10.8569497125597429366j) + assert loggamma('1e10000').ae('2.302485092994045684017991e10004') + assert loggamma('1e10000j').ae(mpc('-1.570796326794896619231322e10000','2.302485092994045684017991e10004')) + +def test_fac2(): + mp.dps = 15 + assert [fac2(n) for n in range(10)] == [1,1,2,3,8,15,48,105,384,945] + assert fac2(-5).ae(1./3) + assert fac2(-11).ae(-1./945) + assert fac2(50).ae(5.20469842636666623e32) + assert fac2(0.5+0.75j).ae(0.81546769394688069176-0.34901016085573266889j) + assert fac2(inf) == inf + assert isnan(fac2(-inf)) + +def test_gamma_quotients(): + mp.dps = 15 + h = 1e-8 + ep = 1e-4 + G = gamma + assert 
gammaprod([-1],[-3,-4]) == 0 + assert gammaprod([-1,0],[-5]) == inf + assert abs(gammaprod([-1],[-2]) - G(-1+h)/G(-2+h)) < 1e-4 + assert abs(gammaprod([-4,-3],[-2,0]) - G(-4+h)*G(-3+h)/G(-2+h)/G(0+h)) < 1e-4 + assert rf(3,0) == 1 + assert rf(2.5,1) == 2.5 + assert rf(-5,2) == 20 + assert rf(j,j).ae(gamma(2*j)/gamma(j)) + assert rf('-255.5815971722918','-0.5119253100282322').ae('-0.1952720278805729485') # issue 421 + assert ff(-2,0) == 1 + assert ff(-2,1) == -2 + assert ff(4,3) == 24 + assert ff(3,4) == 0 + assert binomial(0,0) == 1 + assert binomial(1,0) == 1 + assert binomial(0,-1) == 0 + assert binomial(3,2) == 3 + assert binomial(5,2) == 10 + assert binomial(5,3) == 10 + assert binomial(5,5) == 1 + assert binomial(-1,0) == 1 + assert binomial(-2,-4) == 3 + assert binomial(4.5, 1.5) == 6.5625 + assert binomial(1100,1) == 1100 + assert binomial(1100,2) == 604450 + assert beta(1,1) == 1 + assert beta(0,0) == inf + assert beta(3,0) == inf + assert beta(-1,-1) == inf + assert beta(1.5,1).ae(2/3.) + assert beta(1.5,2.5).ae(pi/16) + assert (10**15*beta(10,100)).ae(2.3455339739604649879) + assert beta(inf,inf) == 0 + assert isnan(beta(-inf,inf)) + assert isnan(beta(-3,inf)) + assert isnan(beta(0,inf)) + assert beta(inf,0.5) == beta(0.5,inf) == 0 + assert beta(inf,-1.5) == inf + assert beta(inf,-0.5) == -inf + assert beta(1+2j,-1-j/2).ae(1.16396542451069943086+0.08511695947832914640j) + assert beta(-0.5,0.5) == 0 + assert beta(-3,3).ae(-1/3.) + assert beta('-255.5815971722918','-0.5119253100282322').ae('18.157330562703710339') # issue 421 + +def test_zeta(): + mp.dps = 15 + assert zeta(2).ae(pi**2 / 6) + assert zeta(2.0).ae(pi**2 / 6) + assert zeta(mpc(2)).ae(pi**2 / 6) + assert zeta(100).ae(1) + assert zeta(0).ae(-0.5) + assert zeta(0.5).ae(-1.46035450880958681) + assert zeta(-1).ae(-mpf(1)/12) + assert zeta(-2) == 0 + assert zeta(-3).ae(mpf(1)/120) + assert zeta(-4) == 0 + assert zeta(-100) == 0 + assert isnan(zeta(nan)) + assert zeta(1e-30).ae(-0.5) + assert zeta(-1e-30).ae(-0.5) + # Zeros in the critical strip + assert zeta(mpc(0.5, 14.1347251417346937904)).ae(0) + assert zeta(mpc(0.5, 21.0220396387715549926)).ae(0) + assert zeta(mpc(0.5, 25.0108575801456887632)).ae(0) + assert zeta(mpc(1e-30,1e-40)).ae(-0.5) + assert zeta(mpc(-1e-30,1e-40)).ae(-0.5) + mp.dps = 50 + im = '236.5242296658162058024755079556629786895294952121891237' + assert zeta(mpc(0.5, im)).ae(0, 1e-46) + mp.dps = 15 + # Complex reflection formula + assert (zeta(-60+3j) / 10**34).ae(8.6270183987866146+15.337398548226238j) + # issue #358 + assert zeta(0,0.5) == 0 + assert zeta(0,0) == 0.5 + assert zeta(0,0.5,1).ae(-0.34657359027997265) + # see issue #390 + assert zeta(-1.5,0.5j).ae(-0.13671400162512768475 + 0.11411333638426559139j) + +def test_altzeta(): + mp.dps = 15 + assert altzeta(-2) == 0 + assert altzeta(-4) == 0 + assert altzeta(-100) == 0 + assert altzeta(0) == 0.5 + assert altzeta(-1) == 0.25 + assert altzeta(-3) == -0.125 + assert altzeta(-5) == 0.25 + assert altzeta(-21) == 1180529130.25 + assert altzeta(1).ae(log(2)) + assert altzeta(2).ae(pi**2/12) + assert altzeta(10).ae(73*pi**10/6842880) + assert altzeta(50) < 1 + assert altzeta(60, rounding='d') < 1 + assert altzeta(60, rounding='u') == 1 + assert altzeta(10000, rounding='d') < 1 + assert altzeta(10000, rounding='u') == 1 + assert altzeta(3+0j) == altzeta(3) + s = 3+4j + assert altzeta(s).ae((1-2**(1-s))*zeta(s)) + s = -3+4j + assert altzeta(s).ae((1-2**(1-s))*zeta(s)) + assert altzeta(-100.5).ae(4.58595480083585913e+108) + assert 
altzeta(1.3).ae(0.73821404216623045) + assert altzeta(1e-30).ae(0.5) + assert altzeta(-1e-30).ae(0.5) + assert altzeta(mpc(1e-30,1e-40)).ae(0.5) + assert altzeta(mpc(-1e-30,1e-40)).ae(0.5) + +def test_zeta_huge(): + mp.dps = 15 + assert zeta(inf) == 1 + mp.dps = 50 + assert zeta(100).ae('1.0000000000000000000000000000007888609052210118073522') + assert zeta(40*pi).ae('1.0000000000000000000000000000000000000148407238666182') + mp.dps = 10000 + v = zeta(33000) + mp.dps = 15 + assert str(v-1) == '1.02363019598118e-9934' + assert zeta(pi*1000, rounding=round_up) > 1 + assert zeta(3000, rounding=round_up) > 1 + assert zeta(pi*1000) == 1 + assert zeta(3000) == 1 + +def test_zeta_negative(): + mp.dps = 150 + a = -pi*10**40 + mp.dps = 15 + assert str(zeta(a)) == '2.55880492708712e+1233536161668617575553892558646631323374078' + mp.dps = 50 + assert str(zeta(a)) == '2.5588049270871154960875033337384432038436330847333e+1233536161668617575553892558646631323374078' + mp.dps = 15 + +def test_polygamma(): + mp.dps = 15 + psi0 = lambda z: psi(0,z) + psi1 = lambda z: psi(1,z) + assert psi0(3) == psi(0,3) == digamma(3) + #assert psi2(3) == psi(2,3) == tetragamma(3) + #assert psi3(3) == psi(3,3) == pentagamma(3) + assert psi0(pi).ae(0.97721330794200673) + assert psi0(-pi).ae(7.8859523853854902) + assert psi0(-pi+1).ae(7.5676424992016996) + assert psi0(pi+j).ae(1.04224048313859376 + 0.35853686544063749j) + assert psi0(-pi-j).ae(1.3404026194821986 - 2.8824392476809402j) + assert findroot(psi0, 1).ae(1.4616321449683622) + assert psi0(1e-10).ae(-10000000000.57722) + assert psi0(1e-40).ae(-1.000000000000000e+40) + assert psi0(1e-10+1e-10j).ae(-5000000000.577215 + 5000000000.000000j) + assert psi0(1e-40+1e-40j).ae(-5.000000000000000e+39 + 5.000000000000000e+39j) + assert psi0(inf) == inf + assert psi1(inf) == 0 + assert psi(2,inf) == 0 + assert psi1(pi).ae(0.37424376965420049) + assert psi1(-pi).ae(53.030438740085385) + assert psi1(pi+j).ae(0.32935710377142464 - 0.12222163911221135j) + assert psi1(-pi-j).ae(-0.30065008356019703 + 0.01149892486928227j) + assert (10**6*psi(4,1+10*pi*j)).ae(-6.1491803479004446 - 0.3921316371664063j) + assert psi0(1+10*pi*j).ae(3.4473994217222650 + 1.5548808324857071j) + assert isnan(psi0(nan)) + assert isnan(psi0(-inf)) + assert psi0(-100.5).ae(4.615124601338064) + assert psi0(3+0j).ae(psi0(3)) + assert psi0(-100+3j).ae(4.6106071768714086321+3.1117510556817394626j) + assert isnan(psi(2,mpc(0,inf))) + assert isnan(psi(2,mpc(0,nan))) + assert isnan(psi(2,mpc(0,-inf))) + assert isnan(psi(2,mpc(1,inf))) + assert isnan(psi(2,mpc(1,nan))) + assert isnan(psi(2,mpc(1,-inf))) + assert isnan(psi(2,mpc(inf,inf))) + assert isnan(psi(2,mpc(nan,nan))) + assert isnan(psi(2,mpc(-inf,-inf))) + mp.dps = 30 + # issue #534 + assert digamma(-0.75+1j).ae(mpc('0.46317279488182026118963809283042317', '2.4821070143037957102007677817351115')) + mp.dps = 15 + +def test_polygamma_high_prec(): + mp.dps = 100 + assert str(psi(0,pi)) == "0.9772133079420067332920694864061823436408346099943256380095232865318105924777141317302075654362928734" + assert str(psi(10,pi)) == "-12.98876181434889529310283769414222588307175962213707170773803550518307617769657562747174101900659238" + +def test_polygamma_identities(): + mp.dps = 15 + psi0 = lambda z: psi(0,z) + psi1 = lambda z: psi(1,z) + psi2 = lambda z: psi(2,z) + assert psi0(0.5).ae(-euler-2*log(2)) + assert psi0(1).ae(-euler) + assert psi1(0.5).ae(0.5*pi**2) + assert psi1(1).ae(pi**2/6) + assert psi1(0.25).ae(pi**2 + 8*catalan) + assert psi2(1).ae(-2*apery) + mp.dps = 20 + 
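+    # Reference for the identity psi(2, 5/6) = 4*sqrt(3)*pi**3 - 182*zeta(3)
+    # (apery is mpmath's zeta(3) constant); u is formed at dps 20 so the
+    # dps-15 comparison below has guard digits.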
u = -182*apery+4*sqrt(3)*pi**3 + mp.dps = 15 + assert psi(2,5/6.).ae(u) + assert psi(3,0.5).ae(pi**4) + +def test_foxtrot_identity(): + # A test of the complex digamma function. + # See http://mathworld.wolfram.com/FoxTrotSeries.html and + # http://mathworld.wolfram.com/DigammaFunction.html + psi0 = lambda z: psi(0,z) + mp.dps = 50 + a = (-1)**fraction(1,3) + b = (-1)**fraction(2,3) + x = -psi0(0.5*a) - psi0(-0.5*b) + psi0(0.5*(1+a)) + psi0(0.5*(1-b)) + y = 2*pi*sech(0.5*sqrt(3)*pi) + assert x.ae(y) + mp.dps = 15 + +def test_polygamma_high_order(): + mp.dps = 100 + assert str(psi(50, pi)) == "-1344100348958402765749252447726432491812.641985273160531055707095989227897753035823152397679626136483" + assert str(psi(50, pi + 14*e)) == "-0.00000000000000000189793739550804321623512073101895801993019919886375952881053090844591920308111549337295143780341396" + assert str(psi(50, pi + 14*e*j)) == ("(-0.0000000000000000522516941152169248975225472155683565752375889510631513244785" + "9377385233700094871256507814151956624433 - 0.00000000000000001813157041407010184" + "702414110218205348527862196327980417757665282244728963891298080199341480881811613j)") + mp.dps = 15 + assert str(psi(50, pi)) == "-1.34410034895841e+39" + assert str(psi(50, pi + 14*e)) == "-1.89793739550804e-18" + assert str(psi(50, pi + 14*e*j)) == "(-5.2251694115217e-17 - 1.81315704140701e-17j)" + +def test_harmonic(): + mp.dps = 15 + assert harmonic(0) == 0 + assert harmonic(1) == 1 + assert harmonic(2) == 1.5 + assert harmonic(3).ae(1. + 1./2 + 1./3) + assert harmonic(10**10).ae(23.603066594891989701) + assert harmonic(10**1000).ae(2303.162308658947) + assert harmonic(0.5).ae(2-2*log(2)) + assert harmonic(inf) == inf + assert harmonic(2+0j) == 1.5+0j + assert harmonic(1+2j).ae(1.4918071802755104+0.92080728264223022j) + +def test_gamma_huge_1(): + mp.dps = 500 + x = mpf(10**10) / 7 + mp.dps = 15 + assert str(gamma(x)) == "6.26075321389519e+12458010678" + mp.dps = 50 + assert str(gamma(x)) == "6.2607532138951929201303779291707455874010420783933e+12458010678" + mp.dps = 15 + +def test_gamma_huge_2(): + mp.dps = 500 + x = mpf(10**100) / 19 + mp.dps = 15 + assert str(gamma(x)) == (\ + "1.82341134776679e+5172997469323364168990133558175077136829182824042201886051511" + "9656908623426021308685461258226190190661") + mp.dps = 50 + assert str(gamma(x)) == (\ + "1.82341134776678875374414910350027596939980412984e+5172997469323364168990133558" + "1750771368291828240422018860515119656908623426021308685461258226190190661") + +def test_gamma_huge_3(): + mp.dps = 500 + x = 10**80 // 3 + 10**70*j / 7 + mp.dps = 15 + y = gamma(x) + assert str(y.real) == (\ + "-6.82925203918106e+2636286142112569524501781477865238132302397236429627932441916" + "056964386399485392600") + assert str(y.imag) == (\ + "8.54647143678418e+26362861421125695245017814778652381323023972364296279324419160" + "56964386399485392600") + mp.dps = 50 + y = gamma(x) + assert str(y.real) == (\ + "-6.8292520391810548460682736226799637356016538421817e+26362861421125695245017814" + "77865238132302397236429627932441916056964386399485392600") + assert str(y.imag) == (\ + "8.5464714367841748507479306948130687511711420234015e+263628614211256952450178147" + "7865238132302397236429627932441916056964386399485392600") + +def test_gamma_huge_4(): + x = 3200+11500j + mp.dps = 15 + assert str(gamma(x)) == \ + "(8.95783268539713e+5164 - 1.94678798329735e+5164j)" + mp.dps = 50 + assert str(gamma(x)) == (\ + "(8.9578326853971339570292952697675570822206567327092e+5164" + " - 
1.9467879832973509568895402139429643650329524144794e+51" + "64j)") + mp.dps = 15 + +def test_gamma_huge_5(): + mp.dps = 500 + x = 10**60 * j / 3 + mp.dps = 15 + y = gamma(x) + assert str(y.real) == "-3.27753899634941e-227396058973640224580963937571892628368354580620654233316839" + assert str(y.imag) == "-7.1519888950416e-227396058973640224580963937571892628368354580620654233316841" + mp.dps = 50 + y = gamma(x) + assert str(y.real) == (\ + "-3.2775389963494132168950056995974690946983219123935e-22739605897364022458096393" + "7571892628368354580620654233316839") + assert str(y.imag) == (\ + "-7.1519888950415979749736749222530209713136588885897e-22739605897364022458096393" + "7571892628368354580620654233316841") + mp.dps = 15 + +def test_gamma_huge_7(): + mp.dps = 100 + a = 3 + j/mpf(10)**1000 + mp.dps = 15 + y = gamma(a) + assert str(y.real) == "2.0" + # wrong + #assert str(y.imag) == "2.16735365342606e-1000" + assert str(y.imag) == "1.84556867019693e-1000" + mp.dps = 50 + y = gamma(a) + assert str(y.real) == "2.0" + #assert str(y.imag) == "2.1673536534260596065418805612488708028522563689298e-1000" + assert str(y.imag) == "1.8455686701969342787869758198351951379156813281202e-1000" + +def test_stieltjes(): + mp.dps = 15 + assert stieltjes(0).ae(+euler) + mp.dps = 25 + assert stieltjes(1).ae('-0.07281584548367672486058637587') + assert stieltjes(2).ae('-0.009690363192872318484530386035') + assert stieltjes(3).ae('0.002053834420303345866160046543') + assert stieltjes(4).ae('0.002325370065467300057468170178') + mp.dps = 15 + assert stieltjes(1).ae(-0.07281584548367672486058637587) + assert stieltjes(2).ae(-0.009690363192872318484530386035) + assert stieltjes(3).ae(0.002053834420303345866160046543) + assert stieltjes(4).ae(0.0023253700654673000574681701775) + +def test_barnesg(): + mp.dps = 15 + assert barnesg(0) == barnesg(-1) == 0 + assert [superfac(i) for i in range(8)] == [1, 1, 2, 12, 288, 34560, 24883200, 125411328000] + assert str(superfac(1000)) == '3.24570818422368e+1177245' + assert isnan(barnesg(nan)) + assert isnan(superfac(nan)) + assert isnan(hyperfac(nan)) + assert barnesg(inf) == inf + assert superfac(inf) == inf + assert hyperfac(inf) == inf + assert isnan(superfac(-inf)) + assert barnesg(0.7).ae(0.8068722730141471) + assert barnesg(2+3j).ae(-0.17810213864082169+0.04504542715447838j) + assert [hyperfac(n) for n in range(7)] == [1, 1, 4, 108, 27648, 86400000, 4031078400000] + assert [hyperfac(n) for n in range(0,-7,-1)] == [1,1,-1,-4,108,27648,-86400000] + a = barnesg(-3+0j) + assert a == 0 and isinstance(a, mpc) + a = hyperfac(-3+0j) + assert a == -4 and isinstance(a, mpc) + +def test_polylog(): + mp.dps = 15 + zs = [mpmathify(z) for z in [0, 0.5, 0.99, 4, -0.5, -4, 1j, 3+4j]] + for z in zs: assert polylog(1, z).ae(-log(1-z)) + for z in zs: assert polylog(0, z).ae(z/(1-z)) + for z in zs: assert polylog(-1, z).ae(z/(1-z)**2) + for z in zs: assert polylog(-2, z).ae(z*(1+z)/(1-z)**3) + for z in zs: assert polylog(-3, z).ae(z*(1+4*z+z**2)/(1-z)**4) + assert polylog(3, 7).ae(5.3192579921456754382-5.9479244480803301023j) + assert polylog(3, -7).ae(-4.5693548977219423182) + assert polylog(2, 0.9).ae(1.2997147230049587252) + assert polylog(2, -0.9).ae(-0.75216317921726162037) + assert polylog(2, 0.9j).ae(-0.17177943786580149299+0.83598828572550503226j) + assert polylog(2, 1.1).ae(1.9619991013055685931-0.2994257606855892575j) + assert polylog(2, -1.1).ae(-0.89083809026228260587) + assert polylog(2, 1.1*sqrt(j)).ae(0.58841571107611387722+1.09962542118827026011j) + assert polylog(-2, 
0.9).ae(1710) + assert polylog(-2, -0.9).ae(-90/6859.) + assert polylog(3, 0.9).ae(1.0496589501864398696) + assert polylog(-3, 0.9).ae(48690) + assert polylog(-3, -4).ae(-0.0064) + assert polylog(0.5+j/3, 0.5+j/2).ae(0.31739144796565650535 + 0.99255390416556261437j) + assert polylog(3+4j,1).ae(zeta(3+4j)) + assert polylog(3+4j,-1).ae(-altzeta(3+4j)) + # issue 390 + assert polylog(1.5, -48.910886523731889).ae(-6.272992229311817) + assert polylog(1.5, 200).ae(-8.349608319033686529 - 8.159694826434266042j) + assert polylog(-2+0j, -2).ae(mpf(1)/13.5) + assert polylog(-2+0j, 1.25).ae(-180) + +def test_bell_polyexp(): + mp.dps = 15 + # TODO: more tests for polyexp + assert (polyexp(0,1e-10)*10**10).ae(1.00000000005) + assert (polyexp(1,1e-10)*10**10).ae(1.0000000001) + assert polyexp(5,3j).ae(-607.7044517476176454+519.962786482001476087j) + assert polyexp(-1,3.5).ae(12.09537536175543444) + # bell(0,x) = 1 + assert bell(0,0) == 1 + assert bell(0,1) == 1 + assert bell(0,2) == 1 + assert bell(0,inf) == 1 + assert bell(0,-inf) == 1 + assert isnan(bell(0,nan)) + # bell(1,x) = x + assert bell(1,4) == 4 + assert bell(1,0) == 0 + assert bell(1,inf) == inf + assert bell(1,-inf) == -inf + assert isnan(bell(1,nan)) + # bell(2,x) = x*(1+x) + assert bell(2,-1) == 0 + assert bell(2,0) == 0 + # large orders / arguments + assert bell(10) == 115975 + assert bell(10,1) == 115975 + assert bell(10, -8) == 11054008 + assert bell(5,-50) == -253087550 + assert bell(50,-50).ae('3.4746902914629720259e74') + mp.dps = 80 + assert bell(50,-50) == 347469029146297202586097646631767227177164818163463279814268368579055777450 + assert bell(40,50) == 5575520134721105844739265207408344706846955281965031698187656176321717550 + assert bell(74) == 5006908024247925379707076470957722220463116781409659160159536981161298714301202 + mp.dps = 15 + assert bell(10,20j) == 7504528595600+15649605360020j + # continuity of the generalization + assert bell(0.5,0).ae(sinc(pi*0.5)) + +def test_primezeta(): + mp.dps = 15 + assert primezeta(0.9).ae(1.8388316154446882243 + 3.1415926535897932385j) + assert primezeta(4).ae(0.076993139764246844943) + assert primezeta(1) == inf + assert primezeta(inf) == 0 + assert isnan(primezeta(nan)) + +def test_rs_zeta(): + mp.dps = 15 + assert zeta(0.5+100000j).ae(1.0730320148577531321 + 5.7808485443635039843j) + assert zeta(0.75+100000j).ae(1.837852337251873704 + 1.9988492668661145358j) + assert zeta(0.5+1000000j, derivative=3).ae(1647.7744105852674733 - 1423.1270943036622097j) + assert zeta(1+1000000j, derivative=3).ae(3.4085866124523582894 - 18.179184721525947301j) + assert zeta(1+1000000j, derivative=1).ae(-0.10423479366985452134 - 0.74728992803359056244j) + assert zeta(0.5-1000000j, derivative=1).ae(11.636804066002521459 + 17.127254072212996004j) + # Additional sanity tests using fp arithmetic. 
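The comment above introduces checks in mpmath's `fp` context, which computes with fixed double precision; its zeta values can match arbitrary-precision references only to roughly double accuracy, which is why the local `ae` helper defined next uses a loose `tol=1e-6` rather than mpf's `.ae`. A sketch of the comparison, assuming only the public `mp`/`fp` contexts:

```python
# A sketch: fp (double precision) vs mp (arbitrary precision) high on
# the critical line; agreement holds only to a loose relative tolerance.
from mpmath import mp, fp, mpc, zeta

mp.dps = 15
hi = zeta(mpc(0.5, 100000))     # arbitrary-precision reference
lo = fp.zeta(0.5 + 100000j)     # double-precision evaluation
assert abs(lo - complex(hi)) < 1e-6 * abs(complex(hi))
```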
+ # Some more high-precision tests are found in the docstrings + def ae(x, y, tol=1e-6): + return abs(x-y) < tol*abs(y) + assert ae(fp.zeta(0.5-100000j), 1.0730320148577531321 - 5.7808485443635039843j) + assert ae(fp.zeta(0.75-100000j), 1.837852337251873704 - 1.9988492668661145358j) + assert ae(fp.zeta(0.5+1e6j), 0.076089069738227100006 + 2.8051021010192989554j) + assert ae(fp.zeta(0.5+1e6j, derivative=1), 11.636804066002521459 - 17.127254072212996004j) + assert ae(fp.zeta(1+1e6j), 0.94738726251047891048 + 0.59421999312091832833j) + assert ae(fp.zeta(1+1e6j, derivative=1), -0.10423479366985452134 - 0.74728992803359056244j) + assert ae(fp.zeta(0.5+100000j, derivative=1), 10.766962036817482375 - 30.92705282105996714j) + assert ae(fp.zeta(0.5+100000j, derivative=2), -119.40515625740538429 + 217.14780631141830251j) + assert ae(fp.zeta(0.5+100000j, derivative=3), 1129.7550282628460881 - 1685.4736895169690346j) + assert ae(fp.zeta(0.5+100000j, derivative=4), -10407.160819314958615 + 13777.786698628045085j) + assert ae(fp.zeta(0.75+100000j, derivative=1), -0.41742276699594321475 - 6.4453816275049955949j) + assert ae(fp.zeta(0.75+100000j, derivative=2), -9.214314279161977266 + 35.07290795337967899j) + assert ae(fp.zeta(0.75+100000j, derivative=3), 110.61331857820103469 - 236.87847130518129926j) + assert ae(fp.zeta(0.75+100000j, derivative=4), -1054.334275898559401 + 1769.9177890161596383j) + +def test_siegelz(): + mp.dps = 15 + assert siegelz(100000).ae(5.87959246868176504171) + assert siegelz(100000, derivative=2).ae(-54.1172711010126452832) + assert siegelz(100000, derivative=3).ae(-278.930831343966552538) + assert siegelz(100000+j,derivative=1).ae(678.214511857070283307-379.742160779916375413j) + + + +def test_zeta_near_1(): + # Test for a former bug in mpf_zeta and mpc_zeta + mp.dps = 15 + s1 = fadd(1, '1e-10', exact=True) + s2 = fadd(1, '-1e-10', exact=True) + s3 = fadd(1, '1e-10j', exact=True) + assert zeta(s1).ae(1.000000000057721566490881444e10) + assert zeta(s2).ae(-9.99999999942278433510574872e9) + z = zeta(s3) + assert z.real.ae(0.57721566490153286060) + assert z.imag.ae(-9.9999999999999999999927184e9) + mp.dps = 30 + s1 = fadd(1, '1e-50', exact=True) + s2 = fadd(1, '-1e-50', exact=True) + s3 = fadd(1, '1e-50j', exact=True) + assert zeta(s1).ae('1e50') + assert zeta(s2).ae('-1e50') + z = zeta(s3) + assert z.real.ae('0.57721566490153286060651209008240243104215933593992') + assert z.imag.ae('-1e50') diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_hp.py b/MLPY/Lib/site-packages/mpmath/tests/test_hp.py new file mode 100644 index 0000000000000000000000000000000000000000..9eba0af798f64ac3f8d464e2d3bf231567a48c9b --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_hp.py @@ -0,0 +1,291 @@ +""" +Check that the output from irrational functions is accurate for +high-precision input, from 5 to 200 digits. The reference values were +verified with Mathematica. 
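+The reference values below are given to roughly 200 significant digits, so
+the same strings serve as ground truth for every precision in the list.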
+""" + +import time +from mpmath import * + +precs = [5, 15, 28, 35, 57, 80, 100, 150, 200] + +# sqrt(3) + pi/2 +a = \ +"3.302847134363773912758768033145623809041389953497933538543279275605"\ +"841220051904536395163599428307109666700184672047856353516867399774243594"\ +"67433521615861420725323528325327484262075464241255915238845599752675" + +# e + 1/euler**2 +b = \ +"5.719681166601007617111261398629939965860873957353320734275716220045750"\ +"31474116300529519620938123730851145473473708966080207482581266469342214"\ +"824842256999042984813905047895479210702109260221361437411947323431" + +# sqrt(a) +sqrt_a = \ +"1.817373691447021556327498239690365674922395036495564333152483422755"\ +"144321726165582817927383239308173567921345318453306994746434073691275094"\ +"484777905906961689902608644112196725896908619756404253109722911487" + +# sqrt(a+b*i).real +sqrt_abi_real = \ +"2.225720098415113027729407777066107959851146508557282707197601407276"\ +"89160998185797504198062911768240808839104987021515555650875977724230130"\ +"3584116233925658621288393930286871862273400475179312570274423840384" + +# sqrt(a+b*i).imag +sqrt_abi_imag = \ +"1.2849057639084690902371581529110949983261182430040898147672052833653668"\ +"0629534491275114877090834296831373498336559849050755848611854282001250"\ +"1924311019152914021365263161630765255610885489295778894976075186" + +# log(a) +log_a = \ +"1.194784864491089550288313512105715261520511949410072046160598707069"\ +"4336653155025770546309137440687056366757650909754708302115204338077595203"\ +"83005773986664564927027147084436553262269459110211221152925732612" + +# log(a+b*i).real +log_abi_real = \ +"1.8877985921697018111624077550443297276844736840853590212962006811663"\ +"04949387789489704203167470111267581371396245317618589339274243008242708"\ +"014251531496104028712866224020066439049377679709216784954509456421" + +# log(a+b*i).imag +log_abi_imag = \ +"1.0471204952840802663567714297078763189256357109769672185219334169734948"\ +"4265809854092437285294686651806426649541504240470168212723133326542181"\ +"8300136462287639956713914482701017346851009323172531601894918640" + +# exp(a) +exp_a = \ +"27.18994224087168661137253262213293847994194869430518354305430976149"\ +"382792035050358791398632888885200049857986258414049540376323785711941636"\ +"100358982497583832083513086941635049329804685212200507288797531143" + +# exp(a+b*i).real +exp_abi_real = \ +"22.98606617170543596386921087657586890620262522816912505151109385026"\ +"40160179326569526152851983847133513990281518417211964710397233157168852"\ +"4963130831190142571659948419307628119985383887599493378056639916701" + +# exp(a+b*i).imag +exp_abi_imag = \ +"-14.523557450291489727214750571590272774669907424478129280902375851196283"\ +"3377162379031724734050088565710975758824441845278120105728824497308303"\ +"6065619788140201636218705414429933685889542661364184694108251449" + +# a**b +pow_a_b = \ +"928.7025342285568142947391505837660251004990092821305668257284426997"\ +"361966028275685583421197860603126498884545336686124793155581311527995550"\ +"580229264427202446131740932666832138634013168125809402143796691154" + +# (a**(a+b*i)).real +pow_a_abi_real = \ +"44.09156071394489511956058111704382592976814280267142206420038656267"\ +"67707916510652790502399193109819563864568986234654864462095231138500505"\ +"8197456514795059492120303477512711977915544927440682508821426093455" + +# (a**(a+b*i)).imag +pow_a_abi_imag = \ +"27.069371511573224750478105146737852141664955461266218367212527612279886"\ 
+"9322304536553254659049205414427707675802193810711302947536332040474573"\ +"8166261217563960235014674118610092944307893857862518964990092301" + +# ((a+b*i)**(a+b*i)).real +pow_abi_abi_real = \ +"-0.15171310677859590091001057734676423076527145052787388589334350524"\ +"8084195882019497779202452975350579073716811284169068082670778986235179"\ +"0813026562962084477640470612184016755250592698408112493759742219150452"\ + +# ((a+b*i)**(a+b*i)).imag +pow_abi_abi_imag = \ +"1.2697592504953448936553147870155987153192995316950583150964099070426"\ +"4736837932577176947632535475040521749162383347758827307504526525647759"\ +"97547638617201824468382194146854367480471892602963428122896045019902" + +# sin(a) +sin_a = \ +"-0.16055653857469062740274792907968048154164433772938156243509084009"\ +"38437090841460493108570147191289893388608611542655654723437248152535114"\ +"528368009465836614227575701220612124204622383149391870684288862269631" + +# sin(1000*a) +sin_1000a = \ +"-0.85897040577443833776358106803777589664322997794126153477060795801"\ +"09151695416961724733492511852267067419573754315098042850381158563024337"\ +"216458577140500488715469780315833217177634490142748614625281171216863" + +# sin(a+b*i) +sin_abi_real = \ +"-24.4696999681556977743346798696005278716053366404081910969773939630"\ +"7149215135459794473448465734589287491880563183624997435193637389884206"\ +"02151395451271809790360963144464736839412254746645151672423256977064" + +sin_abi_imag = \ +"-150.42505378241784671801405965872972765595073690984080160750785565810981"\ +"8314482499135443827055399655645954830931316357243750839088113122816583"\ +"7169201254329464271121058839499197583056427233866320456505060735" + +# cos +cos_a = \ +"-0.98702664499035378399332439243967038895709261414476495730788864004"\ +"05406821549361039745258003422386169330787395654908532996287293003581554"\ +"257037193284199198069707141161341820684198547572456183525659969145501" + +cos_1000a = \ +"-0.51202523570982001856195696460663971099692261342827540426136215533"\ +"52686662667660613179619804463250686852463876088694806607652218586060613"\ +"951310588158830695735537073667299449753951774916401887657320950496820" + +# tan +tan_a = \ +"0.162666873675188117341401059858835168007137819495998960250142156848"\ +"639654718809412181543343168174807985559916643549174530459883826451064966"\ +"7996119428949951351938178809444268785629011625179962457123195557310" + +tan_abi_real = \ +"6.822696615947538488826586186310162599974827139564433912601918442911"\ +"1026830824380070400102213741875804368044342309515353631134074491271890"\ +"467615882710035471686578162073677173148647065131872116479947620E-6" + +tan_abi_imag = \ +"0.9999795833048243692245661011298447587046967777739649018690797625964167"\ +"1446419978852235960862841608081413169601038230073129482874832053357571"\ +"62702259309150715669026865777947502665936317953101462202542168429" + + +def test_hp(): + for dps in precs: + mp.dps = dps + 8 + aa = mpf(a) + bb = mpf(b) + a1000 = 1000*mpf(a) + abi = mpc(aa, bb) + mp.dps = dps + assert (sqrt(3) + pi/2).ae(aa) + assert (e + 1/euler**2).ae(bb) + + assert sqrt(aa).ae(mpf(sqrt_a)) + assert sqrt(abi).ae(mpc(sqrt_abi_real, sqrt_abi_imag)) + + assert log(aa).ae(mpf(log_a)) + assert log(abi).ae(mpc(log_abi_real, log_abi_imag)) + + assert exp(aa).ae(mpf(exp_a)) + assert exp(abi).ae(mpc(exp_abi_real, exp_abi_imag)) + + assert (aa**bb).ae(mpf(pow_a_b)) + assert (aa**abi).ae(mpc(pow_a_abi_real, pow_a_abi_imag)) + assert (abi**abi).ae(mpc(pow_abi_abi_real, pow_abi_abi_imag)) + + assert 
sin(a).ae(mpf(sin_a)) + assert sin(a1000).ae(mpf(sin_1000a)) + assert sin(abi).ae(mpc(sin_abi_real, sin_abi_imag)) + + assert cos(a).ae(mpf(cos_a)) + assert cos(a1000).ae(mpf(cos_1000a)) + + assert tan(a).ae(mpf(tan_a)) + assert tan(abi).ae(mpc(tan_abi_real, tan_abi_imag)) + + # check that complex cancellation is avoided so that both
+ # real and imaginary parts have high relative accuracy.
+ # abs_eps should be 0, but has to be set to 1e-205 to pass the
+ # 200-digit case, probably due to slight inaccuracy in the
+ # precomputed input + assert (tan(abi).real).ae(mpf(tan_abi_real), abs_eps=1e-205) + assert (tan(abi).imag).ae(mpf(tan_abi_imag), abs_eps=1e-205) + mp.dps = 460 + assert str(log(3))[-20:] == '02166121184001409826' + mp.dps = 15 +
+# Since str(a) can differ in the last digit from rounded a, and I want
+# to compare the last digits of big numbers with the results in Mathematica,
+# I made this hack to get the last 20 digits of rounded a
+
+def last_digits(a): + r = repr(a) + s = str(a) + #dps = mp.dps + #mp.dps += 3 + m = 10 + r = r.replace(s[:-m],'') + r = r.replace("mpf('",'').replace("')",'') + num0 = 0 + for c in r: + if c == '0': + num0 += 1 + else: + break + b = float(int(r))/10**(len(r) - m) + if b >= 10**m - 0.5: # pragma: no cover + raise NotImplementedError + n = int(round(b)) + sn = str(n) + s = s[:-m] + '0'*num0 + sn + return s[-20:] +
+# values checked with Mathematica
+def test_log_hp(): + mp.dps = 2000 + a = mpf(10)**15000/3 + r = log(a) + res = last_digits(r) + # Mathematica N[Log[10^15000/3], 2000] + # ...7443804441768333470331 + assert res == '43804441768333470331' + + # see issue 145 + r = log(mpf(3)/2) + # Mathematica N[Log[3/2], 2000] + # ...69653749808140753263288 + res = last_digits(r) + assert res == '53749808140753263288' + + mp.dps = 10000 + r = log(2) + res = last_digits(r) + # Mathematica N[Log[2], 10000] + # ...695615913401856601359655561 + assert res == '13401856601359655561' + r = log(mpf(10)**10/3) + res = last_digits(r) + # Mathematica N[Log[10^10/3], 10000] + # ...587087654020631943060007154 + assert res == '54020631943060007154', res + r = log(mpf(10)**100/3) + res = last_digits(r) + # Mathematica N[Log[10^100/3], 10000] + # ...59246336539088351652334666 + assert res == '36539088351652334666', res + mp.dps += 10 + a = 1 - mpf(1)/10**10 + mp.dps -= 10 + r = log(a) + res = last_digits(r) + # ...3310334360482956137216724048322957404 + # 372167240483229574038733026370 + # Mathematica N[Log[1 - 10^-10]*10^10, 10000] + # ...60482956137216724048322957404 + assert res == '37216724048322957404', res + mp.dps = 10000 + mp.dps += 100 + a = 1 + mpf(1)/10**100 + mp.dps -= 100 + + r = log(a) + res = last_digits(+r) + # Mathematica N[Log[1 + 10^-100]*10^10, 10030] + # ...3994733877377412241546890854692521568292338268273 10^-91 + assert res == '39947338773774122415', res + + mp.dps = 15 +
+def test_exp_hp(): + mp.dps = 4000 + r = exp(mpf(1)/10) + # IntegerPart[N[Exp[1/10] * 10^4000, 4000]] + # ...92167105162069688129 + assert int(r * 10**mp.dps) % 10**20 == 92167105162069688129 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_identify.py b/MLPY/Lib/site-packages/mpmath/tests/test_identify.py new file mode 100644 index 0000000000000000000000000000000000000000..f75ab0bc4f04ecb614011e7f4599989465cab785 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_identify.py @@ -0,0 +1,19 @@ +from mpmath import * +
+def test_pslq(): + mp.dps = 15 + assert pslq([3*pi+4*e/7, pi, e, log(2)]) == [7, -21, -4, 0] + assert pslq([4.9999999999999991, 1]) == [1, -5] + 
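# pslq looks for a nontrivial integer relation c0*x0 + c1*x1 + ... = 0;
+ # here 4.9999999999999991 is within tolerance of 5, so the relation
+ # found is x - 5 = 0. +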
assert pslq([2,1]) == [1, -2] + +def test_identify(): + mp.dps = 20 + assert identify(zeta(4), ['log(2)', 'pi**4']) == '((1/90)*pi**4)' + mp.dps = 15 + assert identify(exp(5)) == 'exp(5)' + assert identify(exp(4)) == 'exp(4)' + assert identify(log(5)) == 'log(5)' + assert identify(exp(3*pi), ['pi']) == 'exp((3*pi))' + assert identify(3, full=True) == ['3', '3', '1/(1/3)', 'sqrt(9)', + '1/sqrt((1/9))', '(sqrt(12)/2)**2', '1/(sqrt(12)/6)**2'] + assert identify(pi+1, {'a':+pi}) == '(1 + 1*a)' diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_interval.py b/MLPY/Lib/site-packages/mpmath/tests/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..251fd8b7ddb00074e8ae27cce4a01d8f4f8fe151 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_interval.py @@ -0,0 +1,453 @@ +from mpmath import * + +def test_interval_identity(): + iv.dps = 15 + assert mpi(2) == mpi(2, 2) + assert mpi(2) != mpi(-2, 2) + assert not (mpi(2) != mpi(2, 2)) + assert mpi(-1, 1) == mpi(-1, 1) + assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]" + assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')" + u = mpi(-1, 3) + assert -1 in u + assert 2 in u + assert 3 in u + assert -1.1 not in u + assert 3.1 not in u + assert mpi(-1, 3) in u + assert mpi(0, 1) in u + assert mpi(-1.1, 2) not in u + assert mpi(2.5, 3.1) not in u + w = mpi(-inf, inf) + assert mpi(-5, 5) in w + assert mpi(2, inf) in w + assert mpi(0, 2) in mpi(0, 10) + assert not (3 in mpi(-inf, 0)) + +def test_interval_hash(): + assert hash(mpi(3)) == hash(3) + assert hash(mpi(3.25)) == hash(3.25) + assert hash(mpi(3,4)) == hash(mpi(3,4)) + assert hash(iv.mpc(3)) == hash(3) + assert hash(iv.mpc(3,4)) == hash(3+4j) + assert hash(iv.mpc((1,3),(2,4))) == hash(iv.mpc((1,3),(2,4))) + +def test_interval_arithmetic(): + iv.dps = 15 + assert mpi(2) + mpi(3,4) == mpi(5,6) + assert mpi(1, 2)**2 == mpi(1, 4) + assert mpi(1) + mpi(0, 1e-50) == mpi(1, mpf('1.0000000000000002')) + x = 1 / (1 / mpi(3)) + assert x.a < 3 < x.b + x = mpi(2) ** mpi(0.5) + iv.dps += 5 + sq = iv.sqrt(2) + iv.dps -= 5 + assert x.a < sq < x.b + assert mpi(1) / mpi(1, inf) + assert mpi(2, 3) / inf == mpi(0, 0) + assert mpi(0) / inf == 0 + assert mpi(0) / 0 == mpi(-inf, inf) + assert mpi(inf) / 0 == mpi(-inf, inf) + assert mpi(0) * inf == mpi(-inf, inf) + assert 1 / mpi(2, inf) == mpi(0, 0.5) + assert str((mpi(50, 50) * mpi(-10, -10)) / 3) == \ + '[-166.66666666666668561, -166.66666666666665719]' + assert mpi(0, 4) ** 3 == mpi(0, 64) + assert mpi(2,4).mid == 3 + iv.dps = 30 + a = mpi(iv.pi) + iv.dps = 15 + b = +a + assert b.a < a.a + assert b.b > a.b + a = mpi(iv.pi) + assert a == +a + assert abs(mpi(-1,2)) == mpi(0,2) + assert abs(mpi(0.5,2)) == mpi(0.5,2) + assert abs(mpi(-3,2)) == mpi(0,3) + assert abs(mpi(-3,-0.5)) == mpi(0.5,3) + assert mpi(0) * mpi(2,3) == mpi(0) + assert mpi(2,3) * mpi(0) == mpi(0) + assert mpi(1,3).delta == 2 + assert mpi(1,2) - mpi(3,4) == mpi(-3,-1) + assert mpi(-inf,0) - mpi(0,inf) == mpi(-inf,0) + assert mpi(-inf,0) - mpi(-inf,inf) == mpi(-inf,inf) + assert mpi(0,inf) - mpi(-inf,1) == mpi(-1,inf) + +def test_interval_mul(): + assert mpi(-1, 0) * inf == mpi(-inf, 0) + assert mpi(-1, 0) * -inf == mpi(0, inf) + assert mpi(0, 1) * inf == mpi(0, inf) + assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf) + assert mpi(-1, 1) * inf == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-inf, 0) * mpi(0, 1) == mpi(-inf, 0) 
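+ # The product interval is spanned by the four endpoint products; the
+ # degenerate combinations of 0 and inf below widen conservatively to
+ # [-inf, inf].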
+ assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0) + assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160) + assert mpi(2,3) * mpi(-1,2) == mpi(-3,6) + # Should be undefined? + assert mpi(inf, inf) * 0 == mpi(-inf, inf) + assert mpi(-inf, -inf) * 0 == mpi(-inf, inf) + assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf) + assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf) + assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf) + assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf) + +def test_interval_pow(): + assert mpi(3)**2 == mpi(9, 9) + assert mpi(-3)**2 == mpi(9, 9) + assert mpi(-3, 1)**2 == mpi(0, 9) + assert mpi(-3, -1)**2 == mpi(1, 9) + assert mpi(-3, -1)**3 == mpi(-27, -1) + assert mpi(-3, 1)**3 == mpi(-27, 1) + assert mpi(-2, 3)**2 == mpi(0, 9) + assert mpi(-3, 2)**2 == mpi(0, 9) + assert mpi(4) ** -1 == mpi(0.25, 0.25) + assert mpi(-4) ** -1 == mpi(-0.25, -0.25) + assert mpi(4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(-4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(0, 1) ** inf == mpi(0, 1) + assert mpi(0, 1) ** -inf == mpi(1, inf) + assert mpi(0, inf) ** inf == mpi(0, inf) + assert mpi(0, inf) ** -inf == mpi(0, inf) + assert mpi(1, inf) ** inf == mpi(1, inf) + assert mpi(1, inf) ** -inf == mpi(0, 1) + assert mpi(2, 3) ** 1 == mpi(2, 3) + assert mpi(2, 3) ** 0 == 1 + assert mpi(1,3) ** mpi(2) == mpi(1,9) + +def test_interval_sqrt(): + assert mpi(4) ** 0.5 == mpi(2) + +def test_interval_div(): + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0, 1) / mpi(0, 1) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf) + assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf) + assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf) + assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8) + assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16) + assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16) + assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf) + assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf) + assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf) + assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0) + assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf) + assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf) + assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0) + assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25) + assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 0) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert 
mpi(-4, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0) + assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf) + assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf) + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0) + assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0) + assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0) + assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25) + assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0) + assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0) + assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5) + assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5) + assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0) + assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0) + assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0) + assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0) + assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25) + assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25) + assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0) + assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0) + # Should be undefined? + assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf) + assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf) + +def test_interval_cos_sin(): + iv.dps = 15 + cos = iv.cos + sin = iv.sin + tan = iv.tan + pi = iv.pi + # Around 0 + assert cos(mpi(0)) == 1 + assert sin(mpi(0)) == 0 + assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0) + assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159) + assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501) + assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0) + assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0) + assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0) + assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159) + assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538) + assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100)) + assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100)) + # Same interval + assert cos(mpi(2, 2.5)) + assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946) + assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168) + assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5)) + assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646) + assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306) + # Higher roots + iv.dps = 55 + w = 4*10**50 + mpi(0.5) + for p in [15, 40, 80]: + iv.dps = p + assert 0 in sin(4*mpi(pi)) + assert 0 in sin(4*10**50*mpi(pi)) + assert 0 in cos((4+0.5)*mpi(pi)) + assert 0 in cos(w*mpi(pi)) + assert 1 in cos(4*mpi(pi)) + assert 1 in cos(4*10**50*mpi(pi)) + iv.dps = 15 + assert cos(mpi(2,inf)) == mpi(-1,1) + assert sin(mpi(2,inf)) == mpi(-1,1) + assert cos(mpi(-inf,2)) == mpi(-1,1) + assert sin(mpi(-inf,2)) == mpi(-1,1) + u = tan(mpi(0.5,1)) + assert mpf(u.a).ae(mp.tan(0.5)) + assert mpf(u.b).ae(mp.tan(1)) + v = iv.cot(mpi(0.5,1)) + assert mpf(v.a).ae(mp.cot(1)) + assert 
mpf(v.b).ae(mp.cot(0.5)) + # Sanity check of evaluation at n*pi and (n+1/2)*pi + for n in range(-5,7,2): + x = iv.cos(n*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + x = iv.sin((n+0.5)*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + for n in range(-6,8,2): + x = iv.cos(n*iv.pi) + assert 1 in x + assert x <= 1 + if n: + assert x != 1 + x = iv.sin((n+0.5)*iv.pi) + assert 1 in x + assert x <= 1 + assert x != 1 + for n in range(-6,7): + x = iv.cos((n+0.5)*iv.pi) + assert x.a < 0 < x.b + x = iv.sin(n*iv.pi) + if n: + assert x.a < 0 < x.b +
+def test_interval_complex(): + # TODO: many more tests + iv.dps = 15 + mp.dps = 15 + assert iv.mpc(2,3) == 2+3j + assert iv.mpc(2,3) != 2+4j + assert iv.mpc(2,3) != 1+3j + assert 1+3j in iv.mpc([1,2],[3,4]) + assert 2+5j not in iv.mpc([1,2],[3,4]) + assert iv.mpc(1,2) + 1j == 1+3j + assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6]) + assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4]) + assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4]) + assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1]) + assert iv.exp(2+3j).ae(mp.exp(2+3j)) + assert iv.log(2+3j).ae(mp.log(2+3j)) + assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2)) + assert 1j in (iv.mpf(-1) ** 0.5) + assert 1j in (iv.mpc(-1) ** 0.5) + assert abs(iv.mpc(0)) == 0 + assert abs(iv.mpc(inf)) == inf + assert abs(iv.mpc(3,4)) == 5 + assert abs(iv.mpc(4)) == 4 + assert abs(iv.mpc(0,4)) == 4 + assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3]) + assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3]) + assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13]) + assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13]) + assert iv.mpc(2,3) ** 0 == 1 + assert iv.mpc(2,3) ** 1 == (2+3j) + assert iv.mpc(2,3) ** 2 == (2+3j)**2 + assert iv.mpc(2,3) ** 3 == (2+3j)**3 + assert iv.mpc(2,3) ** 4 == (2+3j)**4 + assert iv.mpc(2,3) ** 5 == (2+3j)**5 + assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1) + assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2) + assert iv.cos(2).ae(mp.cos(2)) + assert iv.sin(2).ae(mp.sin(2)) + assert iv.cos(2+3j).ae(mp.cos(2+3j)) + assert iv.sin(2+3j).ae(mp.sin(2+3j)) +
+def test_interval_complex_arg(): + mp.dps = 15 + iv.dps = 15 + assert iv.arg(3) == 0 + assert iv.arg(0) == 0 + assert iv.arg([0,3]) == 0 + assert iv.arg(-3).ae(pi) + assert iv.arg(2+3j).ae(mp.arg(2+3j)) + z = iv.mpc([-2,-1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1+4j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([-2,1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1+3j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([1,2],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(2+3j)) + assert t.b.ae(mp.arg(1+4j)) + z = iv.mpc([1,2],[-2,3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-2j)) + assert t.b.ae(mp.arg(1+3j)) + z = iv.mpc([1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-4j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1-3j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-2,-1],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-2-3j)) + assert t.b.ae(mp.arg(-1-4j)) + z = iv.mpc([-2,-1],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) + z = iv.mpc([-2,2],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) +
+def test_interval_ae(): + iv.dps = 15 + x = iv.mpf([1,2]) + assert x.ae(1) is None + assert x.ae(1.5) is None + assert x.ae(2) is None + assert x.ae(2.01) is False + assert x.ae(0.99) is False + x = iv.mpf(3.5) + assert x.ae(3.5) is True + assert x.ae(3.5+1e-15) is True + assert x.ae(3.5-1e-15) is True + 
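# On intervals, ae() is three-valued: True when approximate equality is
+ # certain, False when it is excluded, and None when the interval is too
+ # wide to decide. +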
assert x.ae(3.501) is False + assert x.ae(3.499) is False + assert x.ae(iv.mpf([3.5,3.501])) is None + assert x.ae(iv.mpf([3.5,4.5+1e-15])) is None + +def test_interval_nstr(): + iv.dps = n = 30 + x = mpi(1, 2) + # FIXME: error_dps should not be necessary + assert iv.nstr(x, n, mode='plusminus', error_dps=6) == '1.5 +- 0.5' + assert iv.nstr(x, n, mode='plusminus', use_spaces=False, error_dps=6) == '1.5+-0.5' + assert iv.nstr(x, n, mode='percent') == '1.5 (33.33%)' + assert iv.nstr(x, n, mode='brackets', use_spaces=False) == '[1.0,2.0]' + assert iv.nstr(x, n, mode='brackets' , brackets=('<', '>')) == '<1.0, 2.0>' + x = mpi('5.2582327113062393041', '5.2582327113062749951') + assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]' + assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]' + assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]' + exp = iv.exp + assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171' + iv.dps = 15 + +def test_mpi_from_str(): + iv.dps = 15 + assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0')) + assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)') + assert iv.convert('[1, 2]') == mpi(1, 2) + assert iv.convert('1[2, 3]') == mpi(12, 13) + assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8') + assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4') + +def test_interval_gamma(): + mp.dps = 15 + iv.dps = 15 + # TODO: need many more tests + assert iv.rgamma(0) == 0 + assert iv.fac(0) == 1 + assert iv.fac(1) == 1 + assert iv.fac(2) == 2 + assert iv.fac(3) == 6 + assert iv.gamma(0) == [-inf,inf] + assert iv.gamma(1) == 1 + assert iv.gamma(2) == 1 + assert iv.gamma(3) == 2 + assert -3.5449077018110320546 in iv.gamma(-0.5) + assert iv.loggamma(1) == 0 + assert iv.loggamma(2) == 0 + assert 0.69314718055994530942 in iv.loggamma(3) + # Test tight log-gamma endpoints based on monotonicity + xs = [iv.mpc([2,3],[1,4]), + iv.mpc([2,3],[-4,-1]), + iv.mpc([2,3],[-1,4]), + iv.mpc([2,3],[-4,1]), + iv.mpc([2,3],[-4,4]), + iv.mpc([-3,-2],[2,4]), + iv.mpc([-3,-2],[-4,-2])] + for x in xs: + ys = [mp.loggamma(mp.mpc(x.a,x.c)), + mp.loggamma(mp.mpc(x.b,x.c)), + mp.loggamma(mp.mpc(x.a,x.d)), + mp.loggamma(mp.mpc(x.b,x.d))] + if 0 in x.imag: + ys += [mp.loggamma(x.a), mp.loggamma(x.b)] + min_real = min([y.real for y in ys]) + max_real = max([y.real for y in ys]) + min_imag = min([y.imag for y in ys]) + max_imag = max([y.imag for y in ys]) + z = iv.loggamma(x) + assert z.a.ae(min_real) + assert z.b.ae(max_real) + assert z.c.ae(min_imag) + assert z.d.ae(max_imag) + +def test_interval_conversions(): + mp.dps = 15 + iv.dps = 15 + for a, b in ((-0.0, 0), (0.0, 0.5), (1.0, 1), \ + ('-inf', 20.5), ('-inf', float(sqrt(2)))): + r = mpi(a, b) + assert int(r.b) == int(b) + assert float(r.a) == float(a) + assert float(r.b) == float(b) + assert complex(r.a) == complex(a) + assert complex(r.b) == complex(b) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_levin.py b/MLPY/Lib/site-packages/mpmath/tests/test_levin.py new file mode 100644 index 0000000000000000000000000000000000000000..b14855df4de1a45da27080dcd239267842a4ac7a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_levin.py @@ -0,0 +1,153 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +from mpmath import mp +from mpmath import libmp + +xrange = libmp.backend.xrange + +# Attention: +# These tests run with 15-20 decimal digits precision. 
For higher precision the + # working precision must be raised. +
+def test_levin_0(): + mp.dps = 17 + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "levin", variant = "u") + S, s, n = [], 0, 1 + while 1: + s += mp.one / (n * n) + n += 1 + S.append(s) + v, e = L.update_psum(S) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + err = abs(v - mp.pi ** 2 / 6) + assert err < eps + w = mp.nsum(lambda n: 1/(n * n), [1, mp.inf], method = "levin", levin_variant = "u") + err = abs(v - w) + assert err < eps +
+def test_levin_1(): + mp.dps = 17 + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "levin", variant = "v") + A, n = [], 1 + while 1: + s = mp.mpf(n) ** (2 + 3j) + n += 1 + A.append(s) + v, e = L.update(A) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + err = abs(v - mp.zeta(-2-3j)) + assert err < eps + w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + err = abs(v - w) + assert err < eps +
+def test_levin_2(): + # [2] A. Sidi - "Practical Extrapolation Methods" p.373 + mp.dps = 17 + z=mp.mpf(10) + eps = mp.mpf(mp.eps) + with mp.extraprec(2 * mp.prec): + L = mp.levin(method = "sidi", variant = "t") + n = 0 + while 1: + s = (-1)**n * mp.fac(n) * z ** (-n) + v, e = L.step(s) + n += 1 + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(eps)) + exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + # there is also a symbolic expression for the integral: + # exact = z * mp.exp(z) * mp.expint(1,z) + err = abs(v - exact) + assert err < eps + w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + err = abs(v - w) + assert err < eps +
+def test_levin_3(): + mp.dps = 17 + z=mp.mpf(2) + eps = mp.mpf(mp.eps) + with mp.extraprec(7*mp.prec): # we need copious amount of precision to sum this highly divergent series + L = mp.levin(method = "levin", variant = "t") + n, s = 0, 0 + while 1: + s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) + n += 1 + v, e = L.step_psum(s) + if e < eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.8 * mp.log(eps)) + exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + # there is also a symbolic expression for the integral: + # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) + err = abs(v - exact) + assert err < eps + w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + err = abs(v - w) + assert err < eps +
+def test_levin_nsum(): + mp.dps = 17 + + with mp.extraprec(mp.prec): + z = mp.mpf(10) ** (-10) + a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z + assert abs(a - mp.euler) < 1e-10 + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi") + assert abs(a - mp.log(2)) < eps + + z = 2 + 1j + f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) + v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) + exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) + assert 
abs(exact - v) < eps + +def test_cohen_alt_0(): + mp.dps = 17 + AC = mp.cohen_alt() + S, s, n = [], 0, 1 + while 1: + s += -((-1) ** n) * mp.one / (n * n) + n += 1 + S.append(s) + v, e = AC.update_psum(S) + if e < mp.eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + eps = mp.exp(0.9 * mp.log(mp.eps)) + err = abs(v - mp.pi ** 2 / 12) + assert err < eps + +def test_cohen_alt_1(): + mp.dps = 17 + A = [] + AC = mp.cohen_alt() + n = 1 + while 1: + A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) + A.append(-mp.loggamma(1 + mp.one / (2 * n))) + n += 1 + v, e = AC.update(A) + if e < mp.eps: + break + if n > 1000: raise RuntimeError("iteration limit exceeded") + v = mp.exp(v) + err = abs(v - 1.06215090557106) + assert err < 1e-12 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_linalg.py b/MLPY/Lib/site-packages/mpmath/tests/test_linalg.py new file mode 100644 index 0000000000000000000000000000000000000000..14256a79f8953d3e4ef8b296258560d48204f547 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_linalg.py @@ -0,0 +1,332 @@ +# TODO: don't use round + +from __future__ import division + +import pytest +from mpmath import * +xrange = libmp.backend.xrange + +# XXX: these shouldn't be visible(?) +LU_decomp = mp.LU_decomp +L_solve = mp.L_solve +U_solve = mp.U_solve +householder = mp.householder +improve_solution = mp.improve_solution + +A1 = matrix([[3, 1, 6], + [2, 1, 3], + [1, 1, 1]]) +b1 = [2, 7, 4] + +A2 = matrix([[ 2, -1, -1, 2], + [ 6, -2, 3, -1], + [-4, 2, 3, -2], + [ 2, 0, 4, -3]]) +b2 = [3, -3, -2, -1] + +A3 = matrix([[ 1, 0, -1, -1, 0], + [ 0, 1, 1, 0, -1], + [ 4, -5, 2, 0, 0], + [ 0, 0, -2, 9,-12], + [ 0, 5, 0, 0, 12]]) +b3 = [0, 0, 0, 0, 50] + +A4 = matrix([[10.235, -4.56, 0., -0.035, 5.67], + [-2.463, 1.27, 3.97, -8.63, 1.08], + [-6.58, 0.86, -0.257, 9.32, -43.6 ], + [ 9.83, 7.39, -17.25, 0.036, 24.86], + [-9.31, 34.9, 78.56, 1.07, 65.8 ]]) +b4 = [8.95, 20.54, 7.42, 5.60, 58.43] + +A5 = matrix([[ 1, 2, -4], + [-2, -3, 5], + [ 3, 5, -8]]) + +A6 = matrix([[ 1.377360, 2.481400, 5.359190], + [ 2.679280, -1.229560, 25.560210], + [-1.225280+1.e6, 9.910180, -35.049900-1.e6]]) +b6 = [23.500000, -15.760000, 2.340000] + +A7 = matrix([[1, -0.5], + [2, 1], + [-2, 6]]) +b7 = [3, 2, -4] + +A8 = matrix([[1, 2, 3], + [-1, 0, 1], + [-1, -2, -1], + [1, 0, -1]]) +b8 = [1, 2, 3, 4] + +A9 = matrix([[ 4, 2, -2], + [ 2, 5, -4], + [-2, -4, 5.5]]) +b9 = [10, 16, -15.5] + +A10 = matrix([[1.0 + 1.0j, 2.0, 2.0], + [4.0, 5.0, 6.0], + [7.0, 8.0, 9.0]]) +b10 = [1.0, 1.0 + 1.0j, 1.0] + + +def test_LU_decomp(): + A = A3.copy() + b = b3 + A, p = LU_decomp(A) + y = L_solve(A, b, p) + x = U_solve(A, y) + assert p == [2, 1, 2, 3] + assert [round(i, 14) for i in x] == [3.78953107960742, 2.9989094874591098, + -0.081788440567070006, 3.8713195201744801, 2.9171210468920399] + A = A4.copy() + b = b4 + A, p = LU_decomp(A) + y = L_solve(A, b, p) + x = U_solve(A, y) + assert p == [0, 3, 4, 3] + assert [round(i, 14) for i in x] == [2.6383625899619201, 2.6643834462368399, + 0.79208015947958998, -2.5088376454101899, -1.0567657691375001] + A = randmatrix(3) + bak = A.copy() + LU_decomp(A, overwrite=1) + assert A != bak + +def test_inverse(): + for A in [A1, A2, A5]: + inv = inverse(A) + assert mnorm(A*inv - eye(A.rows), 1) < 1.e-14 + +def test_householder(): + mp.dps = 15 + A, b = A8, b8 + H, p, x, r = householder(extend(A, b)) + assert H == matrix( + [[mpf('3.0'), mpf('-2.0'), mpf('-1.0'), 0], + [-1.0,mpf('3.333333333333333'),mpf('-2.9999999999999991'),mpf('2.0')], + [-1.0, 
mpf('-0.66666666666666674'),mpf('2.8142135623730948'), + mpf('-2.8284271247461898')], + [1.0, mpf('-1.3333333333333333'),mpf('-0.20000000000000018'), + mpf('4.2426406871192857')]]) + assert p == [-2, -2, mpf('-1.4142135623730949')] + assert round(norm(r, 2), 10) == 4.2426406870999998 + + y = [102.102, 58.344, 36.463, 24.310, 17.017, 12.376, 9.282, 7.140, 5.610, + 4.488, 3.6465, 3.003] + + def coeff(n): + # similar to Hilbert matrix + A = [] + for i in range(1, 13): + A.append([1. / (i + j - 1) for j in range(1, n + 1)]) + return matrix(A) + + residuals = [] + refres = [] + for n in range(2, 7): + A = coeff(n) + H, p, x, r = householder(extend(A, y)) + x = matrix(x) + y = matrix(y) + residuals.append(norm(r, 2)) + refres.append(norm(residual(A, x, y), 2)) + assert [round(res, 10) for res in residuals] == [15.1733888877, + 0.82378073210000002, 0.302645887, 0.0260109244, + 0.00058653999999999998] + assert norm(matrix(residuals) - matrix(refres), inf) < 1.e-13 + + def hilbert_cmplx(n): + # Complexified Hilbert matrix + A = hilbert(2*n,n) + v = randmatrix(2*n, 2, min=-1, max=1) + v = v.apply(lambda x: exp(1J*pi()*x)) + A = diag(v[:,0])*A*diag(v[:n,1]) + return A + + residuals_cmplx = [] + refres_cmplx = [] + for n in range(2, 10): + A = hilbert_cmplx(n) + H, p, x, r = householder(A.copy()) + residuals_cmplx.append(norm(r, 2)) + refres_cmplx.append(norm(residual(A[:,:n-1], x, A[:,n-1]), 2)) + assert norm(matrix(residuals_cmplx) - matrix(refres_cmplx), inf) < 1.e-13 +
+def test_factorization(): + A = randmatrix(5) + P, L, U = lu(A) + assert mnorm(P*A - L*U, 1) < 1.e-15 +
+def test_solve(): + assert norm(residual(A6, lu_solve(A6, b6), b6), inf) < 1.e-10 + assert norm(residual(A7, lu_solve(A7, b7), b7), inf) < 1.5 + assert norm(residual(A8, lu_solve(A8, b8), b8), inf) <= 3 + 1.e-10 + assert norm(residual(A6, qr_solve(A6, b6)[0], b6), inf) < 1.e-10 + assert norm(residual(A7, qr_solve(A7, b7)[0], b7), inf) < 1.5 + assert norm(residual(A8, qr_solve(A8, b8)[0], b8), 2) <= 4.3 + assert norm(residual(A10, lu_solve(A10, b10), b10), 2) < 1.e-10 + assert norm(residual(A10, qr_solve(A10, b10)[0], b10), 2) < 1.e-10 +
+def test_solve_overdet_complex(): + A = matrix([[1, 2j], [3, 4j], [5, 6]]) + b = matrix([1 + j, 2, -j]) + assert norm(residual(A, lu_solve(A, b), b)) < 1.0208 +
+def test_singular(): + mp.dps = 15 + A = [[5.6, 1.2], [7./15, .1]] + B = repr(zeros(2)) + b = [1, 2] + for i in ['lu_solve(%s, %s)' % (A, b), 'lu_solve(%s, %s)' % (B, b), + 'qr_solve(%s, %s)' % (A, b), 'qr_solve(%s, %s)' % (B, b)]: + pytest.raises((ZeroDivisionError, ValueError), lambda: eval(i)) +
+def test_cholesky(): + assert fp.cholesky(fp.matrix(A9)) == fp.matrix([[2, 0, 0], [1, 2, 0], [-1, -3/2, 3/2]]) + x = fp.cholesky_solve(A9, b9) + assert fp.norm(fp.residual(A9, x, b9), fp.inf) == 0 +
+def test_det(): + assert det(A1) == 1 + assert round(det(A2), 14) == 8 + assert round(det(A3)) == 1834 + assert round(det(A4)) == 4443376 + assert det(A5) == 1 + assert round(det(A6)) == 78356463 + assert det(zeros(3)) == 0 +
+def test_cond(): + mp.dps = 15 + A = matrix([[1.2969, 0.8648], [0.2161, 0.1441]]) + assert cond(A, lambda x: mnorm(x,1)) == mpf('327065209.73817754') + assert cond(A, lambda x: mnorm(x,inf)) == mpf('327065209.73817754') + assert cond(A, lambda x: mnorm(x,'F')) == mpf('249729266.80008656') +
+@extradps(50) +def test_precision(): + A = randmatrix(10, 10) + assert mnorm(inverse(inverse(A)) - A, 1) < 1.e-45 +
+def test_interval_matrix(): + mp.dps = 15 + iv.dps = 15 + a = 
iv.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']]) + b = iv.matrix(['4','0.6','0.5']) + c = iv.lu_solve(a, b) + assert c[0].delta < 1e-13 + assert c[1].delta < 1e-13 + assert c[2].delta < 1e-13 + assert 5.25823271130625686059275 in c[0] + assert -13.155049396267837541163 in c[1] + assert 7.42069154774972557628979 in c[2] +
+def test_LU_cache(): + A = randmatrix(3) + LU = LU_decomp(A) + assert A._LU == LU_decomp(A) + A[0,0] = -1000 + assert A._LU is None +
+def test_improve_solution(): + A = randmatrix(5, min=1e-20, max=1e20) + b = randmatrix(5, 1, min=-1000, max=1000) + x1 = lu_solve(A, b) + randmatrix(5, 1, min=-1e-5, max=1.e-5) + x2 = improve_solution(A, x1, b) + assert norm(residual(A, x2, b), 2) < norm(residual(A, x1, b), 2) +
+def test_exp_pade(): + for i in range(3): + dps = 15 + extra = 15 + mp.dps = dps + extra + dm = 0 + N = 3 + dg = range(1,N+1) + a = diag(dg) + expa = diag([exp(x) for x in dg]) + # choose a random matrix not close to be singular + # to avoid adding too much extra precision in computing + # m**-1 * M * m + while abs(dm) < 0.01: + m = randmatrix(N) + dm = det(m) + m = m/dm + a1 = m**-1 * a * m + e2 = m**-1 * expa * m + mp.dps = dps + e1 = expm(a1, method='pade') + mp.dps = dps + extra + d = e2 - e1 + #print d + mp.dps = dps + assert norm(d, inf).ae(0) + mp.dps = 15 +
+def test_qr(): + mp.dps = 15 # used default value for dps + lowlimit = -9 # lower limit of matrix element value + uplimit = 9 # upper limit of matrix element value + maxm = 4 # max matrix size + flg = False # toggle to create real vs complex matrix + zero = mpf('0.0') + + for k in xrange(0,10): + exdps = 0 + mode = 'full' + flg = bool(k % 2) + + # generate arbitrary matrix size (2 to maxm) + num1 = nint(maxm*rand()) + num2 = nint(maxm*rand()) + m = int(max(num1, num2)) + n = int(min(num1, num2)) + + # create matrix + A = mp.matrix(m,n) + + # populate matrix values with arbitrary integers + if flg: + flg = False + dtype = 'complex' + for j in xrange(0,n): + for i in xrange(0,m): + val = nint(lowlimit + (uplimit-lowlimit)*rand()) + val2 = nint(lowlimit + (uplimit-lowlimit)*rand()) + A[i,j] = mpc(val, val2) + else: + flg = True + dtype = 'real' + for j in xrange(0,n): + for i in xrange(0,m): + val = nint(lowlimit + (uplimit-lowlimit)*rand()) + A[i,j] = mpf(val) + + # perform A -> QR decomposition + Q, R = qr(A, mode, edps = exdps) + + #print('\n\n A = \n', nstr(A, 4)) + #print('\n Q = \n', nstr(Q, 4)) + #print('\n R = \n', nstr(R, 4)) + #print('\n Q*R = \n', nstr(Q*R, 4)) + + maxnorm = mpf('1.0E-11') + n1 = norm(A - Q * R) + #print '\n Norm of A - Q * R = ', n1 + assert n1 <= maxnorm + + if dtype == 'real': + n1 = norm(eye(m) - Q.T * Q) + #print ' Norm of I - Q.T * Q = ', n1 + assert n1 <= maxnorm + + n1 = norm(eye(m) - Q * Q.T) + #print ' Norm of I - Q * Q.T = ', n1 + assert n1 <= maxnorm + + if dtype == 'complex': + n1 = norm(eye(m) - Q.T * Q.conjugate()) + #print ' Norm of I - Q.T * Q.conjugate() = ', n1 + assert n1 <= maxnorm + + n1 = norm(eye(m) - Q.conjugate() * Q.T) + #print ' Norm of I - Q.conjugate() * Q.T = ', n1 + assert n1 <= maxnorm diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_matrices.py b/MLPY/Lib/site-packages/mpmath/tests/test_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..1547b90664dba66a98a7f026a04a4ed1aa1ed3b4 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_matrices.py @@ -0,0 +1,253 @@ +import pytest +import sys +from mpmath import * +
+def test_matrix_basic(): + A1 = matrix(3) + for i in range(3): + 
A1[i,i] = 1 + assert A1 == eye(3) + assert A1 == matrix(A1) + A2 = matrix(3, 2) + assert not A2._matrix__data + A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + assert list(A3) == list(range(1, 10)) + A3[1,1] = 0 + assert not (1, 1) in A3._matrix__data + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + assert A4 * A5 == matrix([[12, -6], [39, -12]]) + assert A1 * A3 == A3 * A1 == A3 + pytest.raises(ValueError, lambda: A2*A2) + l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]] + A6 = matrix(l) + assert A6.tolist() == l + assert A6 == eval(repr(A6)) + A6 = fp.matrix(A6) + assert A6 == eval(repr(A6)) + assert A6*1j == eval(repr(A6*1j)) + assert A3 * 10 == 10 * A3 == A6 + assert A2.rows == 3 + assert A2.cols == 2 + A3.rows = 2 + A3.cols = 2 + assert len(A3._matrix__data) == 3 + assert A4 + A4 == 2*A4 + pytest.raises(ValueError, lambda: A4 + A2) + assert sum(A1 - A1) == 0 + A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]]) + x = matrix([10, -10]) + assert A7*x == matrix([-10, -10, -10, -10]) + A8 = ones(5) + assert sum((A8 + 1) - (2 - zeros(5))) == 0 + assert (1 + ones(4)) / 2 - 1 == zeros(4) + assert eye(3)**10 == eye(3) + pytest.raises(ValueError, lambda: A7**2) + A9 = randmatrix(3) + A10 = matrix(A9) + A9[0,0] = -100 + assert A9 != A10 + assert nstr(A9) + +def test_matmul(): + """ + Test the PEP465 "@" matrix multiplication syntax. + To avoid syntax errors when importing this file in Python 3.5 and below, we have to use exec() - sorry for that. + """ + # TODO remove exec() wrapper as soon as we drop support for Python <= 3.5 + if sys.hexversion < 0x30500f0: + # we are on Python < 3.5 + pytest.skip("'@' (__matmul__) is only supported in Python 3.5 or newer") + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + exec("assert A4 @ A5 == A4 * A5") + +def test_matrix_slices(): + A = matrix([ [1, 2, 3], + [4, 5 ,6], + [7, 8 ,9]]) + V = matrix([1,2,3,4,5]) + + # Get slice + assert A[:,:] == A + assert A[:,1] == matrix([[2],[5],[8]]) + assert A[2,:] == matrix([[7, 8 ,9]]) + assert A[1:3,1:3] == matrix([[5,6],[8,9]]) + assert V[2:4] == matrix([3,4]) + pytest.raises(IndexError, lambda: A[:,1:6]) + + # Assign slice with matrix + A1 = matrix(3) + A1[:,:] = A + assert A1[:,:] == matrix([[1, 2, 3], + [4, 5 ,6], + [7, 8 ,9]]) + A1[0,:] = matrix([[10, 11, 12]]) + assert A1 == matrix([ [10, 11, 12], + [4, 5 ,6], + [7, 8 ,9]]) + A1[:,2] = matrix([[13], [14], [15]]) + assert A1 == matrix([ [10, 11, 13], + [4, 5 ,14], + [7, 8 ,15]]) + A1[:2,:2] = matrix([[16, 17], [18 , 19]]) + assert A1 == matrix([ [16, 17, 13], + [18, 19 ,14], + [7, 8 ,15]]) + V[1:3] = 10 + assert V == matrix([1,10,10,4,5]) + with pytest.raises(ValueError): + A1[2,:] = A[:,1] + + with pytest.raises(IndexError): + A1[2,1:20] = A[:,:] + + # Assign slice with scalar + A1[:,2] = 10 + assert A1 == matrix([ [16, 17, 10], + [18, 19 ,10], + [7, 8 ,10]]) + A1[:,:] = 40 + for x in A1: + assert x == 40 + + +def test_matrix_power(): + A = matrix([[1, 2], [3, 4]]) + assert A**2 == A*A + assert A**3 == A*A*A + assert A**-1 == inverse(A) + assert A**-2 == inverse(A*A) + +def test_matrix_transform(): + A = matrix([[1, 2], [3, 4], [5, 6]]) + assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]]) + swap_row(A, 1, 2) + assert A == matrix([[1, 2], [5, 6], [3, 4]]) + l = [1, 2] + swap_row(l, 0, 1) + assert l == [2, 1] + assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]]) + +def test_matrix_conjugate(): + A = matrix([[1 + j, 0], [2, j]]) + assert A.conjugate() == matrix([[mpc(1, -1), 
0], [2, mpc(0, -1)]]) + assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2], + [0, mpc(0, -1)]]) + +def test_matrix_creation(): + assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) + A1 = ones(2, 3) + assert A1.rows == 2 and A1.cols == 3 + for a in A1: + assert a == 1 + A2 = zeros(3, 2) + assert A2.rows == 3 and A2.cols == 2 + for a in A2: + assert a == 0 + assert randmatrix(10) != randmatrix(10) + one = mpf(1) + assert hilbert(3) == matrix([[one, one/2, one/3], + [one/2, one/3, one/4], + [one/3, one/4, one/5]]) + +def test_norms(): + # matrix norms + A = matrix([[1, -2], [-3, -1], [2, 1]]) + assert mnorm(A,1) == 6 + assert mnorm(A,inf) == 4 + assert mnorm(A,'F') == sqrt(20) + # vector norms + assert norm(-3) == 3 + x = [1, -2, 7, -12] + assert norm(x, 1) == 22 + assert round(norm(x, 2), 10) == 14.0712472795 + assert round(norm(x, 10), 10) == 12.0054633727 + assert norm(x, inf) == 12 + +def test_vector(): + x = matrix([0, 1, 2, 3, 4]) + assert x == matrix([[0], [1], [2], [3], [4]]) + assert x[3] == 3 + assert len(x._matrix__data) == 4 + assert list(x) == list(range(5)) + x[0] = -10 + x[4] = 0 + assert x[0] == -10 + assert len(x) == len(x.T) == 5 + assert x.T*x == matrix([[114]]) + +def test_matrix_copy(): + A = ones(6) + B = A.copy() + C = +A + assert A == B + assert A == C + B[0,0] = 0 + assert A != B + C[0,0] = 42 + assert A != C + +def test_matrix_numpy(): + try: + import numpy + except ImportError: + return + l = [[1, 2], [3, 4], [5, 6]] + a = numpy.array(l) + assert matrix(l) == matrix(a) + +def test_interval_matrix_scalar_mult(): + """Multiplication of iv.matrix and any scalar type""" + a = mpi(-1, 1) + b = a + a * 2j + c = mpf(42) + d = c + c * 2j + e = 1.234 + f = fp.convert(e) + g = e + e * 3j + h = fp.convert(g) + M = iv.ones(1) + for x in [a, b, c, d, e, f, g, h]: + assert x * M == iv.matrix([x]) + assert M * x == iv.matrix([x]) + +@pytest.mark.xfail() +def test_interval_matrix_matrix_mult(): + """Multiplication of iv.matrix and other matrix types""" + A = ones(1) + B = fp.ones(1) + M = iv.ones(1) + for X in [A, B, M]: + assert X * M == iv.matrix(X) + assert X * M == X + assert M * X == iv.matrix(X) + assert M * X == X + +def test_matrix_conversion_to_iv(): + # Test that matrices with foreign datatypes are properly converted + for other_type_eye in [eye(3), fp.eye(3), iv.eye(3)]: + A = iv.matrix(other_type_eye) + B = iv.eye(3) + assert type(A[0,0]) == type(B[0,0]) + assert A.tolist() == B.tolist() + +def test_interval_matrix_mult_bug(): + # regression test for interval matrix multiplication: + # result must be nonzero-width and contain the exact result + x = convert('1.00000000000001') # note: this is implicitly rounded to some near mpf float value + A = matrix([[x]]) + B = iv.matrix(A) + C = iv.matrix([[x]]) + assert B == C + B = B * B + C = C * C + assert B == C + assert B[0, 0].delta > 1e-16 + assert B[0, 0].delta < 3e-16 + assert C[0, 0].delta > 1e-16 + assert C[0, 0].delta < 3e-16 + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in B[0, 0] + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in C[0, 0] + # the following caused an error before the bug was fixed + assert iv.matrix(mp.eye(2)) * (iv.ones(2) + mpi(1, 2)) == iv.matrix([[mpi(2, 3), mpi(2, 3)], [mpi(2, 3), mpi(2, 3)]]) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_mpmath.py b/MLPY/Lib/site-packages/mpmath/tests/test_mpmath.py new file mode 100644 index 
0000000000000000000000000000000000000000..9f1fe36ae9b1b0feca4677eeb90396bfa7ed8f7a --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_mpmath.py @@ -0,0 +1,7 @@ +from mpmath.libmp import * +from mpmath import * + +def test_newstyle_classes(): + for cls in [mp, fp, iv, mpf, mpc]: + for s in cls.__class__.__mro__: + assert isinstance(s, type) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_ode.py b/MLPY/Lib/site-packages/mpmath/tests/test_ode.py new file mode 100644 index 0000000000000000000000000000000000000000..6b6dbffa79cfd4ca6dbf14f8591296ee48b16682 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_ode.py @@ -0,0 +1,73 @@ +#from mpmath.calculus import ODE_step_euler, ODE_step_rk4, odeint, arange +from mpmath import odefun, cos, sin, mpf, sinc, mp + +''' +solvers = [ODE_step_euler, ODE_step_rk4] + +def test_ode1(): + """ + Let's solve: + + x'' + w**2 * x = 0 + + i.e. x1 = x, x2 = x1': + + x1' = x2 + x2' = -x1 + """ + def derivs((x1, x2), t): + return x2, -x1 + + for solver in solvers: + t = arange(0, 3.1415926, 0.005) + sol = odeint(derivs, (0., 1.), t, solver) + x1 = [a[0] for a in sol] + x2 = [a[1] for a in sol] + # the result is x1 = sin(t), x2 = cos(t) + # let's just check the end points for t = pi + assert abs(x1[-1]) < 1e-2 + assert abs(x2[-1] - (-1)) < 1e-2 + +def test_ode2(): + """ + Let's solve: + + x' - x = 0 + + i.e. x = exp(x) + + """ + def derivs((x), t): + return x + + for solver in solvers: + t = arange(0, 1, 1e-3) + sol = odeint(derivs, (1.,), t, solver) + x = [a[0] for a in sol] + # the result is x = exp(t) + # let's just check the end point for t = 1, i.e. x = e + assert abs(x[-1] - 2.718281828) < 1e-2 +''' + +def test_odefun_rational(): + mp.dps = 15 + # A rational function + f = lambda t: 1/(1+mpf(t)**2) + g = odefun(lambda x, y: [-2*x*y[0]**2], 0, [f(0)]) + assert f(2).ae(g(2)[0]) + +def test_odefun_sinc_large(): + mp.dps = 15 + # Sinc function; test for large x + f = sinc + g = odefun(lambda x, y: [(cos(x)-y[0])/x], 1, [f(1)], tol=0.01, degree=5) + assert abs(f(100) - g(100)[0])/f(100) < 0.01 + +def test_odefun_harmonic(): + mp.dps = 15 + # Harmonic oscillator + f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0]) + for x in [0, 1, 2.5, 8, 3.7]: # we go back to 3.7 to check caching + c, s = f(x) + assert c.ae(cos(x)) + assert s.ae(sin(x)) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_pickle.py b/MLPY/Lib/site-packages/mpmath/tests/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d96e73a53603e0fa3f9525c5c0059725bdffb7 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_pickle.py @@ -0,0 +1,27 @@ +import os +import tempfile +import pickle + +from mpmath import * + +def pickler(obj): + fn = tempfile.mktemp() + + f = open(fn, 'wb') + pickle.dump(obj, f) + f.close() + + f = open(fn, 'rb') + obj2 = pickle.load(f) + f.close() + os.remove(fn) + + return obj2 + +def test_pickle(): + + obj = mpf('0.5') + assert obj == pickler(obj) + + obj = mpc('0.5','0.2') + assert obj == pickler(obj) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_power.py b/MLPY/Lib/site-packages/mpmath/tests/test_power.py new file mode 100644 index 0000000000000000000000000000000000000000..7a2447a62c36f9e02df79b9a40a8603f8a69b1d8 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_power.py @@ -0,0 +1,156 @@ +from mpmath import * +from mpmath.libmp import * + +import random + +def test_fractional_pow(): + mp.dps = 15 + assert mpf(16) ** 2.5 == 1024 + assert mpf(64) ** 0.5 == 8 + assert mpf(64) ** -0.5 == 0.125 + 
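# Half-integer powers of powers of two are exact binary floats, so plain
+ # == is safe; the base-10 results below are irrational and use ae(). +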
assert mpf(16) ** -2.5 == 0.0009765625 + assert (mpf(10) ** 0.5).ae(3.1622776601683791) + assert (mpf(10) ** 2.5).ae(316.2277660168379) + assert (mpf(10) ** -0.5).ae(0.31622776601683794) + assert (mpf(10) ** -2.5).ae(0.0031622776601683794) + assert (mpf(10) ** 0.3).ae(1.9952623149688795) + assert (mpf(10) ** -0.3).ae(0.50118723362727224) +
+def test_pow_integer_direction(): + """ + Test that inexact integer powers are rounded in the right + direction. + """ + random.seed(1234) + for prec in [10, 53, 200]: + for i in range(50): + a = random.randint(1<<(prec-1), 1<<prec) + b = random.randint(1<<(prec-1), 1<<prec) + ab = a*b + assert to_int(mpf_mul(from_int(a), from_int(b), prec, round_down)) < ab + assert to_int(mpf_mul(from_int(a), from_int(b), prec, round_up)) > ab +
+
+def test_pow_epsilon_rounding(): + """ + Stress test directed rounding for powers with integer exponents. + Basically, we look at the following cases: + + >>> 1.0001 ** -5 # doctest: +SKIP + 0.99950014996500702 + >>> 0.9999 ** -5 # doctest: +SKIP + 1.000500150035007 + >>> (-1.0001) ** -5 # doctest: +SKIP + -0.99950014996500702 + >>> (-0.9999) ** -5 # doctest: +SKIP + -1.000500150035007 + + >>> 1.0001 ** -6 # doctest: +SKIP + 0.99940020994401269 + >>> 0.9999 ** -6 # doctest: +SKIP + 1.0006002100560125 + >>> (-1.0001) ** -6 # doctest: +SKIP + 0.99940020994401269 + >>> (-0.9999) ** -6 # doctest: +SKIP + 1.0006002100560125 + + etc. + + We run the tests with values a very small epsilon away from 1: + small enough that the result is indistinguishable from 1 when + rounded to nearest at the output precision. We check that the + result is not erroneously rounded to 1 in cases where the + rounding should be done strictly away from 1. + """ + + def powr(x, n, r): + return make_mpf(mpf_pow_int(x._mpf_, n, mp.prec, r)) + + for (inprec, outprec) in [(100, 20), (5000, 3000)]: + + mp.prec = inprec + + pos10001 = mpf(1) + mpf(2)**(-inprec+5) + pos09999 = mpf(1) - mpf(2)**(-inprec+5) + neg10001 = -pos10001 + neg09999 = -pos09999 + + mp.prec = outprec + r = round_up + assert powr(pos10001, 5, r) > 1 + assert powr(pos09999, 5, r) == 1 + assert powr(neg10001, 5, r) < -1 + assert powr(neg09999, 5, r) == -1 + assert powr(pos10001, 6, r) > 1 + assert powr(pos09999, 6, r) == 1 + assert powr(neg10001, 6, r) > 1 + assert powr(neg09999, 6, r) == 1 + + assert powr(pos10001, -5, r) == 1 + assert powr(pos09999, -5, r) > 1 + assert powr(neg10001, -5, r) == -1 + assert powr(neg09999, -5, r) < -1 + assert powr(pos10001, -6, r) == 1 + assert powr(pos09999, -6, r) > 1 + assert powr(neg10001, -6, r) == 1 + assert powr(neg09999, -6, r) > 1 + + r = round_down + assert powr(pos10001, 5, r) == 1 + assert powr(pos09999, 5, r) < 1 + assert powr(neg10001, 5, r) == -1 + assert powr(neg09999, 5, r) > -1 + assert powr(pos10001, 6, r) == 1 + assert powr(pos09999, 6, r) < 1 + assert powr(neg10001, 6, r) == 1 + assert powr(neg09999, 6, r) < 1 + + assert powr(pos10001, -5, r) < 1 + assert powr(pos09999, -5, r) == 1 + assert powr(neg10001, -5, r) > -1 + assert powr(neg09999, -5, r) == -1 + assert powr(pos10001, -6, r) < 1 + assert powr(pos09999, -6, r) == 1 + assert powr(neg10001, -6, r) < 1 + assert powr(neg09999, -6, r) == 1 + + r = round_ceiling + assert powr(pos10001, 5, r) > 1 + assert powr(pos09999, 5, r) == 1 + assert powr(neg10001, 5, r) == -1 + assert powr(neg09999, 5, r) > -1 + assert powr(pos10001, 6, r) > 1 + assert powr(pos09999, 6, r) == 1 + assert powr(neg10001, 6, r) > 1 + assert powr(neg09999, 6, r) == 1 + + assert powr(pos10001, -5, r) == 1 + assert powr(pos09999, -5, r) > 1 + assert powr(neg10001, -5, r) > -1 + assert powr(neg09999, -5, r) == -1 + assert powr(pos10001, -6, r) == 1 + assert powr(pos09999, -6, r) > 1 + assert powr(neg10001, -6, r) == 1 + 
assert powr(neg09999, -6, r) > 1 + + r = round_floor + assert powr(pos10001, 5, r) == 1 + assert powr(pos09999, 5, r) < 1 + assert powr(neg10001, 5, r) < -1 + assert powr(neg09999, 5, r) == -1 + assert powr(pos10001, 6, r) == 1 + assert powr(pos09999, 6, r) < 1 + assert powr(neg10001, 6, r) == 1 + assert powr(neg09999, 6, r) < 1 + + assert powr(pos10001, -5, r) < 1 + assert powr(pos09999, -5, r) == 1 + assert powr(neg10001, -5, r) == -1 + assert powr(neg09999, -5, r) < -1 + assert powr(pos10001, -6, r) < 1 + assert powr(pos09999, -6, r) == 1 + assert powr(neg10001, -6, r) < 1 + assert powr(neg09999, -6, r) == 1 + + mp.dps = 15 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_quad.py b/MLPY/Lib/site-packages/mpmath/tests/test_quad.py new file mode 100644 index 0000000000000000000000000000000000000000..fc71c5f5ef9c0ecd876c988e7d033b321f065cdc --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_quad.py @@ -0,0 +1,95 @@ +import pytest +from mpmath import * + +def ae(a, b): + return abs(a-b) < 10**(-mp.dps+5) + +def test_basic_integrals(): + for prec in [15, 30, 100]: + mp.dps = prec + assert ae(quadts(lambda x: x**3 - 3*x**2, [-2, 4]), -12) + assert ae(quadgl(lambda x: x**3 - 3*x**2, [-2, 4]), -12) + assert ae(quadts(sin, [0, pi]), 2) + assert ae(quadts(sin, [0, 2*pi]), 0) + assert ae(quadts(exp, [-inf, -1]), 1/e) + assert ae(quadts(lambda x: exp(-x), [0, inf]), 1) + assert ae(quadts(lambda x: exp(-x*x), [-inf, inf]), sqrt(pi)) + assert ae(quadts(lambda x: 1/(1+x*x), [-1, 1]), pi/2) + assert ae(quadts(lambda x: 1/(1+x*x), [-inf, inf]), pi) + assert ae(quadts(lambda x: 2*sqrt(1-x*x), [-1, 1]), pi) + mp.dps = 15 + +def test_multiple_intervals(): + y,err = quad(lambda x: sign(x), [-0.5, 0.9, 1], maxdegree=2, error=True) + assert abs(y-0.5) < 2*err + +def test_quad_symmetry(): + assert quadts(sin, [-1, 1]) == 0 + assert quadgl(sin, [-1, 1]) == 0 + +def test_quad_infinite_mirror(): + # Check mirrored infinite interval + assert ae(quad(lambda x: exp(-x*x), [inf,-inf]), -sqrt(pi)) + assert ae(quad(lambda x: exp(x), [0,-inf]), -1) + +def test_quadgl_linear(): + assert quadgl(lambda x: x, [0, 1], maxdegree=1).ae(0.5) + +def test_complex_integration(): + assert quadts(lambda x: x, [0, 1+j]).ae(j) + +def test_quadosc(): + mp.dps = 15 + assert quadosc(lambda x: sin(x)/x, [0, inf], period=2*pi).ae(pi/2) + +# Double integrals +def test_double_trivial(): + assert ae(quadts(lambda x, y: x, [0, 1], [0, 1]), 0.5) + assert ae(quadts(lambda x, y: x, [-1, 1], [-1, 1]), 0.0) + +def test_double_1(): + assert ae(quadts(lambda x, y: cos(x+y/2), [-pi/2, pi/2], [0, pi]), 4) + +def test_double_2(): + assert ae(quadts(lambda x, y: (x-1)/((1-x*y)*log(x*y)), [0, 1], [0, 1]), euler) + +def test_double_3(): + assert ae(quadts(lambda x, y: 1/sqrt(1+x*x+y*y), [-1, 1], [-1, 1]), 4*log(2+sqrt(3))-2*pi/3) + +def test_double_4(): + assert ae(quadts(lambda x, y: 1/(1-x*x * y*y), [0, 1], [0, 1]), pi**2 / 8) + +def test_double_5(): + assert ae(quadts(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]), pi**2 / 6) + +def test_double_6(): + assert ae(quadts(lambda x, y: exp(-(x+y)), [0, inf], [0, inf]), 1) + +def test_double_7(): + assert ae(quadts(lambda x, y: exp(-x*x-y*y), [-inf, inf], [-inf, inf]), pi) + + +# Test integrals from "Experimentation in Mathematics" by Borwein, +# Bailey & Girgensohn +def test_expmath_integrals(): + for prec in [15, 30, 50]: + mp.dps = prec + assert ae(quadts(lambda x: x/sinh(x), [0, inf]), pi**2 / 4) + assert ae(quadts(lambda x: log(x)**2 / (1+x**2), [0, inf]), pi**3 / 8) + assert ae(quadts(lambda x: 
(1+x**2)/(1+x**4), [0, inf]), pi/sqrt(2)) + assert ae(quadts(lambda x: log(x)/cosh(x)**2, [0, inf]), log(pi)-2*log(2)-euler) + assert ae(quadts(lambda x: log(1+x**3)/(1-x+x**2), [0, inf]), 2*pi*log(3)/sqrt(3)) + assert ae(quadts(lambda x: log(x)**2 / (x**2+x+1), [0, 1]), 8*pi**3 / (81*sqrt(3))) + assert ae(quadts(lambda x: log(cos(x))**2, [0, pi/2]), pi/2 * (log(2)**2+pi**2/12)) + assert ae(quadts(lambda x: x**2 / sin(x)**2, [0, pi/2]), pi*log(2)) + assert ae(quadts(lambda x: x**2/sqrt(exp(x)-1), [0, inf]), 4*pi*(log(2)**2 + pi**2/12)) + assert ae(quadts(lambda x: x*exp(-x)*sqrt(1-exp(-2*x)), [0, inf]), pi*(1+2*log(2))/8) + mp.dps = 15 + +# Do not reach full accuracy +@pytest.mark.xfail +def test_expmath_fail(): + assert ae(quadts(lambda x: sqrt(tan(x)), [0, pi/2]), pi*sqrt(2)/2) + assert ae(quadts(lambda x: atan(x)/(x*sqrt(1-x**2)), [0, 1]), pi*log(1+sqrt(2))/2) + assert ae(quadts(lambda x: log(1+x**2)/x**2, [0, 1]), pi/2-log(2)) + assert ae(quadts(lambda x: x**2/((1+x**4)*sqrt(1-x**4)), [0, 1]), pi/8) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_rootfinding.py b/MLPY/Lib/site-packages/mpmath/tests/test_rootfinding.py new file mode 100644 index 0000000000000000000000000000000000000000..7c3c06463682eb1fd60efeb75b809bbb932a241c --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_rootfinding.py @@ -0,0 +1,91 @@ +import pytest +from mpmath import * +from mpmath.calculus.optimization import Secant, Muller, Bisection, Illinois, \ + Pegasus, Anderson, Ridder, ANewton, Newton, MNewton, MDNewton + +def test_findroot(): + # old tests, assuming secant + mp.dps = 15 + assert findroot(lambda x: 4*x-3, mpf(5)).ae(0.75) + assert findroot(sin, mpf(3)).ae(pi) + assert findroot(sin, (mpf(3), mpf(3.14))).ae(pi) + assert findroot(lambda x: x*x+1, mpc(2+2j)).ae(1j) + # test all solvers with 1 starting point + f = lambda x: cos(x) + for solver in [Newton, Secant, MNewton, Muller, ANewton]: + x = findroot(f, 2., solver=solver) + assert abs(f(x)) < eps + # test all solvers with interval of 2 points + for solver in [Secant, Muller, Bisection, Illinois, Pegasus, Anderson, + Ridder]: + x = findroot(f, (1., 2.), solver=solver) + assert abs(f(x)) < eps + # test types + f = lambda x: (x - 2)**2 + + assert isinstance(findroot(f, 1, tol=1e-10), mpf) + assert isinstance(iv.findroot(f, 1., tol=1e-10), iv.mpf) + assert isinstance(fp.findroot(f, 1, tol=1e-10), float) + assert isinstance(fp.findroot(f, 1+0j, tol=1e-10), complex) + + # issue 401 + with pytest.raises(ValueError): + with workprec(2): + findroot(lambda x: x**2 - 4456178*x + 60372201703370, + mpc(real='5.278e+13', imag='-5.278e+13')) + + # issue 192 + with pytest.raises(ValueError): + findroot(lambda x: -1, 0) + + # issue 387 + with pytest.raises(ValueError): + findroot(lambda p: (1 - p)**30 - 1, 0.9) + +def test_bisection(): + # issue 273 + assert findroot(lambda x: x**2-1,(0,2),solver='bisect') == 1 + +def test_mnewton(): + f = lambda x: polyval([1,3,3,1],x) + x = findroot(f, -0.9, solver='mnewton') + assert abs(f(x)) < eps + +def test_anewton(): + f = lambda x: (x - 2)**100 + x = findroot(f, 1., solver=ANewton) + assert abs(f(x)) < eps + +def test_muller(): + f = lambda x: (2 + x)**3 + 2 + x = findroot(f, 1., solver=Muller) + assert abs(f(x)) < eps + +def test_multiplicity(): + for i in range(1, 5): + assert multiplicity(lambda x: (x - 1)**i, 1) == i + assert multiplicity(lambda x: x**2, 1) == 0 + +def test_multidimensional(): + def f(*x): + return [3*x[0]**2-2*x[1]**2-1, x[0]**2-2*x[0]+x[1]**2+2*x[1]-8] + assert mnorm(jacobian(f, (1,-2)) - 
matrix([[6,8],[0,-2]]),1) < 1.e-7 + for x, error in MDNewton(mp, f, (1,-2), verbose=0, + norm=lambda x: norm(x, inf)): + pass + assert norm(f(*x), 2) < 1e-14 + # The Chinese mathematician Zhu Shijie was the very first to solve this + # nonlinear system 700 years ago + f1 = lambda x, y: -x + 2*y + f2 = lambda x, y: (x**2 + x*(y**2 - 2) - 4*y) / (x + 4) + f3 = lambda x, y: sqrt(x**2 + y**2) + def f(x, y): + f1x = f1(x, y) + return (f2(x, y) - f1x, f3(x, y) - f1x) + x = findroot(f, (10, 10)) + assert [int(round(i)) for i in x] == [3, 4] + +def test_trivial(): + assert findroot(lambda x: 0, 1) == 1 + assert findroot(lambda x: x, 0) == 0 + #assert findroot(lambda x, y: x + y, (1, -1)) == (1, -1) diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_special.py b/MLPY/Lib/site-packages/mpmath/tests/test_special.py new file mode 100644 index 0000000000000000000000000000000000000000..30825abd89ada00f937260cb51ef649546be7021 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_special.py @@ -0,0 +1,113 @@ +from mpmath import * + +def test_special(): + assert inf == inf + assert inf != -inf + assert -inf == -inf + assert inf != nan + assert nan != nan + assert isnan(nan) + assert --inf == inf + assert abs(inf) == inf + assert abs(-inf) == inf + assert abs(nan) != abs(nan) + + assert isnan(inf - inf) + assert isnan(inf + (-inf)) + assert isnan(-inf - (-inf)) + + assert isnan(inf + nan) + assert isnan(-inf + nan) + + assert mpf(2) + inf == inf + assert 2 + inf == inf + assert mpf(2) - inf == -inf + assert 2 - inf == -inf + + assert inf > 3 + assert 3 < inf + assert 3 > -inf + assert -inf < 3 + assert inf > mpf(3) + assert mpf(3) < inf + assert mpf(3) > -inf + assert -inf < mpf(3) + + assert not (nan < 3) + assert not (nan > 3) + + assert isnan(inf * 0) + assert isnan(-inf * 0) + assert inf * 3 == inf + assert inf * -3 == -inf + assert -inf * 3 == -inf + assert -inf * -3 == inf + assert inf * inf == inf + assert -inf * -inf == inf + + assert isnan(nan / 3) + assert inf / -3 == -inf + assert inf / 3 == inf + assert 3 / inf == 0 + assert -3 / inf == 0 + assert 0 / inf == 0 + assert isnan(inf / inf) + assert isnan(inf / -inf) + assert isnan(inf / nan) + + assert mpf('inf') == mpf('+inf') == inf + assert mpf('-inf') == -inf + assert isnan(mpf('nan')) + + assert isinf(inf) + assert isinf(-inf) + assert not isinf(mpf(0)) + assert not isinf(nan) + +def test_special_powers(): + assert inf**3 == inf + assert isnan(inf**0) + assert inf**-3 == 0 + assert (-inf)**2 == inf + assert (-inf)**3 == -inf + assert isnan((-inf)**0) + assert (-inf)**-2 == 0 + assert (-inf)**-3 == 0 + assert isnan(nan**5) + assert isnan(nan**0) + +def test_functions_special(): + assert exp(inf) == inf + assert exp(-inf) == 0 + assert isnan(exp(nan)) + assert log(inf) == inf + assert isnan(log(nan)) + assert isnan(sin(inf)) + assert isnan(sin(nan)) + assert atan(inf).ae(pi/2) + assert atan(-inf).ae(-pi/2) + assert isnan(sqrt(nan)) + assert sqrt(inf) == inf + +def test_convert_special(): + float_inf = 1e300 * 1e300 + float_ninf = -float_inf + float_nan = float_inf/float_ninf + assert mpf(3) * float_inf == inf + assert mpf(3) * float_ninf == -inf + assert isnan(mpf(3) * float_nan) + assert not (mpf(3) < float_nan) + assert not (mpf(3) > float_nan) + assert not (mpf(3) <= float_nan) + assert not (mpf(3) >= float_nan) + assert float(mpf('1e1000')) == float_inf + assert float(mpf('-1e1000')) == float_ninf + assert float(mpf('1e100000000000000000')) == float_inf + assert float(mpf('-1e100000000000000000')) == float_ninf + assert 
float(mpf('1e-100000000000000000')) == 0.0 + +def test_div_bug(): + assert isnan(nan/1) + assert isnan(nan/2) + assert inf/2 == inf + assert (-inf)/2 == -inf diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_str.py b/MLPY/Lib/site-packages/mpmath/tests/test_str.py new file mode 100644 index 0000000000000000000000000000000000000000..569244f252c057ec1029b7efbd8b0ffbfbc47522 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_str.py @@ -0,0 +1,14 @@ +from mpmath import nstr, matrix, inf + +def test_nstr(): + m = matrix([[0.75, 0.190940654, -0.0299195971], + [0.190940654, 0.65625, 0.205663228], + [-0.0299195971, 0.205663228, 0.64453125e-20]]) + assert nstr(m, 4, min_fixed=-inf) == \ + '''[ 0.75 0.1909 -0.02992] +[ 0.1909 0.6563 0.2057] +[-0.02992 0.2057 0.000000000000000000006445]''' + assert nstr(m, 4) == \ + '''[ 0.75 0.1909 -0.02992] +[ 0.1909 0.6563 0.2057] +[-0.02992 0.2057 6.445e-21]''' diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_summation.py b/MLPY/Lib/site-packages/mpmath/tests/test_summation.py new file mode 100644 index 0000000000000000000000000000000000000000..04ffd29f994e1e6310678eec292c0e03f2d6c725 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_summation.py @@ -0,0 +1,53 @@ +from mpmath import * + +def test_sumem(): + mp.dps = 15 + assert sumem(lambda k: 1/k**2.5, [50, 100]).ae(0.0012524505324784962) + assert sumem(lambda k: k**4 + 3*k + 1, [10, 100]).ae(2050333103) + +def test_nsum(): + mp.dps = 15 + assert nsum(lambda x: x**2, [1, 3]) == 14 + assert nsum(lambda k: 1/factorial(k), [0, inf]).ae(e) + assert nsum(lambda k: (-1)**(k+1) / k, [1, inf]).ae(log(2)) + assert nsum(lambda k: (-1)**(k+1) / k**2, [1, inf]).ae(pi**2 / 12) + assert nsum(lambda k: (-1)**k / log(k), [2, inf]).ae(0.9242998972229388) + assert nsum(lambda k: 1/k**2, [1, inf]).ae(pi**2 / 6) + assert nsum(lambda k: 2**k/fac(k), [0, inf]).ae(exp(2)) + assert nsum(lambda k: 1/k**2, [4, inf], method='e').ae(0.2838229557371153) + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf]) - 1.082323233711138) < 1e-5 + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf], method='e') - 1.082323233711138) < 1e-4 + +def test_nprod(): + mp.dps = 15 + assert nprod(lambda k: exp(1/k**2), [1,inf], method='r').ae(exp(pi**2/6)) + assert nprod(lambda x: x**2, [1, 3]) == 36 + +def test_fsum(): + mp.dps = 15 + assert fsum([]) == 0 + assert fsum([-4]) == -4 + assert fsum([2,3]) == 5 + assert fsum([1e-100,1]) == 1 + assert fsum([1,1e-100]) == 1 + assert fsum([1e100,1]) == 1e100 + assert fsum([1,1e100]) == 1e100 + assert fsum([1e-100,0]) == 1e-100 + assert fsum([1e-100,1e100,1e-100]) == 1e100 + assert fsum([2,1+1j,1]) == 4+1j + assert fsum([2,inf,3]) == inf + assert fsum([2,-1], absolute=1) == 3 + assert fsum([2,-1], squared=1) == 5 + assert fsum([1,1+j], squared=1) == 1+2j + assert fsum([1,3+4j], absolute=1) == 6 + assert fsum([1,2+3j], absolute=1, squared=1) == 14 + assert isnan(fsum([inf,-inf])) + assert fsum([inf,-inf], absolute=1) == inf + assert fsum([inf,-inf], squared=1) == inf + assert fsum([inf,-inf], absolute=1, squared=1) == inf + assert iv.fsum([1,mpi(2,3)]) == mpi(3,4) + +def test_fprod(): + mp.dps = 15 + assert fprod([]) == 1 + assert fprod([2,3]) == 6 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_trig.py b/MLPY/Lib/site-packages/mpmath/tests/test_trig.py new file mode 100644 index 0000000000000000000000000000000000000000..c70a2a0ff4c44c784404ecdb15357d5b91a992d6 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_trig.py @@ -0,0 +1,136 @@ +from mpmath import * +from mpmath.libmp 
import * + +def test_trig_misc_hard(): + mp.prec = 53 + # Worst-case input for an IEEE double, from a paper by Kahan + x = ldexp(6381956970095103,797) + assert cos(x) == mpf('-4.6871659242546277e-19') + assert sin(x) == 1 + + mp.prec = 150 + a = mpf(10**50) + mp.prec = 53 + assert sin(a).ae(-0.7896724934293100827) + assert cos(a).ae(-0.6135286082336635622) + + # Check relative accuracy close to x = zero + assert sin(1e-100) == 1e-100 # when rounding to nearest + assert sin(1e-6).ae(9.999999999998333e-007, rel_eps=2e-15, abs_eps=0) + assert sin(1e-6j).ae(1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert sin(-1e-6j).ae(-1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert cos(1e-100) == 1 + assert cos(1e-6).ae(0.9999999999995) + assert cos(-1e-6j).ae(1.0000000000005) + assert tan(1e-100) == 1e-100 + assert tan(1e-6).ae(1.0000000000003335e-006, rel_eps=2e-15, abs_eps=0) + assert tan(1e-6j).ae(9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + assert tan(-1e-6j).ae(-9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + +def test_trig_near_zero(): + mp.dps = 15 + + for r in [round_nearest, round_down, round_up, round_floor, round_ceiling]: + assert sin(0, rounding=r) == 0 + assert cos(0, rounding=r) == 1 + + a = mpf('1e-100') + b = mpf('-1e-100') + + assert sin(a, rounding=round_nearest) == a + assert sin(a, rounding=round_down) < a + assert sin(a, rounding=round_floor) < a + assert sin(a, rounding=round_up) >= a + assert sin(a, rounding=round_ceiling) >= a + assert sin(b, rounding=round_nearest) == b + assert sin(b, rounding=round_down) > b + assert sin(b, rounding=round_floor) <= b + assert sin(b, rounding=round_up) <= b + assert sin(b, rounding=round_ceiling) > b + + assert cos(a, rounding=round_nearest) == 1 + assert cos(a, rounding=round_down) < 1 + assert cos(a, rounding=round_floor) < 1 + assert cos(a, rounding=round_up) == 1 + assert cos(a, rounding=round_ceiling) == 1 + assert cos(b, rounding=round_nearest) == 1 + assert cos(b, rounding=round_down) < 1 + assert cos(b, rounding=round_floor) < 1 + assert cos(b, rounding=round_up) == 1 + assert cos(b, rounding=round_ceiling) == 1 + + +def test_trig_near_n_pi(): + + mp.dps = 15 + a = [n*pi for n in [1, 2, 6, 11, 100, 1001, 10000, 100001]] + mp.dps = 135 + a.append(10**100 * pi) + mp.dps = 15 + + assert sin(a[0]) == mpf('1.2246467991473531772e-16') + assert sin(a[1]) == mpf('-2.4492935982947063545e-16') + assert sin(a[2]) == mpf('-7.3478807948841190634e-16') + assert sin(a[3]) == mpf('4.8998251578625894243e-15') + assert sin(a[4]) == mpf('1.9643867237284719452e-15') + assert sin(a[5]) == mpf('-8.8632615209684813458e-15') + assert sin(a[6]) == mpf('-4.8568235395684898392e-13') + assert sin(a[7]) == mpf('3.9087342299491231029e-11') + assert sin(a[8]) == mpf('-1.369235466754566993528e-36') + + r = round_nearest + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_up + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_down + assert cos(a[0], rounding=r) > 
-1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) < 1 + + r = round_floor + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) < 1 + + r = round_ceiling + assert cos(a[0], rounding=r) > -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) == 1 + + mp.dps = 15 diff --git a/MLPY/Lib/site-packages/mpmath/tests/test_visualization.py b/MLPY/Lib/site-packages/mpmath/tests/test_visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..81ffd05194322f00e4c75dc02bc862b383468bff --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/test_visualization.py @@ -0,0 +1,32 @@ +""" +Limited tests of the visualization module. Right now it just makes +sure that passing custom Axes works. + +""" + +from mpmath import mp, fp + +def test_axes(): + try: + import matplotlib + version = matplotlib.__version__.split("-")[0] + version = version.split(".")[:2] + if [int(_) for _ in version] < [0,99]: + raise ImportError + import pylab + except ImportError: + print("\nSkipping test (pylab not available or too old version)\n") + return + fig = pylab.figure() + axes = fig.add_subplot(111) + for ctx in [mp, fp]: + ctx.plot(lambda x: x**2, [0, 3], axes=axes) + assert axes.get_xlabel() == 'x' + assert axes.get_ylabel() == 'f(x)' + + fig = pylab.figure() + axes = fig.add_subplot(111) + for ctx in [mp, fp]: + ctx.cplot(lambda z: z, [-2, 2], [-10, 10], axes=axes) + assert axes.get_xlabel() == 'Re(z)' + assert axes.get_ylabel() == 'Im(z)' diff --git a/MLPY/Lib/site-packages/mpmath/tests/torture.py b/MLPY/Lib/site-packages/mpmath/tests/torture.py new file mode 100644 index 0000000000000000000000000000000000000000..845d5c6d7d017e51e1ed9a8fe3106cfa32fd967f --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/tests/torture.py @@ -0,0 +1,224 @@ +""" +Torture tests for asymptotics and high precision evaluation of +special functions. + +(Other torture tests may also be placed here.) + +Running this file (gmpy recommended!) takes several CPU minutes. +With Python 2.6+, multiprocessing is used automatically to run tests +in parallel if many cores are available. (A single test may take between +a second and several minutes; possibly more.) + +The idea: + +* We evaluate functions at positive, negative, imaginary, 45- and 135-degree + complex values with magnitudes between 10^-20 to 10^20, at precisions between + 5 and 150 digits (we can go even higher for fast functions). + +* Comparing the result from two different precision levels provides + a strong consistency check (particularly for functions that use + different algorithms at different precision levels). 
+ +* That the computation finishes at all (without failure), within reasonable + time, provides a check that evaluation works at all: that the code runs, + that it doesn't get stuck in an infinite loop, and that it doesn't use + an extremely slow algorithm where it could use a faster one. + +TODO: + +* Speed up those functions that take long to finish! +* Generalize to test more cases; more options. +* Implement a timeout mechanism. +* Some functions are notably absent, including the following: + * inverse trigonometric functions (some become inaccurate for complex arguments) + * ci, si (not implemented properly for large complex arguments) + * zeta functions (need to modify test not to try too large imaginary values) + * and others... + +""" + + +import sys, os +from timeit import default_timer as clock + +if "-nogmpy" in sys.argv: + sys.argv.remove('-nogmpy') + os.environ['MPMATH_NOGMPY'] = 'Y' + +filt = '' +if not sys.argv[-1].endswith(".py"): + filt = sys.argv[-1] + +from mpmath import * +from mpmath.libmp.backend import exec_ + +def test_asymp(f, maxdps=150, verbose=False, huge_range=False): + dps = [5,15,25,50,90,150,500,1500,5000,10000] + dps = [p for p in dps if p <= maxdps] + def check(x,y,p,inpt): + if abs(x-y)/abs(y) < workprec(20)(power)(10, -p+1): + return + print() + print("Error!") + print("Input:", inpt) + print("dps =", p) + print("Result 1:", x) + print("Result 2:", y) + print("Absolute error:", abs(x-y)) + print("Relative error:", abs(x-y)/abs(y)) + raise AssertionError + exponents = list(range(-20,20)) # a list, so that += below also works on Python 3 + if huge_range: + exponents += [-1000, -100, -50, 50, 100, 1000] + for n in exponents: + if verbose: + sys.stdout.write(". ") + mp.dps = 25 + xpos = mpf(10)**n / 1.1287 + xneg = -xpos + ximag = xpos*j + xcomplex1 = xpos*(1+j) + xcomplex2 = xpos*(-1+j) + for i in range(len(dps)): + if verbose: + print("Testing dps = %s" % dps[i]) + mp.dps = dps[i] + new = f(xpos), f(xneg), f(ximag), f(xcomplex1), f(xcomplex2) + if i != 0: + p = dps[i-1] + check(prev[0], new[0], p, xpos) + check(prev[1], new[1], p, xneg) + check(prev[2], new[2], p, ximag) + check(prev[3], new[3], p, xcomplex1) + check(prev[4], new[4], p, xcomplex2) + prev = new + if verbose: + print() + +a1, a2, a3, a4, a5 = 1.5, -2.25, 3.125, 4, 2 + +def test_bernoulli_huge(): + p, q = bernfrac(9000) + assert p % 10**10 == 9636701091 + assert q == 4091851784687571609141381951327092757255270 + mp.dps = 15 + assert str(bernoulli(10**100)) == '-2.58183325604736e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 50 + assert str(bernoulli(10**100)) == '-2.5818332560473632073252488656039475548106223822913e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 15 + +cases = """\ +test_bernoulli_huge() +test_asymp(lambda z: +pi, maxdps=10000) +test_asymp(lambda z: +e, maxdps=10000) +test_asymp(lambda z: +ln2, maxdps=10000) +test_asymp(lambda z: +ln10, maxdps=10000) +test_asymp(lambda z: +phi, maxdps=10000) +test_asymp(lambda z: +catalan, maxdps=5000) +test_asymp(lambda z: +euler, maxdps=5000) +test_asymp(lambda z: +glaisher, maxdps=1000) +test_asymp(lambda z: +khinchin, maxdps=1000) +test_asymp(lambda z: +twinprime, maxdps=150) +test_asymp(lambda z: stieltjes(2), maxdps=150) +test_asymp(lambda z: +mertens, maxdps=150) +test_asymp(lambda z: +apery, maxdps=5000) +test_asymp(sqrt, maxdps=10000, huge_range=True) +test_asymp(cbrt, maxdps=5000, huge_range=True) +test_asymp(lambda z: root(z,4), maxdps=5000, 
huge_range=True) +test_asymp(lambda z: root(z,-5), maxdps=5000, huge_range=True) +test_asymp(exp, maxdps=5000, huge_range=True) +test_asymp(expm1, maxdps=1500) +test_asymp(ln, maxdps=5000, huge_range=True) +test_asymp(cosh, maxdps=5000) +test_asymp(sinh, maxdps=5000) +test_asymp(tanh, maxdps=1500) +test_asymp(sin, maxdps=5000, huge_range=True) +test_asymp(cos, maxdps=5000, huge_range=True) +test_asymp(tan, maxdps=1500) +test_asymp(agm, maxdps=1500, huge_range=True) +test_asymp(ellipk, maxdps=1500) +test_asymp(ellipe, maxdps=1500) +test_asymp(lambertw, huge_range=True) +test_asymp(lambda z: lambertw(z,-1)) +test_asymp(lambda z: lambertw(z,1)) +test_asymp(lambda z: lambertw(z,4)) +test_asymp(gamma) +test_asymp(loggamma) # huge_range=True ? +test_asymp(ei) +test_asymp(e1) +test_asymp(li, huge_range=True) +test_asymp(ci) +test_asymp(si) +test_asymp(chi) +test_asymp(shi) +test_asymp(erf) +test_asymp(erfc) +test_asymp(erfi) +test_asymp(lambda z: besselj(2, z)) +test_asymp(lambda z: bessely(2, z)) +test_asymp(lambda z: besseli(2, z)) +test_asymp(lambda z: besselk(2, z)) +test_asymp(lambda z: besselj(-2.25, z)) +test_asymp(lambda z: bessely(-2.25, z)) +test_asymp(lambda z: besseli(-2.25, z)) +test_asymp(lambda z: besselk(-2.25, z)) +test_asymp(airyai) +test_asymp(airybi) +test_asymp(lambda z: hyp0f1(a1, z)) +test_asymp(lambda z: hyp1f1(a1, a2, z)) +test_asymp(lambda z: hyp1f2(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f0(a1, a2, z)) +test_asymp(lambda z: hyperu(a1, a2, z)) +test_asymp(lambda z: hyp2f1(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f2(a1, a2, a3, a4, z)) +test_asymp(lambda z: hyp2f3(a1, a2, a3, a4, a5, z)) +test_asymp(lambda z: coulombf(a1, a2, z)) +test_asymp(lambda z: coulombg(a1, a2, z)) +test_asymp(lambda z: polylog(2,z)) +test_asymp(lambda z: polylog(3,z)) +test_asymp(lambda z: polylog(-2,z)) +test_asymp(lambda z: expint(4, z)) +test_asymp(lambda z: expint(-4, z)) +test_asymp(lambda z: expint(2.25, z)) +test_asymp(lambda z: gammainc(2.5, z, 5)) +test_asymp(lambda z: gammainc(2.5, 5, z)) +test_asymp(lambda z: hermite(3, z)) +test_asymp(lambda z: hermite(2.5, z)) +test_asymp(lambda z: legendre(3, z)) +test_asymp(lambda z: legendre(4, z)) +test_asymp(lambda z: legendre(2.5, z)) +test_asymp(lambda z: legenp(a1, a2, z)) +test_asymp(lambda z: legenq(a1, a2, z), maxdps=90) # abnormally slow +test_asymp(lambda z: jtheta(1, z, 0.5)) +test_asymp(lambda z: jtheta(2, z, 0.5)) +test_asymp(lambda z: jtheta(3, z, 0.5)) +test_asymp(lambda z: jtheta(4, z, 0.5)) +test_asymp(lambda z: jtheta(1, z, 0.5, 1)) +test_asymp(lambda z: jtheta(2, z, 0.5, 1)) +test_asymp(lambda z: jtheta(3, z, 0.5, 1)) +test_asymp(lambda z: jtheta(4, z, 0.5, 1)) +test_asymp(barnesg, maxdps=90) +""" + +def testit(line): + if filt in line: + print(line) + t1 = clock() + exec_(line, globals(), locals()) + t2 = clock() + elapsed = t2-t1 + print("Time:", elapsed, "for", line, "(OK)") + +if __name__ == '__main__': + try: + from multiprocessing import Pool + mapf = Pool(None).map + print("Running tests with multiprocessing") + except ImportError: + print("Not using multiprocessing") + mapf = map + t1 = clock() + tasks = cases.splitlines() + mapf(testit, tasks) + t2 = clock() + print("Cumulative wall time:", t2-t1) diff --git a/MLPY/Lib/site-packages/mpmath/usertools.py b/MLPY/Lib/site-packages/mpmath/usertools.py new file mode 100644 index 0000000000000000000000000000000000000000..8028a4c46f1c635a6857f1f2de48ac6675d3c6d3 --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/usertools.py @@ -0,0 +1,93 @@ + +def monitor(f, input='print', 
output='print'): + """ + Returns a wrapped copy of *f* that monitors evaluation by calling + *input* with every input (*args*, *kwargs*) passed to *f* and + *output* with every value returned from *f*. The default action + (specify using the special string value ``'print'``) is to print + inputs and outputs to stdout, along with the total evaluation + count:: + + >>> from mpmath import * + >>> mp.dps = 5; mp.pretty = False + >>> diff(monitor(exp), 1) # diff will eval f(x-h) and f(x+h) + in 0 (mpf('0.99999999906867742538452148'),) {} + out 0 mpf('2.7182818259274480055282064') + in 1 (mpf('1.0000000009313225746154785'),) {} + out 1 mpf('2.7182818309906424675501024') + mpf('2.7182808') + + To disable either the input or the output handler, you may + pass *None* as argument. + + Custom input and output handlers may be used e.g. to store + results for later analysis:: + + >>> mp.dps = 15 + >>> input = [] + >>> output = [] + >>> findroot(monitor(sin, input.append, output.append), 3.0) + mpf('3.1415926535897932') + >>> len(input) # Count number of evaluations + 9 + >>> print(input[3]); print(output[3]) + ((mpf('3.1415076583334066'),), {}) + 8.49952562843408e-5 + >>> print(input[4]); print(output[4]) + ((mpf('3.1415928201669122'),), {}) + -1.66577118985331e-7 + + """ + if not input: + input = lambda v: None + elif input == 'print': + incount = [0] + def input(value): + args, kwargs = value + print("in %s %r %r" % (incount[0], args, kwargs)) + incount[0] += 1 + if not output: + output = lambda v: None + elif output == 'print': + outcount = [0] + def output(value): + print("out %s %r" % (outcount[0], value)) + outcount[0] += 1 + def f_monitored(*args, **kwargs): + input((args, kwargs)) + v = f(*args, **kwargs) + output(v) + return v + return f_monitored + +def timing(f, *args, **kwargs): + """ + Returns time elapsed for evaluating ``f()``. Optionally arguments + may be passed to time the execution of ``f(*args, **kwargs)``. + + If the first call is very quick, ``f`` is called + repeatedly and the best time is returned. + """ + once = kwargs.get('once') + if 'once' in kwargs: + del kwargs['once'] + if args or kwargs: + if len(args) == 1 and not kwargs: + arg = args[0] + g = lambda: f(arg) + else: + g = lambda: f(*args, **kwargs) + else: + g = f + from timeit import default_timer as clock + t1=clock(); v=g(); t2=clock(); t=t2-t1 + if t > 0.05 or once: + return t + for i in range(3): + t1=clock(); + # Evaluate multiple times because the timer function + # has a significant overhead + g();g();g();g();g();g();g();g();g();g() + t2=clock() + t=min(t,(t2-t1)/10) + return t diff --git a/MLPY/Lib/site-packages/mpmath/visualization.py b/MLPY/Lib/site-packages/mpmath/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..17e12e97bead4f2977b59361a4de7672f0e9b75f --- /dev/null +++ b/MLPY/Lib/site-packages/mpmath/visualization.py @@ -0,0 +1,313 @@ +""" +Plotting (requires matplotlib) +""" + +from colorsys import hsv_to_rgb, hls_to_rgb +from .libmp import NoConvergence +from .libmp.backend import xrange + +class VisualizationMethods(object): + plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence) + +def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None, + singularities=[], axes=None): + r""" + Shows a simple 2D plot of a function `f(x)` or list of functions + `[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval + specified by *xlim*. 
Some examples:: + + plot(lambda x: exp(x)*li(x), [1, 4]) + plot([cos, sin], [-4, 4]) + plot([fresnels, fresnelc], [-4, 4]) + plot([sqrt, cbrt], [-4, 4]) + plot(lambda t: zeta(0.5+t*j), [-20, 20]) + plot([floor, ceil, abs, sign], [-5, 5]) + + Points where the function raises a numerical exception or + returns an infinite value are removed from the graph. + Singularities can also be excluded explicitly + as follows (useful for removing erroneous vertical lines):: + + plot(cot, ylim=[-5, 5]) # bad + plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good + + For parts where the function assumes complex values, the + real part is plotted with dashes and the imaginary part + is plotted with dots. + + .. note :: This function requires matplotlib (pylab). + """ + if file: + axes = None + fig = None + if not axes: + import pylab + fig = pylab.figure() + axes = fig.add_subplot(111) + if not isinstance(f, (tuple, list)): + f = [f] + a, b = xlim + colors = ['b', 'r', 'g', 'm', 'k'] + for n, func in enumerate(f): + x = ctx.arange(a, b, (b-a)/float(points)) + segments = [] + segment = [] + in_complex = False + for i in xrange(len(x)): + try: + if i != 0: + for sing in singularities: + if x[i-1] <= sing and x[i] >= sing: + raise ValueError + v = func(x[i]) + if ctx.isnan(v) or abs(v) > 1e300: + raise ValueError + if hasattr(v, "imag") and v.imag: + re = float(v.real) + im = float(v.imag) + if not in_complex: + in_complex = True + segments.append(segment) + segment = [] + segment.append((float(x[i]), re, im)) + else: + if in_complex: + in_complex = False + segments.append(segment) + segment = [] + if hasattr(v, "real"): + v = v.real + segment.append((float(x[i]), v)) + except ctx.plot_ignore: + if segment: + segments.append(segment) + segment = [] + if segment: + segments.append(segment) + for segment in segments: + x = [s[0] for s in segment] + y = [s[1] for s in segment] + if not x: + continue + c = colors[n % len(colors)] + if len(segment[0]) == 3: + z = [s[2] for s in segment] + axes.plot(x, y, '--'+c, linewidth=3) + axes.plot(x, z, ':'+c, linewidth=3) + else: + axes.plot(x, y, c, linewidth=3) + axes.set_xlim([float(_) for _ in xlim]) + if ylim: + axes.set_ylim([float(_) for _ in ylim]) + axes.set_xlabel('x') + axes.set_ylabel('f(x)') + axes.grid(True) + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + pylab.show() + +def default_color_function(ctx, z): + if ctx.isinf(z): + return (1.0, 1.0, 1.0) + if ctx.isnan(z): + return (0.5, 0.5, 0.5) + pi = 3.1415926535898 + a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi) + a = (a + 0.5) % 1.0 + b = 1.0 - float(1/(1.0+abs(z)**0.3)) + return hls_to_rgb(a, b, 0.8) + +blue_orange_colors = [ + (-1.0, (0.0, 0.0, 0.0)), + (-0.95, (0.1, 0.2, 0.5)), # dark blue + (-0.5, (0.0, 0.5, 1.0)), # blueish + (-0.05, (0.4, 0.8, 0.8)), # cyanish + ( 0.0, (1.0, 1.0, 1.0)), + ( 0.05, (1.0, 0.9, 0.3)), # yellowish + ( 0.5, (0.9, 0.5, 0.0)), # orangeish + ( 0.95, (0.7, 0.1, 0.0)), # redish + ( 1.0, (0.0, 0.0, 0.0)), + ( 2.0, (0.0, 0.0, 0.0)), +] + +def phase_color_function(ctx, z): + if ctx.isinf(z): + return (1.0, 1.0, 1.0) + if ctx.isnan(z): + return (0.5, 0.5, 0.5) + pi = 3.1415926535898 + w = float(ctx.arg(z)) / pi + w = max(min(w, 1.0), -1.0) + for i in range(1,len(blue_orange_colors)): + if blue_orange_colors[i][0] > w: + a, (ra, ga, ba) = blue_orange_colors[i-1] + b, (rb, gb, bb) = blue_orange_colors[i] + s = (w-a) / (b-a) + return ra+(rb-ra)*s, ga+(gb-ga)*s, ba+(bb-ba)*s + +def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None, + verbose=False, 
file=None, dpi=None, axes=None): + """ + Plots the given complex-valued function *f* over a rectangular part + of the complex plane specified by the pairs of intervals *re* and *im*. + For example:: + + cplot(lambda z: z, [-2, 2], [-10, 10]) + cplot(exp) + cplot(zeta, [0, 1], [0, 50]) + + By default, the complex argument (phase) is shown as color (hue) and + the magnitude is shown as brightness. You can also supply a + custom color function (*color*). This function should take a + complex number as input and return an RGB 3-tuple containing + floats in the range 0.0-1.0. + + Alternatively, you can select a builtin color function by passing + a string as *color*: + + * "default" - default color scheme + * "phase" - a color scheme that only renders the phase of the function, + with white for positive reals, black for negative reals, gold in the + upper half plane, and blue in the lower half plane. + + To obtain a sharp image, the number of points may need to be + increased to 100,000 or thereabout. Since evaluating the + function that many times is likely to be slow, the 'verbose' + option is useful to display progress. + + .. note :: This function requires matplotlib (pylab). + """ + if color is None or color == "default": + color = ctx.default_color_function + if color == "phase": + color = ctx.phase_color_function + import pylab + if file: + axes = None + fig = None + if not axes: + fig = pylab.figure() + axes = fig.add_subplot(111) + rea, reb = re + ima, imb = im + dre = reb - rea + dim = imb - ima + M = int(ctx.sqrt(points*dre/dim)+1) + N = int(ctx.sqrt(points*dim/dre)+1) + x = pylab.linspace(rea, reb, M) + y = pylab.linspace(ima, imb, N) + # Note: we have to be careful to get the right rotation. + # Test with these plots: + # cplot(lambda z: z if z.real < 0 else 0) + # cplot(lambda z: z if z.imag < 0 else 0) + w = pylab.zeros((N, M, 3)) + for n in xrange(N): + for m in xrange(M): + z = ctx.mpc(x[m], y[n]) + try: + v = color(f(z)) + except ctx.plot_ignore: + v = (0.5, 0.5, 0.5) + w[n,m] = v + if verbose: + print(str(n) + ' of ' + str(N)) + rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]] + axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower') + axes.set_xlabel('Re(z)') + axes.set_ylabel('Im(z)') + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + pylab.show() + +def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \ + wireframe=False, file=None, dpi=None, axes=None): + """ + Plots the surface defined by `f`. + + If `f` returns a single component, then this plots the surface + defined by `z = f(x,y)` over the rectangular domain with + `x = u` and `y = v`. + + If `f` returns three components, then this plots the parametric + surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`. + + For example, to plot a simple function:: + + >>> from mpmath import * + >>> f = lambda x, y: sin(x+y)*cos(y) + >>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP + + Plotting a donut:: + + >>> r, R = 1, 2.5 + >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)] + >>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP + + .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher. 
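+ + The *wireframe* option in the signature above switches to a mesh + rendering; as a minimal illustrative variant, the same donut *f* + can be drawn as a wireframe:: + + >>> splot(f, [0, 2*pi], [0, 2*pi], wireframe=True) # doctest: +SKIP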
+ """ + import pylab + import mpl_toolkits.mplot3d as mplot3d + if file: + axes = None + fig = None + if not axes: + fig = pylab.figure() + axes = mplot3d.axes3d.Axes3D(fig) + ua, ub = u + va, vb = v + du = ub - ua + dv = vb - va + if not isinstance(points, (list, tuple)): + points = [points, points] + M, N = points + u = pylab.linspace(ua, ub, M) + v = pylab.linspace(va, vb, N) + x, y, z = [pylab.zeros((M, N)) for i in xrange(3)] + xab, yab, zab = [[0, 0] for i in xrange(3)] + for n in xrange(N): + for m in xrange(M): + fdata = f(ctx.convert(u[m]), ctx.convert(v[n])) + try: + x[m,n], y[m,n], z[m,n] = fdata + except TypeError: + x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata + for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]: + if c < cab[0]: + cab[0] = c + if c > cab[1]: + cab[1] = c + if wireframe: + axes.plot_wireframe(x, y, z, rstride=4, cstride=4) + else: + axes.plot_surface(x, y, z, rstride=4, cstride=4) + axes.set_xlabel('x') + axes.set_ylabel('y') + axes.set_zlabel('z') + if keep_aspect: + dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]] + maxd = max(dx, dy, dz) + if dx < maxd: + delta = maxd - dx + axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0) + if dy < maxd: + delta = maxd - dy + axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0) + if dz < maxd: + delta = maxd - dz + axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0) + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + pylab.show() + + +VisualizationMethods.plot = plot +VisualizationMethods.default_color_function = default_color_function +VisualizationMethods.phase_color_function = phase_color_function +VisualizationMethods.cplot = cplot +VisualizationMethods.splot = splot diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..42b6f17a65ef6636d00c93aefb514441145ecc94 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/LICENSE.txt @@ -0,0 +1,37 @@ +NetworkX is distributed with the 3-clause BSD license. + +:: + + Copyright (C) 2004-2023, NetworkX Developers + Aric Hagberg <hagberg@lanl.gov> + Dan Schult <dschult@colgate.edu> + Pieter Swart <swart@lanl.gov> + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NetworkX Developers nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/METADATA b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..ac51fe019585ed14b015f89a8d8908ccc7c356c2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/METADATA @@ -0,0 +1,135 @@ +Metadata-Version: 2.1 +Name: networkx +Version: 3.2.1 +Summary: Python package for creating and manipulating graphs and networks +Author-email: Aric Hagberg <hagberg@lanl.gov> +Maintainer-email: NetworkX Developers <networkx-discuss@googlegroups.com> +Project-URL: Homepage, https://networkx.org/ +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: >=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: default +Requires-Dist: numpy >=1.22 ; extra == 'default' +Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default' +Requires-Dist: matplotlib >=3.5 ; extra == 'default' +Requires-Dist: pandas >=1.4 ; extra == 'default' +Provides-Extra: developer +Requires-Dist: changelist ==0.4 ; extra == 'developer' +Requires-Dist: pre-commit >=3.2 ; extra == 'developer' +Requires-Dist: mypy >=1.1 ; extra == 'developer' +Requires-Dist: rtoml ; extra == 'developer' +Provides-Extra: doc +Requires-Dist: sphinx >=7 ; extra == 'doc' +Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc' +Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc' +Requires-Dist: numpydoc >=1.6 ; extra == 'doc' +Requires-Dist: pillow >=9.4 ; extra == 'doc' +Requires-Dist: nb2plots >=0.7 ; extra == 'doc' +Requires-Dist: texext >=0.6.7 ; extra == 'doc' +Requires-Dist: nbconvert <7.9 ; extra == 'doc' +Provides-Extra: extra +Requires-Dist: lxml >=4.6 ; extra == 'extra' +Requires-Dist: pygraphviz >=1.11 ; extra == 'extra' +Requires-Dist: pydot >=1.4.2 ; extra == 'extra' +Requires-Dist: sympy >=1.10 ; extra == 
'extra' +Provides-Extra: test +Requires-Dist: pytest >=7.2 ; extra == 'test' +Requires-Dist: pytest-cov >=4.0 ; extra == 'test' + +NetworkX +======== + + +.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main + :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22 + +.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg + :target: https://app.codecov.io/gh/networkx/networkx/branch/main + +.. image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square + :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22 + + +NetworkX is a Python package for the creation, manipulation, +and study of the structure, dynamics, and functions +of complex networks. + +- **Website (including documentation):** https://networkx.org +- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss +- **Source:** https://github.com/networkx/networkx +- **Bug reports:** https://github.com/networkx/networkx/issues +- **Report a security vulnerability:** https://tidelift.com/security +- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html +- **GitHub Discussions:** https://github.com/networkx/networkx/discussions + +Simple example +-------------- + +Find the shortest path between two nodes in an undirected graph: + +.. code:: pycon + + >>> import networkx as nx + >>> G = nx.Graph() + >>> G.add_edge("A", "B", weight=4) + >>> G.add_edge("B", "D", weight=2) + >>> G.add_edge("A", "C", weight=3) + >>> G.add_edge("C", "D", weight=4) + >>> nx.shortest_path(G, "A", "D", weight="weight") + ['A', 'B', 'D'] + +(A companion query for the weighted path length is shown further below, just before the license.) + +Install +------- + +Install the latest version of NetworkX:: + + $ pip install networkx + +Install with all optional dependencies:: + + $ pip install networkx[all] + +For additional details, please see `INSTALL.rst`. + +Bugs +---- + +Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_. +Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_ +and create a pull request (PR). We welcome all changes, big or small, and we +will help you make the PR if you are new to `git` (just ask on the issue and/or +see `CONTRIBUTING.rst`). 
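+ +A minimal companion to the *Simple example* above, reusing the same graph ``G`` to get the weighted length of that path: + +.. code:: pycon + + >>> nx.shortest_path_length(G, "A", "D", weight="weight") + 6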
+ +License + ------- + +Released under the 3-Clause BSD license (see `LICENSE.txt`):: + + Copyright (C) 2004-2023 NetworkX Developers + Aric Hagberg <hagberg@lanl.gov> + Dan Schult <dschult@colgate.edu> + Pieter Swart <swart@lanl.gov> diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/RECORD b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..8c3ee9d034dcd55b6e336bfedb24182f8b726b7b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/RECORD @@ -0,0 +1,1137 @@ +networkx-3.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +networkx-3.2.1.dist-info/LICENSE.txt,sha256=ULWifLQ_eiDO3nqnuasgM1UuBBLJof3lHTiIXBQX6V8,1763 +networkx-3.2.1.dist-info/METADATA,sha256=tEByL1NhNlpdXiGfQDexQA_h5H6sFB1UMtQUJwDr3xQ,5232 +networkx-3.2.1.dist-info/RECORD,, +networkx-3.2.1.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +networkx-3.2.1.dist-info/entry_points.txt,sha256=b0FW-zm-m9itB-Zkm7w_8c9yX9WGGTg-r_N_A32PAGs,87 +networkx-3.2.1.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9 +networkx/__init__.py,sha256=WwK4KM7w30c5F2xUgs4N0ylwaixla1rJ-4qThcPnjho,1091 +networkx/__pycache__/__init__.cpython-39.pyc,, +networkx/__pycache__/conftest.cpython-39.pyc,, +networkx/__pycache__/convert.cpython-39.pyc,, +networkx/__pycache__/convert_matrix.cpython-39.pyc,, +networkx/__pycache__/exception.cpython-39.pyc,, +networkx/__pycache__/lazy_imports.cpython-39.pyc,, +networkx/__pycache__/relabel.cpython-39.pyc,, +networkx/algorithms/__init__.py,sha256=Rz_AEhB6u0naGx7ejzbvXrMXixWM47ydmjnGOuofzqo,6512 +networkx/algorithms/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/__pycache__/asteroidal.cpython-39.pyc,, +networkx/algorithms/__pycache__/boundary.cpython-39.pyc,, +networkx/algorithms/__pycache__/bridges.cpython-39.pyc,, +networkx/algorithms/__pycache__/chains.cpython-39.pyc,, +networkx/algorithms/__pycache__/chordal.cpython-39.pyc,, +networkx/algorithms/__pycache__/clique.cpython-39.pyc,, +networkx/algorithms/__pycache__/cluster.cpython-39.pyc,, +networkx/algorithms/__pycache__/communicability_alg.cpython-39.pyc,, +networkx/algorithms/__pycache__/core.cpython-39.pyc,, +networkx/algorithms/__pycache__/covering.cpython-39.pyc,, +networkx/algorithms/__pycache__/cuts.cpython-39.pyc,, +networkx/algorithms/__pycache__/cycles.cpython-39.pyc,, +networkx/algorithms/__pycache__/d_separation.cpython-39.pyc,, +networkx/algorithms/__pycache__/dag.cpython-39.pyc,, +networkx/algorithms/__pycache__/distance_measures.cpython-39.pyc,, +networkx/algorithms/__pycache__/distance_regular.cpython-39.pyc,, +networkx/algorithms/__pycache__/dominance.cpython-39.pyc,, +networkx/algorithms/__pycache__/dominating.cpython-39.pyc,, +networkx/algorithms/__pycache__/efficiency_measures.cpython-39.pyc,, +networkx/algorithms/__pycache__/euler.cpython-39.pyc,, +networkx/algorithms/__pycache__/graph_hashing.cpython-39.pyc,, +networkx/algorithms/__pycache__/graphical.cpython-39.pyc,, +networkx/algorithms/__pycache__/hierarchy.cpython-39.pyc,, +networkx/algorithms/__pycache__/hybrid.cpython-39.pyc,, +networkx/algorithms/__pycache__/isolate.cpython-39.pyc,, +networkx/algorithms/__pycache__/link_prediction.cpython-39.pyc,, +networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-39.pyc,, +networkx/algorithms/__pycache__/matching.cpython-39.pyc,, +networkx/algorithms/__pycache__/mis.cpython-39.pyc,, +networkx/algorithms/__pycache__/moral.cpython-39.pyc,, +networkx/algorithms/__pycache__/node_classification.cpython-39.pyc,, 
+networkx/algorithms/__pycache__/non_randomness.cpython-39.pyc,, +networkx/algorithms/__pycache__/planar_drawing.cpython-39.pyc,, +networkx/algorithms/__pycache__/planarity.cpython-39.pyc,, +networkx/algorithms/__pycache__/polynomials.cpython-39.pyc,, +networkx/algorithms/__pycache__/reciprocity.cpython-39.pyc,, +networkx/algorithms/__pycache__/regular.cpython-39.pyc,, +networkx/algorithms/__pycache__/richclub.cpython-39.pyc,, +networkx/algorithms/__pycache__/similarity.cpython-39.pyc,, +networkx/algorithms/__pycache__/simple_paths.cpython-39.pyc,, +networkx/algorithms/__pycache__/smallworld.cpython-39.pyc,, +networkx/algorithms/__pycache__/smetric.cpython-39.pyc,, +networkx/algorithms/__pycache__/sparsifiers.cpython-39.pyc,, +networkx/algorithms/__pycache__/structuralholes.cpython-39.pyc,, +networkx/algorithms/__pycache__/summarization.cpython-39.pyc,, +networkx/algorithms/__pycache__/swap.cpython-39.pyc,, +networkx/algorithms/__pycache__/threshold.cpython-39.pyc,, +networkx/algorithms/__pycache__/time_dependent.cpython-39.pyc,, +networkx/algorithms/__pycache__/tournament.cpython-39.pyc,, +networkx/algorithms/__pycache__/triads.cpython-39.pyc,, +networkx/algorithms/__pycache__/vitality.cpython-39.pyc,, +networkx/algorithms/__pycache__/voronoi.cpython-39.pyc,, +networkx/algorithms/__pycache__/walks.cpython-39.pyc,, +networkx/algorithms/__pycache__/wiener.cpython-39.pyc,, +networkx/algorithms/approximation/__init__.py,sha256=zf9NM64g-aZwEGqI5C0DpU5FML2GrkaaQsO6SW85atE,1177 +networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc,, +networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc,, +networkx/algorithms/approximation/clique.py,sha256=y4AeIvmGpMmM0kKRaFQKEORyUVmwakM_Vqz0Mdgnd94,7674 +networkx/algorithms/approximation/clustering_coefficient.py,sha256=jbWKI0fd79_vH4lHkzPXU1cyD8mb9MDej3v9OKQnv5s,2084 +networkx/algorithms/approximation/connectivity.py,sha256=PbGbwOk3dE4phai74vxz7pj1lK3q_gbYrNtMltu2MDw,13107 +networkx/algorithms/approximation/distance_measures.py,sha256=9MUKk23jP7klSgN718h7ttVM_xKPE4dMkxop7BvjkcU,5593 +networkx/algorithms/approximation/dominating_set.py,sha256=oGr5hRltwqd3JltGX-LIC7Sq7LGjltfdUGmBXWdAR7s,4214 +networkx/algorithms/approximation/kcomponents.py,sha256=AN9co8R7Lmgx5N_CbSrSmkQhfFbfbsJtwrUxSDBd284,13282 +networkx/algorithms/approximation/matching.py,sha256=1xfEMDlvcV8J78kgUhbC9FkaTnwC6mjYtNesGTYgmVg,1170 +networkx/algorithms/approximation/maxcut.py,sha256=rWHElMJvy5g4Yy_Tk11tlFFwQ16AV07K6rW1bIwWHTU,3664 +networkx/algorithms/approximation/ramsey.py,sha256=xOZDmJqCm-ya7utQhQOmVM_5LO-O_1e5UDw-YIWr9rY,1353 
+networkx/algorithms/approximation/steinertree.py,sha256=tE-4_f1fPpR_hvCudJlzNzkYq_5kyT4OxxJBbfErWJ8,7487 +networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc,, +networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171 +networkx/algorithms/approximation/tests/test_clique.py,sha256=JZ_ja03aVU7vnZ42Joy1ze0vjdcm_CnDhD96Z4W_Dcc,3022 +networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952 +networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=GSyupA_jqSc_pLPSMnZFNcBgZc8-KFWgt6Q7uFegTqg,2024 +networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=l4pBDY7pK7Fxw-S4tOlNcxf-j2j5GpHPJ9f4TrMs1sI,2686 +networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=tTljP1FHzXrUwi-oBz5AQcibRw1NgR4N5UE0a2OrOUA,9346 +networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186 +networkx/algorithms/approximation/tests/test_maxcut.py,sha256=HDFNx896WYi7do42P6C5tGTZsBpiqx7sUWm_2riE3nk,2426 +networkx/algorithms/approximation/tests/test_ramsey.py,sha256=h36Ol39csHbIoTDBxbxMgn4371iVUGZ3a2N6l7d56lI,1143 +networkx/algorithms/approximation/tests/test_steinertree.py,sha256=H6IKKl1kFeH96bJaI8CgSkXBJz34ceCft8DA7HNG-Mk,6901 +networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=El7VoCuHfmb_DQxlQgo5k9L6lL6U4DBu70BgJ0REJyg,30697 +networkx/algorithms/approximation/tests/test_treewidth.py,sha256=MWFFcmjO0QxM8FS8iXSCtfGnk6eqG2kFyv1u2qnSeUo,9096 +networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942 +networkx/algorithms/approximation/traveling_salesman.py,sha256=FbysLItH41SzBjKIoByYqcvuqIboO8HwNYCO2DJjQ4g,54465 +networkx/algorithms/approximation/treewidth.py,sha256=MRGfLtAanCzDk1G6I6jTbC6MKn6lYreIe9XQdfRXGHE,8148 +networkx/algorithms/approximation/vertex_cover.py,sha256=s7s5v4TGqIlvgTAg2FVxRRUSA2BEp7szZg7FS_UpWAA,2798 +networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294 +networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc,, 
+networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc,, +networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc,, +networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc,, +networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc,, +networkx/algorithms/assortativity/connectivity.py,sha256=O1b3Iky0hlpdM6_QBmBNFfF4XeUsKDMj8fCid_bBRQE,4216 +networkx/algorithms/assortativity/correlation.py,sha256=6XUlbqlBgLyb8GDKsSvIrSgsL47Ti1jbLQoIdRVAj_k,8654 +networkx/algorithms/assortativity/mixing.py,sha256=adB-iqzA_lhjhnoOOZG9qK4ghRTZCjONYN9FePYqcj8,7551 +networkx/algorithms/assortativity/neighbor_degree.py,sha256=H1XQ9BenXxxHK_e6ZWtdIb3xYwYCrbtqEQ69Gasm7cA,5278 +networkx/algorithms/assortativity/pairs.py,sha256=qHALwEx_Q8N1B2ZszX8vs2BK2_0kc4lmbth4kMU6Nog,3393 +networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc,, +networkx/algorithms/assortativity/tests/base_test.py,sha256=MNeQMLA3oBUCM8TSyNbBQ_uW0nDc1GEZYdNdUwePAm4,2651 +networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=Js841GQLYTLWvc6xZhnyqj-JtyrnS0ska1TFYntxyXA,4978 +networkx/algorithms/assortativity/tests/test_correlation.py,sha256=1_D9GjLDnlT8Uy28lUn2fS1AHp2XBwiMpIl2OhRNDXk,5069 +networkx/algorithms/assortativity/tests/test_mixing.py,sha256=u-LIccNn-TeIAM766UtzUJQlY7NAbxF4EsUoKINzmlo,6820 +networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=ODP2M8jCaFr_l3ODwpwaz20-KqU2IFaEfJRBK53mpE8,3968 +networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008 +networkx/algorithms/asteroidal.py,sha256=ARFht3oQvn95xCaaBEhy42djMIx4BuqsNf8VVlwBCEI,5852 +networkx/algorithms/bipartite/__init__.py,sha256=NQtAEpZ0IkjGVwfUbOzD7eoPLwulb_iZfh7-aDnyPWo,3826 +networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc,, +networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc,, +networkx/algorithms/bipartite/basic.py,sha256=iqgNX-FUDwK2owu1APFTu6ldlw6QE2PaOuNiWEgHafQ,8350 +networkx/algorithms/bipartite/centrality.py,sha256=vkjnOLv5CQtfTOFpa2YhFZWRnMBFUCetGn1w7akAvq8,9144 
+networkx/algorithms/bipartite/cluster.py,sha256=S9h8lu-usXFcXEJf6qUxZinf0LneqvKnEiUi9YKp7bo,6925 +networkx/algorithms/bipartite/covering.py,sha256=8pQEStjAGygcu83Cz88RfNAifUV7x8pC84LE2wWapsY,2160 +networkx/algorithms/bipartite/edgelist.py,sha256=aa5sHvwCLe0Lk7BK58tR5vMNjpnlfSaNSs6UY6G5vbc,11317 +networkx/algorithms/bipartite/extendability.py,sha256=CvF0zI__9899cMkq40vu_FfEcU-OeyCB4C2bHtMxxgE,3973 +networkx/algorithms/bipartite/generators.py,sha256=Hj-kPfih-bd74gHrZFpyeWtrMQbKS1uQyYiBqv5RxKQ,20231 +networkx/algorithms/bipartite/matching.py,sha256=kXgpv14FuL6k4KrKN68Z85dkKNgfBmxahTMn4N8aVoI,21620 +networkx/algorithms/bipartite/matrix.py,sha256=w9P7y4oS7vUFdv2dRQHAosuFDrH34YgMgUffQCSFRDE,6127 +networkx/algorithms/bipartite/projection.py,sha256=L9mkbufsE885rGhJ9t-7p-TqowBtUmrL8Zm95LeBygQ,17165 +networkx/algorithms/bipartite/redundancy.py,sha256=T2kDtj1xpSudwelE_ZnWFHnIXneZAMaXvv1m7pVF3Io,3397 +networkx/algorithms/bipartite/spectral.py,sha256=xxkLlaSByMJUP4Kz-XfuRhzTse_DqqsOtGzfiKIgdXc,1880 +networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc,, +networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291 +networkx/algorithms/bipartite/tests/test_centrality.py,sha256=PABPbrIyoAziEEQKXsZLl2jT36N8DZpNRzEO-jeu89Y,6362 +networkx/algorithms/bipartite/tests/test_cluster.py,sha256=O0VsPVt8vcY_E1FjjLJX2xaUbhVViI5MP6_gLTbEpos,2801 +networkx/algorithms/bipartite/tests/test_covering.py,sha256=EGVxYQsyLXE5yY5N5u6D4wZq2NcZe9OwlYpEuY6DF3o,1221 +networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=UE7vm3iZshnlzIrcupso48en0kncxGUPU7XTQskgowg,7996 +networkx/algorithms/bipartite/tests/test_extendability.py,sha256=MsiRLfldka3Cz_h21BwPxnEOuKChntuI6mVCnIFnSs0,6780 +networkx/algorithms/bipartite/tests/test_generators.py,sha256=GLMThTKIfZ96NwTxIL0P0o0OAESZFfnySRkRjtKhao8,12794 +networkx/algorithms/bipartite/tests/test_matching.py,sha256=wFw095skCjW5YvQAnIie8mLacECVt0yUoeJFSj8ONAk,11972 +networkx/algorithms/bipartite/tests/test_matrix.py,sha256=EoqQKTMcPPPPUZYTzc-AAtl5F77qT0X3FI3E1tYppxM,2900 +networkx/algorithms/bipartite/tests/test_project.py,sha256=FBjkys3JYYzEG4aq_CsQrtm41edZibWI_uDAQ0b4wqM,15134 +networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=ddjUzOQ0gkiWBLtVwVFYTJydaIdW3qAc4BCVscxj7-Q,919 +networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=1jGDgrIx3-TWOCNMSC4zxmZa7LHyMU69DXh3h12Bjag,2358 
+networkx/algorithms/boundary.py,sha256=GNuNDL280F7RXMzgkhUqFx-Zcg1LtP_Q0bNCYbMWIYU,5330 +networkx/algorithms/bridges.py,sha256=MuH_zEBqeSuyou8wszhnEzjJrUZSN-PpNtaScqUmR6E,6075 +networkx/algorithms/centrality/__init__.py,sha256=Er3YoYoj76UfY4P6I0L-0fCQkO7mMU0b3NLsTT2RGWI,558 +networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc,, +networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc,, +networkx/algorithms/centrality/betweenness.py,sha256=Uy9LCiUxzx1Jvgi7SgV-4AEG1BfvDy2ypWg6Xzfd0_8,14374 +networkx/algorithms/centrality/betweenness_subset.py,sha256=SW7uh0SyGhD_99gwAIGOHrJ9rlO3jXw7xHi-tOHmspE,9327 +networkx/algorithms/centrality/closeness.py,sha256=97qc3gCkitgyLh66sYcdpSRnl7cdUrDgRBP49jDlNNw,10252 +networkx/algorithms/centrality/current_flow_betweenness.py,sha256=eDoDGCVR1PIL5hIY07xRJ6Ze74S4v1HUGJGZ9DbfQW8,11871 +networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=QAxgfH20BkeoGgjKPVHVRDrJ6kkGQ1MNmZIVV5baWaM,8046 +networkx/algorithms/centrality/current_flow_closeness.py,sha256=jId4MzTctT0NIJOrzTPsd_gomLSMAg_1SXuXGDZja18,3351 +networkx/algorithms/centrality/degree_alg.py,sha256=xwK263egt-sy-BBxVqL9CE7uF22UhJQls8NlVu3QfZU,3881 +networkx/algorithms/centrality/dispersion.py,sha256=Eld3WK97coVbsHjAJ3ewYI1vsJ0c4Nm3Yrznbec5G8c,3627 +networkx/algorithms/centrality/eigenvector.py,sha256=o0qmWOiMf18Hofdw0ACaAO3TASeJC89R1meCr3ILJi0,12738 +networkx/algorithms/centrality/flow_matrix.py,sha256=G7o6qTnkOlhUZ-DowDu5Xb0vQAiulXcP_veiuYpaPdU,3829 +networkx/algorithms/centrality/group.py,sha256=XBfaGSIgVc-an4Ecqvokd1smETuQ_lUTAtOduKA8YDg,27866 +networkx/algorithms/centrality/harmonic.py,sha256=0eFa5Kv-7Ff_TFuygH753cnQjDqeImkPgrKohtXhGFE,2626 +networkx/algorithms/centrality/katz.py,sha256=K3KVHs3RMGjywLa5tvwEk4xqoBedgytEXINcSjHzy4s,10941 +networkx/algorithms/centrality/laplacian.py,sha256=BHdSIiFBjqZGijThqYQHS3J2-KHnd8DdzQDwAXJwyt0,5403 +networkx/algorithms/centrality/load.py,sha256=6lFI7KqDsVit1QF1UXqrx5G_frELnG8UKOmqtETC5bs,6850 
+networkx/algorithms/centrality/percolation.py,sha256=PcWtbDaXvTLeqr1PCbnIHjoD8XcfQTp1meff7FHU1As,4415 +networkx/algorithms/centrality/reaching.py,sha256=nMKAOEbpHajkGKIYxGYEhO70fI2KXeReVsJgaRplN0E,7017 +networkx/algorithms/centrality/second_order.py,sha256=hFk_RFwYpIRlh_iGPO-ZSNzp6Anb73jod_fLYTMvfYk,4966 +networkx/algorithms/centrality/subgraph_alg.py,sha256=BIxHyH7E3I1Ri2dDnnFPy9IOc65ouwk8Y_jc5JMEGRM,9472 +networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-39.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-39.pyc,, +networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=pKoPAP1hnQSgrOxYeW5-LdUiFDANiwTn_NdOdgccbo8,26795 +networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=HrHMcgOL69Z6y679SbqZIjkQOnqrYSz24gt17AJ9q-o,12554 +networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=XWZivyLjxYlF41U4ktUmvULC2PMvxKs2U6BHDXRZVdE,10209 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=VOxx1A7iSGtdEbzJYea_sW_Hv0S71-oo1CVX7Rqd5RY,7870 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839 +networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=vflQeoNKngrGUiRb3XNlm2X9wR4vKgMSW_sCyMUCQi8,1379 +networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=TxD7UBtezF4RCdbCAuTsSB5lcFOQZrGnLOuCMa0XWY0,4105 +networkx/algorithms/centrality/tests/test_dispersion.py,sha256=ROgl_5bGhcNXonNW3ylsvUcA0NCwynsQu_scic371Gw,1959 
+networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=MsHKkQX7oip4v0kF28K1RjtKqxSNVykiSjg8wT20YyE,4897 +networkx/algorithms/centrality/tests/test_group.py,sha256=YmWifoTgw2gSS5BnA9G2T_Voauk_WG6v90JrZEt-Kjk,8686 +networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wYP0msmB5hh5OMIxPl9t0G4QSpG3Brxw98Kh9BrRoag,3658 +networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=JL0bZZsJe2MQFL6urXgY82wCAwucUvhjaShYZPxpL6U,11240 +networkx/algorithms/centrality/tests/test_laplacian_centrality.py,sha256=vY-NULtr_U_GxUMwfAZB-iccxIRTiqqUN4Q8HRNpzSo,5916 +networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=Vv3zSW89iELN-8KNbUclmkhOe1LzKdF7U_w34nYovIo,11343 +networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=ycQ1fvEZZcWAfqL11urT7yHiEP77usJDSG25OQiDM2s,2591 +networkx/algorithms/centrality/tests/test_reaching.py,sha256=sqQUPspoiWxs9tD77UwngBkMVFYjRzhayVxPqX9_XbY,4143 +networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=ce0wQ4T33lu23wskzGUnBS7X4BSODlvAX1S5KxlLzOA,1999 +networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729 +networkx/algorithms/centrality/tests/test_trophic.py,sha256=AzV6rwcTa4b4tcenoKh95o6VF-z7w75l81ZOdhhi6yE,8705 +networkx/algorithms/centrality/tests/test_voterank.py,sha256=7Z9aQYKqEw_txBbWTz1FZWJzUmhjlMfDFSRIKHBdkOk,1692 +networkx/algorithms/centrality/trophic.py,sha256=ay_R2GtxxfP5muSoxSETt6wdqYka14J1f1Z9zafY790,4654 +networkx/algorithms/centrality/voterank_alg.py,sha256=UG71jAEm4b0vqj6ZQ-so8yqQtdxDoK8XI5CWTdjAyhg,3227 +networkx/algorithms/chains.py,sha256=wUNxO0v_nH9m5efeV0IXzog_5EbBl0PkVzgDJoCAJts,6964 +networkx/algorithms/chordal.py,sha256=H5fdhTaX5UzKWVj4mQQd7cjj_oj84R91rUGRiuug_Gk,13285 +networkx/algorithms/clique.py,sha256=gM-ksRAEX1eDr_Irk85uuUGVKRpFIYijeOo385mVsxs,25802 +networkx/algorithms/cluster.py,sha256=qkwVMieIgXyK2u01cYD9xrZtgpDAODmH3aggvUxxPVc,20281 +networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182 +networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc,, +networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc,, +networkx/algorithms/coloring/equitable_coloring.py,sha256=JnL_TM3sTewENSKkbHIOOfNoaqXYkoEjzmfgf3xD9C8,16279 +networkx/algorithms/coloring/greedy_coloring.py,sha256=Tuo215orZ6k7znMd9wkyv3irZNAl1dyEryi72mC_im4,20170 +networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc,, +networkx/algorithms/coloring/tests/test_coloring.py,sha256=A2cAG--i7pTVolIK96mxNuCTtLvhbLVRqJ4MAfWUBEQ,23712 +networkx/algorithms/communicability_alg.py,sha256=8omQmig1RyfVYfB_bfX40AX3Vg4PD4D-cmjelQzzKFc,4536 +networkx/algorithms/community/__init__.py,sha256=gKUySRds_lxaCw0kEpPJ1vluQwN4cV3ayt4U_8fok_M,1125 +networkx/algorithms/community/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/asyn_fluid.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/centrality.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/community_utils.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/kclique.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/kernighan_lin.cpython-39.pyc,, 
+networkx/algorithms/community/__pycache__/label_propagation.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/louvain.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/lukes.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/modularity_max.cpython-39.pyc,, +networkx/algorithms/community/__pycache__/quality.cpython-39.pyc,, +networkx/algorithms/community/asyn_fluid.py,sha256=Qn8tNzdrXA1DVVHuZi-YjvBsUSbj3J6WwekrSVXBs74,5912 +networkx/algorithms/community/centrality.py,sha256=AEuGeTP_vWSyWr4vC3r-63cTfQ7mV2vDTqv_vZUkoxQ,6631 +networkx/algorithms/community/community_utils.py,sha256=u4q9DSo_QyROG7Qci2-Cvphu4n_VM8AeYXYKkXQXxws,903 +networkx/algorithms/community/kclique.py,sha256=tG0GOot8kY-wnaGA0XdNo0VKKoa1hJprMqXszcA00Pc,2456 +networkx/algorithms/community/kernighan_lin.py,sha256=-pQEXeOBE6JnHqMo-5M6igzGcBNbeOWK8AQ51joeN-E,4345 +networkx/algorithms/community/label_propagation.py,sha256=pcGwq8qhZQPK7LSrmsL54lf0ljC4PzBQaE2s_vvsWmU,11846 +networkx/algorithms/community/louvain.py,sha256=smccDNEHuRC3aqBJJ7ijKTQQ8vy1c-zcSuPaeqoQqDw,14764 +networkx/algorithms/community/lukes.py,sha256=OxwTxVKYNEd4evk4htBNDw_IeNujUIvPuydAfT-ewZk,8086 +networkx/algorithms/community/modularity_max.py,sha256=mcQxD2iQduY8H2-Ep3Agg7BSb-UTUqzOEcHm-424sC8,18020 +networkx/algorithms/community/quality.py,sha256=G7ogU-CYh-78EWGUyPKKR55K0iFrZclHawzy9gvBW-4,11919 +networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/community/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_quality.cpython-39.pyc,, +networkx/algorithms/community/tests/__pycache__/test_utils.cpython-39.pyc,, +networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=5DDArgCUSRVXrlG21R5Yu6Gg96xsivqvEib17VGOZLM,3057 +networkx/algorithms/community/tests/test_centrality.py,sha256=ADU1mFn7yl9kTtQjOkfPtjpmkBR_i_6hwbVkWh5qZmw,2931 +networkx/algorithms/community/tests/test_kclique.py,sha256=iA0SBqwbDfaD2u7KM6ccs6LfgAQY_xxrnW05UIT_tFA,2413 +networkx/algorithms/community/tests/test_kernighan_lin.py,sha256=s8bK53Y1a87zvlZ1AJE-QJ2vItnbscSOlHQSrMpetGI,2709 +networkx/algorithms/community/tests/test_label_propagation.py,sha256=uOyx9-rLQCNidVwJ5EcjAlAOubjkK6HooZff5CCYki4,7870 +networkx/algorithms/community/tests/test_louvain.py,sha256=m8TQDH3fX2ygvWVn-mtP3EEmi1F7JF-J_WYuSAxvGXs,7257 +networkx/algorithms/community/tests/test_lukes.py,sha256=f_JU-EzY6PwXEkPN8kk5_3NVg6phlX0nrj1f57M49lk,3961 +networkx/algorithms/community/tests/test_modularity_max.py,sha256=mqtalSff4cmpAPyOExOolfICOE7YuOtHA3BqT84eZlg,10365 +networkx/algorithms/community/tests/test_quality.py,sha256=_kbOlYD1mpPduNQU1wJx58we6Z8CbmQ8wsDwOqTE4hg,5274 +networkx/algorithms/community/tests/test_utils.py,sha256=r_YEdGUaGZo8B16FxzocmkgpRrWgqyN7ehvx_qFiYu4,706 
+networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173 +networkx/algorithms/components/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/attracting.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/biconnected.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/connected.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/semiconnected.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/strongly_connected.cpython-39.pyc,, +networkx/algorithms/components/__pycache__/weakly_connected.cpython-39.pyc,, +networkx/algorithms/components/attracting.py,sha256=DYv4WYi7o65w2gszDcNVPlxPYDESDA_r0Z4gDzfpEDA,2699 +networkx/algorithms/components/biconnected.py,sha256=6GRTNyPgwvboDpUdjA9GODDa9vtxTNELEirHkcDHuXs,12765 +networkx/algorithms/components/connected.py,sha256=CiwwhpZo_ppuSCm63cMkm64IJybY_OAOxewWCaGUU7s,4312 +networkx/algorithms/components/semiconnected.py,sha256=M_bCya75ayQONDqv4HCfV8fAXPITAQbP7pdOl7mt8BQ,2025 +networkx/algorithms/components/strongly_connected.py,sha256=EoxDU6BDAp11v57vQvsiQGmZ_1C9iszd4ZDutN4KWAc,11712 +networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/components/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_connected.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-39.pyc,, +networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-39.pyc,, +networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243 +networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036 +networkx/algorithms/components/tests/test_connected.py,sha256=BTbxVcorGH8wKVXOO7D3bn0WR8lXK-Kijm-XDmQhiMY,3983 +networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792 +networkx/algorithms/components/tests/test_strongly_connected.py,sha256=66c4bPIdcl1hKEZAY5Wjpglk_mrcVCoDxaKBOaZz754,6639 +networkx/algorithms/components/tests/test_weakly_connected.py,sha256=yi23wxW2Vw6JOMqaWMEuqNRxnleriuAQrZ5JGWE48Jk,2887 +networkx/algorithms/components/weakly_connected.py,sha256=mDxdyU7oGqWTYWY0Rh_VRbR5hcMFhy6yXFb_W20LkxU,4366 +networkx/algorithms/connectivity/__init__.py,sha256=VuUXTkagxX-tHjgmeYJ3K4Eq_luK6kSpv1nZwiwGFd8,281 +networkx/algorithms/connectivity/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/connectivity.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/cuts.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-39.pyc,, +networkx/algorithms/connectivity/__pycache__/utils.cpython-39.pyc,, 
+networkx/algorithms/connectivity/connectivity.py,sha256=X5tB-FevO5B-514-zb3LoaSOHlcBX0ockB5fBdh2E58,29912 +networkx/algorithms/connectivity/cuts.py,sha256=vCr5z2lvAa4cYIAhmnL-cYr7jVRL5a0TbbrV3Qb_xtQ,23183 +networkx/algorithms/connectivity/disjoint_paths.py,sha256=rQ1qZepPW4j0RnzMefaFtFbd4hsnjZ6tpiUSQwEpDxE,14852 +networkx/algorithms/connectivity/edge_augmentation.py,sha256=IJmZg75CiEmpIE0tQyFzbd6ZKFKGH--hBs7yuYMLzAA,43988 +networkx/algorithms/connectivity/edge_kcomponents.py,sha256=8jQ-ba3qxdCRK8dFDTAAcqG55-vOfJ_smRXeQAIw6FU,20709 +networkx/algorithms/connectivity/kcomponents.py,sha256=Ax0v4yudKFbkuzmek2TUEh3UPFIPoy6gJ7N4ZtUiO-A,8166 +networkx/algorithms/connectivity/kcutsets.py,sha256=rtSXzS7uIaNewh7RT_-lukXvr48_Cdl56VKalb8bQ50,9423 +networkx/algorithms/connectivity/stoerwagner.py,sha256=RW_Zx4wsdikYH8UB34zLDVLBBfpPQ4UBSOh_oYwhkMI,5375 +networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-39.pyc,, +networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=eSmsi8uQk6MI591JgtSu2elIusb08bmSZS0h9gxb76I,15027 +networkx/algorithms/connectivity/tests/test_cuts.py,sha256=4F8seWb-sPDDjjVMkh14gst5UQa5f-zDkCsZIdJjVzo,10353 +networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=NLHReLoXSKoA6KPBNRbjF84ktg5PEaaktIj2AII3SDY,8392 +networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=d3ymFHyY2G4cpy1Y6wu4ze339qfF2LRp2HmGAIVjnMM,15731 +networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=CZ26Dy91WOUqhw1X73mqLGX-WHWzBBIeBCgrp6KK4Zo,16453 +networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=ohoSX8GACeszRZdzTiNuWXSFitfU9DzP0hqllS2gvMU,8554 +networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=TU6vl9cVtl7GstL2OrPGwVX2PY1R_AGQ6lJ9QQX5UBQ,8458 +networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011 +networkx/algorithms/connectivity/utils.py,sha256=8h29TgBEeaZbF_4OFNgtY2XLqURD_va_wezmz709Qfs,3168 +networkx/algorithms/core.py,sha256=mNKH8fwCgbCbaQAcIdZq8Dx9p_bsHiyQn4fAXOdR5K4,15990 +networkx/algorithms/covering.py,sha256=4SiBc9eJi4vQ0N5juRfw6atcyjH1xB_iSWwT6WOZhYk,5290 +networkx/algorithms/cuts.py,sha256=VSEhUHwqRFhfS70L-PzsUGEzN09uhC69B-5hwSixj7A,9960 +networkx/algorithms/cycles.py,sha256=eHduXG1NbfMOIT-RNDWHMyDeRiWC-3IAOokh0kyIDhs,43080 +networkx/algorithms/d_separation.py,sha256=2l6sRqNEldQQltIlVo7lK6ew115PYLT0OJcCjeQBnJY,15440 +networkx/algorithms/dag.py,sha256=1LkfG8kYN-dAjXk21Tmo7eCr9FaYpp4kMX37JCktlY4,39144 +networkx/algorithms/distance_measures.py,sha256=pMMLUutcc93wdbTfKkltEic7dcj5st95eEPpuxtHiNc,29136 +networkx/algorithms/distance_regular.py,sha256=KksQ9jiqigD5DzD63DXmxoeV8RAj1TF7hjyi9FLjbws,6914 
+networkx/algorithms/dominance.py,sha256=wO5FnplVOSkPBdFSBcPQMgt-0bykxaEtXmdqRjGg3d8,3422 +networkx/algorithms/dominating.py,sha256=lA9lP6SXtjZsUPyKM58Uflgp58aSqfveTeoe9aQuy7Y,2675 +networkx/algorithms/efficiency_measures.py,sha256=OGkRnD5lrXUqr4TAPW9f_y2g89k9V_-L1EkvWy9Yibo,4786 +networkx/algorithms/euler.py,sha256=hf2HPmE6GkRm6DhG0-DD4wIJmvnAXy__ddXx2BGqsBU,14160 +networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341 +networkx/algorithms/flow/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/capacityscaling.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/edmondskarp.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/gomory_hu.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/maxflow.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/mincost.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/networksimplex.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/preflowpush.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-39.pyc,, +networkx/algorithms/flow/__pycache__/utils.cpython-39.pyc,, +networkx/algorithms/flow/boykovkolmogorov.py,sha256=gJFnK5qZMg8xMWs2-aGx5-LLM25C48x2IPy-_50V6_c,13435 +networkx/algorithms/flow/capacityscaling.py,sha256=G4wdqfhQ4Gf7Fx3Eoh5_DUnpzD_qOT-8yWfaZ1dbbWA,14459 +networkx/algorithms/flow/dinitz_alg.py,sha256=mtSov40Oay_kz2v381MkOp5OSpDhKiH2OSINkrImsE0,7310 +networkx/algorithms/flow/edmondskarp.py,sha256=iafmZIMPO8euDc7uJQ1dg84s4a9OzK4tKMthF2jQoeo,8292 +networkx/algorithms/flow/gomory_hu.py,sha256=5fEaPaTi9_ox7CarltPwqSEGnF3OyxJJfWf04g-Aa50,6320 +networkx/algorithms/flow/maxflow.py,sha256=xyVgIMRtxRSUHZb-4txwc78ZEIFixzCQffG5NxioR98,22809 +networkx/algorithms/flow/mincost.py,sha256=JA6lLUmQ-UyxrdWdzbS3x8oK3t4MaPPBf_44Pgor9yA,12248 +networkx/algorithms/flow/networksimplex.py,sha256=eimOUJ4n2-jRBN8LgPoeuFrU38O2JRCB1O0uEkG7wkg,25175 +networkx/algorithms/flow/preflowpush.py,sha256=wIl2b0MpnhunZb4HpShxMVy1NgPggsAGF7chP0crKKw,15823 +networkx/algorithms/flow/shortestaugmentingpath.py,sha256=7vfa73BxJ6cHfzTy6ibXoD8DuOjK2lgp-EshUeimvZA,10474 +networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/flow/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-39.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-39.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-39.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-39.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-39.pyc,, +networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623 +networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248 +networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972 +networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471 +networkx/algorithms/flow/tests/test_maxflow.py,sha256=YRgkrdRj6NMHOXio2Zgr7-ErEzCbq7Z0w90azNffCC4,18727 +networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=fMweTQ3MzsZWYI-ul2dGR8OfGQeo8df2fLeCleHqxZw,4623 
+networkx/algorithms/flow/tests/test_mincost.py,sha256=n4fFLDwDLy7Tau-_ey1CoxZwKhFjk28GLGJjCyxhClk,17816 +networkx/algorithms/flow/tests/test_networksimplex.py,sha256=bsVxlvHAD0K7aDevCcVaa9uRNNsWAevw6yUKlj2T8No,12103 +networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132 +networkx/algorithms/flow/utils.py,sha256=TyckjUeH5qcBUSARpkuZDXaVirYGuo9xvJK8cno0T38,6001 +networkx/algorithms/graph_hashing.py,sha256=cOAW2XlFvuYokbmhEeKiX4KQgIHL6PjraBqsZizcj_A,11887 +networkx/algorithms/graphical.py,sha256=BYh1nXb2Kg1AYLJBz926QIKcHkAtXqInpsb7QSzykAQ,15807 +networkx/algorithms/hierarchy.py,sha256=jDj8Ld7InknG7OVLVSnT82cqzNvBQ4FT5Qso107kQVQ,1541 +networkx/algorithms/hybrid.py,sha256=fFfA7Ki4zKxm9r8VEvreUqyCvGeCygsBbJZPsvXUQ7A,6180 +networkx/algorithms/isolate.py,sha256=toiuRPi4qb06D_hREZWbAcDJ4c8yx8aKftWez22Efj0,2325 +networkx/algorithms/isomorphism/__init__.py,sha256=gPRQ-_X6xN2lJZPQNw86IVj4NemGmbQYTejf5yJ32N4,406 +networkx/algorithms/isomorphism/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/ismags.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-39.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-39.pyc,, +networkx/algorithms/isomorphism/ismags.py,sha256=5KRimh6jxs7BIDqAo48d01i8_1WkkckV0xVWkQ64czs,43529 +networkx/algorithms/isomorphism/isomorph.py,sha256=PQONDdw4Mc6neaPhW7yVQxoxOrbOqYorgBse4OjYtBA,7097 +networkx/algorithms/isomorphism/isomorphvf2.py,sha256=1LWpe54aulfYukTS87DoS4l1reCpOqZEr-74MOQLrRc,40528 +networkx/algorithms/isomorphism/matchhelpers.py,sha256=b7A7SwbqXj8CKAw-vrISgBNhDcEXobPjljkOhyWn4aM,10891 +networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=-1NW81l8kM9orQ2ni9tcNizQzEhOUE9BaBJXjUWqhiI,10948 +networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-39.pyc,, +networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442 +networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442 +networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310 +networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602 
+networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=2sOkbB7Aejnq4zDx9BhJyfavf5DLiKJaUPusb3fhGRk,10585 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=1GZmmqNWk605Qq9h55V_5SfEKPM50Ceq6DSICdh6ufs,1663 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=s4yO4cHJk5qIpRemnSzD1MJEeSJPNpZcOU6LeWVhGXI,11751 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=uuTcvjgf2LPqSQzzECPIh0dezw8-a1IN0u42u8TxwAw,2483 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=DZy2zAt74jiTAM-jGK5H9aGRn1ZsMgQl9K5UNsu178Y,7346 +networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=yj3C8ZBhi57I6kxetfneGpTse9hrYBvJQfGb0qks_G0,7066 +networkx/algorithms/isomorphism/tests/test_vf2pp.py,sha256=65RkN1mPWLoxirE7SlIvfaKMJk80b_ZwWG6HTJtlkPg,49924 +networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py,sha256=s4zz4IYYm2q8nHmkG0eRI2yJjcTx6zjRL7HTVIl1a-s,90080 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=yby-vt4sYxc1uzlnD-iETREbojgNkpQGbLkrPER_Sss,6629 +networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=HKPUDU1oCYCfEgyNYN0e31K9PqE3hsqHZMX8Iuq7i1Q,9397 +networkx/algorithms/isomorphism/vf2pp.py,sha256=oKBYHbwS0j3UihEI7LgaUZ7sMhv3nLLFrnCk3jETxnw,36383 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=VVTNWEzHnRaZrjtinBnkStRNsvC9FVvivXWs-pqG6LM,7475 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-39.pyc,, +networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-39.pyc,, +networkx/algorithms/link_analysis/hits_alg.py,sha256=5ntPDFZNGYbrw0Bq4WNvmoIWBxSa6PtIoBVHhHmv-8M,10244 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=0l7xABhW3Vkhx07y87NynTdYqcTql0UAVfCghURhZFk,17183 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-39.pyc,, +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=BXMNyKv4OfRKXH9W8u8qCV3zaghDlEItRhYLN0TB-CM,2525 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=g0HyPn5HBXZeu-TQSWqqTzOfnzaejRfBuIIpKYSGecE,7530 +networkx/algorithms/link_prediction.py,sha256=z4abyN_TIEZiq6TN9YP7TYvrdiDhzgg-Irn9UW1IkYQ,19968 +networkx/algorithms/lowest_common_ancestors.py,sha256=popl_tFPaN5r4P1UQ47-qzEQ9EM0EQCeA7F_9th71Ek,9186 +networkx/algorithms/matching.py,sha256=ePjtahy-HIMke90HjmcgO1mJOUcG7WYJFfFKuAqQ8Jk,44530 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/minors/__pycache__/contraction.cpython-39.pyc,, +networkx/algorithms/minors/contraction.py,sha256=dS3lUcojiGydTV2IOrpyD3UxChG5ZYqHy74c-Euulns,22735 +networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-39.pyc,, +networkx/algorithms/minors/tests/test_contraction.py,sha256=rob7wHlt3xoXYxpcXQOwm7zP0TLyRqWV1JxsZlE8kfo,14212 +networkx/algorithms/mis.py,sha256=9ZdCuXXAlKOVHgWJMtillI1vLOXQGlRpSoz-rbnMU3I,2339 +networkx/algorithms/moral.py,sha256=k9uZGz0S6YK3U5hoiRLzR--PRQNktNdaI9-lrzsrgBg,1511 +networkx/algorithms/node_classification.py,sha256=ACq6C3i2p-f5E4UGHK8XQ6ng6ZVf6DxHdx4xR0x1zrA,6461 
+networkx/algorithms/non_randomness.py,sha256=wEXsl0fat8w0SAROXT_mB0B4fidVD8N5Ue5rLDMcu7Q,2893 +networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/all.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/binary.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/product.cpython-39.pyc,, +networkx/algorithms/operators/__pycache__/unary.cpython-39.pyc,, +networkx/algorithms/operators/all.py,sha256=_1em4J-Y6GQK0_UlkTIKZ7DNdTSzdXkZ748gAq9dxkg,9544 +networkx/algorithms/operators/binary.py,sha256=Un3NpZQxmGB3EhQY-_-qb-Kzr6lJy9RRVJZR0FOSrVI,12689 +networkx/algorithms/operators/product.py,sha256=3hm3Q1K3BaH6RYUTP369jGgFSVsyAF3crDS74PKlVDM,16115 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_all.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_product.cpython-39.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-39.pyc,, +networkx/algorithms/operators/tests/test_all.py,sha256=Pqjv9QiA0875Yl9D5o6c5Ml0t4KHpH2a5jbpAoZQXFc,8250 +networkx/algorithms/operators/tests/test_binary.py,sha256=N_HEuvjUPneQK44rYo8AMhR7OdfQk76U9EqIXCt65X4,12795 +networkx/algorithms/operators/tests/test_product.py,sha256=hbnfR6gKXhl2BiEHKrgi4hMYIie95noooNmVBws1iLo,13402 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/operators/unary.py,sha256=9cLxWpgt7aleGAgL608nW99s1gaw5kVGa-LybPt--qY,1745 +networkx/algorithms/planar_drawing.py,sha256=q9QJYn3PhDHzfNO1t5xAStN1XDzTPybFxtorb60KF5k,16289 +networkx/algorithms/planarity.py,sha256=j1SbhjE620Jda4PHALJh6PIwu_GHJKwACHnB6_q02aA,39476 +networkx/algorithms/polynomials.py,sha256=WNcCyqedQTPhhiwp9TJRqZXuo4KGTPjb85GF86XvDcU,11270 +networkx/algorithms/reciprocity.py,sha256=vKwcggMOSOz29-_j0R3do52qosaYFbfJJBXOkW-5jH8,2846 +networkx/algorithms/regular.py,sha256=0zEftUGLYqxeUEAV2cerg6bda98fQuqrDyKkNdkvvpo,6680 +networkx/algorithms/richclub.py,sha256=EP7v7VT6GhNAqFQ15I-WeDdksFISBArkS-guAPpdJoo,4166 +networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/astar.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/dense.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/generic.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/astar.py,sha256=mb9Z0nmHJhTCuF_tAvS3oiNrpplCNQReunAwrLJcYME,7674 +networkx/algorithms/shortest_paths/dense.py,sha256=xBzv4NHJ-J2ehGnDEgjHJ6a3LidSKYBy2p36qBTdgfI,8151 +networkx/algorithms/shortest_paths/generic.py,sha256=uN1-eUXz6n9R0WWwCgpiynJTfyYyp5RYgCUeroeizWo,25321 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-39.pyc,, 
+networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-39.pyc,, +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=X9tLO2OVDrHjSy1-nNaNFIRngnOo-ITT5HAj6PAXafk,7176 +networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=BNwXCe2wgNPE8o35-shPsFj8l19c_QG6Ye8tkIGphf8,2300 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=aR3pUbMS-s3vBZJg7kauoY6rmZbjlx-DweCC1wyZQI4,18156 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=fjpDkp38DmW8R2qpLRwRjcbYZp4an0f0yIq40XsFKJ8,5899 +networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=dmzFBYN3QEDZoun7RAtSe_spsGSbvkDiJSgUf9e-1K8,35038 +networkx/algorithms/shortest_paths/unweighted.py,sha256=EuiZiHEQEOrFZpmUMm52ThANjyfWeWhKYFIrnvu9G_s,15494 +networkx/algorithms/shortest_paths/weighted.py,sha256=esTy6BYmWqBcCGN15Ld3jJCfjTNZtw4pyphEK4g2NqQ,82339 +networkx/algorithms/similarity.py,sha256=Y5GRl1NW-IFVqwogOdHJs-SAHmpv1SgfN7MIF7y8odQ,59062 +networkx/algorithms/simple_paths.py,sha256=cliV45VPZSKngEAEI6jQX5o8JD9G7Yhe_kFnCCvZP1k,30535 +networkx/algorithms/smallworld.py,sha256=sD1yv28XfqfwSi0Y88GOKXPzVwMJlWpNf8S14ZLOw_k,13494 +networkx/algorithms/smetric.py,sha256=jHdRFK7HyeFkhQWkKApa1MpAyi512pY4Ksulkh8joo8,1933 +networkx/algorithms/sparsifiers.py,sha256=InQAPhcRTI5O_l9ckBc2LAheONfBVx1Wn6yYY0NVOiA,10073 +networkx/algorithms/structuralholes.py,sha256=58c8f6hBwxhp5-GL1JPWnR5dRLZU3gNziddeFVIeXSE,9319 +networkx/algorithms/summarization.py,sha256=ymt635-uEH_uocXwF0_tU8EbGlW_1wf3ZM-v0CG7dH4,23251 +networkx/algorithms/swap.py,sha256=uYRzbxhEONmGQlWWDsDw_SJQgIWJfXzvSKYQAd50HfE,14579 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_boundary.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_bridges.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_chains.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_chordal.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_clique.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cluster.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_communicability.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_core.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_covering.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cuts.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_cycles.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_d_separation.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dag.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dominance.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_dominating.cpython-39.pyc,, 
+networkx/algorithms/tests/__pycache__/test_efficiency.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_euler.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_graphical.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_hybrid.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_isolate.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_matching.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_mis.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_moral.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_node_classification.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_planarity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_polynomials.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_regular.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_richclub.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_similarity.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_smallworld.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_smetric.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_summarization.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_swap.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_threshold.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_tournament.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_triads.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_vitality.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_voronoi.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_walks.cpython-39.pyc,, +networkx/algorithms/tests/__pycache__/test_wiener.cpython-39.pyc,, +networkx/algorithms/tests/test_asteroidal.py,sha256=DnWI5_jnaaZMxtG44XD0K690HZs8ez7HU_9dSR-p6eA,502 +networkx/algorithms/tests/test_boundary.py,sha256=1OSJh32FYFhAVYB5zqxhZGEXZLS0HPp9kvfHZvWmD3o,6227 +networkx/algorithms/tests/test_bridges.py,sha256=FS34gA5cia8di_a2X4meeB7qI0JrsVtpQlL4fe_i1CA,4027 +networkx/algorithms/tests/test_chains.py,sha256=SofaAxDEJDf1gt5sIGVC_O8vT9YcTc8Jq1vfnwVPhkM,4363 +networkx/algorithms/tests/test_chordal.py,sha256=DPdNPY7KtqCsCwYVb4xQfnIm-z35dUJIWxNHtAiQLAQ,4438 +networkx/algorithms/tests/test_clique.py,sha256=FPIF2f8NLODsz-k_qrHt7DolClV_VdNWSh68oe8-ygI,9413 +networkx/algorithms/tests/test_cluster.py,sha256=AltwLWAblpSLa-24KvNuxYxM2IeVl5p2d-kozA9QJ-0,15595 +networkx/algorithms/tests/test_communicability.py,sha256=4KK9wU9gAUqHAAAyHwAKpq2dV9g415s_X0qd7Tt83gU,2938 
+networkx/algorithms/tests/test_core.py,sha256=ZmLePvuK-Tv8aQ6tGCJd9965BHKUviNNVV7o3PzwfEE,7016 +networkx/algorithms/tests/test_covering.py,sha256=EeBjQ5mxcVctgavqXZ255T8ryFocuxjxdVpIxVUNFvw,2718 +networkx/algorithms/tests/test_cuts.py,sha256=2Ir5xyIG4cTC4Dgg1cceLXaEFiOCJ60ZTDDn33vz0Ns,5377 +networkx/algorithms/tests/test_cycles.py,sha256=mrID4F3wdoZV1oBPETd8Ebx9UXasC2dWsccx8bq_5C8,34243 +networkx/algorithms/tests/test_d_separation.py,sha256=md90cCjC409qAolNGTFwGTu5N577GdgK2RYW83lxopk,6600 +networkx/algorithms/tests/test_dag.py,sha256=MMSD9Flgl_h2fCDIr7gPr9MACj-cU6Xnn5qK1-LeCSc,27722 +networkx/algorithms/tests/test_distance_measures.py,sha256=LdVbsbebvMZghK2gOgecNGxs7v_WjO8z2okG9uF8rAY,22327 +networkx/algorithms/tests/test_distance_regular.py,sha256=pPZ2CPKo4QLjhxlcJhBQZif6-_2qwfh1kpbrN_mu5tg,2312 +networkx/algorithms/tests/test_dominance.py,sha256=nPqRGSF1GEvUR16ryo-dOql6fLdTvzBmYk8Y3ML-ONc,9373 +networkx/algorithms/tests/test_dominating.py,sha256=hyta7ln6BbHaGlpEUla6jVzh2PRuSjvujLSGXrmwZbc,1228 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=4ajCsO3PwKBaz8jTB_b_nHh_yOz9qSPTOhNBloIRAF8,10987 +networkx/algorithms/tests/test_graph_hashing.py,sha256=duR9DQLUpRuy9bv0ZKQPt9gy9WxiX_K0-BVMlnF-WHY,23517 +networkx/algorithms/tests/test_graphical.py,sha256=uhFjvs04odxABToY4IRig_CaUTpAC3SfZRu1p1T7FwY,5366 +networkx/algorithms/tests/test_hierarchy.py,sha256=g3-0pNfzRo-RDW1BsiLXxyi2LwWIJukXx2i4JCpN2fg,941 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=7c322xESYdH5WEA0TsMw4Jcc_-lqfIsj-SjXP6Y0TVc,19442 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=GvhYCQMnVYD9LHPCNFgWMAUmOV8V5gko0fe05zi1JwU,13153 +networkx/algorithms/tests/test_matching.py,sha256=jhehNkApE5RuMPtbjWNeHn0tPqhVz65mL7QakfRA3Vw,20174 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=JWGZpbQfUaCklCGI170Gfpp3b5ICYwY7RH_DQ1mYQbc,6741 +networkx/algorithms/tests/test_mis.py,sha256=jusLniyKcNWs0994srLJxY3SVeAQqkkXf-h-qtlrfGw,1875 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=NgJJKUHH1GoD1GE3F4QRYBLM3fUo_En3RNtZvhqCjlg,4663 +networkx/algorithms/tests/test_non_randomness.py,sha256=-8s-fJLYRxVNp7QpaMe5Dxrxi0kvewY78d4ja-nXNBk,782 +networkx/algorithms/tests/test_planar_drawing.py,sha256=CBJv6U9tT0BzYVrmEBlARBZSMxBwTsX3krACAnAPfHg,8771 +networkx/algorithms/tests/test_planarity.py,sha256=h9kUOsn0skbvYBcIYzKy5XDGmyP3sTwtvoXYKr_X230,13148 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=X_PXWFOTzuEcyMWpRdwEJfm8lJOfNE_1rb9AAybf4is,1296 +networkx/algorithms/tests/test_regular.py,sha256=zGf7Mmh7XPtwunOoeTfgiICnfsVeCEbMop3NrDgIfqY,2457 +networkx/algorithms/tests/test_richclub.py,sha256=hhRGQGNQ2EINvmTF-XkJxGZXROvQJZuWwubCYq8Mx9U,2585 +networkx/algorithms/tests/test_similarity.py,sha256=JJYVUV-WtjswW-kDbY5tUuyjLI_3mVKOLUbaRz8wCM8,32216 +networkx/algorithms/tests/test_simple_paths.py,sha256=mmuKfi8t9iXLO8tSIuQGbupFe9c6X6cSiGKEWYiWiqM,24075 +networkx/algorithms/tests/test_smallworld.py,sha256=rfgNCRU6YF55f8sCuA5WmX6MmhDci89Tb4jaz4ALjcQ,2405 
+networkx/algorithms/tests/test_smetric.py,sha256=wihpgjZS4PaajOuE72RiDEbBWpQcoKPSAfjoAezuRxg,980 +networkx/algorithms/tests/test_sparsifiers.py,sha256=A12V4ljWxvXaSFJ73mHSFK2YNO-k8ax6Me4yEWTsI4s,4043 +networkx/algorithms/tests/test_structuralholes.py,sha256=-48vhIVXcUlmLAi603FdBP6afbVu447JZ1piSCIpRTE,5536 +networkx/algorithms/tests/test_summarization.py,sha256=cGAep6r-v141uAdsPF9r8YTuT-nO7L7puOqPPv339wo,21313 +networkx/algorithms/tests/test_swap.py,sha256=YRpN79MNL1i5Hm2FVb-mNl9SRfHDWAuDnn2Wx95_UYY,5307 +networkx/algorithms/tests/test_threshold.py,sha256=RF_SM5tdMGJfEHETO19mFicnt69UIlvVeuCwI7rxb0M,9751 +networkx/algorithms/tests/test_time_dependent.py,sha256=NmuV2kDo4nh2MeN0hwcJf0QSDtqMD0dfSeeKSsYBtQ8,13342 +networkx/algorithms/tests/test_tournament.py,sha256=xxmLb9Lrmjkh9tKmyv2yYJrhB2PHWh-Bq71M-d1NjQo,4158 +networkx/algorithms/tests/test_triads.py,sha256=tPMzSQDVHZQOmDOKa9Hyem76UO1zh7wcdaM9X_BhxG4,9088 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_walks.py,sha256=X8cb-YvGHiiqbMEXuKMSdTAb9WtVtbHjIESNSqpJTmU,1499 +networkx/algorithms/tests/test_wiener.py,sha256=NJJbXZ9L5ZeFGQpCpvYVWFNqyX3amkbuDQEBL7wCixw,2080 +networkx/algorithms/threshold.py,sha256=1GUMnQQvN_6mR5oowUbgoykaWPyV4zO1mO1Dyb9NdAE,31088 +networkx/algorithms/time_dependent.py,sha256=73WgWETl4IP4qsOrSDkIEIU7MJM3qc_j-LKRZUJlC4c,5757 +networkx/algorithms/tournament.py,sha256=nTgdIzkkFhIyie_5jDh97USz2jQx9TgY34vHOPVXkN0,11676 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/beamsearch.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/edgebfs.cpython-39.pyc,, +networkx/algorithms/traversal/__pycache__/edgedfs.cpython-39.pyc,, +networkx/algorithms/traversal/beamsearch.py,sha256=ABR8pOl4G3CbUyaHpR9F93jh3J0XjVKZe7DK5LetVWc,3424 +networkx/algorithms/traversal/breadth_first_search.py,sha256=2sIfMwqc2qjGW2W3UcGwGBSCcc-_FWWa_ZRYQjhfro8,18107 +networkx/algorithms/traversal/depth_first_search.py,sha256=aJ-3wtaLVLslz5dJD7nW3biDdXsjJi_rAJB8QQTg_8w,13730 +networkx/algorithms/traversal/edgebfs.py,sha256=2eUhaoP2__-QSkDu1ie6wPd1WjuK3u8pU8rf87iAtKc,6239 +networkx/algorithms/traversal/edgedfs.py,sha256=NaZfoYV5jl8mmo5UhgdQqM2YJ_PjZ--VKYF5T9qa6Ms,5952 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-39.pyc,, +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=b1fXCI0_BuWbnA536PZrXMMUfG1ejnHX1fpQGY-5hqI,1076 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=fC6HUKzd5Jd9LerxgODpfvCRE15BU5PbMzEaMLoXPZs,6796 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=4Gc1ACJQJ63rfOlPz0X0Tv6xW6k83ewMRVojBEnKMmk,8616 
+networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wm_FjX3G7hqJfyNmeEaJsRjZI-8Kkv0Nb5jAmQNXzSc,149 +networkx/algorithms/tree/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/branchings.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/coding.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/decomposition.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/mst.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/operations.cpython-39.pyc,, +networkx/algorithms/tree/__pycache__/recognition.cpython-39.pyc,, +networkx/algorithms/tree/branchings.py,sha256=ZJmoLcn_rtQ9VKBwsFqdx1Rkh8edqjNGgYWBUezNpvE,56003 +networkx/algorithms/tree/coding.py,sha256=zB5ISLd1Jn-6wWfqrQaxYa2hbwchp59qzGKmCiuLVd8,13407 +networkx/algorithms/tree/decomposition.py,sha256=MFV3zHYOt8y7n3jNBxQCQvO60IFJ9rl5XIgvBqjT5RQ,3047 +networkx/algorithms/tree/mst.py,sha256=STCiAXhYmbt3x0yaqRQehwlp_GMJFoeU9vwdmzjFHNk,40276 +networkx/algorithms/tree/operations.py,sha256=46nnbX2qF_iyzeuzsIJXrlXiJOEQfo2KguFB4rP5Ttg,4702 +networkx/algorithms/tree/recognition.py,sha256=ZOdFP-cdG2Lv7jiX3M2nPi7g-e327wPN36_xbn4KiDs,7553 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-39.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-39.pyc,, +networkx/algorithms/tree/tests/test_branchings.py,sha256=chEEI0evEkVVJSphcP_kIqhMEdv0nhBONYGP8ffW40s,18008 +networkx/algorithms/tree/tests/test_coding.py,sha256=f3A5dvfkWImC6Jp2qkuw2Sz3whOsabnaOfu6Eh9r65I,3954 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_mst.py,sha256=NgvEi2kwn18HN8ywvj1V20pZS98JIuo9vJOv31DqW2w,24749 +networkx/algorithms/tree/tests/test_operations.py,sha256=ybU96kROTVJRTyjLG7JSJjYlPxaWmYjUVJqbXV5VGGI,1961 +networkx/algorithms/tree/tests/test_recognition.py,sha256=hbS6q1lbshRClWH7o8Zj7Osd-TZuk_YOomUdTczHs3s,4171 +networkx/algorithms/triads.py,sha256=4EF74EolTV1PJheA-J371tP_pTJA7-aGLnq4D8pac2E,15517 +networkx/algorithms/vitality.py,sha256=yBXiKzewpz40SK0UzFpeZZDvOXXDSt_vh4jGLrNgnHg,2331 +networkx/algorithms/voronoi.py,sha256=zCiFFQiklBS5afAEfln44GfiPsoAI9c7E6m0gbQ2Big,3178 +networkx/algorithms/walks.py,sha256=3ulDmITV-YN8yBTr2qmVEDsSe-Gi8yzrnGyLP5dXQbQ,2419 +networkx/algorithms/wiener.py,sha256=o8tYpFn-W9sneu3QMhpMZlCkqptgRo2ZdUFKfVCOL1s,2328 +networkx/classes/__init__.py,sha256=Q9oONJrnTFs874SGpwcbV_kyJTDcrLI69GFt99MiE6I,364 +networkx/classes/__pycache__/__init__.cpython-39.pyc,, +networkx/classes/__pycache__/coreviews.cpython-39.pyc,, +networkx/classes/__pycache__/digraph.cpython-39.pyc,, +networkx/classes/__pycache__/filters.cpython-39.pyc,, +networkx/classes/__pycache__/function.cpython-39.pyc,, +networkx/classes/__pycache__/graph.cpython-39.pyc,, +networkx/classes/__pycache__/graphviews.cpython-39.pyc,, 
+networkx/classes/__pycache__/multidigraph.cpython-39.pyc,, +networkx/classes/__pycache__/multigraph.cpython-39.pyc,, +networkx/classes/__pycache__/reportviews.cpython-39.pyc,, +networkx/classes/coreviews.py,sha256=jkbsDaqebCcFH952hAAAuXw3qZpi7xUdCHyKbUrFsc8,11010 +networkx/classes/digraph.py,sha256=CnnSfxWTjOkabNkSNhBfE2bgwgSG32ylDD3scWxaPP0,47159 +networkx/classes/filters.py,sha256=47OFApfkvvohVMoZ2v9sniM6sgv9rka869BDwmbdww4,1715 +networkx/classes/function.py,sha256=55dS0xS5p7oiyeJQ03mGcSo7NwJLbfZ54_kIhVOx-q0,36323 +networkx/classes/graph.py,sha256=DigVc4mmBx9v1ctgqhDYu8GZwT2wa4iyci85otpYZ6Q,70379 +networkx/classes/graphviews.py,sha256=7rSoE4Pkh8SjjsP2G6t0U0SRAwnkUp5QLSgnJPZgPUQ,8558 +networkx/classes/multidigraph.py,sha256=_5yJvVz99QMkp92iS2qONcahEBhsmS3C7bfaKrNNKoA,36283 +networkx/classes/multigraph.py,sha256=5X1_tB0LJgfif8f1HckiL1MTm9tPDJ4YTRe_H22WFbA,47127 +networkx/classes/reportviews.py,sha256=WQU6LBq2tXIohU6bDJALKeefFaeGMW46Ln8sUJoJ-yM,45606 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/classes/tests/__pycache__/dispatch_interface.cpython-39.pyc,, +networkx/classes/tests/__pycache__/historical_tests.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_backends.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_coreviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_digraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_digraph_historical.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_filters.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_function.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graph_historical.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_graphviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_multidigraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_multigraph.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_reportviews.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_special.cpython-39.pyc,, +networkx/classes/tests/__pycache__/test_subgraphviews.cpython-39.pyc,, +networkx/classes/tests/dispatch_interface.py,sha256=bOQdru35uGmAMKHNZEok-UBAeiuA0rF20JZEeo4ePT4,6683 +networkx/classes/tests/historical_tests.py,sha256=3lbZKaRvv8uodIEzSbBJDguTPpO2MhqBqh-Pk1soZBM,16173 +networkx/classes/tests/test_backends.py,sha256=IR8qFYv4cJX0y1q48lRV0zJEouj1sfSOmcwcm4FBxEg,2579 +networkx/classes/tests/test_coreviews.py,sha256=qzdozzWK8vLag-CAUqrXAM2CZZwMFN5vMu6Tdrwdf-E,12128 +networkx/classes/tests/test_digraph.py,sha256=uw0FuEu3y_YI-PSGuQCRytFpXLF7Eye2fqLJaKbXkBc,12283 +networkx/classes/tests/test_digraph_historical.py,sha256=s9FpuIP81zIbGCiMfiDqB3OxqWU2p3GwWdhpGIOjD5Y,3683 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=e5vg_SjtC8nHrMDemCcEVTHcudcSiWToMsZF655eQi4,25770 +networkx/classes/tests/test_graph.py,sha256=77t7pk1Pmz-txewyD2Dv19Vva6vWpWCtJSPtFx-EY_Y,30913 +networkx/classes/tests/test_graph_historical.py,sha256=-jf961vQCuQLyly0ju50q9dbzWG5m2OAs9H6IVS670c,273 +networkx/classes/tests/test_graphviews.py,sha256=i4x3ii8--PPg_pK4YA8aMR1axUQCdXZYpzmB05iEAOg,11466 +networkx/classes/tests/test_multidigraph.py,sha256=ryTKegCoYixXbAqOn3mIt9vSMb5666Dv-pfMkXEjoUE,16342 +networkx/classes/tests/test_multigraph.py,sha256=0vFQO3RCJaBpzXvnQzdWa_qYLHNo_I9DICYhPZJNUMk,18777 
+networkx/classes/tests/test_reportviews.py,sha256=-4Vd42cOvTdZfsPiWuQuAAvVDafosjB47RosYglmXUw,41470 +networkx/classes/tests/test_special.py,sha256=IJsmqCS9LrTDoZ11KPmo-UOI7xEskL7NyduEJNPMNqs,4103 +networkx/classes/tests/test_subgraphviews.py,sha256=1dcJHq3F00LyoFSu6CTFPqS7DFIkWK1PyQu4QvJh5ko,13223 +networkx/conftest.py,sha256=v1f_RONRmJwf__rPjF4l-uFl2cyyF08mtst8WTnHXr4,7944 +networkx/convert.py,sha256=0EbsMbOgm8pdKpW4A6p5Ea4T_KGEcq-P1a0gCnM-jzY,15977 +networkx/convert_matrix.py,sha256=cnI56RdwsBke_XOz6dAbfgj_qVIwU2JeirRqMN9tM2w,41069 +networkx/drawing/__init__.py,sha256=rnTFNzLc4fis1hTAEpnWTC80neAR88-llVQ-LObN-i4,160 +networkx/drawing/__pycache__/__init__.cpython-39.pyc,, +networkx/drawing/__pycache__/layout.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_agraph.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_latex.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_pydot.cpython-39.pyc,, +networkx/drawing/__pycache__/nx_pylab.cpython-39.pyc,, +networkx/drawing/layout.py,sha256=fvfHjubEdZZoRexknTKF2j0zO_Xj2N1nhzfuNTKY5BQ,38829 +networkx/drawing/nx_agraph.py,sha256=9Q6bz0oT7u-iU2hDIjhjvQ3jmxWsyAHA5fMIr4BmhEE,14009 +networkx/drawing/nx_latex.py,sha256=EZWQ1GJ9SWS7ufyAz8ey30gG1EtnCUVMgbLJ4-tIjgY,24805 +networkx/drawing/nx_pydot.py,sha256=vS_lJC9ASmBPyXpecinbNs_OuhjgUNIj82CLks3SjPk,14135 +networkx/drawing/nx_pylab.py,sha256=LOTf6wOfOkB_t-ml5EY-6SEOfMYAxdDzE-4uGJAxBuo,51138 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_agraph.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_latex.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_layout.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_pydot.cpython-39.pyc,, +networkx/drawing/tests/__pycache__/test_pylab.cpython-39.pyc,, +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/drawing/tests/test_agraph.py,sha256=7qDwr3AruwHxoSUGNRACyL5OTK7_2qDM5bkCANSMql4,9045 +networkx/drawing/tests/test_latex.py,sha256=_Wng73kMltC-_sUoxdo2uBL2bkEc7HMqkKhwo9ZDJGA,8710 +networkx/drawing/tests/test_layout.py,sha256=JUHMitAFs28rgYcEvk3-8-8Ri0Qui9ir3DjY24L6MfU,17841 +networkx/drawing/tests/test_pydot.py,sha256=-eRn39-HTFAZ1oVMR6ULSZxCS2531-HrlNjQaUa3i-8,6242 +networkx/drawing/tests/test_pylab.py,sha256=hlStKEitfl74u2wnEYlajmWHVd0IwHdyFfxDcgooUfc,27576 +networkx/exception.py,sha256=5v8tPTpYcuu3OFgSitgC8-wMUGNwfgxZog2gsBNeRPk,3537 +networkx/generators/__init__.py,sha256=tEbG2IO2NkxVzAFjeApCpATxhjRYopOXes6iffqC6DI,1318 +networkx/generators/__pycache__/__init__.cpython-39.pyc,, +networkx/generators/__pycache__/atlas.cpython-39.pyc,, +networkx/generators/__pycache__/classic.cpython-39.pyc,, +networkx/generators/__pycache__/cographs.cpython-39.pyc,, +networkx/generators/__pycache__/community.cpython-39.pyc,, +networkx/generators/__pycache__/degree_seq.cpython-39.pyc,, +networkx/generators/__pycache__/directed.cpython-39.pyc,, +networkx/generators/__pycache__/duplication.cpython-39.pyc,, +networkx/generators/__pycache__/ego.cpython-39.pyc,, +networkx/generators/__pycache__/expanders.cpython-39.pyc,, +networkx/generators/__pycache__/geometric.cpython-39.pyc,, +networkx/generators/__pycache__/harary_graph.cpython-39.pyc,, +networkx/generators/__pycache__/internet_as_graphs.cpython-39.pyc,, +networkx/generators/__pycache__/intersection.cpython-39.pyc,, +networkx/generators/__pycache__/interval_graph.cpython-39.pyc,, 
+networkx/generators/__pycache__/joint_degree_seq.cpython-39.pyc,, +networkx/generators/__pycache__/lattice.cpython-39.pyc,, +networkx/generators/__pycache__/line.cpython-39.pyc,, +networkx/generators/__pycache__/mycielski.cpython-39.pyc,, +networkx/generators/__pycache__/nonisomorphic_trees.cpython-39.pyc,, +networkx/generators/__pycache__/random_clustered.cpython-39.pyc,, +networkx/generators/__pycache__/random_graphs.cpython-39.pyc,, +networkx/generators/__pycache__/small.cpython-39.pyc,, +networkx/generators/__pycache__/social.cpython-39.pyc,, +networkx/generators/__pycache__/spectral_graph_forge.cpython-39.pyc,, +networkx/generators/__pycache__/stochastic.cpython-39.pyc,, +networkx/generators/__pycache__/sudoku.cpython-39.pyc,, +networkx/generators/__pycache__/time_series.cpython-39.pyc,, +networkx/generators/__pycache__/trees.cpython-39.pyc,, +networkx/generators/__pycache__/triads.cpython-39.pyc,, +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=NG5jMwud76LrkzBCl9tNGrv0j3weSi4-A_wtPChdglY,5557 +networkx/generators/classic.py,sha256=hmHiIsSsld_DVY8lpULBF8bGRLYDnhCWDD-Y_8u4o-k,28395 +networkx/generators/cographs.py,sha256=oDJVZRiviNZOrHSGXxQNcXFbRvtB3DW0aFFEJ8bfQ_Y,1866 +networkx/generators/community.py,sha256=bgbD7UfKYXf9lSUe9WJeo20pioNJJMyh1ZpenYOex6E,34690 +networkx/generators/degree_seq.py,sha256=0Zpv4q5rYgrUepprOGaQFTw3PVg6EEWobo9i3Ira9ZI,30006 +networkx/generators/directed.py,sha256=qlm-ArjKbqnGWJi2Ax9HuqaUQm_IUFPuwwDYvAUW4o8,15554 +networkx/generators/duplication.py,sha256=1Ys4nb9suq49ooAVXSnkuKQjvW3H_llprd-xdp70wSs,5013 +networkx/generators/ego.py,sha256=V373eWi-qyJXxgsa49NdLyVqFVCpqC-CMY9ATwQYws8,1873 +networkx/generators/expanders.py,sha256=xDfzdnl2XYrVItnCiU_09szMKQB6S9xOaxWx6rFTHCE,6447 +networkx/generators/geometric.py,sha256=8slijaPVv36oTzU5EsKx0bqtLmgICpbV5zl0vh7MfM8,30858 +networkx/generators/harary_graph.py,sha256=-9SU_IDZklbmiz4bwyCOPbQ4DEnXHwKStim2fHbPz6A,6111 +networkx/generators/internet_as_graphs.py,sha256=tMXL8U9YCAerfYYOV0wpEjPL16HIAJpEOzL8GdZAjxw,14148 +networkx/generators/intersection.py,sha256=WW3yE7TbjDtjggrLU8UysmiY_ip6tLy_hggUYWXVRy8,4028 +networkx/generators/interval_graph.py,sha256=LuXcLfLiRToVhmwz6nz1yWKivzqujFOB1MNZ0WRgnmg,2213 +networkx/generators/joint_degree_seq.py,sha256=p49dvZCcda6NAEsz4zvbiogIw4deqT51HpgIb8rU2zY,24717 +networkx/generators/lattice.py,sha256=8yw3GgxEH2erK2xBCW3oMT2hT2R5eCJC9OcBKz-R-vs,13380 +networkx/generators/line.py,sha256=R5Nz58zz_Fq8LbbLPEDRrYbCGLlbaRj2htmRZJbEv3M,17500 +networkx/generators/mycielski.py,sha256=0f_XLLjdpKsV_VS31PESe6LGg4yJI7nyX4DUWtyMU0U,3266 +networkx/generators/nonisomorphic_trees.py,sha256=I6T3QaH-Asw1B5g0_jnQJO9tOydc4qEbSmNIFUvq6ik,5232 +networkx/generators/random_clustered.py,sha256=4msP74PFIQvmWCxIs7F9WqYWvOTqDXXSf1wSuExEXF8,4159 +networkx/generators/random_graphs.py,sha256=SSYM8SZWgRglwMI6vxBWukDnQDJEk_9lCFKWMm_NkjA,44729 +networkx/generators/small.py,sha256=ADc-LfX8aEg_UYkPVfZk9YN4Na2KxpZxHsSvnVhj5D8,27217 +networkx/generators/social.py,sha256=caDTR23cNv1nlA5j3OZvs3d4HuwExxNbjfO9Ijqv7qY,22867 +networkx/generators/spectral_graph_forge.py,sha256=PPU6w9Z0_6iKQODsXSCE4ftVMYNrECBeH1Q_Y5Ea95U,4217 +networkx/generators/stochastic.py,sha256=K_4B3xmc6EDTQkc53eaOK1wHzF6qijAogWmEMDIjwjc,1897 +networkx/generators/sudoku.py,sha256=l-j2mo0KLarWRvMmjncZZfxJzoYn9ZGuz_3rlGZ5CRM,4264 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/__pycache__/__init__.cpython-39.pyc,, 
+networkx/generators/tests/__pycache__/test_atlas.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_classic.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_cographs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_community.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_degree_seq.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_directed.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_duplication.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_ego.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_expanders.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_geometric.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_harary_graph.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_intersection.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_interval_graph.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_lattice.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_line.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_mycielski.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_random_clustered.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_random_graphs.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_small.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_stochastic.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_sudoku.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_time_series.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_trees.cpython-39.pyc,, +networkx/generators/tests/__pycache__/test_triads.cpython-39.pyc,, +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=RAELDkMqAVYaIo1nhBbjDbmAfzSrn6RBqSSd--mWqvs,22495 +networkx/generators/tests/test_cographs.py,sha256=DkiQzP69sjw3QtjWVX2XV0EXoOuEvR42dixPWwuawSE,460 +networkx/generators/tests/test_community.py,sha256=FGcDo3Ajb-yYc5kUkFbVfOJVMG-YppbAtjgBPcVzjLc,11311 +networkx/generators/tests/test_degree_seq.py,sha256=in6lg1pwcAg1N08MA3lQdr3lnm2-aoUy3BRm6Yj_OBQ,7093 +networkx/generators/tests/test_directed.py,sha256=00widU8dJGkdnU_b6-ZxL8KGtx-gSh4sRG7cwbMHvjQ,5258 +networkx/generators/tests/test_duplication.py,sha256=IIzcHEfHp0NHsH7GTXSb4E4kgXAlt83q4IMibfx2FBw,1915 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=O6O68S5VFWycg-Ml-gvZXjX_vjwO0MBfaBcNYIBP9Io,2896 +networkx/generators/tests/test_geometric.py,sha256=1wzo-eTOP937aOEk8lQGrsW7u2BDfRhyYQ1HpHQqslQ,12512 +networkx/generators/tests/test_harary_graph.py,sha256=U5GfsoekBwVwTGMvk33e2eFOzHEL4czRIWv57j3nt_g,4937 +networkx/generators/tests/test_internet_as_graphs.py,sha256=QmzkOnWg9bcSrv31UcaD6Cko55AV-GPLLY5Aqb_Dmvs,6795 +networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=-1yXDZDW-ygmNva9Bu-TsS_SYGLcW1KJplwZHFFYyWM,4278 +networkx/generators/tests/test_joint_degree_seq.py,sha256=8TXTZI3Um2gBXtP-4yhGKf9vCi78-NVmWZw9r9WG3F8,4270 
+networkx/generators/tests/test_lattice.py,sha256=q4Ri-dH9mKhfq0PNX9xMeYRUiP0JlPBr7piSruZlFlg,9290 +networkx/generators/tests/test_line.py,sha256=vXncJuny2j5ulCJyT01Rt1tTwPib4XelS3dJDdJXjx0,10378 +networkx/generators/tests/test_mycielski.py,sha256=cAg2J6o_RrbwEdAc0vCuSF6zeS6w1KT4leTM0vkIeoA,822 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=Y_qWyj_qZU9O_DC4BHEVD9xnIEALCmfdmZAYJjTxUYE,2384 +networkx/generators/tests/test_random_clustered.py,sha256=LTfigb1swnYWS59OJoBmNcjFcUjsodnHVOwFxBXl7xg,979 +networkx/generators/tests/test_random_graphs.py,sha256=DKEPbvKiFzZQsuofuj_MphGX2KJ8Bvz6ofIttDGMANk,13121 +networkx/generators/tests/test_small.py,sha256=u_CTdGXfwnqvIYWjYv8VX_r_KB5Y1aCxXxQkxhx-WHs,6906 +networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=xdytPcz4ETnuqGtjMr0CI3zR4xWJqi91Zxbkly8Ijf8,2178 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_time_series.py,sha256=74kHpcBfbed7zmd1Ofh2XoLIhIaEEFpEf51j1e2muMo,2229 +networkx/generators/tests/test_trees.py,sha256=hv8oNYZOcYcaARXvaMQZptCVBvk-huk-nKI5mH9sB-8,7634 +networkx/generators/tests/test_triads.py,sha256=mgpHFf0Z34CqtnXgkdf7gK1dC77ppYAqwviXsaU1HVs,332 +networkx/generators/time_series.py,sha256=Jz33n3mprkLrVbwLRLznQg_lDQvmIL2jWNoY4LCla80,2414 +networkx/generators/trees.py,sha256=xR9H01HkI1WR24KdSEv8inTz6rgbzjwt1vL0GfOImts,39067 +networkx/generators/triads.py,sha256=bXfoxFUH9CJaO7PMxjwiB9SvhPp4ZL3HxlUsL-67Yv4,2233 +networkx/lazy_imports.py,sha256=MDfQ4C99G30uYBmLJBGtGEcrOzHMUcxvOyZpN674DYw,5784 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/__pycache__/__init__.cpython-39.pyc,, +networkx/linalg/__pycache__/algebraicconnectivity.cpython-39.pyc,, +networkx/linalg/__pycache__/attrmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/bethehessianmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/graphmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/laplacianmatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/modularitymatrix.cpython-39.pyc,, +networkx/linalg/__pycache__/spectrum.cpython-39.pyc,, +networkx/linalg/algebraicconnectivity.py,sha256=CScaTuN7V1BfEBPA8LSSe1FLV7uE-Am8Iaf9Q9Crgtw,21106 +networkx/linalg/attrmatrix.py,sha256=93xWJq-tIvayQTEUp7UaHLmAQQpSPYMHKF4KqPEfSUE,15504 +networkx/linalg/bethehessianmatrix.py,sha256=sNCKJoRe9jidTrqdX5I8mwl1t08GKzaL14R4ujk42Vk,2692 +networkx/linalg/graphmatrix.py,sha256=TRbhk2cHeJtKW15Gl-lGD7Ih1M3eHkSOC7yoerb7WBU,5513 +networkx/linalg/laplacianmatrix.py,sha256=WQJnmPEVsDTL8kUfQmqg-GJEPeHx3HO4PQLTO-XUp44,13330 +networkx/linalg/modularitymatrix.py,sha256=gtFN_MajMpZlvfsUqzBygLS6l5hZKi2_g1YqzPPedJw,4698 +networkx/linalg/spectrum.py,sha256=oxo9HSRM6jP2g8hKT1lk54Yinhr38AcanSK6RwHnDG0,4194 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_bethehessian.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_laplacian.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_modularity.cpython-39.pyc,, +networkx/linalg/tests/__pycache__/test_spectrum.cpython-39.pyc,, 
+networkx/linalg/tests/test_algebraic_connectivity.py,sha256=Kj2ct6gQ71xXFP7usAbFLJxD7ZdtTzneHiFJQOoVCUQ,13737 +networkx/linalg/tests/test_attrmatrix.py,sha256=XD3YuPc5yXKWbhwVSI8YiV_wABWM-rLtwf1uwwWlnI0,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=0r-Do902ywV10TyqTlIJ2Ls3iMqM6sSs2PZbod7kWBM,1327 +networkx/linalg/tests/test_graphmatrix.py,sha256=e5YSH9ih1VL64nnYgZFDvLyKbP3BFqpp0jY6t-8b2eY,8708 +networkx/linalg/tests/test_laplacian.py,sha256=K8p2upJTJLfNHfAf0B9ohPXBZ4k_2VMpSvIc-jXZ_rM,9934 +networkx/linalg/tests/test_modularity.py,sha256=mfKUvwc3bj6Rud1aG4oK3Eu1qg12o6cB8-pv5ZFicYY,3115 +networkx/linalg/tests/test_spectrum.py,sha256=agP2DsiEIvtkNUkT94mdPtJjwnobnjMTUOwjIQa4giA,2828 +networkx/readwrite/__init__.py,sha256=iHycAh1rjr4bCPQMNiHiqm8cP3iu-g1v_uKiGZtkuXY,562 +networkx/readwrite/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/__pycache__/adjlist.cpython-39.pyc,, +networkx/readwrite/__pycache__/edgelist.cpython-39.pyc,, +networkx/readwrite/__pycache__/gexf.cpython-39.pyc,, +networkx/readwrite/__pycache__/gml.cpython-39.pyc,, +networkx/readwrite/__pycache__/graph6.cpython-39.pyc,, +networkx/readwrite/__pycache__/graphml.cpython-39.pyc,, +networkx/readwrite/__pycache__/leda.cpython-39.pyc,, +networkx/readwrite/__pycache__/multiline_adjlist.cpython-39.pyc,, +networkx/readwrite/__pycache__/p2g.cpython-39.pyc,, +networkx/readwrite/__pycache__/pajek.cpython-39.pyc,, +networkx/readwrite/__pycache__/sparse6.cpython-39.pyc,, +networkx/readwrite/__pycache__/text.cpython-39.pyc,, +networkx/readwrite/adjlist.py,sha256=P8W_dQu-1NQCC8FX-Zpyta_b0L-NuruRE4X-GKSWNSQ,8386 +networkx/readwrite/edgelist.py,sha256=qkS9reBZWrSviBp2ZkUMt5gEo4FbFcBfQXhO14OSI-E,14160 +networkx/readwrite/gexf.py,sha256=UZPVSIlQNH_t0KNTEjclEcJ96FdeLWLVQ84f7u1NoDM,39668 +networkx/readwrite/gml.py,sha256=NK9jDDQhShmf70wrH2P2hNV59KdFUbC866vhvRc_qV8,31104 +networkx/readwrite/graph6.py,sha256=P2jrsgiX75XGMCK4wyIGpXo8snOJ--ZZ6r6N5m1jusE,11355 +networkx/readwrite/graphml.py,sha256=YIxHdP3zXFixYb_rVL051-yyfN9rJwFs_D1tB39NH8w,39183 +networkx/readwrite/json_graph/__init__.py,sha256=31_5zVLXYEZkjOB-TKXZ5bi83JybPWgpCaRKOXIGoOA,676 +networkx/readwrite/json_graph/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/adjacency.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/node_link.cpython-39.pyc,, +networkx/readwrite/json_graph/__pycache__/tree.cpython-39.pyc,, +networkx/readwrite/json_graph/adjacency.py,sha256=QAUoN4LI5ehEjBv__T_hpmHA2n0eHuE3f3OwQv4kiqA,4692 +networkx/readwrite/json_graph/cytoscape.py,sha256=1UqpoAB-96c4sFGKqTjCziInavrHcFJRHFQo4_iits4,5234 +networkx/readwrite/json_graph/node_link.py,sha256=8ujpdgQapwClUautmCOF3Up3JoFiLgZRHu6PUxF1B7Q,7450 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-39.pyc,, +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=jueQE3Z_W5BZuCjr0hEsOWSfoQ2fP51p0o0m7IcXUuE,2456 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=vFoDzcSRI9THlmp4Fu2HHhIF9AUmECWs5mftVWjaWWs,2044 
+networkx/readwrite/json_graph/tests/test_node_link.py,sha256=bDe2Vv1M4h0IDbKjS482p8ZE7SZtBfHDgZ1OEPibwoo,4536 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=zBXv3_db2XGxFs3XQ35btNf_ku52aLXXiHZmmX4ixAs,1352 +networkx/readwrite/json_graph/tree.py,sha256=ETjeYnUMyqZ0PbvSR97ar9rJnH8ncLI_Qd16qGFb-jw,3827 +networkx/readwrite/leda.py,sha256=No8DKw26vB2fzaDQNmfOEOuiUS9XGdvBXSKrctJhipg,2749 +networkx/readwrite/multiline_adjlist.py,sha256=Zo7K6gE-FpOBao4KiVaA-yRk4mbQzCJzIl4uUgrWYfM,11255 +networkx/readwrite/p2g.py,sha256=j8vNdr8KKD3o0d1zvfYUJnhe8J9vWxAHLzw3mW85apE,3043 +networkx/readwrite/pajek.py,sha256=aFKB04KvFuCqBawdjvR7U4N8BzPDDatqkf97s8zY_eI,8690 +networkx/readwrite/sparse6.py,sha256=sPL2NBYvB9blDaSJEEdYocHxaU7SmV1OVhYf8a_-LsI,10269 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_adjlist.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_edgelist.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_gexf.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_gml.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_graph6.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_graphml.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_leda.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_p2g.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_pajek.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_sparse6.cpython-39.pyc,, +networkx/readwrite/tests/__pycache__/test_text.cpython-39.pyc,, +networkx/readwrite/tests/test_adjlist.py,sha256=dLEv3txnBrHYxajOYAQhA8CA7axiuPw1ECbaHL5p338,9922 +networkx/readwrite/tests/test_edgelist.py,sha256=atBg6Qjhk8boXs3gUZk4gmg-6GOT5rCosEf30sqOZO4,9969 +networkx/readwrite/tests/test_gexf.py,sha256=Tbqueeh0XRQ8vtmGwXcyy9K3tWPlnLu6Gop0Hy4cZcc,19405 +networkx/readwrite/tests/test_gml.py,sha256=GF8rfOj2M3tMtdQ65DMsXypXSFMeWzKEI1qYV-jd5xA,21334 +networkx/readwrite/tests/test_graph6.py,sha256=IjBpfTr-czBLHb8UT_JzvOTBROpnOf5TKKkfCnEeQT8,6069 +networkx/readwrite/tests/test_graphml.py,sha256=u4u-udRXPCXFUZ9oB0_X4UUx3MVULQjx9tkXUdwebhI,67149 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=drsdod5amV9TGCk-qE2RwsvAop78IKEI1WguVFfd9rs,1320 +networkx/readwrite/tests/test_pajek.py,sha256=XTsnaCaYjroysCHlTsYwMGGrDR0B1MRwWkA-WXbAXTg,4703 +networkx/readwrite/tests/test_sparse6.py,sha256=fLpTG0YgcptNOpUipcCcVlni5i8IyC21kkk3ZeD0XhM,5470 +networkx/readwrite/tests/test_text.py,sha256=w17FdFQ4vK3J8d2UKPZUEtIo5udp6UyilPXyIr8JfpE,56562 +networkx/readwrite/text.py,sha256=4rWNkDgtUSmqGUAIXPkOKU7u9PFQOlp5eBqn6yt9QKk,32132 +networkx/relabel.py,sha256=m8R1KovP9IBtzTE2_6NYgMRxB5AFDwZmUCOFYkDoNXI,10279 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/tests/__pycache__/test_all_random_functions.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_numpy.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_pandas.cpython-39.pyc,, +networkx/tests/__pycache__/test_convert_scipy.cpython-39.pyc,, +networkx/tests/__pycache__/test_exceptions.cpython-39.pyc,, +networkx/tests/__pycache__/test_import.cpython-39.pyc,, +networkx/tests/__pycache__/test_lazy_imports.cpython-39.pyc,, 
+networkx/tests/__pycache__/test_relabel.cpython-39.pyc,, +networkx/tests/test_all_random_functions.py,sha256=tbFGmaqLrF8lEp0Hn8sOuPzD5rzIpOVOeeBoBk3_W6g,8653 +networkx/tests/test_convert.py,sha256=SoIVrqJFF9Gu9Jff_apfbpqg8QhkfC6QW4qzoSM-ukM,12731 +networkx/tests/test_convert_numpy.py,sha256=R4y5ud0hVZFSGrFjUHD6Anu_aaasy2O_Eke4FaOhPqU,14951 +networkx/tests/test_convert_pandas.py,sha256=cZJEdV0jP8afRZMqJ8-aL9Ma5NdXSWMuj1hVbjGMR2g,12257 +networkx/tests/test_convert_scipy.py,sha256=C2cY_8dgBksO0uttkhyCnjACXtC6KHjxqHUk47P5wH8,10436 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 +networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=nKykNQPt_ZV8JxCH_EkwwcPNayAgZGQVf89e8I7uIlI,2680 +networkx/tests/test_relabel.py,sha256=dffbjiW_VUAQe7iD8knFS_KepUITt0F6xuwf7daWwKw,14517 +networkx/utils/__init__.py,sha256=T8IdHaWU2MOGbU-1a7JZcAn5YFtO9iDQVt6ky-BRkJg,227 +networkx/utils/__pycache__/__init__.cpython-39.pyc,, +networkx/utils/__pycache__/backends.cpython-39.pyc,, +networkx/utils/__pycache__/decorators.cpython-39.pyc,, +networkx/utils/__pycache__/heaps.cpython-39.pyc,, +networkx/utils/__pycache__/mapped_queue.cpython-39.pyc,, +networkx/utils/__pycache__/misc.cpython-39.pyc,, +networkx/utils/__pycache__/random_sequence.cpython-39.pyc,, +networkx/utils/__pycache__/rcm.cpython-39.pyc,, +networkx/utils/__pycache__/union_find.cpython-39.pyc,, +networkx/utils/backends.py,sha256=5z_pQQrT3kGUCtihbDDqy3TvR9t82KxyMGAaAdsgKLs,40939 +networkx/utils/decorators.py,sha256=Z3U3-pXWD1OKa3cciHG_LSwwylVrpSXdP2rPALq_gZc,46464 +networkx/utils/heaps.py,sha256=HUZuETHfELEqiXdMBPmD9fA2KiACVhp6iEahcrjFxYM,10391 +networkx/utils/mapped_queue.py,sha256=ywJN0Z32EAQ1dezF8ORXP_ca0n16sxQlcIrkQQn5i7I,10185 +networkx/utils/misc.py,sha256=pyN1TGuUFHFfPLubIL-wP-zl_Ybm_U_UWxVw37_tI3g,14351 +networkx/utils/random_sequence.py,sha256=KzKh0BRMri0MBZlzxHNMl3qRTy2DnBexW3eDzmxKab4,4237 +networkx/utils/rcm.py,sha256=MeOhFkv91ALieKJtGHqkhxgO7KJBz53mB8tRcYCX3xk,4623 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/__pycache__/__init__.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test__init.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_decorators.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_heaps.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_mapped_queue.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_misc.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_random_sequence.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_rcm.cpython-39.pyc,, +networkx/utils/tests/__pycache__/test_unionfind.cpython-39.pyc,, +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 +networkx/utils/tests/test_decorators.py,sha256=AfxQ_C4BcKG8q9wepyglzIebzD_pPpGRPR4dovl6JR4,13334 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 +networkx/utils/tests/test_mapped_queue.py,sha256=l1Nguzz68Fv91FnAT7y7B0GXSoje9uoWiObHo7TliGM,7354 +networkx/utils/tests/test_misc.py,sha256=3oa6D5fnxm9VFODhEwM540hU4IBzEucOoD6DiGvP5gc,8218 +networkx/utils/tests/test_random_sequence.py,sha256=Ou-IeCFybibZuycoin5gUQzzC-iy5yanZFmrqvdGt6Q,925 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 
+networkx/utils/union_find.py,sha256=NxKlBlyS71A1Wlnt28L-wyZoI9ExZvJth_0e2XSVris,3338
diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2170e9f4285422f4f95b05fa682a9a479c19bf24
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[networkx.backends]
+nx-loopback = networkx.classes.tests.dispatch_interface:dispatcher
diff --git a/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4d07dfe2f85d6849d7f416dcce756b2501ba847e
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx-3.2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+networkx
diff --git a/MLPY/Lib/site-packages/networkx/__init__.py b/MLPY/Lib/site-packages/networkx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eda9418b9866eecbb8304d5a665facca6a7545ee
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/__init__.py
@@ -0,0 +1,49 @@
+"""
+NetworkX
+========
+
+NetworkX is a Python package for the creation, manipulation, and study of the
+structure, dynamics, and functions of complex networks.
+
+See https://networkx.org for complete documentation.
+""" + +__version__ = "3.2.1" + + +# These are imported in order as listed +from networkx.lazy_imports import _lazy_import + +from networkx.exception import * + +from networkx import utils +from networkx.utils.backends import _dispatch + +from networkx import classes +from networkx.classes import filters +from networkx.classes import * + +from networkx import convert +from networkx.convert import * + +from networkx import convert_matrix +from networkx.convert_matrix import * + +from networkx import relabel +from networkx.relabel import * + +from networkx import generators +from networkx.generators import * + +from networkx import readwrite +from networkx.readwrite import * + +# Need to test with SciPy, when available +from networkx import algorithms +from networkx.algorithms import * + +from networkx import linalg +from networkx.linalg import * + +from networkx import drawing +from networkx.drawing import * diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3a49473c9f6eb53003d294fb7dfafa5b829fa11 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f4b240831b312249d86358ff6d79c8c280e12ed Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/conftest.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8445eae55a520ae9d9ec5ee9cf28d7fd866cbcf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/convert.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4248ffedb92b4941eb73e1adc1b1a7f8470b4058 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/convert_matrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1449142d97183a89f32ee13aa585e48d01925dc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/exception.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8776233db226bdd382cd85dc432170c5ed0db36c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/lazy_imports.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..773b6b66659fa0f3816ab952efa0f63e87d9b3dc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/__pycache__/relabel.cpython-39.pyc differ diff --git 
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..db6d6cebb1900a636fc34822102bf0271f703952
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/__init__.py
@@ -0,0 +1,132 @@
+from networkx.algorithms.assortativity import *
+from networkx.algorithms.asteroidal import *
+from networkx.algorithms.boundary import *
+from networkx.algorithms.bridges import *
+from networkx.algorithms.chains import *
+from networkx.algorithms.centrality import *
+from networkx.algorithms.chordal import *
+from networkx.algorithms.cluster import *
+from networkx.algorithms.clique import *
+from networkx.algorithms.communicability_alg import *
+from networkx.algorithms.components import *
+from networkx.algorithms.coloring import *
+from networkx.algorithms.core import *
+from networkx.algorithms.covering import *
+from networkx.algorithms.cycles import *
+from networkx.algorithms.cuts import *
+from networkx.algorithms.d_separation import *
+from networkx.algorithms.dag import *
+from networkx.algorithms.distance_measures import *
+from networkx.algorithms.distance_regular import *
+from networkx.algorithms.dominance import *
+from networkx.algorithms.dominating import *
+from networkx.algorithms.efficiency_measures import *
+from networkx.algorithms.euler import *
+from networkx.algorithms.graphical import *
+from networkx.algorithms.hierarchy import *
+from networkx.algorithms.hybrid import *
+from networkx.algorithms.link_analysis import *
+from networkx.algorithms.link_prediction import *
+from networkx.algorithms.lowest_common_ancestors import *
+from networkx.algorithms.isolate import *
+from networkx.algorithms.matching import *
+from networkx.algorithms.minors import *
+from networkx.algorithms.mis import *
+from networkx.algorithms.moral import *
+from networkx.algorithms.non_randomness import *
+from networkx.algorithms.operators import *
+from networkx.algorithms.planarity import *
+from networkx.algorithms.planar_drawing import *
+from networkx.algorithms.reciprocity import *
+from networkx.algorithms.regular import *
+from networkx.algorithms.richclub import *
+from networkx.algorithms.shortest_paths import *
+from networkx.algorithms.similarity import *
+from networkx.algorithms.graph_hashing import *
+from networkx.algorithms.simple_paths import *
+from networkx.algorithms.smallworld import *
+from networkx.algorithms.smetric import *
+from networkx.algorithms.structuralholes import *
+from networkx.algorithms.sparsifiers import *
+from networkx.algorithms.summarization import *
+from networkx.algorithms.swap import *
+from networkx.algorithms.time_dependent import *
+from networkx.algorithms.traversal import *
+from networkx.algorithms.triads import *
+from networkx.algorithms.vitality import *
+from networkx.algorithms.voronoi import *
+from networkx.algorithms.walks import *
+from networkx.algorithms.wiener import *
+from networkx.algorithms.polynomials import *
+
+# Make certain subpackages available to the user as direct imports from
+# the `networkx` namespace.
+from networkx.algorithms import approximation
+from networkx.algorithms import assortativity
+from networkx.algorithms import bipartite
+from networkx.algorithms import node_classification
+from networkx.algorithms import centrality
+from networkx.algorithms import chordal
+from networkx.algorithms import cluster
+from networkx.algorithms import clique
+from networkx.algorithms import components
+from networkx.algorithms import connectivity
+from networkx.algorithms import community
+from networkx.algorithms import coloring
+from networkx.algorithms import flow
+from networkx.algorithms import isomorphism
+from networkx.algorithms import link_analysis
+from networkx.algorithms import lowest_common_ancestors
+from networkx.algorithms import operators
+from networkx.algorithms import shortest_paths
+from networkx.algorithms import tournament
+from networkx.algorithms import traversal
+from networkx.algorithms import tree
+
+# Make certain functions from some of the previous subpackages available
+# to the user as direct imports from the `networkx` namespace.
+from networkx.algorithms.bipartite import complete_bipartite_graph
+from networkx.algorithms.bipartite import is_bipartite
+from networkx.algorithms.bipartite import projected_graph
+from networkx.algorithms.connectivity import all_pairs_node_connectivity
+from networkx.algorithms.connectivity import all_node_cuts
+from networkx.algorithms.connectivity import average_node_connectivity
+from networkx.algorithms.connectivity import edge_connectivity
+from networkx.algorithms.connectivity import edge_disjoint_paths
+from networkx.algorithms.connectivity import k_components
+from networkx.algorithms.connectivity import k_edge_components
+from networkx.algorithms.connectivity import k_edge_subgraphs
+from networkx.algorithms.connectivity import k_edge_augmentation
+from networkx.algorithms.connectivity import is_k_edge_connected
+from networkx.algorithms.connectivity import minimum_edge_cut
+from networkx.algorithms.connectivity import minimum_node_cut
+from networkx.algorithms.connectivity import node_connectivity
+from networkx.algorithms.connectivity import node_disjoint_paths
+from networkx.algorithms.connectivity import stoer_wagner
+from networkx.algorithms.flow import capacity_scaling
+from networkx.algorithms.flow import cost_of_flow
+from networkx.algorithms.flow import gomory_hu_tree
+from networkx.algorithms.flow import max_flow_min_cost
+from networkx.algorithms.flow import maximum_flow
+from networkx.algorithms.flow import maximum_flow_value
+from networkx.algorithms.flow import min_cost_flow
+from networkx.algorithms.flow import min_cost_flow_cost
+from networkx.algorithms.flow import minimum_cut
+from networkx.algorithms.flow import minimum_cut_value
+from networkx.algorithms.flow import network_simplex
+from networkx.algorithms.isomorphism import could_be_isomorphic
+from networkx.algorithms.isomorphism import fast_could_be_isomorphic
+from networkx.algorithms.isomorphism import faster_could_be_isomorphic
+from networkx.algorithms.isomorphism import is_isomorphic
+from networkx.algorithms.isomorphism.vf2pp import *
+from networkx.algorithms.tree.branchings import maximum_branching
+from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
+from networkx.algorithms.tree.branchings import minimum_branching
+from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
+from networkx.algorithms.tree.branchings import ArborescenceIterator
+from networkx.algorithms.tree.coding import *
+from networkx.algorithms.tree.decomposition import *
+from networkx.algorithms.tree.mst import *
+from networkx.algorithms.tree.operations import *
+from networkx.algorithms.tree.recognition import *
+from networkx.algorithms.tournament import is_tournament
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ba5fdf14e70873b1efa46e2758d074809ca2f1d
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/__init__.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bd3fb37824a35c5e51901350fb5effea019b8ce
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/asteroidal.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/boundary.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/boundary.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d332e3863bdf29e29b7bdd4190f815cf5b66a47
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/boundary.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/bridges.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/bridges.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61d17d794b97df63ebd58c396c96463a321f0ec5
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/bridges.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chains.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chains.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2935deb1eb2fe3c6e48f0b7eb2b7fe51d6fa580c
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chains.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chordal.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chordal.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab496a130fc6ee44deb4d885df84fc64213813a7
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/chordal.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/clique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/clique.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30fd3855ab493387a138b5b4322b1ece424c6803
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/clique.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cluster.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cluster.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81922773609f585b6ba01f943982dd1fbc053e5a
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cluster.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..537745d126e64a732fb1b5c123e78f37a9eba32f
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/communicability_alg.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/core.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/core.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbe532dd709392ed7ca29b8517ae9284b42a53a1
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/core.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/covering.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/covering.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b09fc14849db94a53029e425e974902744d1a704
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/covering.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cuts.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cuts.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae3cb8477b42a955845e37fc5abba3bc864c7e61
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cuts.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cycles.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cycles.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f65418a8d9ce3dd8e4380e16114057ab61c2a031
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/cycles.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6592fcd4d983edd41c5828c072789afac7283776
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/d_separation.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dag.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dag.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8013631614daaa7499d6cd2628a9e5e9d3bfcb5
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dag.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e97a7eadceb60fc82f390418143a1415bd54a66a
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_measures.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21e704982dbdffc829d884dad3f303edecdbba19
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/distance_regular.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominance.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominance.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df15a2292e0784e79013bb8f13b530a4fa5a819f
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominance.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominating.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominating.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14b6bbfdb7f1349583041e933cef4715e971cb83
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/dominating.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..689f93d78a6b9c22b39861029228c3268b51e38b
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/efficiency_measures.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/euler.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/euler.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d348900490eef4df41ab2a8820d45bb1b0ae427
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/euler.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8afd9dcc94959ff7be8626ff2e4c6a03c193dd9
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graph_hashing.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graphical.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graphical.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fc163febbcc8af811e1d8fda48fc0bb5e6a9ad5
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/graphical.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ad5b9fde4fd966e6305fad155a4f82b42566577
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hierarchy.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4322bdb8f8432f2b389525889a1820a69ca9276f
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/hybrid.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/isolate.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/isolate.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0e7c02cb77ee557a03e52dfa4893eaba2eeb5d5
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/isolate.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3483c262ecdb53f981e5208b6cb1113ea709fd4
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/link_prediction.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc7a304c44b75a70181acac07f7f251a033cdfdb
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/matching.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0381376191efdde454a644358ebe5fb58adfca89
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/matching.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/mis.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/mis.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab2f166dda45e60418440ef78b2cf20553a1b771
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/mis.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/moral.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/moral.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bacfae9996eec3162e3b69239885c47d5cfce0c
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/moral.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c864361a00d4098f93f443630ac7db5f017fcd1b
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/node_classification.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76339f14b0c2e52cbb4d4a881bd291625aca420c
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/non_randomness.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d30a11350ef31cdca28136dfd3486b8936c0743
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planar_drawing.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planarity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planarity.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34cfaf0a443671461e299a63fb7601ff121d5f4e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/planarity.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f21994fce30beac7eccf5d8b4d59dce47417acfc
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/polynomials.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..889406c1f5ec6ede4abf49ccca397d1e31c86901
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/reciprocity.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/regular.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/regular.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d71d848939a81d4af0e3700251e6989e43beb458
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/regular.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/richclub.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/richclub.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df08aaf60e4f3db3fc7f0002e958acf3d6af6ab8
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/richclub.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/similarity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/similarity.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0169a555cd0b7825abbeb2b9d70536a5c72a1290
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/similarity.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7d3637d4467aaae0a8f33c968d4b8f66fb656e6
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/simple_paths.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af7284bf1d149b12409eb732af30847a75b69823
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smallworld.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smetric.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smetric.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ad4ceddcf68a286657e47e59ae1f4a665b9250e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/smetric.cpython-39.pyc differ
a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad383d6036b289fea836169b296844a92b6a0800 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/sparsifiers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..660990b90960d2391eaf1f51d37c49d14bde9384 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/structuralholes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/summarization.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/summarization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ed8fa7ed639b5c3e4f2507ed68b291ed3bc753 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/summarization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/swap.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/swap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b43744f488fc22356344997955c2d8d60b4d196c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/swap.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/threshold.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/threshold.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5624618b5f017dc9669d1ad06315d4e8c15faeb0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/threshold.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baa1a6c076be05bf0374b325a0f4ec8d07cb6548 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/time_dependent.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/tournament.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/tournament.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..996a45387d46c14e6929b12b424a594e2ce84cf0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/tournament.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/triads.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/triads.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e0d4fc5fd943cf46ba9bee6f39c2b7d304752a2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/triads.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/vitality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/vitality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9747bebde8bd788da170cbc1b5fcd86ddce975d4 Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/vitality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30a892abda768e00991151dc7525c4e3a8ca179f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/voronoi.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/walks.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/walks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ac65c14f9dc3d8bc6ff74b6709a5a2e720746b8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/walks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/wiener.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/wiener.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97be920b9b86ba8ffaa1b5e677e146e3140bf587 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/__pycache__/wiener.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e39dc00aa250b05cbd8f0ce9b38cf32ecc752946 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__init__.py @@ -0,0 +1,24 @@ +"""Approximations of graph properties and Heuristic methods for optimization. + +The functions in this class are not imported into the top-level ``networkx`` +namespace so the easiest way to use them is with:: + + >>> from networkx.algorithms import approximation + +Another option is to import the specific function with +``from networkx.algorithms.approximation import function_name``. 
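To make the two import styles described above concrete, here is a minimal usage sketch; it assumes nothing beyond this vendored `networkx` being importable, and the values noted in the comments are properties of the chosen example graph:

```python
import networkx as nx
from networkx.algorithms import approximation

G = nx.petersen_graph()

# Style 1: call through the subpackage namespace.
k = approximation.node_connectivity(G)  # fast lower-bound estimate of the true value, 3

# Style 2: import a specific function directly.
from networkx.algorithms.approximation import average_clustering

c = average_clustering(G, trials=1000, seed=42)  # 0.0: the Petersen graph is triangle-free
```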
+ +""" +from networkx.algorithms.approximation.clustering_coefficient import * +from networkx.algorithms.approximation.clique import * +from networkx.algorithms.approximation.connectivity import * +from networkx.algorithms.approximation.distance_measures import * +from networkx.algorithms.approximation.dominating_set import * +from networkx.algorithms.approximation.kcomponents import * +from networkx.algorithms.approximation.matching import * +from networkx.algorithms.approximation.ramsey import * +from networkx.algorithms.approximation.steinertree import * +from networkx.algorithms.approximation.traveling_salesman import * +from networkx.algorithms.approximation.treewidth import * +from networkx.algorithms.approximation.vertex_cover import * +from networkx.algorithms.approximation.maxcut import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44d967a1bdca2ca4d2ba0150f36f14a0589377cd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52ee609986812777a3e9c8de7fb662e0444eb227 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7b3ba5f6e482aa907d726ec95b5f87ec78b40b6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..648bd9b5a8f1f4906b7b13344042b23f8b60c8bb Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..791412667a6b2720c4ca5812ab53068a2c53aa20 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9a4d1b8831f0a3e88de64406ff0b79de7f57554 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2a027fe8ef3cfca4faafc54ee544aedf19002fb Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20324dfc3b93571b7f1c4e808b54061b84468cb9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f827350d510813a0e4e7964fbadc1ab837cf7c64 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e36e256926f54488ab5726d3466e0967eaa490d3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8ea5fbe3c1b008987da0e5b68d604ef99200e86 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4acea6c4bce3996706a63e06c0af6dba349cb931 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fc88f94fc6ae6a653cb7047ea4de62e12f10f69 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8803ec9dbb4e5d0b28a8403e37604f027b9382f4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-39.pyc 
differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/clique.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/clique.py new file mode 100644 index 0000000000000000000000000000000000000000..4a3d8beba6103172988f49aa1d7a91bf670f7201 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/clique.py @@ -0,0 +1,258 @@ +"""Functions for computing large cliques and maximum independent sets.""" +import networkx as nx +from networkx.algorithms.approximation import ramsey +from networkx.utils import not_implemented_for + +__all__ = [ + "clique_removal", + "max_clique", + "large_clique_size", + "maximum_independent_set", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def maximum_independent_set(G): + """Returns an approximate maximum independent set. + + Independent set or stable set is a set of vertices in a graph, no two of + which are adjacent. That is, it is a set I of vertices such that for every + two vertices in I, there is no edge connecting the two. Equivalently, each + edge in the graph has at most one endpoint in I. The size of an independent + set is the number of vertices it contains [1]_. + + A maximum independent set is a largest independent set for a given graph G + and its size is denoted $\\alpha(G)$. The problem of finding such a set is called + the maximum independent set problem and is an NP-hard optimization problem. + As such, it is unlikely that there exists an efficient algorithm for finding + a maximum independent set of a graph. + + The Independent Set algorithm is based on [2]_. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + iset : Set + The apx-maximum independent set + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.maximum_independent_set(G) + {0, 2, 4, 6, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + Finds the $O(|V|/(log|V|)^2)$ apx of independent set in the worst case. + + References + ---------- + .. [1] `Wikipedia: Independent set + `_ + .. [2] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + iset, _ = clique_removal(G) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def max_clique(G): + r"""Find the Maximum Clique + + Finds the $O(|V|/(log|V|)^2)$ apx of maximum clique/independent set + in the worst case. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + clique : set + The apx-maximum clique of the graph + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.max_clique(G) + {8, 9} + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + A clique in an undirected graph G = (V, E) is a subset of the vertex set + `C \subseteq V` such that for every two vertices in C there exists an edge + connecting the two. This is equivalent to saying that the subgraph + induced by C is complete (in some cases, the term clique may also refer + to the subgraph). + + A maximum clique is a clique of the largest possible size in a given graph. + The clique number `\omega(G)` of a graph G is the number of + vertices in a maximum clique in G. The intersection number of + G is the smallest number of cliques that together cover all edges of G. 
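The Notes above describe the duality that the implementation below exploits: a clique of `G` is exactly an independent set of the complement of `G`. A small sketch of that relationship, using only functions from this module (variable names are illustrative):

```python
import networkx as nx
from networkx.algorithms import approximation

G = nx.path_graph(10)

# max_clique complements G internally and searches for an independent set,
# so computing it directly or via the complement yields a clique either way.
clique = approximation.max_clique(G)  # e.g. {8, 9}
via_complement = approximation.maximum_independent_set(nx.complement(G))

# Both results are cliques of G: every pair of members is adjacent.
for S in (clique, via_complement):
    assert all(G.has_edge(u, v) for u in S for v in S if u != v)
```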
+ + https://en.wikipedia.org/wiki/Maximum_clique + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + doi:10.1007/BF01994876 + """ + # finding the maximum clique in a graph is equivalent to finding + # the independent set in the complementary graph + cgraph = nx.complement(G) + iset, _ = clique_removal(cgraph) + return iset + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def clique_removal(G): + r"""Repeatedly remove cliques from the graph. + + Results in a $O(|V|/(\log |V|)^2)$ approximation of maximum clique + and independent set. Returns the largest independent set found, along + with found maximal cliques. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_ind_cliques : (set, list) tuple + 2-tuple of Maximal Independent Set and list of maximal cliques (sets). + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.clique_removal(G) + ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}]) + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + References + ---------- + .. [1] Boppana, R., & Halldórsson, M. M. (1992). + Approximating maximum independent sets by excluding subgraphs. + BIT Numerical Mathematics, 32(2), 180–196. Springer. + """ + graph = G.copy() + c_i, i_i = ramsey.ramsey_R2(graph) + cliques = [c_i] + isets = [i_i] + while graph: + graph.remove_nodes_from(c_i) + c_i, i_i = ramsey.ramsey_R2(graph) + if c_i: + cliques.append(c_i) + if i_i: + isets.append(i_i) + # Determine the largest independent set as measured by cardinality. + maxiset = max(isets, key=len) + return maxiset, cliques + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def large_clique_size(G): + """Find the size of a large clique in a graph. + + A *clique* is a subset of nodes in which each pair of nodes is + adjacent. This function is a heuristic for finding the size of a + large clique in the graph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + k: integer + The size of a large clique in the graph. + + Examples + -------- + >>> G = nx.path_graph(10) + >>> nx.approximation.large_clique_size(G) + 2 + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. + + Notes + ----- + This implementation is from [1]_. Its worst case time complexity is + :math:`O(n d^2)`, where *n* is the number of nodes in the graph and + *d* is the maximum degree. + + This function is a heuristic, which means it may work well in + practice, but there is no rigorous mathematical guarantee on the + ratio between the returned number and the actual largest clique size + in the graph. + + References + ---------- + .. [1] Pattabiraman, Bharath, et al. + "Fast Algorithms for the Maximum Clique Problem on Massive Graphs + with Applications to Overlapping Community Detection." + *Internet Mathematics* 11.4-5 (2015): 421--448. + + + See also + -------- + + :func:`networkx.algorithms.approximation.clique.max_clique` + A function that returns an approximate maximum clique with a + guarantee on the approximation ratio. + + :mod:`networkx.algorithms.clique` + Functions for finding the exact maximum clique in a graph. 
+ + """ + degrees = G.degree + + def _clique_heuristic(G, U, size, best_size): + if not U: + return max(best_size, size) + u = max(U, key=degrees) + U.remove(u) + N_prime = {v for v in G[u] if degrees[v] >= best_size} + return _clique_heuristic(G, U & N_prime, size + 1, best_size) + + best_size = 0 + nodes = (u for u in G if degrees[u] >= best_size) + for u in nodes: + neighbors = {v for v in G[u] if degrees[v] >= best_size} + best_size = _clique_heuristic(G, neighbors, 1, best_size) + return best_size diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py new file mode 100644 index 0000000000000000000000000000000000000000..e15ac68460bb5704fc3bf3726f5a6d6405efa320 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py @@ -0,0 +1,66 @@ +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["average_clustering"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatch(name="approximate_average_clustering") +def average_clustering(G, trials=1000, seed=None): + r"""Estimates the average clustering coefficient of G. + + The local clustering of each node in `G` is the fraction of triangles + that actually exist over all possible triangles in its neighborhood. + The average clustering coefficient of a graph `G` is the mean of + local clusterings. + + This function finds an approximate average clustering coefficient + for G by repeating `n` times (defined in `trials`) the following + experiment: choose a node at random, choose two of its neighbors + at random, and check if they are connected. The approximate + coefficient is the fraction of triangles found over the number + of trials [1]_. + + Parameters + ---------- + G : NetworkX graph + + trials : integer + Number of trials to perform (default 1000). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + c : float + Approximated average clustering coefficient. + + Examples + -------- + >>> from networkx.algorithms import approximation + >>> G = nx.erdos_renyi_graph(10, 0.2, seed=10) + >>> approximation.average_clustering(G, trials=1000, seed=10) + 0.214 + + References + ---------- + .. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering + coefficient and transitivity. Universität Karlsruhe, Fakultät für + Informatik, 2004. 
+       https://doi.org/10.5445/IR/1000001239
+
+    """
+    n = len(G)
+    triangles = 0
+    nodes = list(G)
+    for i in [int(seed.random() * n) for i in range(trials)]:
+        nbrs = list(G[nodes[i]])
+        if len(nbrs) < 2:
+            continue
+        u, v = seed.sample(nbrs, 2)
+        if u in G[v]:
+            triangles += 1
+    return triangles / trials
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5e7125937309e53ab3a2312434406447f5b0cd
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
@@ -0,0 +1,412 @@
+""" Fast approximation for node connectivity
+"""
+import itertools
+from operator import itemgetter
+
+import networkx as nx
+
+__all__ = [
+    "local_node_connectivity",
+    "node_connectivity",
+    "all_pairs_node_connectivity",
+]
+
+
+@nx._dispatch(name="approximate_local_node_connectivity")
+def local_node_connectivity(G, source, target, cutoff=None):
+    """Compute node connectivity between source and target.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what this function computes.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+
+    G : NetworkX graph
+
+    source : node
+        Starting node for node connectivity
+
+    target : node
+        Ending node for node connectivity
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff. Default value None.
+
+    Returns
+    -------
+    k: integer
+        pairwise node connectivity
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph has node connectivity 4
+    >>> # for each nonadjacent node pair
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.local_node_connectivity(G, 0, 5)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    Note that the authors propose a further refinement, losing accuracy and
+    gaining speed, which is not implemented yet.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths.
+       Santa Fe Institute Working Paper #01-07-035
+       http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if target == source:
+        raise nx.NetworkXError("source and target have to be different nodes.")
+
+    # Maximum possible node independent paths
+    if G.is_directed():
+        possible = min(G.out_degree(source), G.in_degree(target))
+    else:
+        possible = min(G.degree(source), G.degree(target))
+
+    K = 0
+    if not possible:
+        return K
+
+    if cutoff is None:
+        cutoff = float("inf")
+
+    exclude = set()
+    for i in range(min(possible, cutoff)):
+        try:
+            path = _bidirectional_shortest_path(G, source, target, exclude)
+            exclude.update(set(path))
+            K += 1
+        except nx.NetworkXNoPath:
+            break
+
+    return K
+
+
+@nx._dispatch(name="approximate_node_connectivity")
+def node_connectivity(G, s=None, t=None):
+    r"""Returns an approximation for node connectivity for a graph or digraph G.
+
+    Node connectivity is equal to the minimum number of nodes that
+    must be removed to disconnect G or render it trivial. By Menger's theorem,
+    this is equal to the number of node independent paths (paths that
+    share no nodes other than source and target).
+
+    If source and target nodes are provided, this function returns the
+    local node connectivity: the minimum number of nodes that must be
+    removed to break all paths from source to target in G.
+
+    This algorithm is based on a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected or directed graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    Returns
+    -------
+    K : integer
+        Node connectivity of G, or local node connectivity if source
+        and target are provided.
+
+    Examples
+    --------
+    >>> # Platonic octahedral graph is 4-node-connected
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.octahedral_graph()
+    >>> approx.node_connectivity(G)
+    4
+
+    Notes
+    -----
+    This algorithm [1]_ finds node independent paths between two nodes by
+    computing their shortest path using BFS, marking the nodes of the path
+    found as 'used' and then searching other shortest paths excluding the
+    nodes marked as used until no more paths exist. It is not exact because
+    a shortest path could use nodes that, if the path were longer, may belong
+    to two different node independent paths. Thus it only guarantees a
+    strict lower bound on node connectivity.
+
+    See also
+    --------
+    all_pairs_node_connectivity
+    local_node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths.
+       Santa Fe Institute Working Paper #01-07-035
+       http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t)
+
+    # Global node connectivity
+    if G.is_directed():
+        connected_func = nx.is_weakly_connected
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain(G.predecessors(v), G.successors(v))
+
+    else:
+        connected_func = nx.is_connected
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    if not connected_func(G):
+        return 0
+
+    # Choose a node with minimum degree
+    v, minimum_degree = min(G.degree(), key=itemgetter(1))
+    # Node connectivity is bounded by minimum degree
+    K = minimum_degree
+    # compute local node connectivity with all non-neighbor nodes
+    # and store the minimum
+    for w in set(G) - set(neighbors(v)) - {v}:
+        K = min(K, local_node_connectivity(G, v, w, cutoff=K))
+    # Same for nonadjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y not in G[x] and x != y:
+            K = min(K, local_node_connectivity(G, x, y, cutoff=K))
+    return K
+
+
+@nx._dispatch(name="approximate_all_pairs_node_connectivity")
+def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
+    """Compute node connectivity between all pairs of nodes.
+
+    Pairwise or local node connectivity between two distinct and nonadjacent
+    nodes is the minimum number of nodes that must be removed (minimum
+    separating cutset) to disconnect them. By Menger's theorem, this is equal
+    to the number of node independent paths (paths that share no nodes other
+    than source and target), which is what this function computes.
+
+    This algorithm is a fast approximation that gives a strict lower
+    bound on the actual number of node independent paths between two nodes [1]_.
+    It works for both directed and undirected graphs.
+
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    nbunch : container
+        Container of nodes. If provided, node connectivity will be computed
+        only over pairs of nodes in nbunch.
+
+    cutoff : integer
+        Maximum node connectivity to consider. If None, the minimum degree
+        of source or target is used as a cutoff in each pair of nodes.
+        Default value None.
+
+    Returns
+    -------
+    K : dictionary
+        Dictionary, keyed by source and target, of pairwise node connectivity
+
+    Examples
+    --------
+    A 3 node cycle with one extra node attached has connectivity 2 between all
+    nodes in the cycle and connectivity 1 between the extra node and the rest:
+
+    >>> G = nx.cycle_graph(3)
+    >>> G.add_edge(2, 3)
+    >>> import pprint  # for nice dictionary formatting
+    >>> pprint.pprint(nx.all_pairs_node_connectivity(G))
+    {0: {1: 2, 2: 2, 3: 1},
+     1: {0: 2, 2: 2, 3: 1},
+     2: {0: 2, 1: 2, 3: 1},
+     3: {0: 1, 1: 1, 2: 1}}
+
+    See Also
+    --------
+    local_node_connectivity
+    node_connectivity
+
+    References
+    ----------
+    .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+       Node-Independent Paths.
Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + """ + if nbunch is None: + nbunch = G + else: + nbunch = set(nbunch) + + directed = G.is_directed() + if directed: + iter_func = itertools.permutations + else: + iter_func = itertools.combinations + + all_pairs = {n: {} for n in nbunch} + + for u, v in iter_func(nbunch, 2): + k = local_node_connectivity(G, u, v, cutoff=cutoff) + all_pairs[u][v] = k + if not directed: + all_pairs[v][u] = k + + return all_pairs + + +def _bidirectional_shortest_path(G, source, target, exclude): + """Returns shortest path between source and target ignoring nodes in the + container 'exclude'. + + Parameters + ---------- + + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + exclude: container + Container for nodes to exclude from the search for shortest paths + + Returns + ------- + path: list + Shortest path between source and target ignoring nodes in 'exclude' + + Raises + ------ + NetworkXNoPath + If there is no path or if nodes are adjacent and have only one path + between them + + Notes + ----- + This function and its helper are originally from + networkx.algorithms.shortest_paths.unweighted and are modified to + accept the extra parameter 'exclude', which is a container for nodes + already used in other paths that should be ignored. + + References + ---------- + .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + http://eclectic.ss.uci.edu/~drwhite/working.pdf + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, exclude) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target, exclude): + # does BFS from both source and target and meets in the middle + # excludes nodes in the container "exclude" from the search + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + level = 0 + + while forward_fringe and reverse_fringe: + # Make sure that we iterate one step forward and one step backwards + # thus source and target will only trigger "found path" when they are + # adjacent and then they can be safely included in the container 'exclude' + level += 1 + if level % 2 != 0: + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w in exclude: + continue + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + return pred, succ, w # found path + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w in exclude: + continue + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + return pred, succ, w # found path + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py 
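Before the next file (`distance_measures.py`), a quick cross-check of the lower-bound guarantee this file keeps stressing; a sketch, where `nx.node_connectivity` is the exact flow-based routine from the main library:

```python
import networkx as nx
from networkx.algorithms import approximation

G = nx.icosahedral_graph()  # known to be 5-node-connected

approx_k = approximation.node_connectivity(G)
exact_k = nx.node_connectivity(G)  # exact, max-flow based computation

# The heuristic may undershoot but never overshoots the true value.
assert approx_k <= exact_k == 5
```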
b/MLPY/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..9b817b3317c8d793a66c44b1ae98f29417fb777f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py @@ -0,0 +1,141 @@ +"""Distance measures approximated metrics.""" + +import networkx as nx +from networkx.utils.decorators import py_random_state + +__all__ = ["diameter"] + + +@py_random_state(1) +@nx._dispatch(name="approximate_diameter") +def diameter(G, seed=None): + """Returns a lower bound on the diameter of the graph G. + + The function computes a lower bound on the diameter (i.e., the maximum eccentricity) + of a directed or undirected graph G. The procedure used varies depending on the graph + being directed or not. + + If G is an `undirected` graph, then the function uses the `2-sweep` algorithm [1]_. + The main idea is to pick the farthest node from a random node and return its eccentricity. + + Otherwise, if G is a `directed` graph, the function uses the `2-dSweep` algorithm [2]_, + The procedure starts by selecting a random source node $s$ from which it performs a + forward and a backward BFS. Let $a_1$ and $a_2$ be the farthest nodes in the forward and + backward cases, respectively. Then, it computes the backward eccentricity of $a_1$ using + a backward BFS and the forward eccentricity of $a_2$ using a forward BFS. + Finally, it returns the best lower bound between the two. + + In both cases, the time complexity is linear with respect to the size of G. + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + d : integer + Lower Bound on the Diameter of G + + Raises + ------ + NetworkXError + If the graph is empty or + If the graph is undirected and not connected or + If the graph is directed and not strongly connected. + + See Also + -------- + networkx.algorithms.distance_measures.diameter + + References + ---------- + .. [1] Magnien, Clémence, Matthieu Latapy, and Michel Habib. + *Fast computation of empirically tight bounds for the diameter of massive graphs.* + Journal of Experimental Algorithmics (JEA), 2009. + https://arxiv.org/pdf/0904.2728.pdf + .. [2] Crescenzi, Pierluigi, Roberto Grossi, Leonardo Lanzi, and Andrea Marino. + *On computing the diameter of real-world directed (weighted) graphs.* + International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012. + https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf + """ + # if G is empty + if not G: + raise nx.NetworkXError("Expected non-empty NetworkX graph!") + # if there's only a node + if G.number_of_nodes() == 1: + return 0 + # if G is directed + if G.is_directed(): + return _two_sweep_directed(G, seed) + # else if G is undirected + return _two_sweep_undirected(G, seed) + + +def _two_sweep_undirected(G, seed): + """Helper function for finding a lower bound on the diameter + for undirected Graphs. + + The idea is to pick the farthest node from a random node + and return its eccentricity. + + ``G`` is a NetworkX undirected graph. + + .. 
note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # select a random source node + source = seed.choice(list(G)) + # get the distances to the other nodes + distances = nx.shortest_path_length(G, source) + # if some nodes have not been visited, then the graph is not connected + if len(distances) != len(G): + raise nx.NetworkXError("Graph not connected.") + # take a node that is (one of) the farthest nodes from the source + *_, node = distances + # return the eccentricity of the node + return nx.eccentricity(G, node) + + +def _two_sweep_directed(G, seed): + """Helper function for finding a lower bound on the diameter + for directed Graphs. + + It implements 2-dSweep, the directed version of the 2-sweep algorithm. + The algorithm follows the following steps. + 1. Select a source node $s$ at random. + 2. Perform a forward BFS from $s$ to select a node $a_1$ at the maximum + distance from the source, and compute $LB_1$, the backward eccentricity of $a_1$. + 3. Perform a backward BFS from $s$ to select a node $a_2$ at the maximum + distance from the source, and compute $LB_2$, the forward eccentricity of $a_2$. + 4. Return the maximum between $LB_1$ and $LB_2$. + + ``G`` is a NetworkX directed graph. + + .. note:: + + ``seed`` is a random.Random or numpy.random.RandomState instance + """ + # get a new digraph G' with the edges reversed in the opposite direction + G_reversed = G.reverse() + # select a random source node + source = seed.choice(list(G)) + # compute forward distances from source + forward_distances = nx.shortest_path_length(G, source) + # compute backward distances from source + backward_distances = nx.shortest_path_length(G_reversed, source) + # if either the source can't reach every node or not every node + # can reach the source, then the graph is not strongly connected + n = len(G) + if len(forward_distances) != n or len(backward_distances) != n: + raise nx.NetworkXError("DiGraph not strongly connected.") + # take a node a_1 at the maximum distance from the source in G + *_, a_1 = forward_distances + # take a node a_2 at the maximum distance from the source in G_reversed + *_, a_2 = backward_distances + # return the max between the backward eccentricity of a_1 and the forward eccentricity of a_2 + return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py new file mode 100644 index 0000000000000000000000000000000000000000..97edb172f94bf0bd078b5f92b05c2f757ae79d8c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py @@ -0,0 +1,126 @@ +"""Functions for finding node and edge dominating sets. + +A `dominating set`_ for an undirected graph *G* with vertex set *V* +and edge set *E* is a subset *D* of *V* such that every vertex not in +*D* is adjacent to at least one member of *D*. An `edge dominating set`_ +is a subset *F* of *E* such that every edge not in *F* is +incident to an endpoint of at least one edge in *F*. + +.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set +.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set + +""" +import networkx as nx + +from ...utils import not_implemented_for +from ..matching import maximal_matching + +__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"] + + +# TODO Why doesn't this algorithm work for directed graphs? 
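Before the dominating-set functions below, a quick numeric check of the 2-sweep routines defined above; a sketch, where the cycle is chosen because every node has the same eccentricity, so the bound is tight:

```python
import networkx as nx
from networkx.algorithms import approximation

G = nx.cycle_graph(11)  # every node has eccentricity 5

lower_bound = approximation.diameter(G, seed=1)
exact = nx.diameter(G)

# 2-sweep returns the eccentricity of some node, hence a valid lower
# bound on the diameter; on a cycle it is exact.
assert lower_bound <= exact == 5
```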
+@not_implemented_for("directed") +@nx._dispatch(node_attrs="weight") +def min_weighted_dominating_set(G, weight=None): + r"""Returns a dominating set that approximates the minimum weight node + dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph. + + weight : string + The node attribute storing the weight of an node. If provided, + the node attribute with this key must be a number for each + node. If not provided, each node is assumed to have weight one. + + Returns + ------- + min_weight_dominating_set : set + A set of nodes, the sum of whose weights is no more than `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of + each node in the graph and `w(V^*)` denotes the sum of the + weights of each node in the minimum weight dominating set. + + Notes + ----- + This algorithm computes an approximate minimum weighted dominating + set for the graph `G`. The returned solution has weight `(\log + w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each + node in the graph and `w(V^*)` denotes the sum of the weights of + each node in the minimum weight dominating set for the graph. + + This implementation of the algorithm runs in $O(m)$ time, where $m$ + is the number of edges in the graph. + + References + ---------- + .. [1] Vazirani, Vijay V. + *Approximation Algorithms*. + Springer Science & Business Media, 2001. + + """ + # The unique dominating set for the null graph is the empty set. + if len(G) == 0: + return set() + + # This is the dominating set that will eventually be returned. + dom_set = set() + + def _cost(node_and_neighborhood): + """Returns the cost-effectiveness of greedily choosing the given + node. + + `node_and_neighborhood` is a two-tuple comprising a node and its + closed neighborhood. + + """ + v, neighborhood = node_and_neighborhood + return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set) + + # This is a set of all vertices not already covered by the + # dominating set. + vertices = set(G) + # This is a dictionary mapping each node to the closed neighborhood + # of that node. + neighborhoods = {v: {v} | set(G[v]) for v in G} + + # Continue until all vertices are adjacent to some node in the + # dominating set. + while vertices: + # Find the most cost-effective node to add, along with its + # closed neighborhood. + dom_node, min_set = min(neighborhoods.items(), key=_cost) + # Add the node to the dominating set and reduce the remaining + # set of nodes to cover. + dom_set.add(dom_node) + del neighborhoods[dom_node] + vertices -= min_set + + return dom_set + + +@nx._dispatch +def min_edge_dominating_set(G): + r"""Returns minimum cardinality edge dominating set. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_edge_dominating_set : set + Returns a set of dominating edges whose size is no more than 2 * OPT. + + Notes + ----- + The algorithm computes an approximate solution to the edge dominating set + problem. The result is no more than 2 * OPT in terms of size of the set. + Runtime of the algorithm is $O(|E|)$. 
+    """
+    if not G:
+        raise ValueError("Expected non-empty NetworkX graph!")
+    return maximal_matching(G)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5df6cc686c381c954b056e242b10ab8b7a164f5
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py
@@ -0,0 +1,369 @@
+""" Fast approximation for k-component structure
+"""
+import itertools
+from collections import defaultdict
+from collections.abc import Mapping
+from functools import cached_property
+
+import networkx as nx
+from networkx.algorithms.approximation import local_node_connectivity
+from networkx.exception import NetworkXError
+from networkx.utils import not_implemented_for
+
+__all__ = ["k_components"]
+
+
+@not_implemented_for("directed")
+@nx._dispatch(name="approximate_k_components")
+def k_components(G, min_density=0.95):
+    r"""Returns the approximate k-component structure of a graph G.
+
+    A `k`-component is a maximal subgraph of a graph G that has node
+    connectivity at least `k`: we need to remove at least `k` nodes to break
+    it into more components. `k`-components have an inherent hierarchical
+    structure because they are nested in terms of connectivity: a connected
+    graph can contain several 2-components, each of which can contain
+    one or more 3-components, and so forth.
+
+    This implementation is based on the fast heuristics to approximate
+    the `k`-component structure of a graph [1]_, which are in turn based on
+    a fast approximation algorithm for finding good lower bounds of the number
+    of node independent paths between two nodes [2]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    min_density : float
+        Density relaxation threshold. Default value 0.95
+
+    Returns
+    -------
+    k_components : dict
+        Dictionary with connectivity level `k` as key and a list of
+        sets of nodes that form a k-component of level `k` as values.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If G is directed.
+
+    Examples
+    --------
+    >>> # Petersen graph has 10 nodes and it is triconnected, thus all
+    >>> # nodes are in a single component on all three connectivity levels
+    >>> from networkx.algorithms import approximation as apxa
+    >>> G = nx.petersen_graph()
+    >>> k_components = apxa.k_components(G)
+
+    Notes
+    -----
+    The logic of the approximation algorithm for computing the `k`-component
+    structure [1]_ is based on repeatedly applying simple and fast algorithms
+    for `k`-cores and biconnected components in order to narrow down the
+    number of pairs of nodes over which we have to compute White and Newman's
+    approximation algorithm for finding node independent paths [2]_. More
+    formally, this algorithm is based on Whitney's theorem, which states
+    an inclusion relation among node connectivity, edge connectivity, and
+    minimum degree for any graph G. This theorem implies that every
+    `k`-component is nested inside a `k`-edge-component, which, in turn,
+    is contained in a `k`-core. Thus, this algorithm computes node independent
+    paths among pairs of nodes in each biconnected part of each `k`-core,
+    and repeats this procedure for each `k` from 3 to the maximal core number
+    of a node in the input graph.
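The two dominating-set routines above are self-contained, so a brief usage sketch fits here before the `k_components` notes continue below; the assertions simply restate the defining properties:

```python
import networkx as nx
from networkx.algorithms import approximation

G = nx.petersen_graph()

# Greedy node dominating set; with no weight attribute every node costs 1.
D = approximation.min_weighted_dominating_set(G)
assert all(v in D or any(u in D for u in G[v]) for v in G)

# Edge dominating set via a maximal matching (within 2 * OPT).
F = approximation.min_edge_dominating_set(G)
endpoints = {v for edge in F for v in edge}
# Every edge of G is incident to an endpoint of some edge in F.
assert all(u in endpoints or v in endpoints for u, v in G.edges())
```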
+ + Because, in practice, many nodes of the core of level `k` inside a + bicomponent actually are part of a component of level k, the auxiliary + graph needed for the algorithm is likely to be very dense. Thus, we use + a complement graph data structure (see `AntiGraph`) to save memory. + AntiGraph only stores information of the edges that are *not* present + in the actual auxiliary graph. When applying algorithms to this + complement graph data structure, it behaves as if it were the dense + version. + + See also + -------- + k_components + + References + ---------- + .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for + Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035 + https://www.santafe.edu/research/results/working-papers/fast-approximation-algorithms-for-finding-node-ind + + .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + https://doi.org/10.2307/3088904 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values + k_components = defaultdict(list) + # make a few functions local for speed + node_connectivity = local_node_connectivity + k_core = nx.k_core + core_number = nx.core_number + biconnected_components = nx.biconnected_components + combinations = itertools.combinations + # Exact solution for k = {1,2} + # There is a linear time algorithm for triconnectivity, if we had an + # implementation available we could start from k = 4. + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + for bicomponent in nx.biconnected_components(G): + # avoid considering dyads as bicomponents + bicomp = set(bicomponent) + if len(bicomp) > 2: + k_components[2].append(bicomp) + # There is no k-component of k > maximum core number + # \kappa(G) <= \lambda(G) <= \delta(G) + g_cnumber = core_number(G) + max_core = max(g_cnumber.values()) + for k in range(3, max_core + 1): + C = k_core(G, k, core_number=g_cnumber) + for nodes in biconnected_components(C): + # Build a subgraph SG induced by the nodes that are part of + # each biconnected component of the k-core subgraph C. 
+ if len(nodes) < k: + continue + SG = G.subgraph(nodes) + # Build auxiliary graph + H = _AntiGraph() + H.add_nodes_from(SG.nodes()) + for u, v in combinations(SG, 2): + K = node_connectivity(SG, u, v, cutoff=k) + if k > K: + H.add_edge(u, v) + for h_nodes in biconnected_components(H): + if len(h_nodes) <= k: + continue + SH = H.subgraph(h_nodes) + for Gc in _cliques_heuristic(SG, SH, k, min_density): + for k_nodes in biconnected_components(Gc): + Gk = nx.k_core(SG.subgraph(k_nodes), k) + if len(Gk) <= k: + continue + k_components[k].append(set(Gk)) + return k_components + + +def _cliques_heuristic(G, H, k, min_density): + h_cnumber = nx.core_number(H) + for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)): + cands = {n for n, c in h_cnumber.items() if c == c_value} + # Skip checking for overlap for the highest core value + if i == 0: + overlap = False + else: + overlap = set.intersection( + *[{x for x in H[n] if x not in cands} for n in cands] + ) + if overlap and len(overlap) < k: + SH = H.subgraph(cands | overlap) + else: + SH = H.subgraph(cands) + sh_cnumber = nx.core_number(SH) + SG = nx.k_core(G.subgraph(SH), k) + while not (_same(sh_cnumber) and nx.density(SH) >= min_density): + # This subgraph must be writable => .copy() + SH = H.subgraph(SG).copy() + if len(SH) <= k: + break + sh_cnumber = nx.core_number(SH) + sh_deg = dict(SH.degree()) + min_deg = min(sh_deg.values()) + SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg) + SG = nx.k_core(G.subgraph(SH), k) + else: + yield SG + + +def _same(measure, tol=0): + vals = set(measure.values()) + if (max(vals) - min(vals)) <= tol: + return True + return False + + +class _AntiGraph(nx.Graph): + """ + Class for complement graphs. + + The main goal is to be able to work with big and dense graphs with + a low memory footprint. + + In this class you add the edges that *do not exist* in the dense graph, + the report methods of the class return the neighbors, the edges and + the degree as if it was the dense graph. Thus it's possible to use + an instance of this class with some of NetworkX functions. In this + case we only use k-core, connected_components, and biconnected_components. + """ + + all_edge_dict = {"weight": 1} + + def single_edge_dict(self): + return self.all_edge_dict + + edge_attr_dict_factory = single_edge_dict # type: ignore[assignment] + + def __getitem__(self, n): + """Returns a dict of neighbors of node n in the dense graph. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + """ + all_edge_dict = self.all_edge_dict + return { + node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n} + } + + def neighbors(self, n): + """Returns an iterator over all neighbors of node n in the + dense graph. 
+ """ + try: + return iter(set(self._adj) - set(self._adj[n]) - {n}) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + class AntiAtlasView(Mapping): + """An adjacency inner dict for AntiGraph""" + + def __init__(self, graph, node): + self._graph = graph + self._atlas = graph._adj[node] + self._node = node + + def __len__(self): + return len(self._graph) - len(self._atlas) - 1 + + def __iter__(self): + return (n for n in self._graph if n not in self._atlas and n != self._node) + + def __getitem__(self, nbr): + nbrs = set(self._graph._adj) - set(self._atlas) - {self._node} + if nbr in nbrs: + return self._graph.all_edge_dict + raise KeyError(nbr) + + class AntiAdjacencyView(AntiAtlasView): + """An adjacency outer dict for AntiGraph""" + + def __init__(self, graph): + self._graph = graph + self._atlas = graph._adj + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._graph) + + def __getitem__(self, node): + if node not in self._graph: + raise KeyError(node) + return self._graph.AntiAtlasView(self._graph, node) + + @cached_property + def adj(self): + return self.AntiAdjacencyView(self) + + def subgraph(self, nodes): + """This subgraph method returns a full AntiGraph. Not a View""" + nodes = set(nodes) + G = _AntiGraph() + G.add_nodes_from(nodes) + for n in G: + Gnbrs = G.adjlist_inner_dict_factory() + G._adj[n] = Gnbrs + for nbr, d in self._adj[n].items(): + if nbr in G._adj: + Gnbrs[nbr] = d + G._adj[nbr][n] = d + G.graph = self.graph + return G + + class AntiDegreeView(nx.reportviews.DegreeView): + def __iter__(self): + all_nodes = set(self._succ) + for n in self._nodes: + nbrs = all_nodes - set(self._succ[n]) - {n} + yield (n, len(nbrs)) + + def __getitem__(self, n): + nbrs = set(self._succ) - set(self._succ[n]) - {n} + # AntiGraph is a ThinGraph so all edges have weight 1 + return len(nbrs) + (n in nbrs) + + @cached_property + def degree(self): + """Returns an iterator for (node, degree) and degree for single node. + + The node degree is the number of edges adjacent to the node. + + Parameters + ---------- + nbunch : iterable container, optional (default=all nodes) + A container of nodes. The container will be iterated + through once. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + deg: + Degree of the node, if a single node is passed as argument. + nd_iter : an iterator + The iterator returns two-tuples of (node, degree). + + See Also + -------- + degree + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1])) + [(0, 1), (1, 2)] + + """ + return self.AntiDegreeView(self) + + def adjacency(self): + """Returns an iterator of (node, adjacency set) tuples for all nodes + in the dense graph. + + This is the fastest way to look at every edge. + For directed graphs, only outgoing adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator of (node, adjacency set) for all nodes in + the graph. 
+ + """ + for n in self._adj: + yield (n, set(self._adj) - set(self._adj[n]) - {n}) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/matching.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..8f1c35016665a0b22d51c4d9d66fe8bf3a8a593a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/matching.py @@ -0,0 +1,43 @@ +""" +************** +Graph Matching +************** + +Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent +edges; that is, no two edges share a common vertex. + +`Wikipedia: Matching `_ +""" +import networkx as nx + +__all__ = ["min_maximal_matching"] + + +@nx._dispatch +def min_maximal_matching(G): + r"""Returns the minimum maximal matching of G. That is, out of all maximal + matchings of the graph G, the smallest is returned. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + min_maximal_matching : set + Returns a set of edges such that no two edges share a common endpoint + and every edge not in the set shares some common endpoint in the set. + Cardinality will be 2*OPT in the worst case. + + Notes + ----- + The algorithm computes an approximate solution for the minimum maximal + cardinality matching problem. The solution is no more than 2 * OPT in size. + Runtime is $O(|E|)$. + + References + ---------- + .. [1] Vazirani, Vijay Approximation Algorithms (2001) + """ + return nx.maximal_matching(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/maxcut.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/maxcut.py new file mode 100644 index 0000000000000000000000000000000000000000..ec62b346bb499ce7c1fff499b7482eaa74240382 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/maxcut.py @@ -0,0 +1,113 @@ +import networkx as nx +from networkx.utils.decorators import not_implemented_for, py_random_state + +__all__ = ["randomized_partitioning", "one_exchange"] + + +@not_implemented_for("directed", "multigraph") +@py_random_state(1) +@nx._dispatch(edge_attrs="weight") +def randomized_partitioning(G, seed=None, p=0.5, weight=None): + """Compute a random partitioning of the graph nodes and its cut value. + + A partitioning is calculated by observing each node + and deciding to add it to the partition with probability `p`, + returning a random cut and its corresponding value (the + sum of weights of edges connecting different partitions). + + Parameters + ---------- + G : NetworkX graph + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + p : scalar + Probability for each node to be part of the first partition. + Should be in [0,1] + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_size : scalar + Value of the minimum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a minimum cut. 
+ """ + cut = {node for node in G.nodes() if seed.random() < p} + cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + partition = (cut, G.nodes - cut) + return cut_size, partition + + +def _swap_node_partition(cut, node): + return cut - {node} if node in cut else cut.union({node}) + + +@not_implemented_for("directed", "multigraph") +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def one_exchange(G, initial_cut=None, seed=None, weight=None): + """Compute a partitioning of the graphs nodes and the corresponding cut value. + + Use a greedy one exchange strategy to find a locally maximal cut + and its value, it works by finding the best node (one that gives + the highest gain to the cut value) to add to the current cut + and repeats this process until no improvement can be made. + + Parameters + ---------- + G : networkx Graph + Graph to find a maximum cut for. + + initial_cut : set + Cut to use as a starting point. If not supplied the algorithm + starts with an empty cut. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + cut_value : scalar + Value of the maximum cut. + + partition : pair of node sets + A partitioning of the nodes that defines a maximum cut. + """ + if initial_cut is None: + initial_cut = set() + cut = set(initial_cut) + current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight) + while True: + nodes = list(G.nodes()) + # Shuffling the nodes ensures random tie-breaks in the following call to max + seed.shuffle(nodes) + best_node_to_swap = max( + nodes, + key=lambda v: nx.algorithms.cut_size( + G, _swap_node_partition(cut, v), weight=weight + ), + default=None, + ) + potential_cut = _swap_node_partition(cut, best_node_to_swap) + potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight) + + if potential_cut_size > current_cut_size: + cut = potential_cut + current_cut_size = potential_cut_size + else: + break + + partition = (cut, G.nodes - cut) + return current_cut_size, partition diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/ramsey.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/ramsey.py new file mode 100644 index 0000000000000000000000000000000000000000..6f45c4f49717a1fe77da7b75d974eb5a55546642 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/ramsey.py @@ -0,0 +1,52 @@ +""" +Ramsey numbers. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = ["ramsey_R2"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def ramsey_R2(G): + r"""Compute the largest clique and largest independent set in `G`. + + This can be used to estimate bounds for the 2-color + Ramsey number `R(2;s,t)` for `G`. + + This is a recursive implementation which could run into trouble + for large recursions. Note that self-loop edges are ignored. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + Returns + ------- + max_pair : (set, set) tuple + Maximum clique, Maximum independent set. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed or is a multigraph. 
+ """ + if not G: + return set(), set() + + node = arbitrary_element(G) + nbrs = (nbr for nbr in nx.all_neighbors(G, node) if nbr != node) + nnbrs = nx.non_neighbors(G, node) + c_1, i_1 = ramsey_R2(G.subgraph(nbrs).copy()) + c_2, i_2 = ramsey_R2(G.subgraph(nnbrs).copy()) + + c_1.add(node) + i_2.add(node) + # Choose the larger of the two cliques and the larger of the two + # independent sets, according to cardinality. + return max(c_1, c_2, key=len), max(i_1, i_2, key=len) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/steinertree.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/steinertree.py new file mode 100644 index 0000000000000000000000000000000000000000..50aea045feae0e1ee4b509a5d6038e43913dec13 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/steinertree.py @@ -0,0 +1,220 @@ +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["metric_closure", "steiner_tree"] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def metric_closure(G, weight="weight"): + """Return the metric closure of a graph. + + The metric closure of a graph *G* is the complete graph in which each edge + is weighted by the shortest path distance between the nodes in *G* . + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + NetworkX graph + Metric closure of the graph `G`. + + """ + M = nx.Graph() + + Gnodes = set(G) + + # check for connected graph while processing first node + all_paths_iter = nx.all_pairs_dijkstra(G, weight=weight) + u, (distance, path) = next(all_paths_iter) + if Gnodes - set(distance): + msg = "G is not a connected graph. metric_closure is not defined." + raise nx.NetworkXError(msg) + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + # first node done -- now process the rest + for u, (distance, path) in all_paths_iter: + Gnodes.remove(u) + for v in Gnodes: + M.add_edge(u, v, distance=distance[v], path=path[v]) + + return M + + +def _mehlhorn_steiner_tree(G, terminal_nodes, weight): + paths = nx.multi_source_dijkstra_path(G, terminal_nodes) + + d_1 = {} + s = {} + for v in G.nodes(): + s[v] = paths[v][0] + d_1[(v, s[v])] = len(paths[v]) - 1 + + # G1-G4 names match those from the Mehlhorn 1988 paper. + G_1_prime = nx.Graph() + for u, v, data in G.edges(data=True): + su, sv = s[u], s[v] + weight_here = d_1[(u, su)] + data.get(weight, 1) + d_1[(v, sv)] + if not G_1_prime.has_edge(su, sv): + G_1_prime.add_edge(su, sv, weight=weight_here) + else: + new_weight = min(weight_here, G_1_prime[su][sv][weight]) + G_1_prime.add_edge(su, sv, weight=new_weight) + + G_2 = nx.minimum_spanning_edges(G_1_prime, data=True) + + G_3 = nx.Graph() + for u, v, d in G_2: + path = nx.shortest_path(G, u, v, weight) + for n1, n2 in pairwise(path): + G_3.add_edge(n1, n2) + + G_3_mst = list(nx.minimum_spanning_edges(G_3, data=False)) + if G.is_multigraph(): + G_3_mst = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in G_3_mst + ) + G_4 = G.edge_subgraph(G_3_mst).copy() + _remove_nonterminal_leaves(G_4, terminal_nodes) + return G_4.edges() + + +def _kou_steiner_tree(G, terminal_nodes, weight): + # H is the subgraph induced by terminal_nodes in the metric closure M of G. + M = metric_closure(G, weight=weight) + H = M.subgraph(terminal_nodes) + + # Use the 'distance' attribute of each edge provided by M. 
+    mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True)
+
+    # Create an iterator over each edge in each shortest path; repeats are okay
+    mst_all_edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
+    if G.is_multigraph():
+        mst_all_edges = (
+            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight]))
+            for u, v in mst_all_edges
+        )
+
+    # Find the MST again, over this new set of edges
+    G_S = G.edge_subgraph(mst_all_edges)
+    T_S = nx.minimum_spanning_edges(G_S, weight="weight", data=False)
+
+    # Leaf nodes that are not terminal might still remain; remove them here
+    T_H = G.edge_subgraph(T_S).copy()
+    _remove_nonterminal_leaves(T_H, terminal_nodes)
+
+    return T_H.edges()
+
+
+def _remove_nonterminal_leaves(G, terminals):
+    terminals_set = set(terminals)
+    for n in list(G.nodes):
+        if n not in terminals_set and G.degree(n) == 1:
+            G.remove_node(n)
+
+
+ALGORITHMS = {
+    "kou": _kou_steiner_tree,
+    "mehlhorn": _mehlhorn_steiner_tree,
+}
+
+
+@not_implemented_for("directed")
+@nx._dispatch(edge_attrs="weight")
+def steiner_tree(G, terminal_nodes, weight="weight", method=None):
+    r"""Return an approximation to the minimum Steiner tree of a graph.
+
+    The minimum Steiner tree of `G` w.r.t. a set of `terminal_nodes` (also *S*)
+    is a tree within `G` that spans those nodes and has minimum size (sum of
+    edge weights) among all such trees.
+
+    The approximation algorithm is specified with the `method` keyword
+    argument. Both available algorithms produce a tree whose weight is
+    within a ``(2 - (2 / l))`` factor of the weight of the optimal Steiner tree,
+    where ``l`` is the minimum number of leaf nodes across all possible Steiner
+    trees.
+
+    * ``"kou"`` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning tree of
+      the subgraph of the metric closure of *G* induced by the terminal nodes,
+      where the metric closure of *G* is the complete graph in which each edge is
+      weighted by the shortest path distance between the nodes in *G*.
+
+    * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et al.'s
+      algorithm, beginning by finding the closest terminal node for each
+      non-terminal. This data is used to create a complete graph containing only
+      the terminal nodes, in which each edge is weighted with the shortest path
+      distance between them. The algorithm then proceeds in the same way as Kou
+      et al.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    terminal_nodes : list
+        A list of terminal nodes for which the minimum Steiner tree is
+        to be found.
+
+    weight : string (default = 'weight')
+        Use the edge attribute specified by this string as the edge weight.
+        Any edge attribute not present defaults to 1.
+
+    method : string, optional (default = 'kou')
+        The algorithm to use to approximate the Steiner tree.
+        Supported options: 'kou', 'mehlhorn'.
+        Other inputs produce a ValueError.
+
+    Returns
+    -------
+    NetworkX graph
+        Approximation to the minimum Steiner tree of `G` induced by
+        `terminal_nodes`.
+
+    Notes
+    -----
+    For multigraphs, the edge between two nodes with minimum weight is the
+    edge put into the Steiner tree.
+
+    References
+    ----------
+    .. [1] Steiner_tree_problem on Wikipedia.
+       https://en.wikipedia.org/wiki/Steiner_tree_problem
+    .. [2] Kou, L., G. Markowsky, and L. Berman. 1981.
+       ‘A Fast Algorithm for Steiner Trees’.
+       Acta Informatica 15 (2): 141–45.
+       https://doi.org/10.1007/BF00288961.
+    .. [3] Mehlhorn, Kurt. 1988.
+       ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’.
+ Information Processing Letters 27 (3): 125–28. + https://doi.org/10.1016/0020-0190(88)90066-X. + """ + if method is None: + import warnings + + msg = ( + "steiner_tree will change default method from 'kou' to 'mehlhorn' " + "in version 3.2.\nSet the `method` kwarg to remove this warning." + ) + warnings.warn(msg, FutureWarning, stacklevel=4) + method = "kou" + + try: + algo = ALGORITHMS[method] + except KeyError as e: + msg = f"{method} is not a valid choice for an algorithm." + raise ValueError(msg) from e + + edges = algo(G, terminal_nodes, weight) + # For multigraph we should add the minimal weight edge keys + if G.is_multigraph(): + edges = ( + (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in edges + ) + T = G.edge_subgraph(edges) + return T diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fcdc274847b13ef4e2f96d7d6158c847ae967b6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffc3e066c908f016e21d27e4148c2ee53f34bc3d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52b161823fde27f59b2e8354d23dc2cdb0248728 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e316016b3dfc3a0f48287cd80b5e42f6fb9fd59c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55dcfb90492626e9eb853afabaa97816f1d8b7ef Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25542a6231f6c44281aecf7e5e116c8aa653f9ab Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdace54d5e89c87d40d1be29abaaaa74267acd15 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..926a67f9e279216d90d5bb25d0af6a5e3cafb711 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb7a8b3e35dae96b7822334374f3f5fc04e40988 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cc31c58cbe2dda0bb9f6ac97ce4ae08c32e69af Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8916076069c71b8b42723496508260b2c8c11865 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed59cd15309f53ce19c4afda9a572b3702a20dda Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e75daad2ebfb98e728e94ca43edc768b005661d
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17f9d71f3579cd952590fefe9b98647ef65e9312
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
new file mode 100644
index 0000000000000000000000000000000000000000..5eab5c1ee79408c9f90a1993415a6c3d7d957141
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
@@ -0,0 +1,41 @@
+import networkx as nx
+from networkx.algorithms.approximation import average_clustering
+
+# This approximation has to be exact in regular graphs
+# with no triangles or with all possible triangles.
+
+
+def test_petersen():
+    # Actual coefficient is 0
+    G = nx.petersen_graph()
+    assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G)
+
+
+def test_petersen_seed():
+    # Actual coefficient is 0
+    G = nx.petersen_graph()
+    assert average_clustering(G, trials=len(G) // 2, seed=1) == nx.average_clustering(G)
+
+
+def test_tetrahedral():
+    # Actual coefficient is 1
+    G = nx.tetrahedral_graph()
+    assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G)
+
+
+def test_dodecahedral():
+    # Actual coefficient is 0
+    G = nx.dodecahedral_graph()
+    assert average_clustering(G, trials=len(G) // 2) == nx.average_clustering(G)
+
+
+def test_empty():
+    G = nx.empty_graph(5)
+    assert average_clustering(G, trials=len(G) // 2) == 0
+
+
+def test_complete():
+    G = nx.complete_graph(5)
+    assert average_clustering(G, trials=len(G) // 2) == 1
+    G = nx.complete_graph(7)
+    assert average_clustering(G, trials=len(G) // 2) == 1
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebda285b7d8c887a37cc7064cb41a10acdb074d5
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py
@@ -0,0 +1,113 @@
+"""Unit tests for the :mod:`networkx.algorithms.approximation.clique` module."""
+
+
+import networkx as nx
+from networkx.algorithms.approximation import (
+    clique_removal,
+    large_clique_size,
+    max_clique,
+    maximum_independent_set,
+)
+
+
+def is_independent_set(G, nodes):
+    """Returns True if and only if `nodes` is an independent set in `G`.
+
+    `G` is a NetworkX graph. `nodes` is an iterable of nodes in
+    `G`.
+
+    """
+    return G.subgraph(nodes).number_of_edges() == 0
+
+
+def is_clique(G, nodes):
+    """Returns True if and only if `nodes` is a clique
+    in `G`.
+
+    `G` is an undirected simple graph. `nodes` is an iterable of
+    nodes in `G`.
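+
+    A quick sanity check on two small graphs:
+
+    >>> is_clique(nx.complete_graph(3), range(3))
+    True
+    >>> is_clique(nx.path_graph(3), range(3))
+    False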
+ + """ + H = G.subgraph(nodes) + n = len(H) + return H.number_of_edges() == n * (n - 1) // 2 + + +class TestCliqueRemoval: + """Unit tests for the + :func:`~networkx.algorithms.approximation.clique_removal` function. + + """ + + def test_trivial_graph(self): + G = nx.trivial_graph() + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + # In fact, we should only have 1-cliques, that is, singleton nodes. + assert all(len(clique) == 1 for clique in cliques) + + def test_complete_graph(self): + G = nx.complete_graph(10) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + def test_barbell_graph(self): + G = nx.barbell_graph(10, 5) + independent_set, cliques = clique_removal(G) + assert is_independent_set(G, independent_set) + assert all(is_clique(G, clique) for clique in cliques) + + +class TestMaxClique: + """Unit tests for the :func:`networkx.algorithms.approximation.max_clique` + function. + + """ + + def test_null_graph(self): + G = nx.null_graph() + assert len(max_clique(G)) == 0 + + def test_complete_graph(self): + graph = nx.complete_graph(30) + # this should return the entire graph + mc = max_clique(graph) + assert 30 == len(mc) + + def test_maximal_by_cardinality(self): + """Tests that the maximal clique is computed according to maximum + cardinality of the sets. + + For more information, see pull request #1531. + + """ + G = nx.complete_graph(5) + G.add_edge(4, 5) + clique = max_clique(G) + assert len(clique) > 1 + + G = nx.lollipop_graph(30, 2) + clique = max_clique(G) + assert len(clique) > 2 + + +def test_large_clique_size(): + G = nx.complete_graph(9) + nx.add_cycle(G, [9, 10, 11]) + G.add_edge(8, 9) + G.add_edge(1, 12) + G.add_node(13) + + assert large_clique_size(G) == 9 + G.remove_node(5) + assert large_clique_size(G) == 8 + G.remove_edge(2, 3) + assert large_clique_size(G) == 7 + + +def test_independent_set(): + # smoke test + G = nx.Graph() + assert len(maximum_independent_set(G)) == 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..887db20bcaef8dd2641c64e963c789234aecbb20 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py @@ -0,0 +1,199 @@ +import pytest + +import networkx as nx +from networkx.algorithms import approximation as approx + + +def test_global_node_connectivity(): + # Figure 1 chapter on Connectivity + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + assert 2 == approx.local_node_connectivity(G, 1, 11) + assert 2 == approx.node_connectivity(G) + assert 2 == approx.node_connectivity(G, 1, 11) + + +def test_white_harary1(): + # Figure 1b white and harary (2001) + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + assert 1 == 
approx.node_connectivity(G) + + +def test_complete_graphs(): + for n in range(5, 25, 5): + G = nx.complete_graph(n) + assert n - 1 == approx.node_connectivity(G) + assert n - 1 == approx.node_connectivity(G, 0, 3) + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + assert 0 == approx.node_connectivity(G) + assert 0 == approx.node_connectivity(G, 0, 3) + + +def test_petersen(): + G = nx.petersen_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +# Approximation fails with tutte graph +# def test_tutte(): +# G = nx.tutte_graph() +# assert_equal(3, approx.node_connectivity(G)) + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + assert 3 == approx.node_connectivity(G) + assert 3 == approx.node_connectivity(G, 0, 5) + + +def test_octahedral(): + G = nx.octahedral_graph() + assert 4 == approx.node_connectivity(G) + assert 4 == approx.node_connectivity(G, 0, 5) + + +# Approximation can fail with icosahedral graph depending +# on iteration order. +# def test_icosahedral(): +# G=nx.icosahedral_graph() +# assert_equal(5, approx.node_connectivity(G)) +# assert_equal(5, approx.node_connectivity(G, 0, 5)) + + +def test_only_source(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, s=0) + + +def test_only_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, t=0) + + +def test_missing_source(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 10, 1) + + +def test_missing_target(): + G = nx.path_graph(4) + pytest.raises(nx.NetworkXError, approx.node_connectivity, G, 1, 10) + + +def test_source_equals_target(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, approx.local_node_connectivity, G, 0, 0) + + +def test_directed_node_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = nx.cycle_graph(10).to_directed() # 2 reciprocal edges + assert 1 == approx.node_connectivity(G) + assert 1 == approx.node_connectivity(G, 1, 4) + assert 2 == approx.node_connectivity(D) + assert 2 == approx.node_connectivity(D, 1, 4) + + +class TestAllPairsNodeConnectivityApprox: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = approx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = approx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = approx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = approx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = 
approx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_cutoff(self): + for G in [self.K10, self.K5, self.K20]: + for mp in [2, 3, 4]: + paths = approx.all_pairs_node_connectivity(G, cutoff=mp) + for source in paths: + for target, K in paths[source].items(): + assert K == mp + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = approx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..81251503c5d55a6a2d50071414ecc6e1e8cc8a67 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py @@ -0,0 +1,60 @@ +"""Unit tests for the :mod:`networkx.algorithms.approximation.distance_measures` module. +""" + +import pytest + +import networkx as nx +from networkx.algorithms.approximation import diameter + + +class TestDiameter: + """Unit tests for the approximate diameter function + :func:`~networkx.algorithms.approximation.distance_measures.diameter`. + """ + + def test_null_graph(self): + """Test empty graph.""" + G = nx.null_graph() + with pytest.raises( + nx.NetworkXError, match="Expected non-empty NetworkX graph!" + ): + diameter(G) + + def test_undirected_non_connected(self): + """Test an undirected disconnected graph.""" + graph = nx.path_graph(10) + graph.remove_edge(3, 4) + with pytest.raises(nx.NetworkXError, match="Graph not connected."): + diameter(graph) + + def test_directed_non_strongly_connected(self): + """Test a directed non strongly connected graph.""" + graph = nx.path_graph(10, create_using=nx.DiGraph()) + with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."): + diameter(graph) + + def test_complete_undirected_graph(self): + """Test a complete undirected graph.""" + graph = nx.complete_graph(10) + assert diameter(graph) == 1 + + def test_complete_directed_graph(self): + """Test a complete directed graph.""" + graph = nx.complete_graph(10, create_using=nx.DiGraph()) + assert diameter(graph) == 1 + + def test_undirected_path_graph(self): + """Test an undirected path graph with 10 nodes.""" + graph = nx.path_graph(10) + assert diameter(graph) == 9 + + def test_directed_path_graph(self): + """Test a directed path graph with 10 nodes.""" + graph = nx.path_graph(10).to_directed() + assert diameter(graph) == 9 + + def test_single_node(self): + """Test a graph which contains just a node.""" + graph = nx.Graph() + graph.add_node(1) + assert diameter(graph) == 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py new file mode 100644 index 0000000000000000000000000000000000000000..6b90d85ecf73bb56370fd92fdec25e3bbbb91ce3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py @@ -0,0 +1,78 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation import ( + min_edge_dominating_set, + min_weighted_dominating_set, +) + + +class TestMinWeightDominatingSet: + def test_min_weighted_dominating_set(self): + graph = nx.Graph() + graph.add_edge(1, 2) + 
graph.add_edge(1, 5) + graph.add_edge(2, 3) + graph.add_edge(2, 5) + graph.add_edge(3, 4) + graph.add_edge(3, 6) + graph.add_edge(5, 6) + + vertices = {1, 2, 3, 4, 5, 6} + # due to ties, this might be hard to test tight bounds + dom_set = min_weighted_dominating_set(graph) + for vertex in vertices - dom_set: + neighbors = set(graph.neighbors(vertex)) + assert len(neighbors & dom_set) > 0, "Non dominating set found!" + + def test_star_graph(self): + """Tests that an approximate dominating set for the star graph, + even when the center node does not have the smallest integer + label, gives just the center node. + + For more information, see #1527. + + """ + # Create a star graph in which the center node has the highest + # label instead of the lowest. + G = nx.star_graph(10) + G = nx.relabel_nodes(G, {0: 9, 9: 0}) + assert min_weighted_dominating_set(G) == {9} + + def test_null_graph(self): + """Tests that the unique dominating set for the null graph is an empty set""" + G = nx.Graph() + assert min_weighted_dominating_set(G) == set() + + def test_min_edge_dominating_set(self): + graph = nx.path_graph(5) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" + + graph = nx.complete_graph(10) + dom_set = min_edge_dominating_set(graph) + + # this is a crappy way to test, but good enough for now. + for edge in graph.edges(): + if edge in dom_set: + continue + else: + u, v = edge + found = False + for dom_edge in dom_set: + found |= u == dom_edge[0] or u == dom_edge[1] + assert found, "Non adjacent edge found!" 
+ + graph = nx.Graph() # empty Networkx graph + with pytest.raises(ValueError, match="Expected non-empty NetworkX graph!"): + min_edge_dominating_set(graph) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py new file mode 100644 index 0000000000000000000000000000000000000000..65ba802171a6b43a5157f12010c8164e5e867eb8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py @@ -0,0 +1,303 @@ +# Test for approximation to k-components algorithm +import pytest + +import networkx as nx +from networkx.algorithms.approximation import k_components +from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same + + +def build_k_number_dict(k_components): + k_num = {} + for k, comps in sorted(k_components.items()): + for comp in comps: + for node in comp: + k_num[node] = k + return k_num + + +## +# Some nice synthetic graphs +## + + +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+ # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +# Helper function + + +def _check_connectivity(G): + result = k_components(G) + for k, components in result.items(): + if k < 3: + continue + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_connectivity(G) + + +def test_example_1(): + G = graph_example_1() + _check_connectivity(G) + + +def test_karate_0(): + G = nx.karate_club_graph() + _check_connectivity(G) + + +def test_karate_1(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + approx_karate_k_num = karate_k_num.copy() + approx_karate_k_num[24] = 2 + approx_karate_k_num[25] = 2 + G = nx.karate_club_graph() + k_comps = k_components(G) + k_num = build_k_number_dict(k_comps) + assert k_num in (karate_k_num, approx_karate_k_num) + + +def test_example_1_detail_3_and_4(): + G = graph_example_1() + result = k_components(G) + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. + assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + # Finally check that the k-components detected have actually node + # connectivity >= k. 
+ for k, components in result.items(): + if k < 3: + continue + for component in components: + K = nx.node_connectivity(G.subgraph(component)) + assert K >= k + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.4, directed=True) + kc = k_components(G) + + +def test_same(): + equal = {"A": 2, "B": 2, "C": 2} + slightly_different = {"A": 2, "B": 1, "C": 2} + different = {"A": 2, "B": 8, "C": 18} + assert _same(equal) + assert not _same(slightly_different) + assert _same(slightly_different, tol=1) + assert not _same(different) + assert not _same(different, tol=4) + + +class TestAntiGraph: + @classmethod + def setup_class(cls): + cls.Gnp = nx.gnp_random_graph(20, 0.8, seed=42) + cls.Anp = _AntiGraph(nx.complement(cls.Gnp)) + cls.Gd = nx.davis_southern_women_graph() + cls.Ad = _AntiGraph(nx.complement(cls.Gd)) + cls.Gk = nx.karate_club_graph() + cls.Ak = _AntiGraph(nx.complement(cls.Gk)) + cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)] + + def test_size(self): + for G, A in self.GA: + n = G.order() + s = len(list(G.edges())) + len(list(A.edges())) + assert s == (n * (n - 1)) / 2 + + def test_degree(self): + for G, A in self.GA: + assert sorted(G.degree()) == sorted(A.degree()) + + def test_core_number(self): + for G, A in self.GA: + assert nx.core_number(G) == nx.core_number(A) + + def test_connected_components(self): + # ccs are same unless isolated nodes or any node has degree=len(G)-1 + # graphs in self.GA avoid this problem + for G, A in self.GA: + gc = [set(c) for c in nx.connected_components(G)] + ac = [set(c) for c in nx.connected_components(A)] + for comp in ac: + assert comp in gc + + def test_adj(self): + for G, A in self.GA: + for n, nbrs in G.adj.items(): + a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items()) + g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items()) + assert a_adj == g_adj + + def test_adjacency(self): + for G, A in self.GA: + a_adj = list(A.adjacency()) + for n, nbrs in G.adjacency(): + assert (n, set(nbrs)) in a_adj + + def test_neighbors(self): + for G, A in self.GA: + node = list(G.nodes())[0] + assert set(G.neighbors(node)) == set(A.neighbors(node)) + + def test_node_not_in_graph(self): + for G, A in self.GA: + node = "non_existent_node" + pytest.raises(nx.NetworkXError, A.neighbors, node) + pytest.raises(nx.NetworkXError, G.neighbors, node) + + def test_degree_thingraph(self): + for G, A in self.GA: + node = list(G.nodes())[0] + nodes = list(G.nodes())[1:4] + assert G.degree(node) == A.degree(node) + assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree()) + # AntiGraph is a ThinGraph, so all the weights are 1 + assert sum(d for n, d in A.degree()) == sum( + d for n, d in A.degree(weight="weight") + ) + assert sum(d for n, d in G.degree(nodes)) == sum( + d for n, d in A.degree(nodes) + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..f50da3d2e07310fc19e1db2bd18fdce23223771c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py @@ -0,0 +1,8 @@ +import networkx as nx +import networkx.algorithms.approximation as a + + +def test_min_maximal_matching(): + # smoke test + G = nx.Graph() + assert len(a.min_maximal_matching(G)) == 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py 
b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py new file mode 100644 index 0000000000000000000000000000000000000000..39291fbf14d5b3d411cdef50e4f367aa67132a1c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py @@ -0,0 +1,82 @@ +import random + +import networkx as nx +from networkx.algorithms.approximation import maxcut + + +def _is_valid_cut(G, set1, set2): + union = set1.union(set2) + assert union == set(G.nodes) + assert len(set1) + len(set2) == G.number_of_nodes() + + +def _cut_is_locally_optimal(G, cut_size, set1): + # test if cut can be locally improved + for i, node in enumerate(set1): + cut_size_without_node = nx.algorithms.cut_size( + G, set1 - {node}, weight="weight" + ) + assert cut_size_without_node <= cut_size + + +def test_random_partitioning(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, seed=5) + _is_valid_cut(G, set1, set2) + + +def test_random_partitioning_all_to_one(): + G = nx.complete_graph(5) + _, (set1, set2) = maxcut.randomized_partitioning(G, p=1) + _is_valid_cut(G, set1, set2) + assert len(set1) == G.number_of_nodes() + assert len(set2) == 0 + + +def test_one_exchange_basic(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = random.randrange(-100, 100, 1) / 10 + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange( + G, initial_cut, weight="weight", seed=5 + ) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + + +def test_one_exchange_optimal(): + # Greedy one exchange should find the optimal solution for this graph (14) + G = nx.Graph() + G.add_edge(1, 2, weight=3) + G.add_edge(1, 3, weight=3) + G.add_edge(1, 4, weight=3) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=5) + + cut_size, (set1, set2) = maxcut.one_exchange(G, weight="weight", seed=5) + + _is_valid_cut(G, set1, set2) + _cut_is_locally_optimal(G, cut_size, set1) + # check global optimality + assert cut_size == 14 + + +def test_negative_weights(): + G = nx.complete_graph(5) + random.seed(5) + for u, v, w in G.edges(data=True): + w["weight"] = -1 * random.random() + + initial_cut = set(random.sample(sorted(G.nodes()), k=5)) + cut_size, (set1, set2) = maxcut.one_exchange(G, initial_cut, weight="weight") + + # make sure it is a valid cut + _is_valid_cut(G, set1, set2) + # check local optimality + _cut_is_locally_optimal(G, cut_size, set1) + # test that all nodes are in the same partition + assert len(set1) == len(G.nodes) or len(set2) == len(G.nodes) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py new file mode 100644 index 0000000000000000000000000000000000000000..32fe1fb8fa917c557954d9da0d960895a6953a11 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py @@ -0,0 +1,31 @@ +import networkx as nx +import networkx.algorithms.approximation as apxa + + +def test_ramsey(): + # this should only find the complete graph + graph = nx.complete_graph(10) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # this trivial graph has no cliques. 
should just find i-sets + graph = nx.trivial_graph() + c, i = apxa.ramsey_R2(graph) + assert c == {0}, "clique not correctly found by ramsey!" + assert i == {0}, "i-set not correctly found by ramsey!" + + graph = nx.barbell_graph(10, 5, nx.Graph()) + c, i = apxa.ramsey_R2(graph) + cdens = nx.density(graph.subgraph(c)) + assert cdens == 1.0, "clique not correctly found by ramsey!" + idens = nx.density(graph.subgraph(i)) + assert idens == 0.0, "i-set not correctly found by ramsey!" + + # add self-loops and test again + graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)]) + cc, ii = apxa.ramsey_R2(graph) + assert cc == c + assert ii == i diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py new file mode 100644 index 0000000000000000000000000000000000000000..d7af1a1af4101d4ac702d95a7104cff466ecdb7b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py @@ -0,0 +1,191 @@ +import pytest + +import networkx as nx +from networkx.algorithms.approximation.steinertree import metric_closure, steiner_tree +from networkx.utils import edges_equal + + +class TestSteinerTree: + @classmethod + def setup_class(cls): + G1 = nx.Graph() + G1.add_edge(1, 2, weight=10) + G1.add_edge(2, 3, weight=10) + G1.add_edge(3, 4, weight=10) + G1.add_edge(4, 5, weight=10) + G1.add_edge(5, 6, weight=10) + G1.add_edge(2, 7, weight=1) + G1.add_edge(7, 5, weight=1) + + G2 = nx.Graph() + G2.add_edge(0, 5, weight=6) + G2.add_edge(1, 2, weight=2) + G2.add_edge(1, 5, weight=3) + G2.add_edge(2, 4, weight=4) + G2.add_edge(3, 5, weight=5) + G2.add_edge(4, 5, weight=1) + + G3 = nx.Graph() + G3.add_edge(1, 2, weight=8) + G3.add_edge(1, 9, weight=3) + G3.add_edge(1, 8, weight=6) + G3.add_edge(1, 10, weight=2) + G3.add_edge(1, 14, weight=3) + G3.add_edge(2, 3, weight=6) + G3.add_edge(3, 4, weight=3) + G3.add_edge(3, 10, weight=2) + G3.add_edge(3, 11, weight=1) + G3.add_edge(4, 5, weight=1) + G3.add_edge(4, 11, weight=1) + G3.add_edge(5, 6, weight=4) + G3.add_edge(5, 11, weight=2) + G3.add_edge(5, 12, weight=1) + G3.add_edge(5, 13, weight=3) + G3.add_edge(6, 7, weight=2) + G3.add_edge(6, 12, weight=3) + G3.add_edge(6, 13, weight=1) + G3.add_edge(7, 8, weight=3) + G3.add_edge(7, 9, weight=3) + G3.add_edge(7, 11, weight=5) + G3.add_edge(7, 13, weight=2) + G3.add_edge(7, 14, weight=4) + G3.add_edge(8, 9, weight=2) + G3.add_edge(9, 14, weight=1) + G3.add_edge(10, 11, weight=2) + G3.add_edge(10, 14, weight=1) + G3.add_edge(11, 12, weight=1) + G3.add_edge(11, 14, weight=7) + G3.add_edge(12, 14, weight=3) + G3.add_edge(12, 15, weight=1) + G3.add_edge(13, 14, weight=4) + G3.add_edge(13, 15, weight=1) + G3.add_edge(14, 15, weight=2) + + cls.G1 = G1 + cls.G2 = G2 + cls.G3 = G3 + cls.G1_term_nodes = [1, 2, 3, 4, 5] + cls.G2_term_nodes = [0, 2, 3] + cls.G3_term_nodes = [1, 3, 5, 6, 8, 10, 11, 12, 13] + + cls.methods = ["kou", "mehlhorn"] + + def test_connected_metric_closure(self): + G = self.G1.copy() + G.add_node(100) + pytest.raises(nx.NetworkXError, metric_closure, G) + + def test_metric_closure(self): + M = metric_closure(self.G1) + mc = [ + (1, 2, {"distance": 10, "path": [1, 2]}), + (1, 3, {"distance": 20, "path": [1, 2, 3]}), + (1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}), + (1, 5, {"distance": 12, "path": [1, 2, 7, 5]}), + (1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}), + (1, 7, {"distance": 11, "path": [1, 2, 7]}), + (2, 3, {"distance": 10, "path": [2, 
3]}), + (2, 4, {"distance": 12, "path": [2, 7, 5, 4]}), + (2, 5, {"distance": 2, "path": [2, 7, 5]}), + (2, 6, {"distance": 12, "path": [2, 7, 5, 6]}), + (2, 7, {"distance": 1, "path": [2, 7]}), + (3, 4, {"distance": 10, "path": [3, 4]}), + (3, 5, {"distance": 12, "path": [3, 2, 7, 5]}), + (3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}), + (3, 7, {"distance": 11, "path": [3, 2, 7]}), + (4, 5, {"distance": 10, "path": [4, 5]}), + (4, 6, {"distance": 20, "path": [4, 5, 6]}), + (4, 7, {"distance": 11, "path": [4, 5, 7]}), + (5, 6, {"distance": 10, "path": [5, 6]}), + (5, 7, {"distance": 1, "path": [5, 7]}), + (6, 7, {"distance": 11, "path": [6, 5, 7]}), + ] + assert edges_equal(list(M.edges(data=True)), mc) + + def test_steiner_tree(self): + valid_steiner_trees = [ + [ + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 7, {"weight": 1}), + (3, 4, {"weight": 10}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + [ + (1, 2, {"weight": 10}), + (2, 3, {"weight": 10}), + (2, 7, {"weight": 1}), + (4, 5, {"weight": 10}), + (5, 7, {"weight": 1}), + ], + ], + [ + [ + (0, 5, {"weight": 6}), + (1, 2, {"weight": 2}), + (1, 5, {"weight": 3}), + (3, 5, {"weight": 5}), + ], + [ + (0, 5, {"weight": 6}), + (4, 2, {"weight": 4}), + (4, 5, {"weight": 1}), + (3, 5, {"weight": 5}), + ], + ], + [ + [ + (1, 10, {"weight": 2}), + (3, 10, {"weight": 2}), + (3, 11, {"weight": 1}), + (5, 12, {"weight": 1}), + (6, 13, {"weight": 1}), + (8, 9, {"weight": 2}), + (9, 14, {"weight": 1}), + (10, 14, {"weight": 1}), + (11, 12, {"weight": 1}), + (12, 15, {"weight": 1}), + (13, 15, {"weight": 1}), + ] + ], + ] + for method in self.methods: + for G, term_nodes, valid_trees in zip( + [self.G1, self.G2, self.G3], + [self.G1_term_nodes, self.G2_term_nodes, self.G3_term_nodes], + valid_steiner_trees, + ): + S = steiner_tree(G, term_nodes, method=method) + assert any( + edges_equal(list(S.edges(data=True)), valid_tree) + for valid_tree in valid_trees + ) + + def test_multigraph_steiner_tree(self): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2, 0, {"weight": 1}), + (2, 3, 0, {"weight": 999}), + (2, 3, 1, {"weight": 1}), + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + ) + terminal_nodes = [2, 4, 5] + expected_edges = [ + (2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight + (3, 4, 0, {"weight": 1}), + (3, 5, 0, {"weight": 1}), + ] + for method in self.methods: + S = steiner_tree(G, terminal_nodes, method=method) + assert edges_equal(S.edges(data=True, keys=True), expected_edges) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py new file mode 100644 index 0000000000000000000000000000000000000000..ccb553e1cc7129837996dfe0d5e8fb2435e0b4fd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py @@ -0,0 +1,963 @@ +"""Unit tests for the traveling_salesman module.""" +import random + +import pytest + +import networkx as nx +import networkx.algorithms.approximation as nx_app + +pairwise = nx.utils.pairwise + + +def test_christofides_hamiltonian(): + random.seed(42) + G = nx.complete_graph(20) + for u, v in G.edges(): + G[u][v]["weight"] = random.randint(0, 10) + + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G))) + H.remove_edges_from(nx.find_cycle(H)) + assert 
len(H.edges) == 0 + + tree = nx.minimum_spanning_tree(G, weight="weight") + H = nx.Graph() + H.add_edges_from(pairwise(nx_app.christofides(G, tree))) + H.remove_edges_from(nx.find_cycle(H)) + assert len(H.edges) == 0 + + +def test_christofides_incomplete_graph(): + G = nx.complete_graph(10) + G.remove_edge(0, 1) + pytest.raises(nx.NetworkXError, nx_app.christofides, G) + + +def test_christofides_ignore_selfloops(): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.christofides(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +# set up graphs for other tests +class TestBase: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + cls.DG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 3), + ("B", "C", 12), + ("B", "D", 16), + ("C", "A", 13), + ("C", "B", 12), + ("C", "D", 4), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG_cycle = ["D", "C", "B", "A", "D"] + cls.DG_cost = 31.0 + + cls.DG2 = nx.DiGraph() + cls.DG2.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "A", 30), + ("B", "C", 2), + ("B", "D", 16), + ("C", "A", 33), + ("C", "B", 32), + ("C", "D", 34), + ("D", "A", 14), + ("D", "B", 15), + ("D", "C", 2), + } + ) + cls.DG2_cycle = ["D", "A", "B", "C", "D"] + cls.DG2_cost = 53.0 + + cls.unweightedUG = nx.complete_graph(5, nx.Graph()) + cls.unweightedDG = nx.complete_graph(5, nx.DiGraph()) + + cls.incompleteUG = nx.Graph() + cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + cls.incompleteDG = nx.DiGraph() + cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)}) + + cls.UG = nx.Graph() + cls.UG.add_weighted_edges_from( + { + ("A", "B", 3), + ("A", "C", 17), + ("A", "D", 14), + ("B", "C", 12), + ("B", "D", 16), + ("C", "D", 4), + } + ) + cls.UG_cycle = ["D", "C", "B", "A", "D"] + cls.UG_cost = 33.0 + + cls.UG2 = nx.Graph() + cls.UG2.add_weighted_edges_from( + { + ("A", "B", 1), + ("A", "C", 15), + ("A", "D", 5), + ("B", "C", 16), + ("B", "D", 8), + ("C", "D", 3), + } + ) + cls.UG2_cycle = ["D", "C", "B", "A", "D"] + cls.UG2_cost = 25.0 + + +def validate_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln + assert cost == exp_cost + + +def validate_symmetric_solution(soln, cost, exp_soln, exp_cost): + assert soln == exp_soln or soln == exp_soln[::-1] + assert cost == exp_cost + + +class TestGreedyTSP(TestBase): + def test_greedy(self): + cycle = nx_app.greedy_tsp(self.DG, source="D") + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0) + + cycle = nx_app.greedy_tsp(self.DG2, source="D") + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 78.0) + + cycle = nx_app.greedy_tsp(self.UG, source="D") + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0) + + cycle = nx_app.greedy_tsp(self.UG2, source="D") + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0) + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG) + pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG) + + def test_not_weighted_graph(self): + nx_app.greedy_tsp(self.unweightedUG) + nx_app.greedy_tsp(self.unweightedDG) + + def test_two_nodes(self): + G = 
nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + cycle = nx_app.greedy_tsp(G) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = nx_app.greedy_tsp(G) + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + +class TestSimulatedAnnealingTSP(TestBase): + tsp = staticmethod(nx_app.simulated_annealing_tsp) + + def test_simulated_annealing_directed(self): + cycle = self.tsp(self.DG, "greedy", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "B", "A", "C", "D"] + cycle = self.tsp(self.DG, initial_sol, source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + initial_sol = ["D", "A", "C", "B", "D"] + cycle = self.tsp(self.DG, initial_sol, move="1-0", source="D", seed=42) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG_cycle, self.DG_cost) + + cycle = self.tsp(self.DG2, "greedy", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + cycle = self.tsp(self.DG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost) + + def test_simulated_annealing_undirected(self): + cycle = self.tsp(self.UG, "greedy", source="D", seed=42) + cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, self.UG_cycle, self.UG_cost) + + cycle = self.tsp(self.UG2, "greedy", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + cycle = self.tsp(self.UG2, "greedy", move="1-0", source="D", seed=42) + cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost) + + def test_error_on_input_order_mistake(self): + # see issue #4846 https://github.com/networkx/networkx/issues/4846 + pytest.raises(TypeError, self.tsp, self.UG, weight="weight") + pytest.raises(nx.NetworkXError, self.tsp, self.UG, "weight") + + def test_not_complete_graph(self): + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteUG, "greedy", source=0) + pytest.raises(nx.NetworkXError, self.tsp, self.incompleteDG, "greedy", source=0) + + def test_ignore_selfloops(self): + G = nx.complete_graph(5) + G.add_edge(3, 3) + cycle = self.tsp(G, "greedy") + assert len(cycle) - 1 == len(G) == len(set(cycle)) + + def test_not_weighted_graph(self): + self.tsp(self.unweightedUG, "greedy") + self.tsp(self.unweightedDG, "greedy") + + def test_two_nodes(self): + G = nx.Graph() + G.add_weighted_edges_from({(1, 2, 1)}) + + cycle = self.tsp(G, "greedy", source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + cycle = self.tsp(G, [1, 2, 1], source=1, seed=42) + cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + validate_solution(cycle, cost, [1, 2, 1], 2) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Simulated Annealing Version: + # set number of moves low and alpha 
high + cycle = self.tsp( + self.DG2, "greedy", source="D", move="1-0", alpha=1, N_inner=1, seed=42 + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG2_cost + + # Try with an incorrect initial guess + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, + initial_sol, + source="D", + move="1-0", + alpha=0.1, + N_inner=1, + max_iterations=1, + seed=42, + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + print(cycle, cost) + assert cost > self.DG_cost + + +class TestThresholdAcceptingTSP(TestSimulatedAnnealingTSP): + tsp = staticmethod(nx_app.threshold_accepting_tsp) + + def test_failure_of_costs_too_high_when_iterations_low(self): + # Threshold Version: + # set number of moves low and number of iterations low + cycle = self.tsp( + self.DG2, + "greedy", + source="D", + move="1-0", + N_inner=1, + max_iterations=1, + seed=4, + ) + cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG2_cost + + # set threshold too low + initial_sol = ["D", "A", "B", "C", "D"] + cycle = self.tsp( + self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42 + ) + cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle)) + assert cost > self.DG_cost + + +# Tests for function traveling_salesman_problem +def test_TSP_method(): + G = nx.cycle_graph(9) + G[4][5]["weight"] = 10 + + def my_tsp_method(G, weight): + return nx_app.simulated_annealing_tsp(G, "greedy", weight, source=4, seed=1) + + path = nx_app.traveling_salesman_problem(G, method=my_tsp_method, cycle=False) + print(path) + assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5] + + +def test_TSP_unweighted(): + G = nx.cycle_graph(9) + path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False) + assert path in ([3, 4, 5, 6], [6, 5, 4, 3]) + + cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6]) + assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6]) + + +def test_TSP_weighted(): + G = nx.cycle_graph(9) + G[0][1]["weight"] = 2 + G[1][2]["weight"] = 2 + G[2][3]["weight"] = 2 + G[3][4]["weight"] = 4 + G[4][5]["weight"] = 5 + G[5][6]["weight"] = 4 + G[6][7]["weight"] = 2 + G[7][8]["weight"] = 2 + G[8][0]["weight"] = 2 + tsp = nx_app.traveling_salesman_problem + + # path between 3 and 6 + expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3]) + # cycle between 3 and 6 + expected_cycles = ( + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3], + [6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6], + ) + # path through all nodes + expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5]) + + # Check default method + cycle = tsp(G, nodes=[3, 6], weight="weight") + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", cycle=False) + assert tourpath in expected_tourpaths + + # Check all methods + methods = [ + nx_app.christofides, + nx_app.greedy_tsp, + lambda G, wt: nx_app.simulated_annealing_tsp(G, "greedy", weight=wt), + lambda G, wt: nx_app.threshold_accepting_tsp(G, "greedy", weight=wt), + ] + for method in methods: + cycle = tsp(G, nodes=[3, 6], weight="weight", method=method) + assert cycle in expected_cycles + + path = tsp(G, nodes=[3, 6], weight="weight", method=method, cycle=False) + assert path in expected_paths + + tourpath = tsp(G, weight="weight", method=method, cycle=False) + assert tourpath in expected_tourpaths + + +def 
test_TSP_incomplete_graph_short_path(): + G = nx.cycle_graph(9) + G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)]) + G[4][5]["weight"] = 5 + + cycle = nx_app.traveling_salesman_problem(G) + print(cycle) + assert len(cycle) == 17 and len(set(cycle)) == 12 + + # make sure that cutting one edge out of the complete graph formulation + # cuts many edges out of the TSP path + path = nx_app.traveling_salesman_problem(G, cycle=False) + print(path) + assert len(path) == 13 and len(set(path)) == 12 + + +def test_held_karp_ascent(): + """ + Test the Held-Karp relaxation with the ascent method + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # Adjacency matrix from page 1153 of the 1970 Held and Karp paper + # which has been edited to be directional, but also symmetric + G_array = np.array( + [ + [0, 97, 60, 73, 17, 52], + [97, 0, 41, 52, 90, 30], + [60, 41, 0, 21, 35, 41], + [73, 52, 21, 0, 95, 46], + [17, 90, 35, 95, 0, 81], + [52, 30, 41, 46, 81, 0], + ] + ) + + solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 207.00 + # Check that the z_stars are the same + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_fractional_solution(): + """ + Test the ascent method using a modified version of Figure 2 on page 1140 + in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and + Karp + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and is a complete directed graph with infinite edge weights for the + # edges not listed in the original graph + G_array = np.array( + [ + [0, 100, 100, 100000, 100000, 1], + [100, 0, 100, 100000, 1, 100000], + [100, 100, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 100, 100], + [100000, 1, 100000, 100, 0, 100], + [1, 100000, 100000, 100, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 303.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_ascent_method_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + 
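# Sanity-check sketch (commented out; not part of the upstream test): the + # "brute forced" optimum of 190 claimed in the docstring can be re-derived + # by enumerating every tour that starts and ends at node 0, e.g.: + # + # from itertools import permutations + # best = min( + # sum(G_array[u][v] for u, v in zip((0,) + p, p + (0,))) + # for p in permutations(range(1, 7)) + # ) + # assert best == 190 + +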
solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 190.00 + # Check that the z_stars match. + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_ascent_method_asymmetric_2(): + """ + Tests the ascent method using a truly asymmetric graph for which the + solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 45, 39, 92, 29, 31], + [72, 0, 4, 12, 21, 60], + [81, 6, 0, 98, 70, 53], + [49, 71, 59, 0, 98, 94], + [74, 95, 24, 43, 0, 47], + [56, 43, 3, 65, 22, 0], + ] + ) + + solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 144.00 + # Check that the z_stars match. + solution = nx.DiGraph() + solution.add_edges_from(solution_edges) + assert nx.utils.edges_equal(z_star.edges, solution.edges) + + +def test_held_karp_ascent_asymmetric_3(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced. + + In this graph there are two different optimal, integral solutions (which + are also the overall ATSP solutions) to the Held-Karp relaxation. However, + this particular graph has two different tours of optimal value and the + possible solutions in the held_karp_ascent function are not stored in an + ordered data structure.
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 1, 5, 2, 7, 4], + [7, 0, 7, 7, 1, 4], + [4, 7, 0, 9, 2, 1], + [7, 2, 7, 0, 4, 4], + [5, 5, 4, 4, 0, 3], + [3, 9, 1, 3, 4, 0], + ] + ) + + solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)] + + solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + assert round(opt_hk, 2) == 13.00 + # Check that the z_stars are the same + solution1 = nx.DiGraph() + solution1.add_edges_from(solution1_edges) + solution2 = nx.DiGraph() + solution2.add_edges_from(solution2_edges) + assert nx.utils.edges_equal(z_star.edges, solution1.edges) or nx.utils.edges_equal( + z_star.edges, solution2.edges + ) + + +def test_held_karp_ascent_fractional_asymmetric(): + """ + Tests the ascent method using a truly asymmetric graph with a fractional + solution for which the solution has been brute forced + """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + [0, 100, 150, 100000, 100000, 1], + [150, 0, 100, 100000, 1, 100000], + [100, 150, 0, 1, 100000, 100000], + [100000, 100000, 1, 0, 150, 100], + [100000, 2, 100000, 100, 0, 150], + [2, 100000, 100000, 150, 100, 0], + ] + ) + + solution_z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 5 / 12, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 5 / 12, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 5 / 12, + (3, 5): 5 / 12, + (4, 1): 5 / 6, + (4, 3): 5 / 12, + (4, 5): 5 / 12, + (5, 0): 5 / 6, + (5, 3): 5 / 12, + (5, 4): 5 / 12, + } + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + opt_hk, z_star = tsp.held_karp_ascent(G) + + # Check that the optimal weights are the same + assert round(opt_hk, 2) == 304.00 + # Check that the z_stars are the same + assert {key: round(z_star[key], 4) for key in z_star} == { + key: round(solution_z_star[key], 4) for key in solution_z_star + } + + +def test_spanning_tree_distribution(): + """ + Test that we can create an exponential distribution of spanning trees such + that the probability of each tree is proportional to the product of edge + weights. + + Results of this test have been confirmed with hypothesis testing from the + created distribution. + + This test uses the symmetric, fractional Held Karp solution. 
+ """ + import networkx.algorithms.approximation.traveling_salesman as tsp + + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + z_star = { + (0, 1): 5 / 12, + (0, 2): 5 / 12, + (0, 5): 5 / 6, + (1, 0): 5 / 12, + (1, 2): 1 / 3, + (1, 4): 5 / 6, + (2, 0): 5 / 12, + (2, 1): 1 / 3, + (2, 3): 5 / 6, + (3, 2): 5 / 6, + (3, 4): 1 / 3, + (3, 5): 1 / 2, + (4, 1): 5 / 6, + (4, 3): 1 / 3, + (4, 5): 1 / 2, + (5, 0): 5 / 6, + (5, 3): 1 / 2, + (5, 4): 1 / 2, + } + + solution_gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of z_star + G = nx.MultiGraph() + for u, v in z_star: + if (u, v) in G.edges or (v, u) in G.edges: + continue + G.add_edge(u, v) + + gamma = tsp.spanning_tree_distribution(G, z_star) + + assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma + + +def test_asadpour_tsp(): + """ + Test the complete asadpour tsp algorithm with the fractional, symmetric + Held Karp solution. This test also uses an incomplete graph as input. + """ + # This version of Figure 2 has all of the edge weights multiplied by 100 + # and the 0 weight edges have a weight of 1. + pytest.importorskip("numpy") + pytest.importorskip("scipy") + + edge_list = [ + (0, 1, 100), + (0, 2, 100), + (0, 5, 1), + (1, 2, 100), + (1, 4, 1), + (2, 3, 1), + (3, 4, 100), + (3, 5, 100), + (4, 5, 100), + (1, 0, 100), + (2, 0, 100), + (5, 0, 1), + (2, 1, 100), + (4, 1, 1), + (3, 2, 1), + (4, 3, 100), + (5, 3, 100), + (5, 4, 100), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edge_list) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 19) + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + # Check that the returned list is a valid tour. Because this is an + # incomplete graph, the conditions are not as strict. We need the tour to + # + # Start and end at the same node + # Pass through every vertex at least once + # Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal + # + # For the second condition it is possible to have the tour pass through the + # same vertex more then. Imagine that the tour on the complete version takes + # an edge not in the original graph. In the output this is substituted with + # the shortest path between those vertices, allowing vertices to appear more + # than once. + # + # However, we are using a fixed random number generator so we know what the + # expected tour is. + expected_tours = [[1, 4, 5, 0, 2, 3, 2, 1], [3, 2, 0, 1, 4, 5, 3]] + + assert tour in expected_tours + + +def test_asadpour_real_world(): + """ + This test uses airline prices between the six largest cities in the US. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + + This test also uses the `source` keyword argument to ensure that the tour + always starts at city 0. 
+ """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_tours = [ + ["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"], + ["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 37, source="JFK") + + tour = nx_app.traveling_salesman_problem(G, weight="weight", method=fixed_asadpour) + + assert tour in expected_tours + + +def test_asadpour_real_world_path(): + """ + This test uses airline prices between the six largest cities in the US. This + time using a path, not a cycle. + + * New York City -> JFK + * Los Angeles -> LAX + * Chicago -> ORD + * Houston -> IAH + * Phoenix -> PHX + * Philadelphia -> PHL + + Flight prices from August 2021 using Delta or American airlines to get + nonstop flight. The brute force solution found the optimal tour to cost $872 + """ + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + G_array = np.array( + [ + # JFK LAX ORD IAH PHX PHL + [0, 243, 199, 208, 169, 183], # JFK + [277, 0, 217, 123, 127, 252], # LAX + [297, 197, 0, 197, 123, 177], # ORD + [303, 169, 197, 0, 117, 117], # IAH + [257, 127, 160, 117, 0, 319], # PHX + [183, 332, 217, 117, 319, 0], # PHL + ] + ) + + node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"} + + expected_paths = [ + ["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"], + ["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"], + ] + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + nx.relabel_nodes(G, node_map, copy=False) + + def fixed_asadpour(G, weight): + return nx_app.asadpour_atsp(G, weight, 56) + + path = nx_app.traveling_salesman_problem( + G, weight="weight", cycle=False, method=fixed_asadpour + ) + + assert path in expected_paths + + +def test_asadpour_disconnected_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + disconnected graph. + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.add_node(5) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_incomplete_graph(): + """ + Test that the proper exception is raised when asadpour_atsp is given an + incomplete graph + """ + + G = nx.complete_graph(4, create_using=nx.DiGraph) + # have to set edge weights so that if the exception is not raised, the + # function will complete and we will fail the test + nx.set_edge_attributes(G, 1, "weight") + G.remove_edge(0, 1) + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +def test_asadpour_empty_graph(): + """ + Test the asadpour_atsp function with an empty graph + """ + G = nx.DiGraph() + + pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G) + + +@pytest.mark.slow +def test_asadpour_integral_held_karp(): + """ + This test uses an integral held karp solution and the held karp function + will return a graph rather than a dict, bypassing most of the asadpour + algorithm. 
+ + At first glance, this test probably doesn't look like it ensures that we + skip the rest of the Asadpour algorithm, but it does. We are not fixing a + seed for the random number generator, so if we sampled any spanning trees + the approximation would be different on basically every execution of this + test. It is not, because Held-Karp is deterministic and we never reach the + portion of the code that depends on random numbers. + """ + np = pytest.importorskip("numpy") + + G_array = np.array( + [ + [0, 26, 63, 59, 69, 31, 41], + [62, 0, 91, 53, 75, 87, 47], + [47, 82, 0, 90, 15, 9, 18], + [68, 19, 5, 0, 58, 34, 93], + [11, 58, 53, 55, 0, 61, 79], + [88, 75, 13, 76, 98, 0, 40], + [41, 61, 55, 88, 46, 45, 0], + ] + ) + + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + for _ in range(2): + tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp) + + assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour + + +def test_directed_tsp_impossible(): + """ + Test the asadpour algorithm with a graph without a Hamiltonian circuit + """ + pytest.importorskip("numpy") + + # In this graph, once we leave node 0 we cannot return + edges = [ + (0, 1, 10), + (0, 2, 11), + (0, 3, 12), + (1, 2, 4), + (1, 3, 6), + (2, 1, 3), + (2, 3, 2), + (3, 1, 5), + (3, 2, 1), + ] + + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + + pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py new file mode 100644 index 0000000000000000000000000000000000000000..461b0f2ed2dd4d043902d054e10a5f39ffb069c9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py @@ -0,0 +1,280 @@ +import itertools + +import networkx as nx +from networkx.algorithms.approximation import ( + treewidth_min_degree, + treewidth_min_fill_in, +) +from networkx.algorithms.approximation.treewidth import ( + MinDegreeHeuristic, + min_fill_in_heuristic, +) + + +def is_tree_decomp(graph, decomp): + """Check if the given tree decomposition is valid.""" + for x in graph.nodes(): + appear_once = False + for bag in decomp.nodes(): + if x in bag: + appear_once = True + break + assert appear_once + + # Check that each pair of adjacent nodes appears together in at least + # one bag + for x, y in graph.edges(): + appear_together = False + for bag in decomp.nodes(): + if x in bag and y in bag: + appear_together = True + break + assert appear_together + + # Check if the nodes associated with vertex v form a connected subset of T + for v in graph.nodes(): + subset = [] + for bag in decomp.nodes(): + if v in bag: + subset.append(bag) + sub_graph = decomp.subgraph(subset) + assert nx.is_connected(sub_graph) + + +class TestTreewidthMinDegree: + """Unit tests for the min_degree function""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 3) + cls.small_tree.add_edge(4, 3) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 5) + cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(0, 1) # deg(0) = 1 + + cls.deterministic_graph.add_edge(1, 2) # deg(1) = 2 + + 
cls.deterministic_graph.add_edge(2, 3) + cls.deterministic_graph.add_edge(2, 4) # deg(2) = 3 + + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(3, 6) # deg(3) = 4 + + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(4, 6) + cls.deterministic_graph.add_edge(4, 7) # deg(4) = 5 + + cls.deterministic_graph.add_edge(5, 6) + cls.deterministic_graph.add_edge(5, 7) + cls.deterministic_graph.add_edge(5, 8) + cls.deterministic_graph.add_edge(5, 9) # deg(5) = 6 + + cls.deterministic_graph.add_edge(6, 7) + cls.deterministic_graph.add_edge(6, 8) + cls.deterministic_graph.add_edge(6, 9) # deg(6) = 6 + + cls.deterministic_graph.add_edge(7, 8) + cls.deterministic_graph.add_edge(7, 9) # deg(7) = 5 + + cls.deterministic_graph.add_edge(8, 9) # deg(8) = 4 + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_degree(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test small tree + + Test if the computed treewidth of the known self.small_tree is 2. + As we know which value we can expect from our heuristic, values other + than two are regressions + """ + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test heuristic abort condition for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + + deg_heuristic = MinDegreeHeuristic(graph) + node = deg_heuristic.best_node(graph) + if node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_degree(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_degree(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_degree(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_degree heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + deg_heuristic = MinDegreeHeuristic(graph) + elim_node = deg_heuristic.best_node(graph) + print(f"Graph {graph}:") + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = deg_heuristic.best_node(graph) + + # check only the first 5 elements for equality + assert steps[:5] == [0, 1, 2, 3, 4] + + +class TestTreewidthMinFillIn: + """Unit tests for the treewidth_min_fill_in function.""" + + @classmethod + def setup_class(cls): + """Setup for different kinds of trees""" + cls.complete = nx.Graph() + cls.complete.add_edge(1, 2) + cls.complete.add_edge(2, 3) + cls.complete.add_edge(1, 3) + + cls.small_tree = nx.Graph() + cls.small_tree.add_edge(1, 2) + cls.small_tree.add_edge(2, 3) + cls.small_tree.add_edge(3, 4) + cls.small_tree.add_edge(1, 4) + cls.small_tree.add_edge(2, 4) + cls.small_tree.add_edge(4, 5) + 
cls.small_tree.add_edge(5, 6) + cls.small_tree.add_edge(5, 7) + cls.small_tree.add_edge(6, 7) + + cls.deterministic_graph = nx.Graph() + cls.deterministic_graph.add_edge(1, 2) + cls.deterministic_graph.add_edge(1, 3) + cls.deterministic_graph.add_edge(3, 4) + cls.deterministic_graph.add_edge(2, 4) + cls.deterministic_graph.add_edge(3, 5) + cls.deterministic_graph.add_edge(4, 5) + cls.deterministic_graph.add_edge(3, 6) + cls.deterministic_graph.add_edge(5, 6) + + def test_petersen_graph(self): + """Test Petersen graph tree decomposition result""" + G = nx.petersen_graph() + _, decomp = treewidth_min_fill_in(G) + is_tree_decomp(G, decomp) + + def test_small_tree_treewidth(self): + """Test if the computed treewidth of the known self.small_tree is 2""" + G = self.small_tree + # the order of removal should be [1,2,4]3[5,6,7] + # (with [] denoting any order of the containing nodes) + # resulting in treewidth 2 for the heuristic + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 2 + + def test_heuristic_abort(self): + """Test if min_fill_in returns None for fully connected graph""" + graph = {} + for u in self.complete: + graph[u] = set() + for v in self.complete[u]: + if u != v: # ignore self-loop + graph[u].add(v) + next_node = min_fill_in_heuristic(graph) + if next_node is None: + pass + else: + assert False + + def test_empty_graph(self): + """Test empty graph""" + G = nx.Graph() + _, _ = treewidth_min_fill_in(G) + + def test_two_component_graph(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + treewidth, _ = treewidth_min_fill_in(G) + assert treewidth == 0 + + def test_not_sortable_nodes(self): + G = nx.Graph([(0, "a")]) + treewidth_min_fill_in(G) + + def test_heuristic_first_steps(self): + """Test first steps of min_fill_in heuristic""" + graph = { + n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph + } + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + steps = [] + + while elim_node is not None: + print(f"Removing {elim_node}:") + steps.append(elim_node) + nbrs = graph[elim_node] + + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + for u in graph: + if elim_node in graph[u]: + graph[u].remove(elim_node) + + del graph[elim_node] + print(f"Graph {graph}:") + elim_node = min_fill_in_heuristic(graph) + + # check only the first 2 elements for equality + assert steps[:2] == [6, 5] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py new file mode 100644 index 0000000000000000000000000000000000000000..5cc5a38df9a4139684005491e0183cd563487154 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py @@ -0,0 +1,68 @@ +import networkx as nx +from networkx.algorithms.approximation import min_weighted_vertex_cover + + +def is_cover(G, node_cover): + return all({u, v} & node_cover for u, v in G.edges()) + + +class TestMWVC: + """Unit tests for the approximate minimum weighted vertex cover + function, + :func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`. + + """ + + def test_unweighted_directed(self): + # Create a star graph in which half the nodes are directed in + # and half are directed out. 
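+ # Every edge of this star is incident to node 0, so the singleton {0} + # is already a vertex cover; the assertions below check that the + # approximation returns a cover of that minimum size.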
+ G = nx.DiGraph() + G.add_edges_from((0, v) for v in range(1, 26)) + G.add_edges_from((v, 0) for v in range(26, 51)) + cover = min_weighted_vertex_cover(G) + assert 1 == len(cover) + assert is_cover(G, cover) + + def test_unweighted_undirected(self): + # create a simple star graph + size = 50 + sg = nx.star_graph(size) + cover = min_weighted_vertex_cover(sg) + assert 1 == len(cover) + assert is_cover(sg, cover) + + def test_weighted(self): + wg = nx.Graph() + wg.add_node(0, weight=10) + wg.add_node(1, weight=1) + wg.add_node(2, weight=1) + wg.add_node(3, weight=1) + wg.add_node(4, weight=1) + + wg.add_edge(0, 1) + wg.add_edge(0, 2) + wg.add_edge(0, 3) + wg.add_edge(0, 4) + + wg.add_edge(1, 2) + wg.add_edge(2, 3) + wg.add_edge(3, 4) + wg.add_edge(4, 1) + + cover = min_weighted_vertex_cover(wg, weight="weight") + csum = sum(wg.nodes[node]["weight"] for node in cover) + assert 4 == csum + assert is_cover(wg, cover) + + def test_unweighted_self_loop(self): + slg = nx.Graph() + slg.add_node(0) + slg.add_node(1) + slg.add_node(2) + + slg.add_edge(0, 1) + slg.add_edge(2, 2) + + cover = min_weighted_vertex_cover(slg) + assert 2 == len(cover) + assert is_cover(slg, cover) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py new file mode 100644 index 0000000000000000000000000000000000000000..cbc320486e262883d989c4f3489ac8ea08db1137 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py @@ -0,0 +1,1442 @@ +""" +================================= +Travelling Salesman Problem (TSP) +================================= + +Implementation of approximate algorithms +for solving and approximating the TSP. + +Categories of algorithms which are implemented: + +- Christofides (provides a 3/2-approximation of TSP) +- Greedy +- Simulated Annealing (SA) +- Threshold Accepting (TA) +- Asadpour Asymmetric Traveling Salesman Algorithm + +The Travelling Salesman Problem tries to find, given the weight +(distance) between all points where a salesman has to visit, the +route so that: + +- The total distance (cost) which the salesman travels is minimized. +- The salesman returns to the starting point. +- Note that for a complete graph, the salesman visits each point once. + +The function `traveling_salesman_problem` allows for incomplete +graphs by finding all-pairs shortest paths, effectively converting +the problem to a complete graph problem. It calls one of the +approximate methods on that problem and then converts the result +back to the original graph using the previously found shortest paths. + +TSP is an NP-hard problem in combinatorial optimization, +important in operations research and theoretical computer science. + +http://en.wikipedia.org/wiki/Travelling_salesman_problem +""" +import math + +import networkx as nx +from networkx.algorithms.tree.mst import random_spanning_tree +from networkx.utils import not_implemented_for, pairwise, py_random_state + +__all__ = [ + "traveling_salesman_problem", + "christofides", + "asadpour_atsp", + "greedy_tsp", + "simulated_annealing_tsp", + "threshold_accepting_tsp", +] + + +def swap_two_nodes(soln, seed): + """Swap two nodes in `soln` to give a neighbor solution. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`.
+ + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + move_one_node + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln[a], soln[b] = soln[b], soln[a] + return soln + + +def move_one_node(soln, seed): + """Move one node to another position to give a neighbor solution. + + The node to move and the position to move to are chosen randomly. + The first and last nodes are left untouched as soln must be a cycle + starting at that node. + + Parameters + ---------- + soln : list of nodes + Current cycle of nodes + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + The solution after move is applied. (A neighbor solution.) + + Notes + ----- + This function assumes that the incoming list `soln` is a cycle + (that the first and last element are the same) and also that + we don't want any move to change the first node in the list + (and thus not the last node either). + + The input list is changed as well as returned. Make a copy if needed. + + See Also + -------- + swap_two_nodes + """ + a, b = seed.sample(range(1, len(soln) - 1), k=2) + soln.insert(b, soln.pop(a)) + return soln + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def christofides(G, weight="weight", tree=None): + """Approximate a solution of the traveling salesman problem + + Compute a 3/2-approximation of the traveling salesman problem + in a complete undirected graph using Christofides [1]_ algorithm. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted undirected graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + tree : NetworkX graph or None (default: None) + A minimum spanning tree of G. Or, if None, the minimum spanning + tree is computed using :func:`networkx.minimum_spanning_tree` + + Returns + ------- + list + List of nodes in `G` along a cycle with a 3/2-approximation of + the minimal Hamiltonian cycle. + + References + ---------- + .. [1] Christofides, Nicos. "Worst-case analysis of a new heuristic for + the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ + Pittsburgh Pa Management Sciences Research Group, 1976. + """ + # Remove selfloops if necessary + loop_nodes = nx.nodes_with_selfloops(G) + try: + node = next(loop_nodes) + except StopIteration: + pass + else: + G = G.copy() + G.remove_edge(node, node) + G.remove_edges_from((n, n) for n in loop_nodes) + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. 
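+ # Any selfloops were removed above, so in a complete graph every node must + # now have exactly N = len(G) - 1 neighbors; scanning the adjacency dicts + # against N below is enough to detect any missing edge.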
+ if any(len(nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if tree is None: + tree = nx.minimum_spanning_tree(G, weight=weight) + L = G.copy() + L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)]) + MG = nx.MultiGraph() + MG.add_edges_from(tree.edges) + edges = nx.min_weight_matching(L, weight=weight) + MG.add_edges_from(edges) + return _shortcutting(nx.eulerian_circuit(MG)) + + +def _shortcutting(circuit): + """Remove duplicate nodes in the path""" + nodes = [] + for u, v in circuit: + if v in nodes: + continue + if not nodes: + nodes.append(u) + nodes.append(v) + nodes.append(nodes[0]) + return nodes + + +@nx._dispatch(edge_attrs="weight") +def traveling_salesman_problem(G, weight="weight", nodes=None, cycle=True, method=None): + """Find the shortest path in `G` connecting specified nodes + + This function allows approximate solutions to the traveling salesman + problem on networks that are not complete graphs and/or where the + salesman does not need to visit all nodes. + + This function proceeds in two steps. First, it creates a complete + graph using the all-pairs shortest_paths between nodes in `nodes`. + Edge weights in the new graph are the lengths of the paths + between each pair of nodes in the original graph. + Second, an algorithm (default: `christofides` for undirected and + `asadpour_atsp` for directed) is used to approximate the minimal Hamiltonian + cycle on this new graph. The available algorithms are: + + - christofides + - greedy_tsp + - simulated_annealing_tsp + - threshold_accepting_tsp + - asadpour_atsp + + Once the Hamiltonian Cycle is found, this function post-processes to + accommodate the structure of the original graph. If `cycle` is ``False``, + the biggest weight edge is removed to make a Hamiltonian path. + Then each edge on the new complete graph used for that analysis is + replaced by the shortest_path between those nodes on the original graph. + + Parameters + ---------- + G : NetworkX graph + A possibly weighted graph + + nodes : collection of nodes (default=G.nodes) + collection (list, set, etc.) of nodes to visit + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + cycle : bool (default: True) + Indicates whether a cycle should be returned, or a path. + Note: the cycle is the approximate minimal cycle. + The path simply removes the biggest edge in that cycle. + + method : function (default: None) + A function that returns a cycle on all nodes and approximates + the solution to the traveling salesman problem on a complete + graph. The returned cycle is then used to find a corresponding + solution on `G`. `method` should be callable; take inputs + `G` and `weight`; and return a list of nodes along the cycle. + + Provided options include :func:`christofides`, :func:`greedy_tsp`, + :func:`simulated_annealing_tsp` and :func:`threshold_accepting_tsp`. + + If `method is None`: use :func:`christofides` for undirected `G` and + :func:`asadpour_atsp` for directed `G`. + + To specify parameters for these provided functions, construct lambda + functions that state the specific value. `method` must have 2 inputs. + (See examples). + + Returns + ------- + list + List of nodes in `G` along a path with an approximation of the minimal + path through `nodes`.
+ + + Raises + ------ + NetworkXError + If `G` is a directed graph, it has to be strongly connected or the + complete version cannot be generated. + + Examples + -------- + >>> tsp = nx.approximation.traveling_salesman_problem + >>> G = nx.cycle_graph(9) + >>> G[4][5]["weight"] = 5  # all other weights are 1 + >>> tsp(G, nodes=[3, 6]) + [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3] + >>> path = tsp(G, cycle=False) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + Build (curry) your own function to provide parameter values to the methods. + + >>> SA_tsp = nx.approximation.simulated_annealing_tsp + >>> method = lambda G, wt: SA_tsp(G, "greedy", weight=wt, temp=500) + >>> path = tsp(G, cycle=False, method=method) + >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4]) + True + + """ + if method is None: + if G.is_directed(): + method = asadpour_atsp + else: + method = christofides + if nodes is None: + nodes = list(G.nodes) + + dist = {} + path = {} + for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight): + dist[n] = d + path[n] = p + + if G.is_directed(): + # If the graph is not strongly connected, raise an exception + if not nx.is_strongly_connected(G): + raise nx.NetworkXError("G is not strongly connected") + GG = nx.DiGraph() + else: + GG = nx.Graph() + for u in nodes: + for v in nodes: + if u == v: + continue + GG.add_edge(u, v, weight=dist[u][v]) + best_GG = method(GG, weight) + + if not cycle: + # find and remove the biggest edge + (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]]) + pos = best_GG.index(u) + 1 + while best_GG[pos] != v: + pos = best_GG[pos:].index(u) + 1 + best_GG = best_GG[pos:-1] + best_GG[:pos] + + best_path = [] + for u, v in pairwise(best_GG): + best_path.extend(path[u][v][:-1]) + best_path.append(v) + return best_path + + +@not_implemented_for("undirected") +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def asadpour_atsp(G, weight="weight", seed=None, source=None): + """ + Returns an approximate solution to the traveling salesman problem. + + This approximate solution is one of the best known approximations for the + asymmetric traveling salesman problem developed by Asadpour et al. [1]_. + The algorithm first solves the Held-Karp relaxation to find a lower + bound for the weight of the cycle. Next, it constructs an exponential + distribution of undirected spanning trees where the probability of an + edge being in the tree corresponds to the weight of that edge using a + maximum entropy rounding scheme. Next, we sample that distribution + $2 \\lceil \\ln n \\rceil$ times and save the minimum sampled tree once the + direction of the arcs is added back to the edges. Finally, we augment and + then short-circuit that graph to find the approximate tour for the + salesman. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. The + distance between all pairs of nodes should be included and the triangle + inequality should hold. That is, the direct edge between any two nodes + should be the path of least cost. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + source : node label (default=`None`) + If given, return the cycle starting and ending at the given node.
+ + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman can follow to minimize + the total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete or has fewer than two nodes, the algorithm raises + an exception. + + NetworkXError + If `source` is not `None` and is not a node in `G`, the algorithm raises + an exception. + + NetworkXNotImplemented + If `G` is an undirected graph. + + References + ---------- + .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi, + An o(log n/log log n)-approximation algorithm for the asymmetric + traveling salesman problem, Operations research, 65 (2017), + pp. 1043–1061 + + Examples + -------- + >>> import networkx as nx + >>> import networkx.algorithms.approximation as approx + >>> G = nx.complete_graph(3, create_using=nx.DiGraph) + >>> nx.set_edge_attributes(G, {(0, 1): 2, (1, 2): 2, (2, 0): 2, (0, 2): 1, (2, 1): 1, (1, 0): 1}, "weight") + >>> tour = approx.asadpour_atsp(G, source=0) + >>> tour + [0, 2, 1, 0] + """ + from math import ceil, exp + from math import log as ln + + # Check that G is a complete graph + N = len(G) - 1 + if N < 2: + raise nx.NetworkXError("G must have at least two nodes") + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G is not a complete DiGraph") + # Check that the source vertex, if given, is in the graph + if source is not None and source not in G.nodes: + raise nx.NetworkXError("Given source node not in G.") + + opt_hk, z_star = held_karp_ascent(G, weight) + + # Test to see if the ascent method found an integer solution or a fractional + # solution. If it is integral then z_star is an nx.Graph, otherwise it is + # a dict + if not isinstance(z_star, dict): + # Here we are using the shortcutting method to go from the list of edges + # returned from eulerian_circuit to a list of nodes + return _shortcutting(nx.eulerian_circuit(z_star, source=source)) + + # Create the undirected support of z_star + z_support = nx.MultiGraph() + for u, v in z_star: + if (u, v) not in z_support.edges: + edge_weight = min(G[u][v][weight], G[v][u][weight]) + z_support.add_edge(u, v, **{weight: edge_weight}) + + # Create the exponential distribution of spanning trees + gamma = spanning_tree_distribution(z_support, z_star) + + # Write the lambda values to the edges of z_support + z_support = nx.Graph(z_support) + lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()} + nx.set_edge_attributes(z_support, lambda_dict, "weight") + del gamma, lambda_dict + + # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one + minimum_sampled_tree = None + minimum_sampled_tree_weight = math.inf + for _ in range(2 * ceil(ln(G.number_of_nodes()))): + sampled_tree = random_spanning_tree(z_support, "weight", seed=seed) + sampled_tree_weight = sampled_tree.size(weight) + if sampled_tree_weight < minimum_sampled_tree_weight: + minimum_sampled_tree = sampled_tree.copy() + minimum_sampled_tree_weight = sampled_tree_weight + + # Orient the edges in that tree to keep the cost of the tree the same.
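+ # An undirected tree edge (u, v) is given the direction whose arc weight + # in G matches the weight stored on the sampled edge; otherwise the + # opposite orientation (v, u) is used.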
+ t_star = nx.MultiDiGraph() + for u, v, d in minimum_sampled_tree.edges(data=weight): + if d == G[u][v][weight]: + t_star.add_edge(u, v, **{weight: d}) + else: + t_star.add_edge(v, u, **{weight: d}) + + # Find the node demands needed to neutralize the flow of t_star in G + node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star} + nx.set_node_attributes(G, node_demands, "demand") + + # Find the min_cost_flow + flow_dict = nx.min_cost_flow(G, "demand") + + # Build the flow into t_star + for source, values in flow_dict.items(): + for target in values: + if (source, target) not in t_star.edges and values[target] > 0: + # If values[target] > 0 we have to add that many edges + for _ in range(values[target]): + t_star.add_edge(source, target) + + # Return the shortcut eulerian circuit + circuit = nx.eulerian_circuit(t_star, source=source) + return _shortcutting(circuit) + + +@nx._dispatch(edge_attrs="weight") +def held_karp_ascent(G, weight="weight"): + """ + Minimizes the Held-Karp relaxation of the TSP for `G` + + Solves the Held-Karp relaxation of the input complete digraph and scales + the output solution for use in the Asadpour [1]_ ATSP algorithm. + + The Held-Karp relaxation defines the lower bound for solutions to the + ATSP, although it may return a fractional solution. This is used in the + Asadpour algorithm as an initial solution which is later rounded to an + integral tree within the spanning tree polytopes. This function solves + the relaxation with the branch and bound method in [2]_. + + Parameters + ---------- + G : nx.DiGraph + The graph should be a complete weighted directed graph. + The distance between all pairs of nodes should be included. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + Returns + ------- + OPT : float + The cost for the optimal solution to the Held-Karp relaxation + z : dict or nx.Graph + A symmetrized and scaled version of the optimal solution to the + Held-Karp relaxation for use in the Asadpour algorithm. + + If an integral solution is found, then that is an optimal solution for + the ATSP problem and that is returned instead. + + References + ---------- + .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi, + An o(log n/log log n)-approximation algorithm for the asymmetric + traveling salesman problem, Operations research, 65 (2017), + pp. 1043–1061 + + .. [2] M. Held, R. M. Karp, The traveling-salesman problem and minimum + spanning trees, Operations Research, 1970-11-01, Vol. 18 (6), + pp.1138-1162 + """ + import numpy as np + from scipy import optimize + + def k_pi(): + """ + Find the set of minimum 1-Arborescences for G at point pi. + + Returns + ------- + Set + The set of minimum 1-Arborescences + """ + # Create a copy of G without vertex 1. + G_1 = G.copy() + minimum_1_arborescences = set() + minimum_1_arborescence_weight = math.inf + + # node is node '1' in the Held and Karp paper + n = next(G.__iter__()) + G_1.remove_node(n) + + # Iterate over the spanning arborescences of the graph until we know + # that we have found the minimum 1-arborescences. My proposed strategy + # is to find the most expensive root to connect to from 'node 1' and + # the least expensive one.
We then iterate over arborescences until + # the cost of the basic arborescence is the cost of the minimum one + # plus the difference between the most and least expensive roots, + # that way the cost of connecting 'node 1' will by definition not be + # minimal + min_root = {"node": None, weight: math.inf} + max_root = {"node": None, weight: -math.inf} + for u, v, d in G.edges(n, data=True): + if d[weight] < min_root[weight]: + min_root = {"node": v, weight: d[weight]} + if d[weight] > max_root[weight]: + max_root = {"node": v, weight: d[weight]} + + min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight]) + min_root[weight] = min_root[weight] + min_in_edge[2][weight] + max_root[weight] = max_root[weight] + min_in_edge[2][weight] + + min_arb_weight = math.inf + for arb in nx.ArborescenceIterator(G_1): + arb_weight = arb.size(weight) + if min_arb_weight == math.inf: + min_arb_weight = arb_weight + elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]: + break + # We have to pick the root node of the arborescence for the out + # edge of the first vertex as that is the only node without an + # edge directed into it. + for N, deg in arb.in_degree: + if deg == 0: + # root found + arb.add_edge(n, N, **{weight: G[n][N][weight]}) + arb_weight += G[n][N][weight] + break + + # We can pick the minimum weight in-edge for the vertex with + # a cycle. If there are multiple edges with the same minimum + # weight, we need to add all of them. + # + # Delete the edge (N, n) so that we cannot pick it. + edge_data = G[N][n] + G.remove_edge(N, n) + min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2] + min_edges = [ + (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight + ] + for u, v, d in min_edges: + new_arb = arb.copy() + new_arb.add_edge(u, v, **{weight: d}) + new_arb_weight = arb_weight + d + # Check to see the weight of the arborescence, if it is a + # new minimum, clear all of the old potential minimum + # 1-arborescences and add this as the only one. If its + # weight is above the known minimum, do not add it. + if new_arb_weight < minimum_1_arborescence_weight: + minimum_1_arborescences.clear() + minimum_1_arborescence_weight = new_arb_weight + # We have a 1-arborescence, add it to the set + if new_arb_weight == minimum_1_arborescence_weight: + minimum_1_arborescences.add(new_arb) + G.add_edge(N, n, **edge_data) + + return minimum_1_arborescences + + def direction_of_ascent(): + """ + Find the direction of ascent at point pi. + + See [1]_ for more information. + + Returns + ------- + dict + A mapping from the nodes of the graph which represents the direction + of ascent. + + References + ---------- + .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum + spanning trees, Operations Research, 1970-11-01, Vol. 18 (6), + pp.1138-1162 + """ + # 1. Set d equal to the zero n-vector. + d = {} + for n in G: + d[n] = 0 + del n + # 2. Find a 1-Arborescence T^k such that k is in K(pi, d). + minimum_1_arborescences = k_pi() + while True: + # Reduce K(pi) to K(pi, d) + # Find the arborescence in K(pi) which increases the least in + # direction d + min_k_d_weight = math.inf + min_k_d = None + for arborescence in minimum_1_arborescences: + weighted_cost = 0 + for n, deg in arborescence.degree: + weighted_cost += d[n] * (deg - 2) + if weighted_cost < min_k_d_weight: + min_k_d_weight = weighted_cost + min_k_d = arborescence + + # 3.
If the sum of d_i * v_{i, k} is greater than zero, terminate + if min_k_d_weight > 0: + return d, min_k_d + # 4. d_i = d_i + v_{i, k} + for n, deg in min_k_d.degree: + d[n] += deg - 2 + # Check that we do not need to terminate because the direction + # of ascent does not exist. This is done with linear + # programming. + c = np.full(len(minimum_1_arborescences), -1, dtype=int) + a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int) + b_eq = np.zeros(len(G) + 1, dtype=int) + b_eq[len(G)] = 1 + for arb_count, arborescence in enumerate(minimum_1_arborescences): + n_count = len(G) - 1 + for n, deg in arborescence.degree: + a_eq[n_count][arb_count] = deg - 2 + n_count -= 1 + a_eq[len(G)][arb_count] = 1 + program_result = optimize.linprog(c, A_eq=a_eq, b_eq=b_eq) + # If the constants exist, then the direction of ascent doesn't + if program_result.success: + # There is no direction of ascent + return None, minimum_1_arborescences + + # 5. GO TO 2 + + def find_epsilon(k, d): + """ + Given the direction of ascent at pi, find the maximum distance we can go + in that direction. + + Parameters + ---------- + k : nx.DiGraph + The 1-arborescence which has the minimum rate of increase + in the direction of ascent + + d : dict + The direction of ascent + + Returns + ------- + float + The distance we can travel in direction `d` + """ + min_epsilon = math.inf + for e_u, e_v, e_w in G.edges(data=weight): + if (e_u, e_v) in k.edges: + continue + # Now, I have found a condition which MUST be true for the edges to + # be a valid substitute. The edge in the graph which is the + # substitute is the one with the same terminal end. This can be + # checked rather simply. + # + # Find the edge within k which is the substitute. Because k is a + # 1-arborescence, we know that there is only one such edge + # leading into every vertex. + if len(k.in_edges(e_v, data=weight)) > 1: + raise Exception + sub_u, sub_v, sub_w = next(k.in_edges(e_v, data=weight).__iter__()) + k.add_edge(e_u, e_v, **{weight: e_w}) + k.remove_edge(sub_u, sub_v) + if ( + max(d for n, d in k.in_degree()) <= 1 + and len(G) == k.number_of_edges() + and nx.is_weakly_connected(k) + ): + # Ascent method calculation + if d[sub_u] == d[e_u] or sub_w == e_w: + # Revert to the original graph + k.remove_edge(e_u, e_v) + k.add_edge(sub_u, sub_v, **{weight: sub_w}) + continue + epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u]) + if 0 < epsilon < min_epsilon: + min_epsilon = epsilon + # Revert to the original graph + k.remove_edge(e_u, e_v) + k.add_edge(sub_u, sub_v, **{weight: sub_w}) + + return min_epsilon + + # I have to know that the elements in pi correspond to the correct elements + # in the direction of ascent, even if the node labels are not integers. + # Thus, I will use dictionaries to make that mapping. + pi_dict = {} + for n in G: + pi_dict[n] = 0 + del n + original_edge_weights = {} + for u, v, d in G.edges(data=True): + original_edge_weights[(u, v)] = d[weight] + dir_ascent, k_d = direction_of_ascent() + while dir_ascent is not None: + max_distance = find_epsilon(k_d, dir_ascent) + for n, v in dir_ascent.items(): + pi_dict[n] += max_distance * v + for u, v, d in G.edges(data=True): + d[weight] = original_edge_weights[(u, v)] + pi_dict[u] + dir_ascent, k_d = direction_of_ascent() + # k_d is no longer an individual 1-arborescence but rather a set of + # minimal 1-arborescences at the maximum point of the polytope and should + # be reflected as such + k_max = k_d + + # Search for a cycle within k_max.
If a cycle exists, return it as the + # solution + for k in k_max: + if len([n for n in k if k.degree(n) == 2]) == G.order(): + # Tour found + return k.size(weight), k + + # Write the original edge weights back to G and every member of k_max at + # the maximum point. Also average the number of times that each edge + # appears in the set of minimal 1-arborescences. + x_star = {} + size_k_max = len(k_max) + for u, v, d in G.edges(data=True): + edge_count = 0 + d[weight] = original_edge_weights[(u, v)] + for k in k_max: + if (u, v) in k.edges(): + edge_count += 1 + k[u][v][weight] = original_edge_weights[(u, v)] + x_star[(u, v)] = edge_count / size_k_max + # Now symmetrize the edges in x_star and scale them according to (5) in + # reference [1] + z_star = {} + scale_factor = (G.order() - 1) / G.order() + for u, v in x_star: + frequency = x_star[(u, v)] + x_star[(v, u)] + if frequency > 0: + z_star[(u, v)] = scale_factor * frequency + del x_star + # Return the optimal weight and the z dict + return next(k_max.__iter__()).size(weight), z_star + + +@nx._dispatch +def spanning_tree_distribution(G, z): + """ + Find the Asadpour exponential distribution of spanning trees. + + Solves the Maximum Entropy Convex Program in the Asadpour algorithm [1]_ + using the approach in section 7 to build an exponential distribution of + undirected spanning trees. + + This algorithm ensures that the probability of any edge in a spanning + tree is proportional to the sum of the probabilities of the trees + containing that edge over the sum of the probabilities of all spanning + trees of the graph. + + Parameters + ---------- + G : nx.MultiGraph + The undirected support graph for the Held-Karp relaxation + + z : dict + The output of `held_karp_ascent()`, a scaled version of the Held-Karp + solution. + + Returns + ------- + gamma : dict + The probability distribution which approximately preserves the marginal + probabilities of `z`. + """ + from math import exp + from math import log as ln + + def q(e): + """ + The value of q(e), as described in the Asadpour paper, is "the + probability that edge e will be included in a spanning tree T that is + chosen with probability proportional to exp(gamma(T))" which + basically means that it is the total probability of the edge appearing + across the whole distribution. + + Parameters + ---------- + e : tuple + The `(u, v)` tuple describing the edge we are interested in + + Returns + ------- + float + The probability that a spanning tree chosen according to the + current values of gamma will include edge `e`. + """ + # Create the Laplacian matrices + for u, v, d in G.edges(data=True): + d[lambda_key] = exp(gamma[(u, v)]) + G_Kirchhoff = nx.total_spanning_tree_weight(G, lambda_key) + G_e = nx.contracted_edge(G, e, self_loops=False) + G_e_Kirchhoff = nx.total_spanning_tree_weight(G_e, lambda_key) + + # Multiply by the weight of the contracted edge since it is not included + # in the total weight of the contracted graph.
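+ # nx.total_spanning_tree_weight applies the weighted matrix-tree + # (Kirchhoff) theorem: spanning trees of the contraction G/e correspond + # exactly to spanning trees of G containing e, but each one is missing + # e's own lambda factor, hence the multiplication below.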
+        return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff
+
+    # initialize gamma to the zero dict
+    gamma = {}
+    for u, v, _ in G.edges:
+        gamma[(u, v)] = 0
+
+    # set epsilon
+    EPSILON = 0.2
+
+    # pick an edge attribute name that is unlikely to be in the graph
+    lambda_key = "spanning_tree_distribution's secret attribute name for lambda"
+
+    while True:
+        # We need to know that no value of q_e is greater than
+        # (1 + epsilon) * z_e. However, changing one gamma value can increase
+        # the value of a different q_e, so the for loop has to complete
+        # without changing anything for the condition to be met
+        in_range_count = 0
+        # Search for an edge with q_e > (1 + epsilon) * z_e
+        for u, v in gamma:
+            e = (u, v)
+            q_e = q(e)
+            z_e = z[e]
+            if q_e > (1 + EPSILON) * z_e:
+                delta = ln(
+                    (q_e * (1 - (1 + EPSILON / 2) * z_e))
+                    / ((1 - q_e) * (1 + EPSILON / 2) * z_e)
+                )
+                gamma[e] -= delta
+                # Check that delta had the desired effect
+                new_q_e = q(e)
+                desired_q_e = (1 + EPSILON / 2) * z_e
+                if round(new_q_e, 8) != round(desired_q_e, 8):
+                    raise nx.NetworkXError(
+                        f"Unable to modify probability for edge ({u}, {v})"
+                    )
+            else:
+                in_range_count += 1
+        # Check if the for loop terminated without changing any gamma
+        if in_range_count == len(gamma):
+            break
+
+    # Remove the new edge attributes
+    for _, _, d in G.edges(data=True):
+        if lambda_key in d:
+            del d[lambda_key]
+
+    return gamma
+
+
+@nx._dispatch(edge_attrs="weight")
+def greedy_tsp(G, weight="weight", source=None):
+    """Return a low cost cycle starting at `source` and its cost.
+
+    This approximates a solution to the traveling salesman problem.
+    It finds a cycle through all the nodes that a salesman can follow
+    to visit every node while keeping the total distance low.
+    It uses a simple greedy algorithm.
+    In essence, this function returns a large cycle given a source point
+    for which the total cost of the cycle is minimized.
+
+    Parameters
+    ----------
+    G : Graph
+        The Graph should be a complete weighted undirected graph.
+        The distance between all pairs of nodes should be included.
+
+    weight : string, optional (default="weight")
+        Edge data key corresponding to the edge weight.
+        If any edge does not have this attribute the weight is set to 1.
+
+    source : node, optional (default: first node in list(G))
+        Starting node. If None, defaults to ``next(iter(G))``
+
+    Returns
+    -------
+    cycle : list of nodes
+        Returns the cycle (list of nodes) that a salesman
+        can follow to minimize total weight of the trip.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is not complete, the algorithm raises an exception.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from({
+    ...     ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3),
+    ...     ("B", "C", 12), ("B", "D", 16), ("C", "A", 13), ("C", "B", 12),
+    ...     ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2)
+    ... })
+    >>> cycle = approx.greedy_tsp(G, source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+
+    Notes
+    -----
+    This implementation of a greedy algorithm is based on the following:
+
+    - The algorithm adds a node to the solution at every iteration.
+    - The algorithm selects a node not already in the cycle whose connection
+      to the previous node adds the least cost to the cycle.
+
+    A greedy algorithm does not always give the best solution.
+ However, it can construct a first feasible solution which can + be passed as a parameter to an iterative improvement algorithm such + as Simulated Annealing, or Threshold Accepting. + + Time complexity: It has a running time $O(|V|^2)$ + """ + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if source is None: + source = nx.utils.arbitrary_element(G) + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + nodeset = set(G) + nodeset.remove(source) + cycle = [source] + next_node = source + while nodeset: + nbrdict = G[next_node] + next_node = min(nodeset, key=lambda n: nbrdict[n].get(weight, 1)) + cycle.append(next_node) + nodeset.remove(next_node) + cycle.append(cycle[0]) + return cycle + + +@py_random_state(9) +@nx._dispatch(edge_attrs="weight") +def simulated_annealing_tsp( + G, + init_cycle, + weight="weight", + source=None, + temp=100, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.01, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses simulated annealing to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, simulated + annealing perturbs that solution, occasionally accepting changes that make + the solution worse to escape from a locally optimal solution. The chance + of accepting such changes decreases over the iterations to encourage + an optimal result. In summary, the function returns a cycle starting + at `source` for which the total cost is minimized. It also returns the cost. + + The chance of accepting a proposed change is related to a parameter called + the temperature (annealing has a physical analogue of steel hardening + as it cools). As the temperature is reduced, the chance of moves that + increase cost goes down. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list of all nodes or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + temp : int, optional (default=100) + The algorithm's temperature parameter. It represents the initial + value of temperature + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. 
The function called is :func:`move_one_node`.
+          For example if we apply 1-0 exchange in the solution
+          ``A = [3, 2, 1, 4, 3]``
+          we can transfer the fourth element to the second position:
+          ``A' = [3, 4, 2, 1, 3]``
+
+        You may provide your own functions to enact a move from
+        one solution to a neighbor solution. The function must take
+        the solution as input along with a `seed` input to control
+        random number generation (see the `seed` input here).
+        Your function should maintain the solution as a cycle with
+        equal first and last node and all others appearing once.
+        Your function should return the new solution.
+
+    max_iterations : int, optional (default=10)
+        Declared done when this number of consecutive iterations of
+        the outer loop occurs without any change in the best cost solution.
+
+    N_inner : int, optional (default=100)
+        The number of iterations of the inner loop.
+
+    alpha : float between (0, 1), optional (default=0.01)
+        Percentage of temperature decrease in each iteration
+        of outer loop
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    cycle : list of nodes
+        Returns the cycle (list of nodes) that a salesman
+        can follow to minimize total weight of the trip.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is not complete the algorithm raises an exception.
+
+    Examples
+    --------
+    >>> from networkx.algorithms import approximation as approx
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from({
+    ...     ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3),
+    ...     ("B", "C", 12), ("B", "D", 16), ("C", "A", 13), ("C", "B", 12),
+    ...     ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2)
+    ... })
+    >>> cycle = approx.simulated_annealing_tsp(G, "greedy", source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+    >>> incycle = ["D", "B", "A", "C", "D"]
+    >>> cycle = approx.simulated_annealing_tsp(G, incycle, source="D")
+    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
+    >>> cycle
+    ['D', 'C', 'B', 'A', 'D']
+    >>> cost
+    31
+
+    Notes
+    -----
+    Simulated Annealing is a metaheuristic local search algorithm.
+    The main characteristic of this algorithm is that it accepts
+    even solutions which lead to the increase of the cost in order
+    to escape from low quality local optimal solutions.
+
+    This algorithm needs an initial solution. If not provided, it is
+    constructed by a simple greedy algorithm. At every iteration, the
+    algorithm thoughtfully selects a neighbor solution.
+    Consider $c(x)$, the cost of the current solution, and $c(x')$, the
+    cost of a neighbor solution.
+    If $c(x') - c(x) <= 0$ then the neighbor solution becomes the current
+    solution for the next iteration. Otherwise, the algorithm accepts
+    the neighbor solution with probability $p = \exp(-(c(x') - c(x)) / temp)$;
+    if that move is rejected, the current solution is retained.
+
+    `temp` is a parameter of the algorithm and represents temperature.
+
+    Time complexity:
+    For $N_i$ iterations of the inner loop and $N_o$ iterations of the
+    outer loop, this algorithm has running time $O(N_i * N_o * |V|)$.
+
+    For more information and how the algorithm is inspired see:
+    http://en.wikipedia.org/wiki/Simulated_annealing
+    """
+    if move == "1-1":
+        move = swap_two_nodes
+    elif move == "1-0":
+        move = move_one_node
+    if init_cycle == "greedy":
+        # Construct an initial solution using a greedy algorithm.
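+        # The greedy cycle is only a seed for the annealing loop below.
+        # For a two-node graph greedy_tsp already returns the complete
+        # cycle, so it can be returned unchanged.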
+ cycle = greedy_tsp(G, weight=weight, source=source) + if G.number_of_nodes() == 2: + return cycle + + else: + cycle = list(init_cycle) + if source is None: + source = cycle[0] + elif source != cycle[0]: + raise nx.NetworkXError("source must be first node in init_cycle") + if cycle[0] != cycle[-1]: + raise nx.NetworkXError("init_cycle must be a cycle. (return to start)") + + if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G): + raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.") + + # Check that G is a complete graph + N = len(G) - 1 + # This check ignores selfloops which is what we want here. + if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()): + raise nx.NetworkXError("G must be a complete graph.") + + if G.number_of_nodes() == 2: + neighbor = next(G.neighbors(source)) + return [source, neighbor, source] + + # Find the cost of initial solution + cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle)) + + count = 0 + best_cycle = cycle.copy() + best_cost = cost + while count <= max_iterations and temp > 0: + count += 1 + for i in range(N_inner): + adj_sol = move(cycle, seed) + adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol)) + delta = adj_cost - cost + if delta <= 0: + # Set current solution the adjacent solution. + cycle = adj_sol + cost = adj_cost + + if cost < best_cost: + count = 0 + best_cycle = cycle.copy() + best_cost = cost + else: + # Accept even a worse solution with probability p. + p = math.exp(-delta / temp) + if p >= seed.random(): + cycle = adj_sol + cost = adj_cost + temp -= temp * alpha + + return best_cycle + + +@py_random_state(9) +@nx._dispatch(edge_attrs="weight") +def threshold_accepting_tsp( + G, + init_cycle, + weight="weight", + source=None, + threshold=1, + move="1-1", + max_iterations=10, + N_inner=100, + alpha=0.1, + seed=None, +): + """Returns an approximate solution to the traveling salesman problem. + + This function uses threshold accepting methods to approximate the minimal cost + cycle through the nodes. Starting from a suboptimal solution, threshold + accepting methods perturb that solution, accepting any changes that make + the solution no worse than increasing by a threshold amount. Improvements + in cost are accepted, but so are changes leading to small increases in cost. + This allows the solution to leave suboptimal local minima in solution space. + The threshold is decreased slowly as iterations proceed helping to ensure + an optimum. In summary, the function returns a cycle starting at `source` + for which the total cost is minimized. + + Parameters + ---------- + G : Graph + `G` should be a complete weighted graph. + The distance between all pairs of nodes should be included. + + init_cycle : list or "greedy" + The initial solution (a cycle through all nodes returning to the start). + This argument has no default to make you think about it. + If "greedy", use `greedy_tsp(G, weight)`. + Other common starting cycles are `list(G) + [next(iter(G))]` or the final + result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`. + + weight : string, optional (default="weight") + Edge data key corresponding to the edge weight. + If any edge does not have this attribute the weight is set to 1. + + source : node, optional (default: first node in list(G)) + Starting node. If None, defaults to ``next(iter(G))`` + + threshold : int, optional (default=1) + The algorithm's threshold parameter. 
It represents the initial + threshold's value + + move : "1-1" or "1-0" or function, optional (default="1-1") + Indicator of what move to use when finding new trial solutions. + Strings indicate two special built-in moves: + + - "1-1": 1-1 exchange which transposes the position + of two elements of the current solution. + The function called is :func:`swap_two_nodes`. + For example if we apply 1-1 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can get the following by the transposition of 1 and 4 elements: + ``A' = [3, 2, 4, 1, 3]`` + - "1-0": 1-0 exchange which moves an node in the solution + to a new position. + The function called is :func:`move_one_node`. + For example if we apply 1-0 exchange in the solution + ``A = [3, 2, 1, 4, 3]`` + we can transfer the fourth element to the second position: + ``A' = [3, 4, 2, 1, 3]`` + + You may provide your own functions to enact a move from + one solution to a neighbor solution. The function must take + the solution as input along with a `seed` input to control + random number generation (see the `seed` input here). + Your function should maintain the solution as a cycle with + equal first and last node and all others appearing once. + Your function should return the new solution. + + max_iterations : int, optional (default=10) + Declared done when this number of consecutive iterations of + the outer loop occurs without any change in the best cost solution. + + N_inner : int, optional (default=100) + The number of iterations of the inner loop. + + alpha : float between (0, 1), optional (default=0.1) + Percentage of threshold decrease when there is at + least one acceptance of a neighbor solution. + If no inner loop moves are accepted the threshold remains unchanged. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + cycle : list of nodes + Returns the cycle (list of nodes) that a salesman + can follow to minimize total weight of the trip. + + Raises + ------ + NetworkXError + If `G` is not complete the algorithm raises an exception. + + Examples + -------- + >>> from networkx.algorithms import approximation as approx + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from({ + ... ("A", "B", 3), ("A", "C", 17), ("A", "D", 14), ("B", "A", 3), + ... ("B", "C", 12), ("B", "D", 16), ("C", "A", 13),("C", "B", 12), + ... ("C", "D", 4), ("D", "A", 14), ("D", "B", 15), ("D", "C", 2) + ... }) + >>> cycle = approx.threshold_accepting_tsp(G, "greedy", source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + >>> incycle = ["D", "B", "A", "C", "D"] + >>> cycle = approx.threshold_accepting_tsp(G, incycle, source="D") + >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle)) + >>> cycle + ['D', 'C', 'B', 'A', 'D'] + >>> cost + 31 + + Notes + ----- + Threshold Accepting is a metaheuristic local search algorithm. + The main characteristic of this algorithm is that it accepts + even solutions which lead to the increase of the cost in order + to escape from low quality local optimal solutions. + + This algorithm needs an initial solution. This solution can be + constructed by a simple greedy algorithm. At every iteration, it + selects thoughtfully a neighbor solution. + Consider $c(x)$ cost of current solution and $c(x')$ cost of + neighbor solution. 
If $c(x') - c(x) <= threshold$ then the neighbor solution becomes the current
+    solution for the next iteration.
+
+    In comparison to the Simulated Annealing algorithm, the Threshold
+    Accepting algorithm does not accept very low quality solutions
+    (due to the presence of the threshold value). In the case of
+    Simulated Annealing, even a very low quality solution can
+    be accepted with probability $p$.
+
+    Time complexity:
+    It has a running time $O(m * n * |V|)$ where $m$ and $n$ are the number
+    of times the outer and inner loop run respectively.
+
+    For more information and how the algorithm is inspired see:
+    https://doi.org/10.1016/0021-9991(90)90201-B
+
+    See Also
+    --------
+    simulated_annealing_tsp
+
+    """
+    if move == "1-1":
+        move = swap_two_nodes
+    elif move == "1-0":
+        move = move_one_node
+    if init_cycle == "greedy":
+        # Construct an initial solution using a greedy algorithm.
+        cycle = greedy_tsp(G, weight=weight, source=source)
+        if G.number_of_nodes() == 2:
+            return cycle
+
+    else:
+        cycle = list(init_cycle)
+        if source is None:
+            source = cycle[0]
+        elif source != cycle[0]:
+            raise nx.NetworkXError("source must be first node in init_cycle")
+        if cycle[0] != cycle[-1]:
+            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")
+
+        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
+            raise nx.NetworkXError("init_cycle is not all and only nodes.")
+
+    # Check that G is a complete graph
+    N = len(G) - 1
+    # This check ignores selfloops which is what we want here.
+    if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
+        raise nx.NetworkXError("G must be a complete graph.")
+
+    if G.number_of_nodes() == 2:
+        neighbor = list(G.neighbors(source))[0]
+        return [source, neighbor, source]
+
+    # Find the cost of initial solution
+    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))
+
+    count = 0
+    best_cycle = cycle.copy()
+    best_cost = cost
+    while count <= max_iterations:
+        count += 1
+        accepted = False
+        for i in range(N_inner):
+            adj_sol = move(cycle, seed)
+            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
+            delta = adj_cost - cost
+            if delta <= threshold:
+                accepted = True
+
+                # Set the current solution to the adjacent solution.
+                cycle = adj_sol
+                cost = adj_cost
+
+                if cost < best_cost:
+                    count = 0
+                    best_cycle = cycle.copy()
+                    best_cost = cost
+        if accepted:
+            threshold -= threshold * alpha
+
+    return best_cycle
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/treewidth.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/treewidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce673b6eda43e5f1fd0ab862f96e7cf8e08b62f3
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/treewidth.py
@@ -0,0 +1,252 @@
+"""Functions for computing treewidth decomposition.
+
+The treewidth of an undirected graph is a number associated with the graph.
+It can be defined as the size of the largest vertex set (bag) in a tree
+decomposition of the graph minus one.
+
+`Wikipedia: Treewidth <https://en.wikipedia.org/wiki/Treewidth>`_
+
+The notions of treewidth and tree decomposition have gained their
+attractiveness partly because many graph and network problems that are
+intractable (e.g., NP-hard) on arbitrary graphs become efficiently
+solvable (e.g., with a linear time algorithm) when the treewidth of the
+input graphs is bounded by a constant [1]_ [2]_.
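+
+For example (a minimal sketch), nontrivial trees have treewidth 1, cycle
+graphs have treewidth 2, and the complete graph on n nodes has treewidth
+n - 1; the heuristics below return an upper bound:
+
+>>> import networkx as nx
+>>> from networkx.algorithms import approximation as approx
+>>> treewidth, decomposition = approx.treewidth_min_degree(nx.cycle_graph(5))
+>>> treewidth
+2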
+ +There are two different functions for computing a tree decomposition: +:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`. + +.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth + computations I.Upper bounds". Inf. Comput. 208, 3 (March 2010),259-275. + http://dx.doi.org/10.1016/j.ic.2009.03.008 + +.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information + and Computing Sciences, Utrecht University. + Technical Report UU-CS-2005-018. + http://www.cs.uu.nl + +.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*. + https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf + +""" + +import itertools +import sys +from heapq import heapify, heappop, heappush + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def treewidth_min_degree(G): + """Returns a treewidth decomposition using the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree, i.e., first + the node with the lowest degree is chosen, then the graph is updated + and the corresponding node is removed. Next, a new node with the lowest + degree is chosen, and so on. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + deg_heuristic = MinDegreeHeuristic(G) + return treewidth_decomp(G, lambda graph: deg_heuristic.best_node(graph)) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def treewidth_min_fill_in(G): + """Returns a treewidth decomposition using the Minimum Fill-in heuristic. + + The heuristic chooses a node from the graph, where the number of edges + added turning the neighbourhood of the chosen node into clique is as + small as possible. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. + """ + return treewidth_decomp(G, min_fill_in_heuristic) + + +class MinDegreeHeuristic: + """Implements the Minimum Degree heuristic. + + The heuristic chooses the nodes according to their degree + (number of neighbours), i.e., first the node with the lowest degree is + chosen, then the graph is updated and the corresponding node is + removed. Next, a new node with the lowest degree is chosen, and so on. 
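+
+    Entries in the internal degree heap can become stale as nodes are
+    eliminated from the graph; ``best_node`` simply skips any entry whose
+    recorded degree no longer matches the current graph.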
+ """ + + def __init__(self, graph): + self._graph = graph + + # nodes that have to be updated in the heap before each iteration + self._update_nodes = [] + + self._degreeq = [] # a heapq with 3-tuples (degree,unique_id,node) + self.count = itertools.count() + + # build heap with initial degrees + for n in graph: + self._degreeq.append((len(graph[n]), next(self.count), n)) + heapify(self._degreeq) + + def best_node(self, graph): + # update nodes in self._update_nodes + for n in self._update_nodes: + # insert changed degrees into degreeq + heappush(self._degreeq, (len(graph[n]), next(self.count), n)) + + # get the next valid (minimum degree) node + while self._degreeq: + (min_degree, _, elim_node) = heappop(self._degreeq) + if elim_node not in graph or len(graph[elim_node]) != min_degree: + # outdated entry in degreeq + continue + elif min_degree == len(graph) - 1: + # fully connected: abort condition + return None + + # remember to update nodes in the heap before getting the next node + self._update_nodes = graph[elim_node] + return elim_node + + # the heap is empty: abort + return None + + +def min_fill_in_heuristic(graph): + """Implements the Minimum Degree heuristic. + + Returns the node from the graph, where the number of edges added when + turning the neighbourhood of the chosen node into clique is as small as + possible. This algorithm chooses the nodes using the Minimum Fill-In + heuristic. The running time of the algorithm is :math:`O(V^3)` and it uses + additional constant memory.""" + + if len(graph) == 0: + return None + + min_fill_in_node = None + + min_fill_in = sys.maxsize + + # sort nodes by degree + nodes_by_degree = sorted(graph, key=lambda x: len(graph[x])) + min_degree = len(graph[nodes_by_degree[0]]) + + # abort condition (handle complete graph) + if min_degree == len(graph) - 1: + return None + + for node in nodes_by_degree: + num_fill_in = 0 + nbrs = graph[node] + for nbr in nbrs: + # count how many nodes in nbrs current nbr is not connected to + # subtract 1 for the node itself + num_fill_in += len(nbrs - graph[nbr]) - 1 + if num_fill_in >= 2 * min_fill_in: + break + + num_fill_in /= 2 # divide by 2 because of double counting + + if num_fill_in < min_fill_in: # update min-fill-in node + if num_fill_in == 0: + return node + min_fill_in = num_fill_in + min_fill_in_node = node + + return min_fill_in_node + + +@nx._dispatch +def treewidth_decomp(G, heuristic=min_fill_in_heuristic): + """Returns a treewidth decomposition using the passed heuristic. + + Parameters + ---------- + G : NetworkX graph + heuristic : heuristic function + + Returns + ------- + Treewidth decomposition : (int, Graph) tuple + 2-tuple with treewidth and the corresponding decomposed tree. 
+ """ + + # make dict-of-sets structure + graph = {n: set(G[n]) - {n} for n in G} + + # stack containing nodes and neighbors in the order from the heuristic + node_stack = [] + + # get first node from heuristic + elim_node = heuristic(graph) + while elim_node is not None: + # connect all neighbours with each other + nbrs = graph[elim_node] + for u, v in itertools.permutations(nbrs, 2): + if v not in graph[u]: + graph[u].add(v) + + # push node and its current neighbors on stack + node_stack.append((elim_node, nbrs)) + + # remove node from graph + for u in graph[elim_node]: + graph[u].remove(elim_node) + + del graph[elim_node] + elim_node = heuristic(graph) + + # the abort condition is met; put all remaining nodes into one bag + decomp = nx.Graph() + first_bag = frozenset(graph.keys()) + decomp.add_node(first_bag) + + treewidth = len(first_bag) - 1 + + while node_stack: + # get node and its neighbors from the stack + (curr_node, nbrs) = node_stack.pop() + + # find a bag all neighbors are in + old_bag = None + for bag in decomp.nodes: + if nbrs <= bag: + old_bag = bag + break + + if old_bag is None: + # no old_bag was found: just connect to the first_bag + old_bag = first_bag + + # create new node for decomposition + nbrs.add(curr_node) + new_bag = frozenset(nbrs) + + # update treewidth + treewidth = max(treewidth, len(new_bag) - 1) + + # add edge to decomposition (implicitly also adds the new node) + decomp.add_edge(old_bag, new_bag) + + return treewidth, decomp diff --git a/MLPY/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py b/MLPY/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd7a123d02009e9fb825c512289b762614494f6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py @@ -0,0 +1,82 @@ +"""Functions for computing an approximate minimum weight vertex cover. + +A |vertex cover|_ is a subset of nodes such that each edge in the graph +is incident to at least one node in the subset. + +.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover +.. |vertex cover| replace:: *vertex cover* + +""" +import networkx as nx + +__all__ = ["min_weighted_vertex_cover"] + + +@nx._dispatch(node_attrs="weight") +def min_weighted_vertex_cover(G, weight=None): + r"""Returns an approximate minimum weighted vertex cover. + + The set of nodes returned by this function is guaranteed to be a + vertex cover, and the total weight of the set is guaranteed to be at + most twice the total weight of the minimum weight vertex cover. In + other words, + + .. math:: + + w(S) \leq 2 * w(S^*), + + where $S$ is the vertex cover returned by this function, + $S^*$ is the vertex cover of minimum weight out of all vertex + covers of the graph, and $w$ is the function that computes the + sum of the weights of each node in that given set. + + Parameters + ---------- + G : NetworkX graph + + weight : string, optional (default = None) + If None, every node has weight 1. If a string, use this node + attribute as the node weight. A node without this attribute is + assumed to have weight 1. + + Returns + ------- + min_weighted_cover : set + Returns a set of nodes whose weight sum is no more than twice + the weight sum of the minimum weight vertex cover. + + Notes + ----- + For a directed graph, a vertex cover has the same definition: a set + of nodes such that each edge in the graph is incident to at least + one node in the set. 
Whether the node is the head or tail of the + directed edge is ignored. + + This is the local-ratio algorithm for computing an approximate + vertex cover. The algorithm greedily reduces the costs over edges, + iteratively building a cover. The worst-case runtime of this + implementation is $O(m \log n)$, where $n$ is the number + of nodes and $m$ the number of edges in the graph. + + References + ---------- + .. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for + approximating the weighted vertex cover problem." + *Annals of Discrete Mathematics*, 25, 27–46 + + + """ + cost = dict(G.nodes(data=weight, default=1)) + # While there are uncovered edges, choose an uncovered and update + # the cost of the remaining edges. + cover = set() + for u, v in G.edges(): + if u in cover or v in cover: + continue + if cost[u] <= cost[v]: + cover.add(u) + cost[v] -= cost[u] + else: + cover.add(v) + cost[u] -= cost[v] + return cover diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4d9888609cbc43d4ba2121fcd0feda0985d1aebd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.assortativity.connectivity import * +from networkx.algorithms.assortativity.correlation import * +from networkx.algorithms.assortativity.mixing import * +from networkx.algorithms.assortativity.neighbor_degree import * +from networkx.algorithms.assortativity.pairs import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c7a3d1fcd270c01d5a91f75b5144f895ec4a990 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95b1aad3290d6bd4aec3d52c767eed048d118187 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53a3aad79ddbf5f2b6aeae97ce0d8c4f4311cb1c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/correlation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f157e22513f344558978523381ee8c20bf9180dc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/mixing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c48329ea8aa7d4db0d947705bdf88ccaf6e604 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..877a64c3890db91cf1beb55c6cecbdcdd9a03443 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..bd433ded595711ed526b99f598a9663223bf2555 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py @@ -0,0 +1,122 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["average_degree_connectivity"] + + +@nx._dispatch(edge_attrs="weight") +def average_degree_connectivity( + G, source="in+out", target="in+out", nodes=None, weight=None +): + r"""Compute the average degree connectivity of graph. + + The average degree connectivity is the average nearest neighbor degree of + nodes with degree k. For weighted graphs, an analogous measure can + be computed using the weighted average neighbors degree defined in + [1]_, for a node `i`, as + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, + `w_{ij}` is the weight of the edge that links `i` and `j`, + and `N(i)` are the neighbors of node `i`. + + Parameters + ---------- + G : NetworkX graph + + source : "in"|"out"|"in+out" (default:"in+out") + Directed graphs only. Use "in"- or "out"-degree for source node. + + target : "in"|"out"|"in+out" (default:"in+out" + Directed graphs only. Use "in"- or "out"-degree for target node. + + nodes : list or iterable (optional) + Compute neighbor connectivity for these nodes. The default is all + nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + Returns + ------- + d : dict + A dictionary keyed by degree k with the value of average connectivity. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', + 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[1, 2]["weight"] = 3 + >>> nx.average_degree_connectivity(G) + {1: 2.0, 2: 1.5} + >>> nx.average_degree_connectivity(G, weight="weight") + {1: 2.0, 2: 1.75} + + See Also + -------- + average_neighbor_degree + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + # First, determine the type of neighbors and the type of degree to use. 
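+    # For undirected graphs only the default "in+out" is accepted, since
+    # separate in- and out-degrees are not defined there; directed graphs
+    # may mix the two freely (e.g. source="in", target="out").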
+ if G.is_directed(): + if source not in ("in", "out", "in+out"): + raise nx.NetworkXError('source must be one of "in", "out", or "in+out"') + if target not in ("in", "out", "in+out"): + raise nx.NetworkXError('target must be one of "in", "out", or "in+out"') + direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree} + neighbor_funcs = { + "out": G.successors, + "in": G.predecessors, + "in+out": G.neighbors, + } + source_degree = direction[source] + target_degree = direction[target] + neighbors = neighbor_funcs[source] + # `reverse` indicates whether to look at the in-edge when + # computing the weight of an edge. + reverse = source == "in" + else: + if source != "in+out" or target != "in+out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = G.degree + target_degree = G.degree + neighbors = G.neighbors + reverse = False + dsum = defaultdict(int) + dnorm = defaultdict(int) + # Check if `source_nodes` is actually a single node in the graph. + source_nodes = source_degree(nodes) + if nodes in G: + source_nodes = [(nodes, source_degree(nodes))] + for n, k in source_nodes: + nbrdeg = target_degree(neighbors(n)) + if weight is None: + s = sum(d for n, d in nbrdeg) + else: # weight nbr degree by weight of (n,nbr) edge + if reverse: + s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg) + else: + s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg) + dnorm[k] += source_degree(n, weight=weight) + dsum[k] += s + + # normalize + return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/correlation.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..35ea78d6d523a38937c8fcfa6e8f0951f8194a23 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/correlation.py @@ -0,0 +1,302 @@ +"""Node assortativity coefficients and correlation measures. +""" +import networkx as nx +from networkx.algorithms.assortativity.mixing import ( + attribute_mixing_matrix, + degree_mixing_matrix, +) +from networkx.algorithms.assortativity.pairs import node_degree_xy + +__all__ = [ + "degree_pearson_correlation_coefficient", + "degree_assortativity_coefficient", + "attribute_assortativity_coefficient", + "numeric_assortativity_coefficient", +] + + +@nx._dispatch(edge_attrs="weight") +def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute degree assortativity only for nodes in container. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. 
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_assortativity_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + See Also + -------- + attribute_assortativity_coefficient + numeric_assortativity_coefficient + degree_mixing_dict + degree_mixing_matrix + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , where e is the joint + probability distribution (mixing matrix) of the degrees. If G is + directed than the matrix e is the joint probability of the + user-specified degree type for the source and target. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + if nodes is None: + nodes = G.nodes + + degrees = None + + if G.is_directed(): + indeg = ( + {d for _, d in G.in_degree(nodes, weight=weight)} + if "in" in (x, y) + else set() + ) + outdeg = ( + {d for _, d in G.out_degree(nodes, weight=weight)} + if "out" in (x, y) + else set() + ) + degrees = set.union(indeg, outdeg) + else: + degrees = {d for _, d in G.degree(nodes, weight=weight)} + + mapping = {d: i for i, d, in enumerate(degrees)} + M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping) + + return _numeric_ac(M, mapping=mapping) + + +@nx._dispatch(edge_attrs="weight") +def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None): + """Compute degree assortativity of graph. + + Assortativity measures the similarity of connections + in the graph with respect to the node degree. + + This is the same as degree_assortativity_coefficient but uses the + potentially faster scipy.stats.pearsonr function. + + Parameters + ---------- + G : NetworkX graph + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + nodes: list or iterable (optional) + Compute pearson correlation of degrees only for specified nodes. + The default is all nodes. + + Returns + ------- + r : float + Assortativity of graph by degree. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> r = nx.degree_pearson_correlation_coefficient(G) + >>> print(f"{r:3.1f}") + -0.5 + + Notes + ----- + This calls scipy.stats.pearsonr. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M. + Edge direction and the structure of networks, PNAS 107, 10815-20 (2010). + """ + import scipy as sp + + xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + x, y = zip(*xy) + return sp.stats.pearsonr(x, y)[0] + + +@nx._dispatch(node_attrs="attribute") +def attribute_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key + + nodes: list or iterable (optional) + Compute attribute assortativity for nodes in container. + The default is all nodes. 
+ + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.attribute_assortativity_coefficient(G, "color")) + 1.0 + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)), + where M is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + M = attribute_mixing_matrix(G, attribute, nodes) + return attribute_ac(M) + + +@nx._dispatch(node_attrs="attribute") +def numeric_assortativity_coefficient(G, attribute, nodes=None): + """Compute assortativity for numerical node attributes. + + Assortativity measures the similarity of connections + in the graph with respect to the given numeric attribute. + + Parameters + ---------- + G : NetworkX graph + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Compute numeric assortativity only for attributes of nodes in + container. The default is all nodes. + + Returns + ------- + r: float + Assortativity of graph for given attribute + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], size=2) + >>> G.add_nodes_from([2, 3], size=3) + >>> G.add_edges_from([(0, 1), (2, 3)]) + >>> print(nx.numeric_assortativity_coefficient(G, "size")) + 1.0 + + Notes + ----- + This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation + coefficient of the specified (scalar valued) attribute across edges. + + References + ---------- + .. [1] M. E. J. Newman, Mixing patterns in networks + Physical Review E, 67 026126, 2003 + """ + if nodes is None: + nodes = G.nodes + vals = {G.nodes[n][attribute] for n in nodes} + mapping = {d: i for i, d, in enumerate(vals)} + M = attribute_mixing_matrix(G, attribute, nodes, mapping) + return _numeric_ac(M, mapping) + + +def attribute_ac(M): + """Compute assortativity for attribute matrix M. + + Parameters + ---------- + M : numpy.ndarray + 2D ndarray representing the attribute mixing matrix. + + Notes + ----- + This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)), + where e is the joint probability distribution (mixing matrix) + of the specified attribute. + + References + ---------- + .. [1] M. E. J. 
Newman, Mixing patterns in networks, + Physical Review E, 67 026126, 2003 + """ + if M.sum() != 1.0: + M = M / M.sum() + s = (M @ M).sum() + t = M.trace() + r = (t - s) / (1 - s) + return r + + +def _numeric_ac(M, mapping): + # M is a 2D numpy array + # numeric assortativity coefficient, pearsonr + import numpy as np + + if M.sum() != 1.0: + M = M / M.sum() + x = np.array(list(mapping.keys())) + y = x # x and y have the same support + idx = list(mapping.values()) + a = M.sum(axis=0) + b = M.sum(axis=1) + vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2 + xy = np.outer(x, y) + ab = np.outer(a[idx], b[idx]) + return (xy * (M - ab)).sum() / np.sqrt(vara * varb) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/mixing.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..66b98797e69f473507347ea412cea01501426889 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/mixing.py @@ -0,0 +1,250 @@ +""" +Mixing matrices for node attributes and degree. +""" +import networkx as nx +from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy +from networkx.utils import dict_to_numpy_array + +__all__ = [ + "attribute_mixing_matrix", + "attribute_mixing_dict", + "degree_mixing_matrix", + "degree_mixing_dict", + "mixing_dict", +] + + +@nx._dispatch(node_attrs="attribute") +def attribute_mixing_dict(G, attribute, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Unse nodes in container to build the dict. The default is all nodes. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([0, 1], color="red") + >>> G.add_nodes_from([2, 3], color="blue") + >>> G.add_edge(1, 3) + >>> d = nx.attribute_mixing_dict(G, "color") + >>> print(d["red"]["blue"]) + 1 + >>> print(d["blue"]["red"]) # d symmetric for undirected graphs + 1 + + Returns + ------- + d : dictionary + Counts or joint probability of occurrence of attribute pairs. + """ + xy_iter = node_attribute_xy(G, attribute, nodes) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatch(node_attrs="attribute") +def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + attribute : string + Node attribute key. + + nodes: list or iterable (optional) + Use only nodes in container to build the matrix. The default is + all nodes. + + mapping : dictionary, optional + Mapping from node attribute to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + Returns + ------- + m: numpy array + Counts or joint probability of occurrence of attribute pairs. + + Notes + ----- + If each node has a unique attribute value, the unnormalized mixing matrix + will be equal to the adjacency matrix. To get a denser mixing matrix, + the rounding can be performed to form groups of nodes with equal values. 
+ For example, the exact height of persons in cm (180.79155222, 163.9080892, + 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163, + 163, 168, 168, ...). + + Definitions of attribute mixing matrix vary on whether the matrix + should include rows for attribute values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> gender = {0: 'male', 1: 'female', 2: 'female'} + >>> nx.set_node_attributes(G, gender, 'gender') + >>> mapping = {'male': 0, 'female': 1} + >>> mix_mat = nx.attribute_mixing_matrix(G, 'gender', mapping=mapping) + >>> # mixing from male nodes to female nodes + >>> mix_mat[mapping['male'], mapping['female']] + 0.25 + """ + d = attribute_mixing_dict(G, attribute, nodes) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +@nx._dispatch(edge_attrs="weight") +def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False): + """Returns dictionary representation of mixing matrix for degree. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or joint probability of occurrence of degree pairs. + """ + xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight) + return mixing_dict(xy_iter, normalized=normalized) + + +@nx._dispatch(edge_attrs="weight") +def degree_mixing_matrix( + G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None +): + """Returns mixing matrix for attribute. + + Parameters + ---------- + G : graph + NetworkX graph object. + + x: string ('in','out') + The degree type for source node (directed graphs only). + + y: string ('in','out') + The degree type for target node (directed graphs only). + + nodes: list or iterable (optional) + Build the matrix using only nodes in container. + The default is all nodes. + + weight: string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + normalized : bool (default=True) + Return counts if False or probabilities if True. + + mapping : dictionary, optional + Mapping from node degree to integer index in matrix. + If not specified, an arbitrary ordering will be used. + + Returns + ------- + m: numpy array + Counts, or joint probability, of occurrence of node degree. + + Notes + ----- + Definitions of degree mixing matrix vary on whether the matrix + should include rows for degree values that don't arise. Here we + do not include such empty-rows. But you can force them to appear + by inputting a `mapping` that includes those values. See examples. 
+ + Examples + -------- + >>> G = nx.star_graph(3) + >>> mix_mat = nx.degree_mixing_matrix(G) + >>> mix_mat[0, 1] # mixing from node degree 1 to node degree 3 + 0.5 + + If you want every possible degree to appear as a row, even if no nodes + have that degree, use `mapping` as follows, + + >>> max_degree = max(deg for n, deg in G.degree) + >>> mapping = {x: x for x in range(max_degree + 1)} # identity mapping + >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping) + >>> mix_mat[3, 1] # mixing from node degree 3 to node degree 1 + 0.5 + """ + d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight) + a = dict_to_numpy_array(d, mapping=mapping) + if normalized: + a = a / a.sum() + return a + + +def mixing_dict(xy, normalized=False): + """Returns a dictionary representation of mixing matrix. + + Parameters + ---------- + xy : list or container of two-tuples + Pairs of (x,y) items. + + attribute : string + Node attribute key + + normalized : bool (default=False) + Return counts if False or probabilities if True. + + Returns + ------- + d: dictionary + Counts or Joint probability of occurrence of values in xy. + """ + d = {} + psum = 0.0 + for x, y in xy: + if x not in d: + d[x] = {} + if y not in d: + d[y] = {} + v = d[x].get(y, 0) + d[x][y] = v + 1 + psum += 1 + + if normalized: + for _, jdict in d.items(): + for j in jdict: + jdict[j] /= psum + return d diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..a8980da766f1e63e06990b35a3b403df5486cd50 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py @@ -0,0 +1,160 @@ +import networkx as nx + +__all__ = ["average_neighbor_degree"] + + +@nx._dispatch(edge_attrs="weight") +def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None): + r"""Returns the average degree of the neighborhood of each node. + + In an undirected graph, the neighborhood `N(i)` of node `i` contains the + nodes that are connected to `i` by an edge. + + For directed graphs, `N(i)` is defined according to the parameter `source`: + + - if source is 'in', then `N(i)` consists of predecessors of node `i`. + - if source is 'out', then `N(i)` consists of successors of node `i`. + - if source is 'in+out', then `N(i)` is both predecessors and successors. + + The average neighborhood degree of a node `i` is + + .. math:: + + k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j + + where `N(i)` are the neighbors of node `i` and `k_j` is + the degree of node `j` which belongs to `N(i)`. For weighted + graphs, an analogous measure can be defined [1]_, + + .. math:: + + k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j + + where `s_i` is the weighted degree of node `i`, `w_{ij}` + is the weight of the edge that links `i` and `j` and + `N(i)` are the neighbors of node `i`. + + + Parameters + ---------- + G : NetworkX graph + + source : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-neighbors of source node. + + target : string ("in"|"out"|"in+out"), optional (default="out") + Directed graphs only. + Use "in"- or "out"-degree for target node. + + nodes : list or iterable, optional (default=G.nodes) + Compute neighbor degree only for specified nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. 
+ If None, then each edge has weight 1. + + Returns + ------- + d: dict + A dictionary keyed by node to the average degree of its neighbors. + + Raises + ------ + NetworkXError + If either `source` or `target` are not one of 'in', 'out', or 'in+out'. + If either `source` or `target` is passed for an undirected graph. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.edges[0, 1]["weight"] = 5 + >>> G.edges[2, 3]["weight"] = 3 + + >>> nx.average_neighbor_degree(G) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0} + >>> nx.average_neighbor_degree(G, weight="weight") + {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0} + + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.average_neighbor_degree(G, source="in", target="in") + {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0} + + >>> nx.average_neighbor_degree(G, source="out", target="out") + {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0} + + See Also + -------- + average_degree_connectivity + + References + ---------- + .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani, + "The architecture of complex weighted networks". + PNAS 101 (11): 3747–3752 (2004). + """ + if G.is_directed(): + if source == "in": + source_degree = G.in_degree + elif source == "out": + source_degree = G.out_degree + elif source == "in+out": + source_degree = G.degree + else: + raise nx.NetworkXError( + f"source argument {source} must be 'in', 'out' or 'in+out'" + ) + + if target == "in": + target_degree = G.in_degree + elif target == "out": + target_degree = G.out_degree + elif target == "in+out": + target_degree = G.degree + else: + raise nx.NetworkXError( + f"target argument {target} must be 'in', 'out' or 'in+out'" + ) + else: + if source != "out" or target != "out": + raise nx.NetworkXError( + f"source and target arguments are only supported for directed graphs" + ) + source_degree = target_degree = G.degree + + # precompute target degrees -- should *not* be weighted degree + t_deg = dict(target_degree()) + + # Set up both predecessor and successor neighbor dicts leaving empty if not needed + G_P = G_S = {n: {} for n in G} + if G.is_directed(): + # "in" or "in+out" cases: G_P contains predecessors + if "in" in source: + G_P = G.pred + # "out" or "in+out" cases: G_S contains successors + if "out" in source: + G_S = G.succ + else: + # undirected leave G_P empty but G_S is the adjacency + G_S = G.adj + + # Main loop: Compute average degree of neighbors + avg = {} + for n, deg in source_degree(nodes, weight=weight): + # handle degree zero average + if deg == 0: + avg[n] = 0.0 + continue + + # we sum over both G_P and G_S, but one of the two is usually empty. 
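+            # (For undirected graphs G_P holds only empty dicts, so the
+            # second sum contributes nothing; with source="in+out" both
+            # predecessor and successor neighbors are counted.)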
+        if weight is None:
+            avg[n] = (
+                sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n])
+            ) / deg
+        else:
+            avg[n] = (
+                sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items())
+                + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items())
+            ) / deg
+    return avg
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/pairs.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3580d40324619c506146011f07e61f6f9b67ea0
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
@@ -0,0 +1,118 @@
+"""Generators of x-y pairs of node data."""
+import networkx as nx
+
+__all__ = ["node_attribute_xy", "node_degree_xy"]
+
+
+@nx._dispatch(node_attrs="attribute")
+def node_attribute_xy(G, attribute, nodes=None):
+    """Returns iterator of node-attribute pairs for all edges in G.
+
+    Parameters
+    ----------
+    G: NetworkX graph
+
+    attribute: key
+        The node attribute key.
+
+    nodes: list or iterable (optional)
+        Use only edges that are incident to specified nodes.
+        The default is all nodes.
+
+    Returns
+    -------
+    (x, y): 2-tuple
+        Generates 2-tuple of (attribute, attribute) values.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_node(1, color="red")
+    >>> G.add_node(2, color="blue")
+    >>> G.add_edge(1, 2)
+    >>> list(nx.node_attribute_xy(G, "color"))
+    [('red', 'blue')]
+
+    Notes
+    -----
+    For undirected graphs each edge is produced twice, once for each edge
+    representation (u, v) and (v, u), with the exception of self-loop edges
+    which only appear once.
+    """
+    if nodes is None:
+        nodes = set(G)
+    else:
+        nodes = set(nodes)
+    Gnodes = G.nodes
+    for u, nbrsdict in G.adjacency():
+        if u not in nodes:
+            continue
+        uattr = Gnodes[u].get(attribute, None)
+        if G.is_multigraph():
+            for v, keys in nbrsdict.items():
+                vattr = Gnodes[v].get(attribute, None)
+                for _ in keys:
+                    yield (uattr, vattr)
+        else:
+            for v in nbrsdict:
+                vattr = Gnodes[v].get(attribute, None)
+                yield (uattr, vattr)
+
+
+@nx._dispatch(edge_attrs="weight")
+def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
+    """Generate node degree-degree pairs for edges in G.
+
+    Parameters
+    ----------
+    G: NetworkX graph
+
+    x: string ('in','out')
+        The degree type for source node (directed graphs only).
+
+    y: string ('in','out')
+        The degree type for target node (directed graphs only).
+
+    weight: string or None, optional (default=None)
+        The edge attribute that holds the numerical value used
+        as a weight.  If None, then each edge has weight 1.
+        The degree is the sum of the edge weights adjacent to the node.
+
+    nodes: list or iterable (optional)
+        Use only edges that are adjacent to specified nodes.
+        The default is all nodes.
+
+    Returns
+    -------
+    (x, y): 2-tuple
+        Generates 2-tuple of (degree, degree) values.
+
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge(1, 2)
+    >>> list(nx.node_degree_xy(G, x="out", y="in"))
+    [(1, 1)]
+    >>> list(nx.node_degree_xy(G, x="in", y="out"))
+    [(0, 0)]
+
+    Notes
+    -----
+    For undirected graphs each edge is produced twice, once for each edge
+    representation (u, v) and (v, u), with the exception of self-loop edges
+    which only appear once.
+ """ + nodes = set(G) if nodes is None else set(nodes) + if G.is_directed(): + direction = {"out": G.out_degree, "in": G.in_degree} + xdeg = direction[x] + ydeg = direction[y] + else: + xdeg = ydeg = G.degree + + for u, degu in xdeg(nodes, weight=weight): + # use G.edges to treat multigraphs correctly + neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) + for _, degv in ydeg(neighbors, weight=weight): + yield degu, degv diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40d8b67e246ba18114607756d6ad115aade948b6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..501f1d85c0f5f9b78000a4700b1252cd15be8b58 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d4ba065ae31f704f54c2f7266de685152638251 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b62d5b76fc818e1df8bd3d971cddc3673260818 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db11d37c82788f24dfb48d1864e314d68c8fe941 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b370a84b29d13534acc0a9c673717495c086ce4a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-39.pyc differ 
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17af1083c7fe79dff5cb678e15232422a9d8cf70 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46d6300649d3b4658a7263cad04354988b4da312 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py @@ -0,0 +1,81 @@ +import networkx as nx + + +class BaseTestAttributeMixing: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_nodes_from([0, 1], fish="one") + G.add_nodes_from([2, 3], fish="two") + G.add_nodes_from([4], fish="red") + G.add_nodes_from([5], fish="blue") + G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.G = G + + D = nx.DiGraph() + D.add_nodes_from([0, 1], fish="one") + D.add_nodes_from([2, 3], fish="two") + D.add_nodes_from([4], fish="red") + D.add_nodes_from([5], fish="blue") + D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.D = D + + M = nx.MultiGraph() + M.add_nodes_from([0, 1], fish="one") + M.add_nodes_from([2, 3], fish="two") + M.add_nodes_from([4], fish="red") + M.add_nodes_from([5], fish="blue") + M.add_edges_from([(0, 1), (0, 1), (2, 3)]) + cls.M = M + + S = nx.Graph() + S.add_nodes_from([0, 1], fish="one") + S.add_nodes_from([2, 3], fish="two") + S.add_nodes_from([4], fish="red") + S.add_nodes_from([5], fish="blue") + S.add_edge(0, 0) + S.add_edge(2, 2) + cls.S = S + + N = nx.Graph() + N.add_nodes_from([0, 1], margin=-2) + N.add_nodes_from([2, 3], margin=-2) + N.add_nodes_from([4], margin=-3) + N.add_nodes_from([5], margin=-4) + N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)]) + cls.N = N + + F = nx.Graph() + F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + F.add_edge(0, 2, weight=1) + nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin") + cls.F = F + + K = nx.Graph() + K.add_nodes_from([1, 2], margin=-1) + K.add_nodes_from([3], margin=1) + K.add_nodes_from([4], margin=2) + K.add_edges_from([(3, 4), (1, 2), (1, 3)]) + cls.K = K + + +class BaseTestDegreeMixing: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.D = nx.DiGraph() + cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)]) + cls.D2 = nx.DiGraph() + cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)]) + cls.M = nx.MultiGraph() + nx.add_path(cls.M, range(4)) + cls.M.add_edge(0, 1) + cls.S = nx.Graph() + cls.S.add_edges_from([(0, 0), (1, 1)]) + cls.W = nx.Graph() + cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5) + cls.W.add_edge(0, 2, weight=1) + S1 = nx.star_graph(4) + S2 = nx.star_graph(4) + cls.DS = nx.disjoint_union(S1, S2) + cls.DS.add_edge(4, 5) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..21c6287bbe6b0bfc9aa41201b593f342b2d3976e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py @@ -0,0 +1,143 @@ +from 
itertools import permutations + +import pytest + +import networkx as nx + + +class TestNeighborConnectivity: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.5} + nd = nx.average_degree_connectivity(D) + assert nd == answer + + answer = {1: 2.0, 2: 1.5} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, source="in", target="in") + assert nd == answer + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="weight") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="weight") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, weight="weight", source="in", target="in" + ) + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity( + D, source="in", target="out", weight="weight" + ) + assert nd == answer + + def test_weight_keyword(self): + G = nx.path_graph(4) + G[1][2]["other"] = 4 + answer = {1: 2.0, 2: 1.8} + nd = nx.average_degree_connectivity(G, weight="other") + assert nd == answer + answer = {1: 2.0, 2: 1.5} + nd = nx.average_degree_connectivity(G, weight=None) + assert nd == answer + + D = G.to_directed() + answer = {2: 2.0, 4: 1.8} + nd = nx.average_degree_connectivity(D, weight="other") + assert nd == answer + + answer = {1: 2.0, 2: 1.8} + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + D = G.to_directed() + nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in") + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_degree_connectivity(G)[5] + assert nd == 1.8 + nd = nx.average_degree_connectivity(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_zero_deg(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(1, 4) + c = nx.average_degree_connectivity(G) + assert c == {1: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="in", target="in") + assert c == {0: 0, 1: 0} + c = nx.average_degree_connectivity(G, source="in", target="out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="in", target="in+out") + assert c == {0: 0, 1: 3} + c = nx.average_degree_connectivity(G, source="out", target="out") + assert c == {0: 0, 3: 0} + c = nx.average_degree_connectivity(G, source="out", target="in") + assert c == {0: 0, 3: 1} + c = nx.average_degree_connectivity(G, source="out", target="in+out") + assert c == {0: 0, 3: 1} + + def test_in_out_weight(self): + G = nx.DiGraph() + G.add_edge(1, 2, weight=1) + G.add_edge(1, 3, weight=1) + G.add_edge(3, 1, weight=1) + for s, t in permutations(["in", "out", "in+out"], 2): + c = nx.average_degree_connectivity(G, source=s, target=t) + cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight") + assert c == cw + + def test_invalid_source(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + 
nx.average_degree_connectivity(G, source="bogus") + + def test_invalid_target(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + nx.average_degree_connectivity(G, target="bogus") + + def test_invalid_undirected_graph(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, target="bogus") + with pytest.raises(nx.NetworkXError): + nx.average_degree_connectivity(G, source="bogus") + + def test_single_node(self): + # TODO Is this really the intended behavior for providing a + # single node as the argument `nodes`? Shouldn't the function + # just return the connectivity value itself? + G = nx.trivial_graph() + conn = nx.average_degree_connectivity(G, nodes=0) + assert conn == {0: 0} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..5203f9449fd022525b97a19cbe78498e33fb09a3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py @@ -0,0 +1,123 @@ +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx +from networkx.algorithms.assortativity.correlation import attribute_ac + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingCorrelation(BaseTestDegreeMixing): + def test_degree_assortativity_undirected(self): + r = nx.degree_assortativity_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_assortativity_node_kwargs(self): + G = nx.Graph() + edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)] + G.add_edges_from(edges) + r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4]) + np.testing.assert_almost_equal(r, -1.0, decimal=4) + + def test_degree_assortativity_directed(self): + r = nx.degree_assortativity_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_assortativity_directed2(self): + """Test degree assortativity for a directed graph where the set of + in/out degree does not equal the total degree.""" + r = nx.degree_assortativity_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_assortativity_multigraph(self): + r = nx.degree_assortativity_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_pearson_assortativity_undirected(self): + r = nx.degree_pearson_correlation_coefficient(self.P4) + np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4) + + def test_degree_pearson_assortativity_directed(self): + r = nx.degree_pearson_correlation_coefficient(self.D) + np.testing.assert_almost_equal(r, -0.57735, decimal=4) + + def test_degree_pearson_assortativity_directed2(self): + """Test degree assortativity with Pearson for a directed graph where + the set of in/out degree does not equal the total degree.""" + r = nx.degree_pearson_correlation_coefficient(self.D2) + np.testing.assert_almost_equal(r, 0.14852, decimal=4) + + def test_degree_pearson_assortativity_multigraph(self): + r = nx.degree_pearson_correlation_coefficient(self.M) + np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4) + + def test_degree_assortativity_weighted(self): + r = nx.degree_assortativity_coefficient(self.W, weight="weight") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def 
test_degree_assortativity_double_star(self): + r = nx.degree_assortativity_coefficient(self.DS) + np.testing.assert_almost_equal(r, -0.9339, decimal=4) + + +class TestAttributeMixingCorrelation(BaseTestAttributeMixing): + def test_attribute_assortativity_undirected(self): + r = nx.attribute_assortativity_coefficient(self.G, "fish") + assert r == 6.0 / 22.0 + + def test_attribute_assortativity_directed(self): + r = nx.attribute_assortativity_coefficient(self.D, "fish") + assert r == 1.0 / 3.0 + + def test_attribute_assortativity_multigraph(self): + r = nx.attribute_assortativity_coefficient(self.M, "fish") + assert r == 1.0 + + def test_attribute_assortativity_coefficient(self): + # from "Mixing patterns in networks" + # fmt: off + a = np.array([[0.258, 0.016, 0.035, 0.013], + [0.012, 0.157, 0.058, 0.019], + [0.013, 0.023, 0.306, 0.035], + [0.005, 0.007, 0.024, 0.016]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.623, decimal=3) + + def test_attribute_assortativity_coefficient2(self): + # fmt: off + a = np.array([[0.18, 0.02, 0.01, 0.03], + [0.02, 0.20, 0.03, 0.02], + [0.01, 0.03, 0.16, 0.01], + [0.03, 0.02, 0.01, 0.22]]) + # fmt: on + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.68, decimal=2) + + def test_attribute_assortativity(self): + a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]]) + r = attribute_ac(a) + np.testing.assert_almost_equal(r, 0.029, decimal=3) + + def test_attribute_assortativity_negative(self): + r = nx.numeric_assortativity_coefficient(self.N, "margin") + np.testing.assert_almost_equal(r, -0.2903, decimal=4) + + def test_assortativity_node_kwargs(self): + G = nx.Graph() + G.add_nodes_from([0, 1], size=2) + G.add_nodes_from([2, 3], size=3) + G.add_edges_from([(0, 1), (2, 3)]) + r = nx.numeric_assortativity_coefficient(G, "size", nodes=[0, 3]) + np.testing.assert_almost_equal(r, 1.0, decimal=4) + + def test_attribute_assortativity_float(self): + r = nx.numeric_assortativity_coefficient(self.F, "margin") + np.testing.assert_almost_equal(r, -0.1429, decimal=4) + + def test_attribute_assortativity_mixed(self): + r = nx.numeric_assortativity_coefficient(self.K, "margin") + np.testing.assert_almost_equal(r, 0.4340, decimal=4) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py new file mode 100644 index 0000000000000000000000000000000000000000..9af09867235b9092837b517ca542e8a85eb602ac --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py @@ -0,0 +1,176 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestDegreeMixingDict(BaseTestDegreeMixing): + def test_degree_mixing_dict_undirected(self): + d = nx.degree_mixing_dict(self.P4) + d_result = {1: {2: 2}, 2: {1: 2, 2: 2}} + assert d == d_result + + def test_degree_mixing_dict_undirected_normalized(self): + d = nx.degree_mixing_dict(self.P4, normalized=True) + d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}} + assert d == d_result + + def test_degree_mixing_dict_directed(self): + d = nx.degree_mixing_dict(self.D) + print(d) + d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}} + assert d == d_result + + def test_degree_mixing_dict_multigraph(self): + d = nx.degree_mixing_dict(self.M) + d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}} + assert d == d_result + + def test_degree_mixing_dict_weighted(self): + d = 
nx.degree_mixing_dict(self.W, weight="weight") + d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}} + assert d == d_result + + +class TestDegreeMixingMatrix(BaseTestDegreeMixing): + def test_degree_mixing_matrix_undirected(self): + # fmt: off + a_result = np.array([[0, 2], + [2, 2]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.P4, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.P4) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_directed(self): + # fmt: off + a_result = np.array([[0, 0, 2], + [1, 0, 1], + [0, 0, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.D, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.D) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_multigraph(self): + # fmt: off + a_result = np.array([[0, 1, 0], + [1, 0, 3], + [0, 3, 0]] + ) + # fmt: on + a = nx.degree_mixing_matrix(self.M, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.M) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_selfloop(self): + # fmt: off + a_result = np.array([[2]]) + # fmt: on + a = nx.degree_mixing_matrix(self.S, normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.S) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_degree_mixing_matrix_weighted(self): + a_result = np.array([[0.0, 1.0], [1.0, 6.0]]) + a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False) + np.testing.assert_equal(a, a_result) + a = nx.degree_mixing_matrix(self.W, weight="weight") + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_degree_mixing_matrix_mapping(self): + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + mapping = {0.5: 1, 1.5: 0} + a = nx.degree_mixing_matrix( + self.W, weight="weight", normalized=False, mapping=mapping + ) + np.testing.assert_equal(a, a_result) + + +class TestAttributeMixingDict(BaseTestAttributeMixing): + def test_attribute_mixing_dict_undirected(self): + d = nx.attribute_mixing_dict(self.G, "fish") + d_result = { + "one": {"one": 2, "red": 1}, + "two": {"two": 2, "blue": 1}, + "red": {"one": 1}, + "blue": {"two": 1}, + } + assert d == d_result + + def test_attribute_mixing_dict_directed(self): + d = nx.attribute_mixing_dict(self.D, "fish") + d_result = { + "one": {"one": 1, "red": 1}, + "two": {"two": 1, "blue": 1}, + "red": {}, + "blue": {}, + } + assert d == d_result + + def test_attribute_mixing_dict_multigraph(self): + d = nx.attribute_mixing_dict(self.M, "fish") + d_result = {"one": {"one": 4}, "two": {"two": 2}} + assert d == d_result + + +class TestAttributeMixingMatrix(BaseTestAttributeMixing): + def test_attribute_mixing_matrix_undirected(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.G, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_directed(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.D, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, 
a_result) + a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_multigraph(self): + mapping = {"one": 0, "two": 1, "red": 2, "blue": 3} + a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) + a = nx.attribute_mixing_matrix( + self.M, "fish", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) + + def test_attribute_mixing_matrix_negative(self): + mapping = {-2: 0, -3: 1, -4: 2} + a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.N, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / float(a_result.sum())) + + def test_attribute_mixing_matrix_float(self): + mapping = {0.5: 1, 1.5: 0} + a_result = np.array([[6.0, 1.0], [1.0, 0.0]]) + a = nx.attribute_mixing_matrix( + self.F, "margin", mapping=mapping, normalized=False + ) + np.testing.assert_equal(a, a_result) + a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping) + np.testing.assert_equal(a, a_result / a_result.sum()) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1252d532079d4de6de4659943ce008eb9018b3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py @@ -0,0 +1,108 @@ +import pytest + +import networkx as nx + + +class TestAverageNeighbor: + def test_degree_p4(self): + G = nx.path_graph(4) + answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + nd = nx.average_neighbor_degree(D) + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "out") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + + def test_degree_p4_weighted(self): + G = nx.path_graph(4) + G[1][2]["weight"] = 4 + answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2} + nd = nx.average_neighbor_degree(G, weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + + D = nx.DiGraph(G.edges(data=True)) + print(D.edges(data=True)) + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "out", "out", weight="weight") + assert nd == {0: 1, 1: 1, 2: 0, 3: 0} + nd = nx.average_neighbor_degree(D, "in", "in", weight="weight") + assert nd == {0: 0, 1: 0, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "in", "out", weight="weight") + assert nd == {0: 0, 1: 1, 2: 1, 3: 1} + nd = nx.average_neighbor_degree(D, "out", "in", weight="weight") + assert nd == {0: 1, 1: 1, 2: 1, 3: 0} + nd = nx.average_neighbor_degree(D, source="in+out", weight="weight") + assert nd == {0: 1.0, 1: 1.0, 2: 0.8, 3: 1.0} + nd = nx.average_neighbor_degree(D, 
target="in+out", weight="weight") + assert nd == {0: 2.0, 1: 2.0, 2: 1.0, 3: 0.0} + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, weight="weight") + assert nd == answer + nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight") + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight") + assert nd == answer + + def test_degree_k4(self): + G = nx.complete_graph(4) + answer = {0: 3, 1: 3, 2: 3, 3: 3} + nd = nx.average_neighbor_degree(G) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D) + assert nd == answer + + D = G.to_directed() + nd = nx.average_neighbor_degree(D, source="in", target="in") + assert nd == answer + + def test_degree_k4_nodes(self): + G = nx.complete_graph(4) + answer = {1: 3.0, 2: 3.0} + nd = nx.average_neighbor_degree(G, nodes=[1, 2]) + assert nd == answer + + def test_degree_barrat(self): + G = nx.star_graph(5) + G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)]) + G[0][5]["weight"] = 5 + nd = nx.average_neighbor_degree(G)[5] + assert nd == 1.8 + nd = nx.average_neighbor_degree(G, weight="weight")[5] + assert nd == pytest.approx(3.222222, abs=1e-5) + + def test_error_invalid_source_target(self): + G = nx.path_graph(4) + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") + G = G.to_directed() + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "error") + with pytest.raises(nx.NetworkXError): + nx.average_neighbor_degree(G, "in", "error") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py new file mode 100644 index 0000000000000000000000000000000000000000..3984292be84dd7b306066809fb3c50a7cf0424f4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py @@ -0,0 +1,87 @@ +import networkx as nx + +from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing + + +class TestAttributeMixingXY(BaseTestAttributeMixing): + def test_node_attribute_xy_undirected(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish")) + attrxy_result = sorted( + [ + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ("one", "red"), + ("red", "one"), + ("blue", "two"), + ("two", "blue"), + ] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_undirected_nodes(self): + attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"])) + attrxy_result = sorted([]) + assert attrxy == attrxy_result + + def test_node_attribute_xy_directed(self): + attrxy = sorted(nx.node_attribute_xy(self.D, "fish")) + attrxy_result = sorted( + [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")] + ) + assert attrxy == attrxy_result + + def test_node_attribute_xy_multigraph(self): + attrxy = sorted(nx.node_attribute_xy(self.M, "fish")) + attrxy_result = [ + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("one", "one"), + ("two", "two"), + ("two", "two"), + ] + assert attrxy == attrxy_result + + def test_node_attribute_xy_selfloop(self): + attrxy = sorted(nx.node_attribute_xy(self.S, "fish")) + attrxy_result = [("one", "one"), ("two", "two")] + assert attrxy == attrxy_result + + +class TestDegreeMixingXY(BaseTestDegreeMixing): + def 
test_node_degree_xy_undirected(self):
+        xy = sorted(nx.node_degree_xy(self.P4))
+        xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_undirected_nodes(self):
+        xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1]))
+        xy_result = sorted([(1, 2), (2, 1)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_directed(self):
+        xy = sorted(nx.node_degree_xy(self.D))
+        xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_multigraph(self):
+        xy = sorted(nx.node_degree_xy(self.M))
+        xy_result = sorted(
+            [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)]
+        )
+        assert xy == xy_result
+
+    def test_node_degree_xy_selfloop(self):
+        xy = sorted(nx.node_degree_xy(self.S))
+        xy_result = sorted([(2, 2), (2, 2)])
+        assert xy == xy_result
+
+    def test_node_degree_xy_weighted(self):
+        G = nx.Graph()
+        G.add_edge(1, 2, weight=7)
+        G.add_edge(2, 3, weight=10)
+        xy = sorted(nx.node_degree_xy(G, weight="weight"))
+        xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)])
+        assert xy == xy_result
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/asteroidal.py b/MLPY/Lib/site-packages/networkx/algorithms/asteroidal.py
new file mode 100644
index 0000000000000000000000000000000000000000..65355fe625338e532cfc3dd3baa59c7340358e59
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/asteroidal.py
@@ -0,0 +1,170 @@
+"""
+Algorithms for asteroidal triples and asteroidal numbers in graphs.
+
+An asteroidal triple in a graph G is a set of three non-adjacent vertices
+u, v and w such that there exists a path between any two of them that avoids
+the closed neighborhood of the third. More formally, v_j, v_k belong to the
+same connected component of G - N[v_i], where N[v_i] denotes the closed
+neighborhood of v_i. A graph which does not contain any asteroidal triples is
+called an AT-free graph. The class of AT-free graphs is a graph class for which
+many NP-complete problems are solvable in polynomial time. Amongst them,
+independent set and coloring.
+"""
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["is_at_free", "find_asteroidal_triple"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def find_asteroidal_triple(G):
+    r"""Find an asteroidal triple in the given graph.
+
+    An asteroidal triple is a triple of non-adjacent vertices such that
+    there exists a path between any two of them which avoids the closed
+    neighborhood of the third. It checks all independent triples of vertices
+    and whether they are an asteroidal triple or not. This is done with the
+    help of a data structure called a component structure.
+    A component structure encodes information about which vertices belong to
+    the same connected component when the closed neighborhood of a given vertex
+    is removed from the graph. The algorithm used to check is the trivial
+    one, outlined in [1]_, which has a runtime of
+    :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the
+    creation of the component structure.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph to check whether it is AT-free or not.
+
+    Returns
+    -------
+    list or None
+        An asteroidal triple is returned as a list of nodes. If no asteroidal
+        triple exists, i.e. the graph is AT-free, then None is returned.
+
+    Notes
+    -----
+    The component structure and the algorithm are described in [1]_. The
+    current implementation uses the trivial algorithm for simple graphs.
+
+    References
+    ----------
+    .. [1] Ekkehard Köhler,
+       "Recognizing Graphs without asteroidal triples",
+       Journal of Discrete Algorithms 2, pages 439-452, 2004.
+       https://www.sciencedirect.com/science/article/pii/S157086670400019X
+    """
+    V = set(G.nodes)
+
+    if len(V) < 6:
+        # An asteroidal triple cannot exist in a graph with 5 or fewer vertices.
+        return None
+
+    component_structure = create_component_structure(G)
+    E_complement = set(nx.complement(G).edges)
+
+    for e in E_complement:
+        u = e[0]
+        v = e[1]
+        u_neighborhood = set(G[u]).union([u])
+        v_neighborhood = set(G[v]).union([v])
+        union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
+        for w in V - union_of_neighborhoods:
+            # Check for each pair of vertices whether they belong to the
+            # same connected component when the closed neighborhood of the
+            # third is removed.
+            if (
+                component_structure[u][v] == component_structure[u][w]
+                and component_structure[v][u] == component_structure[v][w]
+                and component_structure[w][u] == component_structure[w][v]
+            ):
+                return [u, v, w]
+    return None
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def is_at_free(G):
+    """Check if a graph is AT-free.
+
+    The method uses the `find_asteroidal_triple` method to recognize
+    an AT-free graph. If no asteroidal triple is found the graph is
+    AT-free and True is returned. If at least one asteroidal triple is
+    found the graph is not AT-free and False is returned.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        The graph to check whether it is AT-free or not.
+
+    Returns
+    -------
+    bool
+        True if G is AT-free and False otherwise.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+    >>> nx.is_at_free(G)
+    True
+
+    >>> G = nx.cycle_graph(6)
+    >>> nx.is_at_free(G)
+    False
+    """
+    return find_asteroidal_triple(G) is None
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
def create_component_structure(G):
+    r"""Create component structure for G.
+
+    A *component structure* is an `n x n` array, denoted `c`, where `n` is
+    the number of vertices, where each row and column corresponds to a vertex.
+
+    .. math::
+
+        c_{uv} = \begin{cases}
+            0, & \text{if } v \in N[u] \\
+            k, & \text{if } v \text{ is in component } k \text{ of } G \setminus N[u]
+        \end{cases}
+
+    Where `k` is an arbitrary label for each component. The structure is used
+    to simplify the detection of asteroidal triples.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        Undirected, simple graph.
+
+    Returns
+    -------
+    component_structure : dictionary
+        A dictionary of dictionaries, keyed by pairs of vertices.
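+        For example, in the path graph ``0 - 1 - 2 - 3``, removing the closed
+        neighborhood ``N[0] = {0, 1}`` leaves the single component ``{2, 3}``,
+        so ``c[0]`` maps ``0`` and ``1`` to ``0``, and ``2`` and ``3`` to ``1``.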
+
+    """
+    V = set(G.nodes)
+    component_structure = {}
+    for v in V:
+        label = 0
+        closed_neighborhood = set(G[v]).union({v})
+        row_dict = {}
+        for u in closed_neighborhood:
+            row_dict[u] = 0
+
+        G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
+        for cc in nx.connected_components(G_reduced):
+            label += 1
+            for u in cc:
+                row_dict[u] = label
+
+        component_structure[v] = row_dict
+
+    return component_structure
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd60020b122d38b8d13569d8f636ca45d771fb31
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__init__.py
@@ -0,0 +1,87 @@
+r""" This module provides functions and operations for bipartite
+graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in
+`E` that only connect nodes from opposite sets. It is common in the literature
+to use a spatial analogy referring to the two node sets as top and bottom nodes.
+
+The bipartite algorithms are not imported into the networkx namespace
+at the top level so the easiest way to use them is with:
+
+>>> from networkx.algorithms import bipartite
+
+NetworkX does not have a custom bipartite graph class but the Graph()
+or DiGraph() classes can be used to represent bipartite graphs. However,
+you have to keep track of which set each node belongs to, and make
+sure that there is no edge between nodes of the same set. The convention used
+in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to
+identify the sets each node belongs to. This convention is not enforced in
+the source code of bipartite functions, it's only a recommendation.
+
+For example:
+
+>>> B = nx.Graph()
+>>> # Add nodes with the node attribute "bipartite"
+>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0)
+>>> B.add_nodes_from(["a", "b", "c"], bipartite=1)
+>>> # Add edges only between nodes of opposite node sets
+>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
+
+Many algorithms of the bipartite module of NetworkX require, as an argument, a
+container with all the nodes that belong to one set, in addition to the bipartite
+graph `B`. The functions in the bipartite package do not check that the node set
+is actually correct nor that the input graph is actually bipartite.
+If `B` is connected, you can find the two node sets using a two-coloring
+algorithm:

+>>> nx.is_connected(B)
+True
+>>> bottom_nodes, top_nodes = bipartite.sets(B)
+
+However, if the input graph is not connected, there is more than one possible
+coloring. This is the reason why we require the user to pass a container
+with all nodes of one bipartite node set as an argument to most bipartite
+functions. In the face of ambiguity, we refuse the temptation to guess and
+raise an :exc:`AmbiguousSolution <networkx.AmbiguousSolution>`
+exception if the input graph for
+:func:`bipartite.sets <networkx.algorithms.bipartite.basic.sets>`
+is disconnected.
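+
+For instance, the two-component graph below is ambiguous (a minimal
+illustration; any disconnected bipartite graph behaves the same way):
+
+>>> D = nx.Graph([(1, "a"), (2, "b")])  # two connected components
+>>> try:
+...     bipartite.sets(D)
+... except nx.AmbiguousSolution:
+...     print("ambiguous")
+ambiguous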
+ +Using the `bipartite` node attribute, you can easily get the two node sets: + +>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0} +>>> bottom_nodes = set(B) - top_nodes + +So you can easily use the bipartite algorithms that require, as an argument, a +container with all nodes that belong to one node set: + +>>> print(round(bipartite.density(B, bottom_nodes), 2)) +0.5 +>>> G = bipartite.projected_graph(B, top_nodes) + +All bipartite graph generators in NetworkX build bipartite graphs with the +`bipartite` node attribute. Thus, you can use the same approach: + +>>> RB = bipartite.random_graph(5, 7, 0.2) +>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0} +>>> RB_bottom = set(RB) - RB_top +>>> list(RB_top) +[0, 1, 2, 3, 4] +>>> list(RB_bottom) +[5, 6, 7, 8, 9, 10, 11] + +For other bipartite graph generators see +:mod:`Generators `. + +""" + +from networkx.algorithms.bipartite.basic import * +from networkx.algorithms.bipartite.centrality import * +from networkx.algorithms.bipartite.cluster import * +from networkx.algorithms.bipartite.covering import * +from networkx.algorithms.bipartite.edgelist import * +from networkx.algorithms.bipartite.matching import * +from networkx.algorithms.bipartite.matrix import * +from networkx.algorithms.bipartite.projection import * +from networkx.algorithms.bipartite.redundancy import * +from networkx.algorithms.bipartite.spectral import * +from networkx.algorithms.bipartite.generators import * +from networkx.algorithms.bipartite.extendability import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0b042cf4ce3661ec8fe6592e126ea2f1d18d39e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40b8bb8e5629e89dfea85edd319f75b90976f9c2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8b46329b4fc2842c0f6a9c14cc6d088df946949 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bbe5fd8786089c4bceb863219277e5f7cc3f25d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bbdc951efcde4952f9737656e674eab6a0a0b0bf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e3f896341ed172aba85ce788b521a9bf7bbc557 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dfd9a140798a1fdf17f5a996734b7cdceca55160 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..028fb399ef16b03c5e312526d311c3dbed178843 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4f62cd60dd3ad276b3b4f6647e3de1d68bd93b9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea19820edf55087072aba43be2173175f864f08a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90c0b13bd87bfcee3e518c41f4dbf4bb6cd9ecda Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0802b893b58440eacdfa6d2b8f3c5885f16f336 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31685afe171d392f6a41bcd2aab6d86c67a71727 Binary files 
/dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/basic.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..8b9120e27aa9663e557680a8372465be5ed433e2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/basic.py @@ -0,0 +1,321 @@ +""" +========================== +Bipartite Graph Algorithms +========================== +""" +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.exception import AmbiguousSolution + +__all__ = [ + "is_bipartite", + "is_bipartite_node_set", + "color", + "sets", + "density", + "degrees", +] + + +@nx._dispatch +def color(G): + """Returns a two-coloring of the graph. + + Raises an exception if the graph is not bipartite. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + color : dictionary + A dictionary keyed by node with a 1 or 0 as data for each node color. + + Raises + ------ + NetworkXError + If the graph is not two-colorable. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> c = bipartite.color(G) + >>> print(c) + {0: 1, 1: 0, 2: 1, 3: 0} + + You can use this to set a node attribute indicating the bipartite set: + + >>> nx.set_node_attributes(G, c, "bipartite") + >>> print(G.nodes[0]["bipartite"]) + 1 + >>> print(G.nodes[1]["bipartite"]) + 0 + """ + if G.is_directed(): + import itertools + + def neighbors(v): + return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)]) + + else: + neighbors = G.neighbors + + color = {} + for n in G: # handle disconnected graphs + if n in color or len(G[n]) == 0: # skip isolates + continue + queue = [n] + color[n] = 1 # nodes seen with color (1 or 0) + while queue: + v = queue.pop() + c = 1 - color[v] # opposite color of node v + for w in neighbors(v): + if w in color: + if color[w] == color[v]: + raise nx.NetworkXError("Graph is not bipartite.") + else: + color[w] = c + queue.append(w) + # color isolates with 0 + color.update(dict.fromkeys(nx.isolates(G), 0)) + return color + + +@nx._dispatch +def is_bipartite(G): + """Returns True if graph G is bipartite, False if not. + + Parameters + ---------- + G : NetworkX graph + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> print(bipartite.is_bipartite(G)) + True + + See Also + -------- + color, is_bipartite_node_set + """ + try: + color(G) + return True + except nx.NetworkXError: + return False + + +@nx._dispatch +def is_bipartite_node_set(G, nodes): + """Returns True if nodes and G/nodes are a bipartition of G. + + Parameters + ---------- + G : NetworkX graph + + nodes: list or container + Check if nodes are a one of a bipartite set. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X = set([1, 3]) + >>> bipartite.is_bipartite_node_set(G, X) + True + + Notes + ----- + An exception is raised if the input nodes are not distinct, because in this + case some bipartite algorithms will yield incorrect results. + For connected graphs the bipartite sets are unique. This function handles + disconnected graphs. + """ + S = set(nodes) + + if len(S) < len(nodes): + # this should maybe just return False? 
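+        # Duplicate nodes make bipartite-set membership ill-defined for the
+        # algorithms that consume this check, so fail loudly rather than guess.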
+ raise AmbiguousSolution( + "The input node set contains duplicates.\n" + "This may lead to incorrect results when using it in bipartite algorithms.\n" + "Consider using set(nodes) as the input" + ) + + for CC in (G.subgraph(c).copy() for c in connected_components(G)): + X, Y = sets(CC) + if not ( + (X.issubset(S) and Y.isdisjoint(S)) or (Y.issubset(S) and X.isdisjoint(S)) + ): + return False + return True + + +@nx._dispatch +def sets(G, top_nodes=None): + """Returns bipartite node sets of graph G. + + Raises an exception if the graph is not bipartite or if the input + graph is disconnected and thus more than one valid solution exists. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + Parameters + ---------- + G : NetworkX graph + + top_nodes : container, optional + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + X : set + Nodes from one side of the bipartite graph. + Y : set + Nodes from the other side. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + NetworkXError + Raised if the input graph is not bipartite. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> X, Y = bipartite.sets(G) + >>> list(X) + [0, 2] + >>> list(Y) + [1, 3] + + See Also + -------- + color + + """ + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + if top_nodes is not None: + X = set(top_nodes) + Y = set(G) - X + else: + if not is_connected(G): + msg = "Disconnected graph: Ambiguous solution for bipartite sets." + raise nx.AmbiguousSolution(msg) + c = color(G) + X = {n for n, is_top in c.items() if is_top} + Y = {n for n, is_top in c.items() if not is_top} + return (X, Y) + + +@nx._dispatch(graphs="B") +def density(B, nodes): + """Returns density of bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. + + Returns + ------- + d : float + The bipartite density + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> X = set([0, 1, 2]) + >>> bipartite.density(G, X) + 1.0 + >>> Y = set([3, 4]) + >>> bipartite.density(G, Y) + 1.0 + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + color + """ + n = len(B) + m = nx.number_of_edges(B) + nb = len(nodes) + nt = n - nb + if m == 0: # includes cases n==0 and n==1 + d = 0.0 + else: + if B.is_directed(): + d = m / (2 * nb * nt) + else: + d = m / (nb * nt) + return d + + +@nx._dispatch(graphs="B", edge_attrs="weight") +def degrees(B, nodes, weight=None): + """Returns the degrees of the two node sets in the bipartite graph B. + + Parameters + ---------- + B : NetworkX graph + + nodes: list or container + Nodes in one node set of the bipartite graph. 
+ + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + (degX,degY) : tuple of dictionaries + The degrees of the two bipartite sets as dictionaries keyed by node. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.complete_bipartite_graph(3, 2) + >>> Y = set([3, 4]) + >>> degX, degY = bipartite.degrees(G, Y) + >>> dict(degX) + {0: 2, 1: 2, 2: 2} + + Notes + ----- + The container of nodes passed as argument must contain all nodes + in one of the two bipartite node sets to avoid ambiguity in the + case of disconnected graphs. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + color, density + """ + bottom = set(nodes) + top = set(B) - bottom + return (B.degree(top, weight), B.degree(bottom, weight)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..a904da3528f208387e6b845dd18bb8b6253cd799 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/centrality.py @@ -0,0 +1,290 @@ +import networkx as nx + +__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"] + + +@nx._dispatch(name="bipartite_degree_centrality") +def degree_centrality(G, nodes): + r"""Compute the degree centrality for nodes in a bipartite network. + + The degree centrality for a node `v` is the fraction of nodes + connected to it. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + centrality : dictionary + Dictionary keyed by node with bipartite degree centrality as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.degree_centrality(G, nodes=top_nodes) + {0: 2.0, 1: 1.5, 2: 1.5, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both bipartite node + sets. See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + For unipartite networks, the degree centrality values are + normalized by dividing by the maximum possible degree (which is + `n-1` where `n` is the number of nodes in G). + + In the bipartite case, the maximum possible degree of a node in a + bipartite node set is the number of nodes in the opposite node set + [1]_. The degree centrality for a node `v` in the bipartite + sets `U` with `n` nodes and `V` with `m` nodes is + + .. math:: + + d_{v} = \frac{deg(v)}{m}, \mbox{for} v \in U , + + d_{v} = \frac{deg(v)}{n}, \mbox{for} v \in V , + + + where `deg(v)` is the degree of node `v`. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + s = 1.0 / len(bottom) + centrality = {n: d * s for n, d in G.degree(top)} + s = 1.0 / len(top) + centrality.update({n: d * s for n, d in G.degree(bottom)}) + return centrality + + +@nx._dispatch(name="bipartite_betweenness_centrality") +def betweenness_centrality(G, nodes): + r"""Compute betweenness centrality for nodes in a bipartite network. + + Betweenness centrality of a node `v` is the sum of the + fraction of all-pairs shortest paths that pass through `v`. + + Values of betweenness are normalized by the maximum possible + value which for bipartite graphs is limited by the relative size + of the two node sets [1]_. + + Let `n` be the number of nodes in the node set `U` and + `m` be the number of nodes in the node set `V`, then + nodes in `U` are normalized by dividing by + + .. math:: + + \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] , + + where + + .. math:: + + s = (n - 1) \div m , t = (n - 1) \mod m , + + and nodes in `V` are normalized by dividing by + + .. math:: + + \frac{1}{2} [n^2 (p + 1)^2 + n (p + 1)(2r - p - 1) - r (2p - r + 3)] , + + where, + + .. math:: + + p = (m - 1) \div n , r = (m - 1) \mod n . + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or container + Container with all nodes in one bipartite node set. + + Returns + ------- + betweenness : dictionary + Dictionary keyed by node with bipartite betweenness centrality + as the value. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> top_nodes = {1, 2} + >>> nx.bipartite.betweenness_centrality(G, nodes=top_nodes) + {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + + See Also + -------- + degree_centrality + closeness_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + https://dx.doi.org/10.4135/9781446294413.n28 + """ + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + s, t = divmod(n - 1, m) + bet_max_top = ( + ((m**2) * ((s + 1) ** 2)) + + (m * (s + 1) * (2 * t - s - 1)) + - (t * ((2 * s) - t + 3)) + ) / 2.0 + p, r = divmod(m - 1, n) + bet_max_bot = ( + ((n**2) * ((p + 1) ** 2)) + + (n * (p + 1) * (2 * r - p - 1)) + - (r * ((2 * p) - r + 3)) + ) / 2.0 + betweenness = nx.betweenness_centrality(G, normalized=False, weight=None) + for node in top: + betweenness[node] /= bet_max_top + for node in bottom: + betweenness[node] /= bet_max_bot + return betweenness + + +@nx._dispatch(name="bipartite_closeness_centrality") +def closeness_centrality(G, nodes, normalized=True): + r"""Compute the closeness centrality for nodes in a bipartite network. + + The closeness of a node is the distance to all other nodes in the + graph or in the case that the graph is not connected to all other nodes + in the connected component containing that node. + + Parameters + ---------- + G : graph + A bipartite network + + nodes : list or container + Container with all nodes in one bipartite node set. 
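A small check (illustrative, not from the packaged sources) of the two normalizations above: in `K_{3,2}` every node is adjacent to the whole opposite set, so the bipartite degree centrality is exactly 1.0 on both sides, and the betweenness values land in [0, 1] after division by the `bet_max_top`/`bet_max_bot` maxima.

```python
# Sketch only: bipartite centrality normalization on K_{3,2}.
import networkx as nx

G = nx.complete_bipartite_graph(3, 2)
top = {0, 1, 2}

dc = nx.bipartite.degree_centrality(G, top)
assert all(v == 1.0 for v in dc.values())     # deg(v) / |opposite set| == 1

bc = nx.bipartite.betweenness_centrality(G, top)
assert all(0 <= v <= 1 for v in bc.values())  # scaled by bet_max_top / bet_max_bot
```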
+ + normalized : bool, optional + If True (default) normalize by connected component size. + + Returns + ------- + closeness : dictionary + Dictionary keyed by node with bipartite closeness centrality + as the value. + + Examples + -------- + >>> G = nx.wheel_graph(5) + >>> top_nodes = {0, 1, 2} + >>> nx.bipartite.closeness_centrality(G, nodes=top_nodes) + {0: 1.5, 1: 1.2, 2: 1.2, 3: 1.0, 4: 1.0} + + See Also + -------- + betweenness_centrality + degree_centrality + :func:`~networkx.algorithms.bipartite.basic.sets` + :func:`~networkx.algorithms.bipartite.basic.is_bipartite` + + Notes + ----- + The nodes input parameter must contain all nodes in one bipartite node set, + but the dictionary returned contains all nodes from both node sets. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + Closeness centrality is normalized by the minimum distance possible. + In the bipartite case the minimum distance for a node in one bipartite + node set is 1 from all nodes in the other node set and 2 from all + other nodes in its own set [1]_. Thus the closeness centrality + for node `v` in the two bipartite sets `U` with + `n` nodes and `V` with `m` nodes is + + .. math:: + + c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U, + + c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V, + + where `d` is the sum of the distances from `v` to all + other nodes. + + Higher values of closeness indicate higher centrality. + + As in the unipartite case, setting normalized=True causes the + values to normalized further to n-1 / size(G)-1 where n is the + number of nodes in the connected part of graph containing the + node. If the graph is not completely connected, this algorithm + computes the closeness centrality for each connected part + separately. + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation + Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. 
+ https://dx.doi.org/10.4135/9781446294413.n28 + """ + closeness = {} + path_length = nx.single_source_shortest_path_length + top = set(nodes) + bottom = set(G) - top + n = len(top) + m = len(bottom) + for node in top: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (m + 2 * (n - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + for node in bottom: + sp = dict(path_length(G, node)) + totsp = sum(sp.values()) + if totsp > 0.0 and len(G) > 1: + closeness[node] = (n + 2 * (m - 1)) / totsp + if normalized: + s = (len(sp) - 1) / (len(G) - 1) + closeness[node] *= s + else: + closeness[node] = 0.0 + return closeness diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/cluster.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..f10d7efd117c724bd3435dd0aab49f083347d672 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/cluster.py @@ -0,0 +1,280 @@ +"""Functions for computing clustering of pairs + +""" + +import itertools + +import networkx as nx + +__all__ = [ + "clustering", + "average_clustering", + "latapy_clustering", + "robins_alexander_clustering", +] + + +def cc_dot(nu, nv): + return len(nu & nv) / len(nu | nv) + + +def cc_max(nu, nv): + return len(nu & nv) / max(len(nu), len(nv)) + + +def cc_min(nu, nv): + return len(nu & nv) / min(len(nu), len(nv)) + + +modes = {"dot": cc_dot, "min": cc_min, "max": cc_max} + + +@nx._dispatch +def latapy_clustering(G, nodes=None, mode="dot"): + r"""Compute a bipartite clustering coefficient for nodes. + + The bipartite clustering coefficient is a measure of local density + of connections defined as [1]_: + + .. math:: + + c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|} + + where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`, + and `c_{uv}` is the pairwise clustering coefficient between nodes + `u` and `v`. + + The mode selects the function for `c_{uv}` which can be: + + `dot`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|} + + `min`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)} + + `max`: + + .. math:: + + c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)} + + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute bipartite clustering for these nodes. The default + is all nodes in G. + + mode : string + The pairwise bipartite clustering method to be used in the computation. + It must be "dot", "max", or "min". + + Returns + ------- + clustering : dictionary + A dictionary keyed by node with the clustering coefficient value. + + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) # path graphs are bipartite + >>> c = bipartite.clustering(G) + >>> c[0] + 0.5 + >>> c = bipartite.clustering(G, mode="min") + >>> c[0] + 1.0 + + See Also + -------- + robins_alexander_clustering + average_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. 
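Two quick hand checks (illustrative only) of the formulas just given: the closeness of the hub of the star `S_3 = K_{1,3}`, and the three pairwise coefficients on a pair of overlapping neighborhoods.

```python
# Sketch only: verifying the closeness formula and the c_{uv} modes by hand.
import networkx as nx

# Hub of S_3: top set {0} (n = 1), three leaves (m = 3), all at distance 1,
# so (m + 2*(n - 1)) / totsp = 3 / 3 = 1.0.
G = nx.star_graph(3)
assert nx.bipartite.closeness_centrality(G, nodes={0})[0] == 1.0

# Neighborhoods sharing 2 of 4 nodes:
nu, nv = {1, 2, 3}, {2, 3, 4}
assert len(nu & nv) / len(nu | nv) == 0.5             # "dot"
assert len(nu & nv) / min(len(nu), len(nv)) == 2 / 3  # "min" (same as "max" here)
```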
+ """ + if not nx.algorithms.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph is not bipartite") + + try: + cc_func = modes[mode] + except KeyError as err: + raise nx.NetworkXError( + "Mode for bipartite clustering must be: dot, min or max" + ) from err + + if nodes is None: + nodes = G + ccs = {} + for v in nodes: + cc = 0.0 + nbrs2 = {u for nbr in G[v] for u in G[nbr]} - {v} + for u in nbrs2: + cc += cc_func(set(G[u]), set(G[v])) + if cc > 0.0: # len(nbrs2)>0 + cc /= len(nbrs2) + ccs[v] = cc + return ccs + + +clustering = latapy_clustering + + +@nx._dispatch(name="bipartite_average_clustering") +def average_clustering(G, nodes=None, mode="dot"): + r"""Compute the average bipartite clustering coefficient. + + A clustering coefficient for the whole graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where `n` is the number of nodes in `G`. + + Similar measures for the two bipartite sets can be defined [1]_ + + .. math:: + + C_X = \frac{1}{|X|}\sum_{v \in X} c_v, + + where `X` is a bipartite set of `G`. + + Parameters + ---------- + G : graph + a bipartite graph + + nodes : list or iterable, optional + A container of nodes to use in computing the average. + The nodes should be either the entire graph (the default) or one of the + bipartite sets. + + mode : string + The pairwise bipartite clustering method. + It must be "dot", "max", or "min" + + Returns + ------- + clustering : float + The average bipartite clustering for the given set of nodes or the + entire graph if no nodes are specified. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.star_graph(3) # star graphs are bipartite + >>> bipartite.average_clustering(G) + 0.75 + >>> X, Y = bipartite.sets(G) + >>> bipartite.average_clustering(G, X) + 0.0 + >>> bipartite.average_clustering(G, Y) + 1.0 + + See Also + -------- + clustering + + Notes + ----- + The container of nodes passed to this function must contain all of the nodes + in one of the bipartite sets ("top" or "bottom") in order to compute + the correct average bipartite clustering coefficients. + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + """ + if nodes is None: + nodes = G + ccs = latapy_clustering(G, nodes=nodes, mode=mode) + return sum(ccs[v] for v in nodes) / len(nodes) + + +@nx._dispatch +def robins_alexander_clustering(G): + r"""Compute the bipartite clustering of G. + + Robins and Alexander [1]_ defined bipartite clustering coefficient as + four times the number of four cycles `C_4` divided by the number of + three paths `L_3` in a bipartite graph: + + .. math:: + + CC_4 = \frac{4 * C_4}{L_3} + + Parameters + ---------- + G : graph + a bipartite graph + + Returns + ------- + clustering : float + The Robins and Alexander bipartite clustering for the input graph. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.davis_southern_women_graph() + >>> print(round(bipartite.robins_alexander_clustering(G), 3)) + 0.468 + + See Also + -------- + latapy_clustering + networkx.algorithms.cluster.square_clustering + + References + ---------- + .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking + directors: Network structure and distance in bipartite graphs. 
+ Computational & Mathematical Organization Theory 10(1), 69–94. + + """ + if G.order() < 4 or G.size() < 3: + return 0 + L_3 = _threepaths(G) + if L_3 == 0: + return 0 + C_4 = _four_cycles(G) + return (4.0 * C_4) / L_3 + + +def _four_cycles(G): + cycles = 0 + for v in G: + for u, w in itertools.combinations(G[v], 2): + cycles += len((set(G[u]) & set(G[w])) - {v}) + return cycles / 4 + + +def _threepaths(G): + paths = 0 + for v in G: + for u in G[v]: + for w in set(G[u]) - {v}: + paths += len(set(G[w]) - {v, u}) + # Divide by two because we count each three path twice + # one for each possible starting point + return paths / 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/covering.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/covering.py new file mode 100644 index 0000000000000000000000000000000000000000..8669b4b1681805d8599686f7fd2fedb1f01839b7 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/covering.py @@ -0,0 +1,57 @@ +""" Functions related to graph covers.""" + +import networkx as nx +from networkx.algorithms.bipartite.matching import hopcroft_karp_matching +from networkx.algorithms.covering import min_edge_cover as _min_edge_cover +from networkx.utils import not_implemented_for + +__all__ = ["min_edge_cover"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(name="bipartite_min_edge_cover") +def min_edge_cover(G, matching_algorithm=None): + """Returns a set of edges which constitutes + the minimum edge cover of the graph. + + The smallest edge cover can be found in polynomial time by finding + a maximum matching and extending it greedily so that all nodes + are covered. + + Parameters + ---------- + G : NetworkX graph + An undirected bipartite graph. + + matching_algorithm : function + A function that returns a maximum cardinality matching in a + given bipartite graph. The function must take one input, the + graph ``G``, and return a dictionary mapping each node to its + mate. If not specified, + :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching` + will be used. Other possibilities include + :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`, + + Returns + ------- + set + A set of the edges in a minimum edge cover of the graph, given as + pairs of nodes. It contains both the edges `(u, v)` and `(v, u)` + for given nodes `u` and `v` among the edges of minimum edge cover. + + Notes + ----- + An edge cover of a graph is a set of edges such that every node of + the graph is incident to at least one edge of the set. + A minimum edge cover is an edge covering of smallest cardinality. + + Due to its implementation, the worst-case running time of this algorithm + is bounded by the worst-case running time of the function + ``matching_algorithm``. + """ + if G.order() == 0: # Special case for the empty graph + return set() + if matching_algorithm is None: + matching_algorithm = hopcroft_karp_matching + return _min_edge_cover(G, matching_algorithm=matching_algorithm) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..5305aca3bdbe0af10a86689a8caccf315e1426bb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py @@ -0,0 +1,359 @@ +""" +******************** +Bipartite Edge Lists +******************** +Read and write NetworkX graphs as bipartite edge lists. 
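Before the edge-list I/O helpers below, a quick numeric check (illustrative only) of the two routines just defined: `C_4` is exactly `K_{2,2}`, containing one 4-cycle and four 3-edge paths, and a minimum edge cover must be incident to every node.

```python
# Sketch only: CC_4 = 4 * C_4 / L_3 = 4 * 1 / 4 on the 4-cycle, plus a
# minimum edge cover touching every node.
import networkx as nx

G = nx.cycle_graph(4)
assert nx.bipartite.robins_alexander_clustering(G) == 1.0

cover = nx.bipartite.min_edge_cover(G)
assert {n for edge in cover for n in edge} == set(G)
```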
+ +Format +------ +You can read or write three formats of edge lists with these functions. + +Node pairs with no data:: + + 1 2 + +Python dictionary as data:: + + 1 2 {'weight':7, 'color':'green'} + +Arbitrary data:: + + 1 2 7 green + +For each edge (u, v) the node u is assigned to part 0 and the node v to part 1. +""" +__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"] + +import networkx as nx +from networkx.utils import not_implemented_for, open_file + + +@open_file(1, mode="wb") +def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"): + """Write a bipartite graph as a list of edges. + + Parameters + ---------- + G : Graph + A NetworkX bipartite graph + path : file or string + File or filename to write. If a file is provided, it must be + opened in 'wb' mode. Filenames ending in .gz or .bz2 will be compressed. + comments : string, optional + The character used to indicate the start of a comment + delimiter : string, optional + The string used to separate values. The default is whitespace. + data : bool or list, optional + If False write no edge data. + If True write a string representation of the edge data dictionary.. + If a list (or other iterable) is provided, write the keys specified + in the list. + encoding: string, optional + Specify which encoding to use when writing file. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> nx.write_edgelist(G, "test.edgelist") + >>> fh = open("test.edgelist", "wb") + >>> nx.write_edgelist(G, fh) + >>> nx.write_edgelist(G, "test.edgelist.gz") + >>> nx.write_edgelist(G, "test.edgelist.gz", data=False) + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=7, color="red") + >>> nx.write_edgelist(G, "test.edgelist", data=False) + >>> nx.write_edgelist(G, "test.edgelist", data=["color"]) + >>> nx.write_edgelist(G, "test.edgelist", data=["color", "weight"]) + + See Also + -------- + write_edgelist + generate_edgelist + """ + for line in generate_edgelist(G, delimiter, data): + line += "\n" + path.write(line.encode(encoding)) + + +@not_implemented_for("directed") +def generate_edgelist(G, delimiter=" ", data=True): + """Generate a single line of the bipartite graph G in edge list format. + + Parameters + ---------- + G : NetworkX graph + The graph is assumed to have node attribute `part` set to 0,1 representing + the two graph parts + + delimiter : string, optional + Separator for node labels + + data : bool or list of keys + If False generate no edge data. If True use a dictionary + representation of edge data. If a list of keys use a list of data + values corresponding to the keys. + + Returns + ------- + lines : string + Lines of data in adjlist format. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> G[1][2]["weight"] = 3 + >>> G[2][3]["capacity"] = 12 + >>> for line in bipartite.generate_edgelist(G, data=False): + ... print(line) + 0 1 + 2 1 + 2 3 + + >>> for line in bipartite.generate_edgelist(G): + ... print(line) + 0 1 {} + 2 1 {'weight': 3} + 2 3 {'capacity': 12} + + >>> for line in bipartite.generate_edgelist(G, data=["weight"]): + ... 
print(line) + 0 1 + 2 1 3 + 2 3 + """ + try: + part0 = [n for n, d in G.nodes.items() if d["bipartite"] == 0] + except BaseException as err: + raise AttributeError("Missing node attribute `bipartite`") from err + if data is True or data is False: + for n in part0: + for edge in G.edges(n, data=data): + yield delimiter.join(map(str, edge)) + else: + for n in part0: + for u, v, d in G.edges(n, data=True): + edge = [u, v] + try: + edge.extend(d[k] for k in data) + except KeyError: + pass # missing data for this edge, should warn? + yield delimiter.join(map(str, edge)) + + +@nx._dispatch(name="bipartite_parse_edgelist", graphs=None) +def parse_edgelist( + lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True +): + """Parse lines of an edge list representation of a bipartite graph. + + Parameters + ---------- + lines : list or iterator of strings + Input data in edgelist format + comments : string, optional + Marker for comment lines + delimiter : string, optional + Separator for node labels + create_using: NetworkX graph container, optional + Use given NetworkX graph for holding nodes or edges. + nodetype : Python type, optional + Convert nodes to this type. + data : bool or list of (label,type) tuples + If False generate no edge data or if True use a dictionary + representation of edge data or a list tuples specifying dictionary + key names and types for edge data. + + Returns + ------- + G: NetworkX Graph + The bipartite graph corresponding to lines + + Examples + -------- + Edgelist with no data: + + >>> from networkx.algorithms import bipartite + >>> lines = ["1 2", "2 3", "3 4"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.nodes(data=True)) + [(1, {'bipartite': 0}), (2, {'bipartite': 0}), (3, {'bipartite': 0}), (4, {'bipartite': 1})] + >>> sorted(G.edges()) + [(1, 2), (2, 3), (3, 4)] + + Edgelist with data in Python dictionary representation: + + >>> lines = ["1 2 {'weight':3}", "2 3 {'weight':27}", "3 4 {'weight':3.0}"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3}), (2, 3, {'weight': 27}), (3, 4, {'weight': 3.0})] + + Edgelist with data in a list: + + >>> lines = ["1 2 3", "2 3 27", "3 4 3.0"] + >>> G = bipartite.parse_edgelist(lines, nodetype=int, data=(("weight", float),)) + >>> sorted(G.nodes()) + [1, 2, 3, 4] + >>> sorted(G.edges(data=True)) + [(1, 2, {'weight': 3.0}), (2, 3, {'weight': 27.0}), (3, 4, {'weight': 3.0})] + + See Also + -------- + """ + from ast import literal_eval + + G = nx.empty_graph(0, create_using) + for line in lines: + p = line.find(comments) + if p >= 0: + line = line[:p] + if not len(line): + continue + # split line, should have 2 or more + s = line.strip().split(delimiter) + if len(s) < 2: + continue + u = s.pop(0) + v = s.pop(0) + d = s + if nodetype is not None: + try: + u = nodetype(u) + v = nodetype(v) + except BaseException as err: + raise TypeError( + f"Failed to convert nodes {u},{v} " f"to type {nodetype}." + ) from err + + if len(d) == 0 or data is False: + # no data or data type specified + edgedata = {} + elif data is True: + # no edge types specified + try: # try to evaluate as dictionary + edgedata = dict(literal_eval(" ".join(d))) + except BaseException as err: + raise TypeError( + f"Failed to convert edge data ({d})" f"to dictionary." 
+ ) from err + else: + # convert edge data to dictionary with specified keys and type + if len(d) != len(data): + raise IndexError( + f"Edge data {d} and data_keys {data} are not the same length" + ) + edgedata = {} + for (edge_key, edge_type), edge_value in zip(data, d): + try: + edge_value = edge_type(edge_value) + except BaseException as err: + raise TypeError( + f"Failed to convert {edge_key} data " + f"{edge_value} to type {edge_type}." + ) from err + edgedata.update({edge_key: edge_value}) + G.add_node(u, bipartite=0) + G.add_node(v, bipartite=1) + G.add_edge(u, v, **edgedata) + return G + + +@open_file(0, mode="rb") +@nx._dispatch(name="bipartite_read_edgelist", graphs=None) +def read_edgelist( + path, + comments="#", + delimiter=None, + create_using=None, + nodetype=None, + data=True, + edgetype=None, + encoding="utf-8", +): + """Read a bipartite graph from a list of edges. + + Parameters + ---------- + path : file or string + File or filename to read. If a file is provided, it must be + opened in 'rb' mode. + Filenames ending in .gz or .bz2 will be uncompressed. + comments : string, optional + The character used to indicate the start of a comment. + delimiter : string, optional + The string used to separate values. The default is whitespace. + create_using : Graph container, optional, + Use specified container to build graph. The default is networkx.Graph, + an undirected graph. + nodetype : int, float, str, Python type, optional + Convert node data from strings to specified type + data : bool or list of (label,type) tuples + Tuples specifying dictionary key names and types for edge data + edgetype : int, float, str, Python type, optional OBSOLETE + Convert edge data from strings to specified type and use as 'weight' + encoding: string, optional + Specify which encoding to use when reading file. + + Returns + ------- + G : graph + A networkx Graph or other type specified with create_using + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> G = nx.path_graph(4) + >>> G.add_nodes_from([0, 2], bipartite=0) + >>> G.add_nodes_from([1, 3], bipartite=1) + >>> bipartite.write_edgelist(G, "test.edgelist") + >>> G = bipartite.read_edgelist("test.edgelist") + + >>> fh = open("test.edgelist", "rb") + >>> G = bipartite.read_edgelist(fh) + >>> fh.close() + + >>> G = bipartite.read_edgelist("test.edgelist", nodetype=int) + + Edgelist with data in a list: + + >>> textline = "1 2 3" + >>> fh = open("test.edgelist", "w") + >>> d = fh.write(textline) + >>> fh.close() + >>> G = bipartite.read_edgelist( + ... "test.edgelist", nodetype=int, data=(("weight", float),) + ... ) + >>> list(G) + [1, 2] + >>> list(G.edges(data=True)) + [(1, 2, {'weight': 3.0})] + + See parse_edgelist() for more examples of formatting. + + See Also + -------- + parse_edgelist + + Notes + ----- + Since nodes must be hashable, the function nodetype must return hashable + types (e.g. int, float, str, frozenset - or tuples of those, etc.) 
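An illustrative round trip through the two parsers above (not part of the packaged file); it relies on `complete_bipartite_graph` labeling nodes with the `bipartite` attribute that `generate_edgelist` expects.

```python
# Sketch only: generate_edgelist -> parse_edgelist preserves the edges.
import networkx as nx
from networkx.algorithms import bipartite

G = nx.complete_bipartite_graph(2, 2)          # nodes carry 'bipartite' 0/1
lines = list(bipartite.generate_edgelist(G, data=False))
H = bipartite.parse_edgelist(lines, nodetype=int)
assert set(G.edges()) == set(H.edges())
```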
+ """ + lines = (line.decode(encoding) for line in path) + return parse_edgelist( + lines, + comments=comments, + delimiter=delimiter, + create_using=create_using, + nodetype=nodetype, + data=data, + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/extendability.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/extendability.py new file mode 100644 index 0000000000000000000000000000000000000000..10dd5473b5cf5f4490bcb65086371ba58751a270 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/extendability.py @@ -0,0 +1,105 @@ +""" Provides a function for computing the extendability of a graph which is +undirected, simple, connected and bipartite and contains at least one perfect matching.""" + + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["maximal_extendability"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +def maximal_extendability(G): + """Computes the extendability of a graph. + + The extendability of a graph is defined as the maximum $k$ for which `G` + is $k$-extendable. Graph `G` is $k$-extendable if and only if `G` has a + perfect matching and every set of $k$ independent edges can be extended + to a perfect matching in `G`. + + Parameters + ---------- + G : NetworkX Graph + A fully-connected bipartite graph without self-loops + + Returns + ------- + extendability : int + + Raises + ------ + NetworkXError + If the graph `G` is disconnected. + If the graph `G` is not bipartite. + If the graph `G` does not contain a perfect matching. + If the residual graph of `G` is not strongly connected. + + Notes + ----- + Definition: + Let `G` be a simple, connected, undirected and bipartite graph with a perfect + matching M and bipartition (U,V). The residual graph of `G`, denoted by $G_M$, + is the graph obtained from G by directing the edges of M from V to U and the + edges that do not belong to M from U to V. + + Lemma [1]_ : + Let M be a perfect matching of `G`. `G` is $k$-extendable if and only if its residual + graph $G_M$ is strongly connected and there are $k$ vertex-disjoint directed + paths between every vertex of U and every vertex of V. + + Assuming that input graph `G` is undirected, simple, connected, bipartite and contains + a perfect matching M, this function constructs the residual graph $G_M$ of G and + returns the minimum value among the maximum vertex-disjoint directed paths between + every vertex of U and every vertex of V in $G_M$. By combining the definitions + and the lemma, this value represents the extendability of the graph `G`. + + Time complexity O($n^3$ $m^2$)) where $n$ is the number of vertices + and $m$ is the number of edges. + + References + ---------- + .. [1] "A polynomial algorithm for the extendability problem in bipartite graphs", + J. Lakhal, L. Litzler, Information Processing Letters, 1998. + .. [2] "On n-extendible graphs", M. D. 
Plummer, Discrete Mathematics, 31:201–210, 1980 + https://doi.org/10.1016/0012-365X(80)90037-0 + + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G is not connected") + + if not nx.bipartite.is_bipartite(G): + raise nx.NetworkXError("Graph G is not bipartite") + + U, V = nx.bipartite.sets(G) + + maximum_matching = nx.bipartite.hopcroft_karp_matching(G) + + if not nx.is_perfect_matching(G, maximum_matching): + raise nx.NetworkXError("Graph G does not contain a perfect matching") + + # list of edges in perfect matching, directed from V to U + pm = [(node, maximum_matching[node]) for node in V & maximum_matching.keys()] + + # Direct all the edges of G, from V to U if in matching, else from U to V + directed_edges = [ + (x, y) if (x in V and (x, y) in pm) or (x in U and (y, x) not in pm) else (y, x) + for x, y in G.edges + ] + + # Construct the residual graph of G + residual_G = nx.DiGraph() + residual_G.add_nodes_from(G) + residual_G.add_edges_from(directed_edges) + + if not nx.is_strongly_connected(residual_G): + raise nx.NetworkXError("The residual graph of G is not strongly connected") + + # For node-pairs between V & U, keep min of max number of node-disjoint paths + # Variable $k$ stands for the extendability of graph G + k = float("Inf") + for u in U: + for v in V: + num_paths = sum(1 for _ in nx.node_disjoint_paths(residual_G, u, v)) + k = k if k < num_paths else num_paths + return k diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/generators.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/generators.py new file mode 100644 index 0000000000000000000000000000000000000000..9cea597875b324688a92e321d17b13cc1cfb8878 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/generators.py @@ -0,0 +1,603 @@ +""" +Generators and functions for bipartite graphs. +""" +import math +import numbers +from functools import reduce + +import networkx as nx +from networkx.utils import nodes_or_number, py_random_state + +__all__ = [ + "configuration_model", + "havel_hakimi_graph", + "reverse_havel_hakimi_graph", + "alternating_havel_hakimi_graph", + "preferential_attachment_graph", + "random_graph", + "gnmk_random_graph", + "complete_bipartite_graph", +] + + +@nodes_or_number([0, 1]) +@nx._dispatch(graphs=None) +def complete_bipartite_graph(n1, n2, create_using=None): + """Returns the complete bipartite graph `K_{n_1,n_2}`. + + The graph is composed of two partitions with nodes 0 to (n1 - 1) + in the first and nodes n1 to (n1 + n2 - 1) in the second. + Each node in the first is connected to each node in the second. + + Parameters + ---------- + n1, n2 : integer or iterable container of nodes + If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. + If a container, the elements are the nodes. + create_using : NetworkX graph instance, (default: nx.Graph) + Return graph of this type. + + Notes + ----- + Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are + containers of nodes. If only one of n1 or n2 are integers, that + integer is replaced by `range` of that integer. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
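A sanity check (illustrative; assumes `maximal_extendability` is exposed under `nx.bipartite` as in this package layout) of the extendability computation above, using the known fact that `K_{n,n}` is `(n - 1)`-extendable.

```python
# Sketch only: K_{2,2} is 1-extendable, K_{3,3} is 2-extendable.
import networkx as nx

assert nx.bipartite.maximal_extendability(nx.complete_bipartite_graph(2, 2)) == 1
assert nx.bipartite.maximal_extendability(nx.complete_bipartite_graph(3, 3)) == 2
```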
+ To use it use nx.bipartite.complete_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + n1, top = n1 + n2, bottom = n2 + if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): + bottom = [n1 + i for i in bottom] + G.add_nodes_from(top, bipartite=0) + G.add_nodes_from(bottom, bipartite=1) + if len(G) != len(top) + len(bottom): + raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes") + G.add_edges_from((u, v) for u in top for v in bottom) + G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})" + return G + + +@py_random_state(3) +@nx._dispatch(name="bipartite_configuration_model", graphs=None) +def configuration_model(aseq, bseq, create_using=None, seed=None): + """Returns a random bipartite graph from two given degree sequences. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in set B by choosing + randomly from the possible free stubs, one in A and one in B. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.configuration_model + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length and sum of each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build lists of degree-repeated vertex numbers + stubs = [[v] * aseq[v] for v in range(lena)] + astubs = [x for subseq in stubs for x in subseq] + + stubs = [[v] * bseq[v - lena] for v in range(lena, lena + lenb)] + bstubs = [x for subseq in stubs for x in subseq] + + # shuffle lists + seed.shuffle(astubs) + seed.shuffle(bstubs) + + G.add_edges_from([astubs[i], bstubs[i]] for i in range(suma)) + + G.name = "bipartite_configuration_model" + return G + + +@nx._dispatch(name="bipartite_havel_hakimi_graph", graphs=None) +def havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to the highest degree + nodes in set B until all stubs are connected. 
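An illustrative use of the configuration model above (the sequence values are arbitrary): the two degree sums must match, and since the default result is a MultiGraph, the A-side degrees are reproduced exactly.

```python
# Sketch only: sum(aseq) == sum(bseq) == 4; A-side degrees come out exact.
import networkx as nx

G = nx.bipartite.configuration_model([1, 2, 1], [2, 2], seed=42)
assert sorted(d for _, d in G.degree([0, 1, 2])) == [1, 1, 2]
```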
+ + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + astubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to largest degree nodes in the b set + bstubs.sort() + for target in bstubs[-degree:]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_havel_hakimi_graph" + return G + + +@nx._dispatch(graphs=None) +def reverse_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using a + Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from set A are connected to nodes in the set B by connecting + the highest degree nodes in set A to the lowest degree nodes in + set B until all stubs are connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
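A worked instance (illustrative values) of the construction above: with `aseq=[2, 2, 1]` and `bseq=[3, 2]` the stub sums agree, and the requested degrees are realized exactly.

```python
# Sketch only: both sequences sum to 5; degrees match node for node.
import networkx as nx

G = nx.bipartite.havel_hakimi_graph([2, 2, 1], [3, 2])
assert [d for _, d in sorted(G.degree())] == [2, 2, 1, 3, 2]
```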
+ To use it use nx.bipartite.reverse_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + lena = len(aseq) + lenb = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, lena, lenb) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(lena)] + bstubs = [[bseq[v - lena], v] for v in range(lena, lena + lenb)] + astubs.sort() + bstubs.sort() + while astubs: + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + # connect the source to the smallest degree nodes in the b set + for target in bstubs[0:degree]: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_reverse_havel_hakimi_graph" + return G + + +@nx._dispatch(graphs=None) +def alternating_havel_hakimi_graph(aseq, bseq, create_using=None): + """Returns a bipartite graph from two given degree sequences using + an alternating Havel-Hakimi style construction. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1). + Nodes from the set A are connected to nodes in the set B by + connecting the highest degree nodes in set A to alternatively the + highest and the lowest degree nodes in set B until all stubs are + connected. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + bseq : list + Degree sequence for node set B. + create_using : NetworkX graph instance, optional + Return graph of this type. + + Notes + ----- + The sum of the two sequences must be equal: sum(aseq)=sum(bseq) + If no graph type is specified use MultiGraph with parallel edges. + If you want a graph with no parallel edges use create_using=Graph() + but then the resulting degree sequences might not be exact. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. 
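The reverse variant above pairs high-degree A nodes with the *lowest*-degree B nodes instead; on the same illustrative sequences it still realizes the degrees exactly.

```python
# Sketch only: same sequences as before, different wiring policy.
import networkx as nx

G = nx.bipartite.reverse_havel_hakimi_graph([2, 2, 1], [3, 2])
assert [d for _, d in sorted(G.degree())] == [2, 2, 1, 3, 2]
```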
+ To use it use nx.bipartite.alternating_havel_hakimi_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + # length of the each sequence + naseq = len(aseq) + nbseq = len(bseq) + suma = sum(aseq) + sumb = sum(bseq) + + if not suma == sumb: + raise nx.NetworkXError( + f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}" + ) + + G = _add_nodes_with_bipartite_label(G, naseq, nbseq) + + if len(aseq) == 0 or max(aseq) == 0: + return G # done if no edges + # build list of degree-repeated vertex numbers + astubs = [[aseq[v], v] for v in range(naseq)] + bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)] + while astubs: + astubs.sort() + (degree, u) = astubs.pop() # take of largest degree node in the a set + if degree == 0: + break # done, all are zero + bstubs.sort() + small = bstubs[0 : degree // 2] # add these low degree targets + large = bstubs[(-degree + degree // 2) :] # now high degree targets + stubs = [x for z in zip(large, small) for x in z] # combine, sorry + if len(stubs) < len(small) + len(large): # check for zip truncation + stubs.append(large.pop()) + for target in stubs: + v = target[1] + G.add_edge(u, v) + target[0] -= 1 # note this updates bstubs too. + if target[0] == 0: + bstubs.remove(target) + + G.name = "bipartite_alternating_havel_hakimi_graph" + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def preferential_attachment_graph(aseq, p, create_using=None, seed=None): + """Create a bipartite graph with a preferential attachment model from + a given single degree sequence. + + The graph is composed of two partitions. Set A has nodes 0 to + (len(aseq) - 1) and set B has nodes starting with node len(aseq). + The number of nodes in set B is random. + + Parameters + ---------- + aseq : list + Degree sequence for node set A. + p : float + Probability that a new bottom node is added. + create_using : NetworkX graph instance, optional + Return graph of this type. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + References + ---------- + .. [1] Guillaume, J.L. and Latapy, M., + Bipartite graphs as models of complex networks. + Physica A: Statistical Mechanics and its Applications, + 2006, 371(2), pp.795-813. + .. [2] Jean-Loup Guillaume and Matthieu Latapy, + Bipartite structure of all complex networks, + Inf. Process. Lett. 90, 2004, pg. 215-221 + https://doi.org/10.1016/j.ipl.2004.03.007 + + Notes + ----- + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.preferential_attachment_graph + """ + G = nx.empty_graph(0, create_using, default=nx.MultiGraph) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + if p > 1: + raise nx.NetworkXError(f"probability {p} > 1") + + naseq = len(aseq) + G = _add_nodes_with_bipartite_label(G, naseq, 0) + vv = [[v] * aseq[v] for v in range(naseq)] + while vv: + while vv[0]: + source = vv[0][0] + vv[0].remove(source) + if seed.random() < p or len(G) == naseq: + target = len(G) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + else: + bb = [[b] * G.degree(b) for b in range(naseq, len(G))] + # flatten the list of lists into a list. + bbstubs = reduce(lambda x, y: x + y, bb) + # choose preferentially a bottom node. 
+ target = seed.choice(bbstubs) + G.add_node(target, bipartite=1) + G.add_edge(source, target) + vv.remove(vv[0]) + G.name = "bipartite_preferential_attachment_model" + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def random_graph(n, m, p, seed=None, directed=False): + """Returns a bipartite random graph. + + This is a bipartite version of the binomial (Erdős-Rényi) graph. + The graph is composed of two partitions. Set A has nodes 0 to + (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + p : float + Probability for edge creation. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Notes + ----- + The bipartite random graph algorithm chooses each of the n*m (undirected) + or 2*nm (directed) possible edges with probability p. + + This algorithm is $O(n+m)$ where $m$ is the expected number of edges. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. + + This function is not imported in the main namespace. + To use it use nx.bipartite.random_graph + + See Also + -------- + gnp_random_graph, configuration_model + + References + ---------- + .. [1] Vladimir Batagelj and Ulrik Brandes, + "Efficient generation of large random networks", + Phys. Rev. E, 71, 036113, 2005. + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"fast_gnp_random_graph({n},{m},{p})" + + if p <= 0: + return G + if p >= 1: + return nx.complete_bipartite_graph(n, m) + + lp = math.log(1.0 - p) + + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(v, n + w) + + if directed: + # use the same algorithm to + # add edges from the "m" to "n" set + v = 0 + w = -1 + while v < n: + lr = math.log(1.0 - seed.random()) + w = w + 1 + int(lr / lp) + while w >= m and v < n: + w = w - m + v = v + 1 + if v < n: + G.add_edge(n + w, v) + + return G + + +@py_random_state(3) +@nx._dispatch(graphs=None) +def gnmk_random_graph(n, m, k, seed=None, directed=False): + """Returns a random bipartite graph G_{n,m,k}. + + Produces a bipartite graph chosen randomly out of the set of all graphs + with n top nodes, m bottom nodes, and k edges. + The graph is composed of two sets of nodes. + Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1). + + Parameters + ---------- + n : int + The number of nodes in the first bipartite set. + m : int + The number of nodes in the second bipartite set. + k : int + The number of edges + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + directed : bool, optional (default=False) + If True return a directed graph + + Examples + -------- + from nx.algorithms import bipartite + G = bipartite.gnmk_random_graph(10,20,50) + + See Also + -------- + gnm_random_graph + + Notes + ----- + If k > m * n then a complete bipartite graph is returned. + + This graph is a bipartite version of the `G_{nm}` random graph model. + + The nodes are assigned the attribute 'bipartite' with the value 0 or 1 + to indicate which bipartite set the node belongs to. 
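A quick illustration (arbitrary parameters) of the geometric-skipping generator above; each of the `n * m` possible undirected edges appears independently with probability `p`.

```python
# Sketch only: n*m = 2000 candidate edges, each kept with p = 0.1.
import networkx as nx

G = nx.bipartite.random_graph(50, 40, 0.1, seed=1)
top = {n for n, d in G.nodes(data=True) if d["bipartite"] == 0}
assert len(top) == 50
assert 0 <= G.number_of_edges() <= 50 * 40    # roughly 200 expected
```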
+ + This function is not imported in the main namespace. + To use it use nx.bipartite.gnmk_random_graph + """ + G = nx.Graph() + G = _add_nodes_with_bipartite_label(G, n, m) + if directed: + G = nx.DiGraph(G) + G.name = f"bipartite_gnm_random_graph({n},{m},{k})" + if n == 1 or m == 1: + return G + max_edges = n * m # max_edges for bipartite networks + if k >= max_edges: # Maybe we should raise an exception here + return nx.complete_bipartite_graph(n, m, create_using=G) + + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + bottom = list(set(G) - set(top)) + edge_count = 0 + while edge_count < k: + # generate random edge,u,v + u = seed.choice(top) + v = seed.choice(bottom) + if v in G[u]: + continue + else: + G.add_edge(u, v) + edge_count += 1 + return G + + +def _add_nodes_with_bipartite_label(G, lena, lenb): + G.add_nodes_from(range(lena + lenb)) + b = dict(zip(range(lena), [0] * lena)) + b.update(dict(zip(range(lena, lena + lenb), [1] * lenb))) + nx.set_node_attributes(G, b, "bipartite") + return G diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matching.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matching.py new file mode 100644 index 0000000000000000000000000000000000000000..17d55614bcddae9318ae5993ee0b21573d037878 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matching.py @@ -0,0 +1,589 @@ +# This module uses material from the Wikipedia article Hopcroft--Karp algorithm +# , accessed on +# January 3, 2015, which is released under the Creative Commons +# Attribution-Share-Alike License 3.0 +# . That article includes +# pseudocode, which has been translated into the corresponding Python code. +# +# Portions of this module use code from David Eppstein's Python Algorithms and +# Data Structures (PADS) library, which is dedicated to the public domain (for +# proof, see ). +"""Provides functions for computing maximum cardinality matchings and minimum +weight full matchings in a bipartite graph. + +If you don't care about the particular implementation of the maximum matching +algorithm, simply use the :func:`maximum_matching`. If you do care, you can +import one of the named maximum matching algorithms directly. + +For example, to find a maximum matching in the complete bipartite graph with +two vertices on the left and three vertices on the right: + +>>> G = nx.complete_bipartite_graph(2, 3) +>>> left, right = nx.bipartite.sets(G) +>>> list(left) +[0, 1] +>>> list(right) +[2, 3, 4] +>>> nx.bipartite.maximum_matching(G) +{0: 2, 1: 3, 2: 0, 3: 1} + +The dictionary returned by :func:`maximum_matching` includes a mapping for +vertices in both the left and right vertex sets. + +Similarly, :func:`minimum_weight_full_matching` produces, for a complete +weighted bipartite graph, a matching whose cardinality is the cardinality of +the smaller of the two partitions, and for which the sum of the weights of the +edges included in the matching is minimal. + +""" +import collections +import itertools + +import networkx as nx +from networkx.algorithms.bipartite import sets as bipartite_sets +from networkx.algorithms.bipartite.matrix import biadjacency_matrix + +__all__ = [ + "maximum_matching", + "hopcroft_karp_matching", + "eppstein_matching", + "to_vertex_cover", + "minimum_weight_full_matching", +] + +INFINITY = float("inf") + + +@nx._dispatch +def hopcroft_karp_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + A matching is a set of edges that do not share any nodes. 
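One more illustrative call before the matching algorithms: `gnmk_random_graph` above draws uniformly among bipartite graphs with exactly `k` edges, provided `k <= n * m` and both sides have at least two nodes.

```python
# Sketch only: exactly k = 10 edges between 5 top and 6 bottom nodes.
import networkx as nx

G = nx.bipartite.gnmk_random_graph(5, 6, 10, seed=7)
assert G.number_of_edges() == 10
```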
A maximum + cardinality matching is a matching with the most edges possible. It + is not always unique. Finding a matching in a bipartite graph can be + treated as a networkx flow problem. + + The functions ``hopcroft_karp_matching`` and ``maximum_matching`` + are aliases of the same function. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container of nodes + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with the `Hopcroft--Karp matching algorithm + `_ for + bipartite graphs. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + maximum_matching + hopcroft_karp_matching + eppstein_matching + + References + ---------- + .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for + Maximum Matchings in Bipartite Graphs" In: **SIAM Journal of Computing** + 2.4 (1973), pp. 225--231. . + + """ + + # First we define some auxiliary search functions. + # + # If you are a human reading these auxiliary search functions, the "global" + # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined + # below the functions, so that they are initialized close to the initial + # invocation of the search functions. + def breadth_first_search(): + for v in left: + if leftmatches[v] is None: + distances[v] = 0 + queue.append(v) + else: + distances[v] = INFINITY + distances[None] = INFINITY + while queue: + v = queue.popleft() + if distances[v] < distances[None]: + for u in G[v]: + if distances[rightmatches[u]] is INFINITY: + distances[rightmatches[u]] = distances[v] + 1 + queue.append(rightmatches[u]) + return distances[None] is not INFINITY + + def depth_first_search(v): + if v is not None: + for u in G[v]: + if distances[rightmatches[u]] == distances[v] + 1: + if depth_first_search(rightmatches[u]): + rightmatches[u] = v + leftmatches[v] = u + return True + distances[v] = INFINITY + return False + return True + + # Initialize the "global" variables that maintain state during the search. + left, right = bipartite_sets(G, top_nodes) + leftmatches = {v: None for v in left} + rightmatches = {v: None for v in right} + distances = {} + queue = collections.deque() + + # Implementation note: this counter is incremented as pairs are matched but + # it is currently not used elsewhere in the computation. + num_matched_pairs = 0 + while breadth_first_search(): + for v in left: + if leftmatches[v] is None: + if depth_first_search(v): + num_matched_pairs += 1 + + # Strip the entries matched to `None`. + leftmatches = {k: v for k, v in leftmatches.items() if v is not None} + rightmatches = {k: v for k, v in rightmatches.items() if v is not None} + + # At this point, the left matches and the right matches are inverses of one + # another. 
In other words, + # + # leftmatches == {v, k for k, v in rightmatches.items()} + # + # Finally, we combine both the left matches and right matches. + return dict(itertools.chain(leftmatches.items(), rightmatches.items())) + + +@nx._dispatch +def eppstein_matching(G, top_nodes=None): + """Returns the maximum cardinality matching of the bipartite graph `G`. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. But if more than one solution exists an exception + will be raised. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matching`, such that + ``matching[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matching`. + + Raises + ------ + AmbiguousSolution + Raised if the input bipartite graph is disconnected and no container + with all nodes in one bipartite set is provided. When determining + the nodes in each bipartite set more than one valid solution is + possible if the input graph is disconnected. + + Notes + ----- + This function is implemented with David Eppstein's version of the algorithm + Hopcroft--Karp algorithm (see :func:`hopcroft_karp_matching`), which + originally appeared in the `Python Algorithms and Data Structures library + (PADS) `_. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + + hopcroft_karp_matching + + """ + # Due to its original implementation, a directed graph is needed + # so that the two sets of bipartite nodes can be distinguished + left, right = bipartite_sets(G, top_nodes) + G = nx.DiGraph(G.edges(left)) + # initialize greedy matching (redundant, but faster than full search) + matching = {} + for u in G: + for v in G[u]: + if v not in matching: + matching[v] = u + break + while True: + # structure residual graph into layers + # pred[u] gives the neighbor in the previous layer for u in U + # preds[v] gives a list of neighbors in the previous layer for v in V + # unmatched gives a list of unmatched vertices in final layer of V, + # and is also used as a flag value for pred[u] when u is in the first + # layer + preds = {} + unmatched = [] + pred = {u: unmatched for u in G} + for v in matching: + del pred[matching[v]] + layer = list(pred) + + # repeatedly extend layering structure by another pair of layers + while layer and not unmatched: + newLayer = {} + for u in layer: + for v in G[u]: + if v not in preds: + newLayer.setdefault(v, []).append(u) + layer = [] + for v in newLayer: + preds[v] = newLayer[v] + if v in matching: + layer.append(matching[v]) + pred[matching[v]] = v + else: + unmatched.append(v) + + # did we finish layering without finding any alternating paths? + if not unmatched: + # TODO - The lines between --- were unused and were thus commented + # out. This whole commented chunk should be reviewed to determine + # whether it should be built upon or completely removed. + # --- + # unlayered = {} + # for u in G: + # # TODO Why is extra inner loop necessary? 
+ # for v in G[u]: + # if v not in preds: + # unlayered[v] = None + # --- + # TODO Originally, this function returned a three-tuple: + # + # return (matching, list(pred), list(unlayered)) + # + # For some reason, the documentation for this function + # indicated that the second and third elements of the returned + # three-tuple would be the vertices in the left and right vertex + # sets, respectively, that are also in the maximum independent set. + # However, what I think the author meant was that the second + # element is the list of vertices that were unmatched and the third + # element was the list of vertices that were matched. Since that + # seems to be the case, they don't really need to be returned, + # since that information can be inferred from the matching + # dictionary. + + # All the matched nodes must be a key in the dictionary + for key in matching.copy(): + matching[matching[key]] = key + return matching + + # recursively search backward through layers to find alternating paths + # recursion returns true if found path, false otherwise + def recurse(v): + if v in preds: + L = preds.pop(v) + for u in L: + if u in pred: + pu = pred.pop(u) + if pu is unmatched or recurse(pu): + matching[v] = u + return True + return False + + for v in unmatched: + recurse(v) + + +def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets): + """Returns True if and only if the vertex `v` is connected to one of + the target vertices by an alternating path in `G`. + + An *alternating path* is a path in which every other edge is in the + specified maximum matching (and the remaining edges in the path are not in + the matching). An alternating path may have matched edges in the even + positions or in the odd positions, as long as the edges alternate between + 'matched' and 'unmatched'. + + `G` is an undirected bipartite NetworkX graph. + + `v` is a vertex in `G`. + + `matched_edges` is a set of edges present in a maximum matching in `G`. + + `unmatched_edges` is a set of edges not present in a maximum + matching in `G`. + + `targets` is a set of vertices. + + """ + + def _alternating_dfs(u, along_matched=True): + """Returns True if and only if `u` is connected to one of the + targets by an alternating path. + + `u` is a vertex in the graph `G`. + + If `along_matched` is True, this step of the depth-first search + will continue only through edges in the given matching. Otherwise, it + will continue only through edges *not* in the given matching. + + """ + visited = set() + # Follow matched edges when depth is even, + # and follow unmatched edges when depth is odd. + initial_depth = 0 if along_matched else 1 + stack = [(u, iter(G[u]), initial_depth)] + while stack: + parent, children, depth = stack[-1] + valid_edges = matched_edges if depth % 2 else unmatched_edges + try: + child = next(children) + if child not in visited: + if (parent, child) in valid_edges or (child, parent) in valid_edges: + if child in targets: + return True + visited.add(child) + stack.append((child, iter(G[child]), depth + 1)) + except StopIteration: + stack.pop() + return False + + # Check for alternating paths starting with edges in the matching, then + # check for alternating paths starting with edges not in the + # matching. 
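+ # Added illustration (not in the original source): in the path graph
+ # 0-1-2-3 with maximum matching {0: 1, 1: 0, 2: 3, 3: 2}, the matched
+ # edge set is {(0, 1), (2, 3)} and the only unmatched edge is (1, 2).
+ # With v = 1 and targets = {3}, one of the two parity-checked searches
+ # below follows 1-2 (unmatched) and then 2-3 (matched), reaching the
+ # target, so the function returns True.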
+ return _alternating_dfs(v, along_matched=True) or _alternating_dfs(
+ v, along_matched=False
+ )
+
+
+def _connected_by_alternating_paths(G, matching, targets):
+ """Returns the set of vertices that are connected to one of the target
+ vertices by an alternating path in `G` or are themselves a target.
+
+ An *alternating path* is a path in which every other edge is in the
+ specified maximum matching (and the remaining edges in the path are not in
+ the matching). An alternating path may have matched edges in the even
+ positions or in the odd positions, as long as the edges alternate between
+ 'matched' and 'unmatched'.
+
+ `G` is an undirected bipartite NetworkX graph.
+
+ `matching` is a dictionary representing a maximum matching in `G`, as
+ returned by, for example, :func:`maximum_matching`.
+
+ `targets` is a set of vertices.
+
+ """
+ # Get the set of matched edges and the set of unmatched edges. Only include
+ # one version of each undirected edge (for example, include edge (1, 2) but
+ # not edge (2, 1)). Using frozensets as an intermediary step we do not
+ # require nodes to be orderable.
+ edge_sets = {frozenset((u, v)) for u, v in matching.items()}
+ matched_edges = {tuple(edge) for edge in edge_sets}
+ unmatched_edges = {
+ (u, v) for (u, v) in G.edges() if frozenset((u, v)) not in edge_sets
+ }
+
+ return {
+ v
+ for v in G
+ if v in targets
+ or _is_connected_by_alternating_path(
+ G, v, matched_edges, unmatched_edges, targets
+ )
+ }
+
+
+@nx._dispatch
+def to_vertex_cover(G, matching, top_nodes=None):
+ """Returns the minimum vertex cover corresponding to the given maximum
+ matching of the bipartite graph `G`.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ Undirected bipartite graph
+
+ matching : dictionary
+
+ A dictionary whose keys are vertices in `G` and whose values are the
+ distinct neighbors comprising the maximum matching for `G`, as returned
+ by, for example, :func:`maximum_matching`. The dictionary *must*
+ represent the maximum matching.
+
+ top_nodes : container
+
+ Container with all nodes in one bipartite node set. If not supplied
+ it will be computed. But if more than one solution exists an exception
+ will be raised.
+
+ Returns
+ -------
+ vertex_cover : :class:`set`
+
+ The minimum vertex cover in `G`.
+
+ Raises
+ ------
+ AmbiguousSolution
+ Raised if the input bipartite graph is disconnected and no container
+ with all nodes in one bipartite set is provided. When determining
+ the nodes in each bipartite set more than one valid solution is
+ possible if the input graph is disconnected.
+
+ Notes
+ -----
+ This function is implemented using the procedure guaranteed by `Konig's
+ theorem
+ <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_(graph_theory)>`_,
+ which proves an equivalence between a maximum matching and a minimum vertex
+ cover in bipartite graphs.
+
+ Since a minimum vertex cover is the complement of a maximum independent set
+ for any graph, one can compute the maximum independent set of a bipartite
+ graph this way:
+
+ >>> G = nx.complete_bipartite_graph(2, 3)
+ >>> matching = nx.bipartite.maximum_matching(G)
+ >>> vertex_cover = nx.bipartite.to_vertex_cover(G, matching)
+ >>> independent_set = set(G) - vertex_cover
+ >>> print(list(independent_set))
+ [2, 3, 4]
+
+ See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+ for further details on how bipartite graphs are handled in NetworkX.
+
+ """
+ # This is a Python implementation of the algorithm described at
+ # <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_(graph_theory)>.
+ L, R = bipartite_sets(G, top_nodes)
+ # Let U be the set of unmatched vertices in the left vertex set.
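+ # Added note (consistent with the matching functions above, which store
+ # both directions of every matched pair): every matched vertex appears as
+ # a key in `matching`, so removing its key set from set(G) leaves exactly
+ # the unmatched vertices.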
+ unmatched_vertices = set(G) - set(matching) + U = unmatched_vertices & L + # Let Z be the set of vertices that are either in U or are connected to U + # by alternating paths. + Z = _connected_by_alternating_paths(G, matching, U) + # At this point, every edge either has a right endpoint in Z or a left + # endpoint not in Z. This gives us the vertex cover. + return (L - Z) | (R & Z) + + +#: Returns the maximum cardinality matching in the given bipartite graph. +#: +#: This function is simply an alias for :func:`hopcroft_karp_matching`. +maximum_matching = hopcroft_karp_matching + + +@nx._dispatch(edge_attrs="weight") +def minimum_weight_full_matching(G, top_nodes=None, weight="weight"): + r"""Returns a minimum weight full matching of the bipartite graph `G`. + + Let :math:`G = ((U, V), E)` be a weighted bipartite graph with real weights + :math:`w : E \to \mathbb{R}`. This function then produces a matching + :math:`M \subseteq E` with cardinality + + .. math:: + \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert), + + which minimizes the sum of the weights of the edges included in the + matching, :math:`\sum_{e \in M} w(e)`, or raises an error if no such + matching exists. + + When :math:`\lvert U \rvert = \lvert V \rvert`, this is commonly + referred to as a perfect matching; here, since we allow + :math:`\lvert U \rvert` and :math:`\lvert V \rvert` to differ, we + follow Karp [1]_ and refer to the matching as *full*. + + Parameters + ---------- + G : NetworkX graph + + Undirected bipartite graph + + top_nodes : container + + Container with all nodes in one bipartite node set. If not supplied + it will be computed. + + weight : string, optional (default='weight') + + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + Returns + ------- + matches : dictionary + + The matching is returned as a dictionary, `matches`, such that + ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched + nodes do not occur as a key in `matches`. + + Raises + ------ + ValueError + Raised if no full matching exists. + + ImportError + Raised if SciPy is not available. + + Notes + ----- + The problem of determining a minimum weight full matching is also known as + the rectangular linear assignment problem. This implementation defers the + calculation of the assignment to SciPy. + + References + ---------- + .. [1] Richard Manning Karp: + An algorithm to Solve the m x n Assignment Problem in Expected Time + O(mn log n). + Networks, 10(2):143–152, 1980. + + """ + import numpy as np + import scipy as sp + + left, right = nx.bipartite.sets(G, top_nodes) + U = list(left) + V = list(right) + # We explicitly create the biadjacency matrix having infinities + # where edges are missing (as opposed to zeros, which is what one would + # get by using toarray on the sparse matrix). + weights_sparse = biadjacency_matrix( + G, row_order=U, column_order=V, weight=weight, format="coo" + ) + weights = np.full(weights_sparse.shape, np.inf) + weights[weights_sparse.row, weights_sparse.col] = weights_sparse.data + left_matches = sp.optimize.linear_sum_assignment(weights) + d = {U[u]: V[v] for u, v in zip(*left_matches)} + # d will contain the matching from edges in left to right; we need to + # add the ones from right to left as well. 
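+ # Added illustration (hypothetical values, not from the original source):
+ # if the assignment step produced d == {0: "a", 1: "b"}, the update below
+ # extends it to {0: "a", 1: "b", "a": 0, "b": 1}, the same two-way format
+ # returned by the other matching functions.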
+ d.update({v: u for u, v in d.items()}) + return d diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matrix.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..e567967757451d8fa87b3736a8e95b9c59ce17ca --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/matrix.py @@ -0,0 +1,167 @@ +""" +==================== +Biadjacency matrices +==================== +""" +import itertools + +import networkx as nx +from networkx.convert_matrix import _generate_weighted_edges + +__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"] + + +@nx._dispatch(edge_attrs="weight") +def biadjacency_matrix( + G, row_order, column_order=None, dtype=None, weight="weight", format="csr" +): + r"""Returns the biadjacency matrix of the bipartite graph G. + + Let `G = (U, V, E)` be a bipartite graph with node sets + `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency + matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1` + if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is + not `None` and matches the name of an edge attribute, its value is + used instead of 1. + + Parameters + ---------- + G : graph + A NetworkX graph + + row_order : list of nodes + The rows of the matrix are ordered according to the list of nodes. + + column_order : list, optional + The columns of the matrix are ordered according to the list of nodes. + If column_order is None, then the ordering of columns is arbitrary. + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None, optional (default='weight') + The edge data key used to provide each value in the matrix. + If None, then each edge has weight 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [2]_ for details. + + Returns + ------- + M : SciPy sparse array + Biadjacency matrix representation of the bipartite graph G. + + Notes + ----- + No attempt is made to check that the input graph is bipartite. + + For directed bipartite graphs only successors are considered as neighbors. + To obtain an adjacency matrix with ones (or weight values) for both + predecessors and successors you have to generate two biadjacency matrices + where the rows of one of them are the columns of the other, and then add + one to the transpose of the other. + + See Also + -------- + adjacency_matrix + from_biadjacency_matrix + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + .. [2] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + nlen = len(row_order) + if nlen == 0: + raise nx.NetworkXError("row_order is empty list") + if len(row_order) != len(set(row_order)): + msg = "Ambiguous ordering: `row_order` contained duplicates." + raise nx.NetworkXError(msg) + if column_order is None: + column_order = list(set(G) - set(row_order)) + mlen = len(column_order) + if len(column_order) != len(set(column_order)): + msg = "Ambiguous ordering: `column_order` contained duplicates." 
+ raise nx.NetworkXError(msg) + + row_index = dict(zip(row_order, itertools.count())) + col_index = dict(zip(column_order, itertools.count())) + + if G.number_of_edges() == 0: + row, col, data = [], [], [] + else: + row, col, data = zip( + *( + (row_index[u], col_index[v], d.get(weight, 1)) + for u, v, d in G.edges(row_order, data=True) + if u in row_index and v in col_index + ) + ) + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, mlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err + + +@nx._dispatch(graphs=None) +def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"): + r"""Creates a new bipartite graph from a biadjacency matrix given as a + SciPy sparse array. + + Parameters + ---------- + A: scipy sparse array + A biadjacency matrix representation of a graph + + create_using: NetworkX graph + Use specified graph for result. The default is Graph() + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + The nodes are labeled with the attribute `bipartite` set to an integer + 0 or 1 representing membership in part 0 or part 1 of the bipartite graph. + + If `create_using` is an instance of :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph` and the entries of `A` are of + type :class:`int`, then this function returns a multigraph (of the same + type as `create_using`) with parallel edges. In this case, `edge_attribute` + will be ignored. + + See Also + -------- + biadjacency_matrix + from_numpy_array + + References + ---------- + [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n), bipartite=0) + G.add_nodes_from(range(n, n + m), bipartite=1) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = ((u, n + v, d) for (u, v, d) in _generate_weighted_edges(A)) + # If the entries in the adjacency matrix are integers and the graph is a + # multigraph, then create parallel edges, each with weight 1, for each + # entry in the adjacency matrix. Otherwise, create one edge for each + # positive entry in the adjacency matrix and set the weight of that edge to + # be the entry in the matrix. 
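+ # Added illustration (hypothetical entry, not from the original source):
+ # for a MultiGraph and an integer matrix with A[0, 1] == 3, the branch
+ # below expands the single triple (0, n + 1, 3) into three triples
+ # (0, n + 1, 1), i.e. three parallel edges of weight 1.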
+ if A.dtype.kind in ("i", "u") and G.is_multigraph():
+ chain = itertools.chain.from_iterable
+ triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
+ G.add_weighted_edges_from(triples, weight=edge_attribute)
+ return G
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/projection.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/projection.py
new file mode 100644
index 0000000000000000000000000000000000000000..57f960e13b3befbb575b7b318883cc81aafbecd8
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/projection.py
@@ -0,0 +1,528 @@
+"""One-mode (unipartite) projections of bipartite graphs."""
+import networkx as nx
+from networkx.exception import NetworkXAlgorithmError
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "projected_graph",
+ "weighted_projected_graph",
+ "collaboration_weighted_projected_graph",
+ "overlap_weighted_projected_graph",
+ "generic_weighted_projected_graph",
+]
+
+
+@nx._dispatch(graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True)
+def projected_graph(B, nodes, multigraph=False):
+ r"""Returns the projection of B onto one of its node sets.
+
+ Returns the graph G that is the projection of the bipartite graph B
+ onto the specified nodes. The projected nodes retain their attributes
+ and are connected in G if they have a common neighbor in B.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ multigraph: bool (default=False)
+ If True return a multigraph where the multiple edges represent multiple
+ shared neighbors. The edge key in the multigraph is assigned to the
+ label of the neighbor.
+
+ Returns
+ -------
+ Graph : NetworkX graph or multigraph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(4)
+ >>> G = bipartite.projected_graph(B, [1, 3])
+ >>> list(G)
+ [1, 3]
+ >>> list(G.edges())
+ [(1, 3)]
+
+ If nodes `a`, and `b` are connected through both nodes 1 and 2 then
+ building a multigraph results in two edges in the projection onto
+ [`a`, `b`]:
+
+ >>> B = nx.Graph()
+ >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)])
+ >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True)
+ >>> print([sorted((u, v)) for u, v in G.edges()])
+ [['a', 'b'], ['a', 'b']]
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite.
+ Returns a simple graph that is the projection of the bipartite graph B
+ onto the set of nodes given in list nodes. If multigraph=True then
+ a multigraph is returned with an edge for every shared neighbor.
+
+ Directed graphs are allowed as input. The output will also then
+ be a directed graph with edges if there is a directed path between
+ the nodes.
+
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+ for further details on how bipartite graphs are handled in NetworkX.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ generic_weighted_projected_graph
+ """
+ if B.is_multigraph():
+ raise nx.NetworkXError("not defined for multigraphs")
+ if B.is_directed():
+ directed = True
+ if multigraph:
+ G = nx.MultiDiGraph()
+ else:
+ G = nx.DiGraph()
+ else:
+ directed = False
+ if multigraph:
+ G = nx.MultiGraph()
+ else:
+ G = nx.Graph()
+ G.graph.update(B.graph)
+ G.add_nodes_from((n, B.nodes[n]) for n in nodes)
+ for u in nodes:
+ nbrs2 = {v for nbr in B[u] for v in B[nbr] if v != u}
+ if multigraph:
+ for n in nbrs2:
+ if directed:
+ links = set(B[u]) & set(B.pred[n])
+ else:
+ links = set(B[u]) & set(B[n])
+ for l in links:
+ if not G.has_edge(u, n, l):
+ G.add_edge(u, n, key=l)
+ else:
+ G.add_edges_from((u, n) for n in nbrs2)
+ return G
+
+
+@not_implemented_for("multigraph")
+@nx._dispatch(graphs="B")
+def weighted_projected_graph(B, nodes, ratio=False):
+ r"""Returns a weighted projection of B onto one of its node sets.
+
+ The weighted projected graph is the projection of the bipartite
+ network B onto the specified nodes with weights representing the
+ number of shared neighbors or the ratio between actual shared
+ neighbors and possible shared neighbors if ``ratio is True`` [1]_.
+ The nodes retain their attributes and are connected in the resulting
+ graph if they have an edge to a common node in the original graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Distinct nodes to project onto (the "bottom" nodes).
+
+ ratio: Bool (default=False)
+ If True, edge weight is the ratio between actual shared neighbors
+ and maximum possible shared neighbors (i.e., the size of the other
+ node set). If False, edge weight is the number of shared neighbors.
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> B = nx.path_graph(4)
+ >>> G = bipartite.weighted_projected_graph(B, [1, 3])
+ >>> list(G)
+ [1, 3]
+ >>> list(G.edges(data=True))
+ [(1, 3, {'weight': 1})]
+ >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True)
+ >>> list(G.edges(data=True))
+ [(1, 3, {'weight': 0.5})]
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite, or that
+ the input nodes are distinct. However, if the number of input nodes is
+ greater than or equal to the number of nodes in the graph B, an exception
+ is raised. If the nodes are not distinct and no exception is raised, the
+ output weights will be incorrect.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+ for further details on how bipartite graphs are handled in NetworkX.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ generic_weighted_projected_graph,
+ projected_graph
+
+ References
+ ----------
+ .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
+ Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
+ of Social Network Analysis. Sage Publications.
+ """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + n_top = len(B) - len(nodes) + + if n_top < 1: + raise NetworkXAlgorithmError( + f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n" + "They are either not a valid bipartite partition or contain duplicates" + ) + + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + common = unbrs & vnbrs + if not ratio: + weight = len(common) + else: + weight = len(common) / n_top + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def collaboration_weighted_projected_graph(B, nodes): + r"""Newman's weighted projection of B onto one of its node sets. + + The collaboration weighted projection is the projection of the + bipartite network B onto the specified nodes with weights assigned + using Newman's collaboration model [1]_: + + .. math:: + + w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1} + + where `u` and `v` are nodes from the bottom bipartite node set, + and `k` is a node of the top node set. + The value `d_k` is the degree of node `k` in the bipartite + network and `\delta_{u}^{k}` is 1 if node `u` is + linked to node `k` in the original bipartite graph or 0 otherwise. + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite + graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> B.add_edge(1, 5) + >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5]) + >>> list(G) + [0, 2, 4, 5] + >>> for edge in sorted(G.edges(data=True)): + ... print(edge) + ... + (0, 2, {'weight': 0.5}) + (0, 5, {'weight': 0.5}) + (2, 4, {'weight': 1.0}) + (2, 5, {'weight': 0.5}) + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + overlap_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Scientific collaboration networks: II. + Shortest paths, weighted networks, and centrality, + M. E. J. Newman, Phys. Rev. E 64, 016132 (2001). 
+ """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr] if n != u} + for v in nbrs2: + vnbrs = set(pred[v]) + common_degree = (len(B[n]) for n in unbrs & vnbrs) + weight = sum(1.0 / (deg - 1) for deg in common_degree if deg > 1) + G.add_edge(u, v, weight=weight) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B") +def overlap_weighted_projected_graph(B, nodes, jaccard=True): + r"""Overlap weighted projection of B onto one of its node sets. + + The overlap weighted projection is the projection of the bipartite + network B onto the specified nodes with weights representing + the Jaccard index between the neighborhoods of the two nodes in the + original bipartite network [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|} + + or if the parameter 'jaccard' is False, the fraction of common + neighbors by minimum of both nodes degree in the original + bipartite graph [1]_: + + .. math:: + + w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)} + + The nodes retain their attributes and are connected in the resulting + graph if have an edge to a common node in the original bipartite graph. + + Parameters + ---------- + B : NetworkX graph + The input graph should be bipartite. + + nodes : list or iterable + Nodes to project onto (the "bottom" nodes). + + jaccard: Bool (default=True) + + Returns + ------- + Graph : NetworkX graph + A graph that is the projection onto the given nodes. + + Examples + -------- + >>> from networkx.algorithms import bipartite + >>> B = nx.path_graph(5) + >>> nodes = [0, 2, 4] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes) + >>> list(G) + [0, 2, 4] + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})] + >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False) + >>> list(G.edges(data=True)) + [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})] + + Notes + ----- + No attempt is made to verify that the input graph B is bipartite. + The graph and node properties are (shallow) copied to the projected graph. + + See :mod:`bipartite documentation ` + for further details on how bipartite graphs are handled in NetworkX. + + See Also + -------- + is_bipartite, + is_bipartite_node_set, + sets, + weighted_projected_graph, + collaboration_weighted_projected_graph, + generic_weighted_projected_graph, + projected_graph + + References + ---------- + .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation + Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook + of Social Network Analysis. Sage Publications. + + """ + if B.is_directed(): + pred = B.pred + G = nx.DiGraph() + else: + pred = B.adj + G = nx.Graph() + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + unbrs = set(B[u]) + nbrs2 = {n for nbr in unbrs for n in B[nbr]} - {u} + for v in nbrs2: + vnbrs = set(pred[v]) + if jaccard: + wt = len(unbrs & vnbrs) / len(unbrs | vnbrs) + else: + wt = len(unbrs & vnbrs) / min(len(unbrs), len(vnbrs)) + G.add_edge(u, v, weight=wt) + return G + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs="B", preserve_all_attrs=True) +def generic_weighted_projected_graph(B, nodes, weight_function=None): + r"""Weighted projection of B with a user-specified weight function. 
+
+ The bipartite network B is projected onto the specified nodes
+ with weights computed by a user-specified function. This function
+ must accept as parameters the graph and two nodes, and must
+ return an integer or a float.
+
+ The nodes retain their attributes and are connected in the resulting graph
+ if they have an edge to a common node in the original graph.
+
+ Parameters
+ ----------
+ B : NetworkX graph
+ The input graph should be bipartite.
+
+ nodes : list or iterable
+ Nodes to project onto (the "bottom" nodes).
+
+ weight_function : function
+ This function must accept as parameters the same input graph passed to
+ this function and two nodes, and must return an integer or a float.
+ The default function computes the number of shared neighbors.
+
+ Returns
+ -------
+ Graph : NetworkX graph
+ A graph that is the projection onto the given nodes.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> # Define some custom weight functions
+ >>> def jaccard(G, u, v):
+ ... unbrs = set(G[u])
+ ... vnbrs = set(G[v])
+ ... return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
+ ...
+ >>> def my_weight(G, u, v, weight="weight"):
+ ... w = 0
+ ... for nbr in set(G[u]) & set(G[v]):
+ ... w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1)
+ ... return w
+ ...
+ >>> # A complete bipartite graph with 4 nodes and 4 edges
+ >>> B = nx.complete_bipartite_graph(2, 2)
+ >>> # Add some arbitrary weight to the edges
+ >>> for i, (u, v) in enumerate(B.edges()):
+ ... B.edges[u, v]["weight"] = i + 1
+ ...
+ >>> for edge in B.edges(data=True):
+ ... print(edge)
+ ...
+ (0, 2, {'weight': 1})
+ (0, 3, {'weight': 2})
+ (1, 2, {'weight': 3})
+ (1, 3, {'weight': 4})
+ >>> # By default, the weight is the number of shared neighbors
+ >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+ >>> print(list(G.edges(data=True)))
+ [(0, 1, {'weight': 2})]
+ >>> # To specify a custom weight function use the weight_function parameter
+ >>> G = bipartite.generic_weighted_projected_graph(
+ ... B, [0, 1], weight_function=jaccard
+ ... )
+ >>> print(list(G.edges(data=True)))
+ [(0, 1, {'weight': 1.0})]
+ >>> G = bipartite.generic_weighted_projected_graph(
+ ... B, [0, 1], weight_function=my_weight
+ ... )
+ >>> print(list(G.edges(data=True)))
+ [(0, 1, {'weight': 10})]
+
+ Notes
+ -----
+ No attempt is made to verify that the input graph B is bipartite.
+ The graph and node properties are (shallow) copied to the projected graph.
+
+ See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
+ for further details on how bipartite graphs are handled in NetworkX.
+
+ See Also
+ --------
+ is_bipartite,
+ is_bipartite_node_set,
+ sets,
+ weighted_projected_graph,
+ collaboration_weighted_projected_graph,
+ overlap_weighted_projected_graph,
+ projected_graph
+
+ """
+ if B.is_directed():
+ pred = B.pred
+ G = nx.DiGraph()
+ else:
+ pred = B.adj
+ G = nx.Graph()
+ if weight_function is None:
+
+ def weight_function(G, u, v):
+ # Notice that we use set(pred[v]) for handling the directed case.
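+ # Added note (an observation, not in the original source): `pred` is
+ # B.adj for undirected graphs, so this default is simply the number of
+ # shared neighbors, the same weight used by weighted_projected_graph
+ # with ratio=False.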
+ return len(set(G[u]) & set(pred[v])) + + G.graph.update(B.graph) + G.add_nodes_from((n, B.nodes[n]) for n in nodes) + for u in nodes: + nbrs2 = {n for nbr in set(B[u]) for n in B[nbr]} - {u} + for v in nbrs2: + weight = weight_function(B, u, v) + G.add_edge(u, v, weight=weight) + return G diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..04b3ae9ca756facd5854caac37cf7fcf2ec6d431 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py @@ -0,0 +1,111 @@ +"""Node redundancy for bipartite graphs.""" +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError + +__all__ = ["node_redundancy"] + + +@nx._dispatch +def node_redundancy(G, nodes=None): + r"""Computes the node redundancy coefficients for the nodes in the bipartite + graph `G`. + + The redundancy coefficient of a node `v` is the fraction of pairs of + neighbors of `v` that are both linked to other nodes. In a one-mode + projection these nodes would be linked together even if `v` were + not there. + + More formally, for any vertex `v`, the *redundancy coefficient of `v`* is + defined by + + .. math:: + + rc(v) = \frac{|\{\{u, w\} \subseteq N(v), + \: \exists v' \neq v,\: (v',u) \in E\: + \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}}, + + where `N(v)` is the set of neighbors of `v` in `G`. + + Parameters + ---------- + G : graph + A bipartite graph + + nodes : list or iterable (optional) + Compute redundancy for these nodes. The default is all nodes in G. + + Returns + ------- + redundancy : dictionary + A dictionary keyed by node with the node redundancy value. + + Examples + -------- + Compute the redundancy coefficient of each node in a graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> rc[0] + 1.0 + + Compute the average redundancy for the graph:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> sum(rc.values()) / len(G) + 1.0 + + Compute the average redundancy for a set of nodes:: + + >>> from networkx.algorithms import bipartite + >>> G = nx.cycle_graph(4) + >>> rc = bipartite.node_redundancy(G) + >>> nodes = [0, 2] + >>> sum(rc[n] for n in nodes) / len(nodes) + 1.0 + + Raises + ------ + NetworkXError + If any of the nodes in the graph (or in `nodes`, if specified) has + (out-)degree less than two (which would result in division by zero, + according to the definition of the redundancy coefficient). + + References + ---------- + .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008). + Basic notions for the analysis of large two-mode networks. + Social Networks 30(1), 31--48. + + """ + if nodes is None: + nodes = G + if any(len(G[v]) < 2 for v in nodes): + raise NetworkXError( + "Cannot compute redundancy coefficient for a node" + " that has fewer than two neighbors." + ) + # TODO This can be trivially parallelized. + return {v: _node_redundancy(G, v) for v in nodes} + + +def _node_redundancy(G, v): + """Returns the redundancy of the node `v` in the bipartite graph `G`. + + If `G` is a graph with `n` nodes, the redundancy of a node is the ratio + of the "overlap" of `v` to the maximum possible overlap of `v` + according to its degree. 
The overlap of `v` is the number of pairs of
+ neighbors that have mutual neighbors themselves, other than `v`.
+
+ `v` must have at least two neighbors in `G`.
+
+ """
+ n = len(G[v])
+ overlap = sum(
+ 1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v}
+ )
+ return (2 * overlap) / (n * (n - 1))
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/spectral.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/spectral.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b414243ac8faa2cb10f17a1e93f894b4202034
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/spectral.py
@@ -0,0 +1,68 @@
+"""
+Spectral bipartivity measure.
+"""
+import networkx as nx
+
+__all__ = ["spectral_bipartivity"]
+
+
+@nx._dispatch(edge_attrs="weight")
+def spectral_bipartivity(G, nodes=None, weight="weight"):
+ """Returns the spectral bipartivity.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nodes : list or container, optional (default is all nodes)
+ Nodes to return value of spectral bipartivity contribution.
+
+ weight : string or None, optional (default = 'weight')
+ Edge data key to use for edge weights. If None, weights are set to 1.
+
+ Returns
+ -------
+ sb : float or dict
+ A single number if the keyword nodes is not specified, or
+ a dictionary keyed by node with the spectral bipartivity contribution
+ of that node as the value.
+
+ Examples
+ --------
+ >>> from networkx.algorithms import bipartite
+ >>> G = nx.path_graph(4)
+ >>> bipartite.spectral_bipartivity(G)
+ 1.0
+
+ Notes
+ -----
+ This implementation uses NumPy (dense) matrices, which are not efficient
+ for storing large sparse graphs.
+
+ See Also
+ --------
+ color
+
+ References
+ ----------
+ .. [1] E. Estrada and J. A.
Rodríguez-Velázquez, "Spectral measures of + bipartivity in complex networks", PhysRev E 72, 046105 (2005) + """ + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist, weight=weight) + expA = sp.linalg.expm(A) + expmA = sp.linalg.expm(-A) + coshA = 0.5 * (expA + expmA) + if nodes is None: + # return single number for entire graph + return coshA.diagonal().sum() / expA.diagonal().sum() + else: + # contribution for individual nodes + index = dict(zip(nodelist, range(len(nodelist)))) + sb = {} + for n in nodes: + i = index[n] + sb[n] = coshA[i, i] / expA[i, i] + return sb diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0ab05775f486fc4cc32fa5b4775993fd11a9a3c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a88e7c042853100163878f9e990d1f1408d304 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29d522f5522144f99364f90b9140a6c5668330ec Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aa24e6226ace4268bbea795164cf24e387b4384 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b09e9e03cc428be43b7a7c48219d5e2a02984b6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c5c0fd33137c9b9ad002fb40416a1cf09707eba Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a488b202146ef32de4250d13d8b0de021a756ca7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0942b2798ae04cbee477aa5be373e556be19479e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8af7ab218f268c3b5ded599281aa496b4c08aa4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8600e6203401043c10af71316b8cd1007fc59e9f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e457e37fb1dae9c8c214be3872335ec95a44d5a2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f53132b045b78b1dc93a64fb9c41e5e13690b4b1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9dfa44fe9b5177a8c62740223ca9c4ebbbaf1876 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py new 
file mode 100644 index 0000000000000000000000000000000000000000..655506b4f74110b57cb37db277e2be50bb0be8f4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py @@ -0,0 +1,125 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteBasic: + def test_is_bipartite(self): + assert bipartite.is_bipartite(nx.path_graph(4)) + assert bipartite.is_bipartite(nx.DiGraph([(1, 0)])) + assert not bipartite.is_bipartite(nx.complete_graph(3)) + + def test_bipartite_color(self): + G = nx.path_graph(4) + c = bipartite.color(G) + assert c == {0: 1, 1: 0, 2: 1, 3: 0} + + def test_not_bipartite_color(self): + with pytest.raises(nx.NetworkXError): + c = bipartite.color(nx.complete_graph(4)) + + def test_bipartite_directed(self): + G = bipartite.random_graph(10, 10, 0.1, directed=True) + assert bipartite.is_bipartite(G) + + def test_bipartite_sets(self): + G = nx.path_graph(4) + X, Y = bipartite.sets(G) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_directed(self): + G = nx.path_graph(4) + D = G.to_directed() + X, Y = bipartite.sets(D) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_given_top_nodes(self): + G = nx.path_graph(4) + top_nodes = [0, 2] + X, Y = bipartite.sets(G, top_nodes) + assert X == {0, 2} + assert Y == {1, 3} + + def test_bipartite_sets_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + G = nx.path_graph(4) + G.add_edges_from([(5, 6), (6, 7)]) + X, Y = bipartite.sets(G) + + def test_is_bipartite_node_set(self): + G = nx.path_graph(4) + + with pytest.raises(nx.AmbiguousSolution): + bipartite.is_bipartite_node_set(G, [1, 1, 2, 3]) + + assert bipartite.is_bipartite_node_set(G, [0, 2]) + assert bipartite.is_bipartite_node_set(G, [1, 3]) + assert not bipartite.is_bipartite_node_set(G, [1, 2]) + G.add_edge(10, 20) + assert bipartite.is_bipartite_node_set(G, [0, 2, 10]) + assert bipartite.is_bipartite_node_set(G, [0, 2, 20]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 10]) + assert bipartite.is_bipartite_node_set(G, [1, 3, 20]) + + def test_bipartite_density(self): + G = nx.path_graph(5) + X, Y = bipartite.sets(G) + density = len(list(G.edges())) / (len(X) * len(Y)) + assert bipartite.density(G, X) == density + D = nx.DiGraph(G.edges()) + assert bipartite.density(D, X) == density / 2.0 + assert bipartite.density(nx.Graph(), {}) == 0.0 + + def test_bipartite_degrees(self): + G = nx.path_graph(5) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y) + assert dict(u) == {1: 2, 3: 2} + assert dict(d) == {0: 1, 2: 2, 4: 1} + + def test_bipartite_weighted_degrees(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=0.1, other=0.2) + X = {1, 3} + Y = {0, 2, 4} + u, d = bipartite.degrees(G, Y, weight="weight") + assert dict(u) == {1: 1.1, 3: 2} + assert dict(d) == {0: 0.1, 2: 2, 4: 1} + u, d = bipartite.degrees(G, Y, weight="other") + assert dict(u) == {1: 1.2, 3: 2} + assert dict(d) == {0: 0.2, 2: 2, 4: 1} + + def test_biadjacency_matrix_weight(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + pytest.importorskip("scipy") + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in 
G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + pytest.importorskip("scipy") + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..19fb5d117be94c688616a394ea3322e93bfa3e00 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py @@ -0,0 +1,192 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite + + +class TestBipartiteCentrality: + @classmethod + def setup_class(cls): + cls.P4 = nx.path_graph(4) + cls.K3 = nx.complete_bipartite_graph(3, 3) + cls.C4 = nx.cycle_graph(4) + cls.davis = nx.davis_southern_women_graph() + cls.top_nodes = [ + n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0 + ] + + def test_degree_centrality(self): + d = bipartite.degree_centrality(self.P4, [1, 3]) + answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5} + assert d == answer + d = bipartite.degree_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert d == answer + d = bipartite.degree_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert d == answer + + def test_betweenness_centrality(self): + c = bipartite.betweenness_centrality(self.P4, [1, 3]) + answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0} + assert c == answer + c = bipartite.betweenness_centrality(self.K3, [0, 1, 2]) + answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125} + assert c == answer + c = bipartite.betweenness_centrality(self.C4, [0, 2]) + answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + assert c == answer + + def test_closeness_centrality(self): + c = bipartite.closeness_centrality(self.P4, [1, 3]) + answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3} + assert c == answer + c = bipartite.closeness_centrality(self.K3, [0, 1, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0} + assert c == answer + c = bipartite.closeness_centrality(self.C4, [0, 2]) + answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0} + assert c == answer + G = nx.Graph() + G.add_node(0) + G.add_node(1) + c = bipartite.closeness_centrality(G, [0]) + assert c == {0: 0.0, 1: 0.0} + c = bipartite.closeness_centrality(G, [1]) + assert c == {0: 0.0, 1: 0.0} + + def test_bipartite_closeness_centrality_unconnected(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(6, 7) + c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False) + answer = { + 0: 10.0 / 7, + 2: 10.0 / 7, + 4: 10.0 / 7, + 6: 10.0, + 1: 10.0 / 7, + 3: 10.0 / 7, + 5: 10.0 / 7, + 7: 10.0, + } + assert c == answer + + def test_davis_degree_centrality(self): + G = self.davis + deg = bipartite.degree_centrality(G, self.top_nodes) + answer = { + "E8": 0.78, + "E9": 0.67, + "E7": 0.56, + "Nora Fayette": 0.57, + "Evelyn Jefferson": 0.57, + "Theresa Anderson": 0.57, + "E6": 0.44, + "Sylvia Avondale": 0.50, + "Laura Mandeville": 0.50, + "Brenda Rogers": 0.50, + "Katherina Rogers": 0.43, + "E5": 0.44, + "Helen Lloyd": 0.36, + "E3": 0.33, + "Ruth DeSand": 0.29, + "Verne Sanderson": 0.29, + "E12": 0.33, + "Myra Liddel": 0.29, + "E11": 0.22, + "Eleanor Nye": 0.29, + 
"Frances Anderson": 0.29, + "Pearl Oglethorpe": 0.21, + "E4": 0.22, + "Charlotte McDowd": 0.29, + "E10": 0.28, + "Olivia Carleton": 0.14, + "Flora Price": 0.14, + "E2": 0.17, + "E1": 0.17, + "Dorothy Murchison": 0.14, + "E13": 0.17, + "E14": 0.17, + } + for node, value in answer.items(): + assert value == pytest.approx(deg[node], abs=1e-2) + + def test_davis_betweenness_centrality(self): + G = self.davis + bet = bipartite.betweenness_centrality(G, self.top_nodes) + answer = { + "E8": 0.24, + "E9": 0.23, + "E7": 0.13, + "Nora Fayette": 0.11, + "Evelyn Jefferson": 0.10, + "Theresa Anderson": 0.09, + "E6": 0.07, + "Sylvia Avondale": 0.07, + "Laura Mandeville": 0.05, + "Brenda Rogers": 0.05, + "Katherina Rogers": 0.05, + "E5": 0.04, + "Helen Lloyd": 0.04, + "E3": 0.02, + "Ruth DeSand": 0.02, + "Verne Sanderson": 0.02, + "E12": 0.02, + "Myra Liddel": 0.02, + "E11": 0.02, + "Eleanor Nye": 0.01, + "Frances Anderson": 0.01, + "Pearl Oglethorpe": 0.01, + "E4": 0.01, + "Charlotte McDowd": 0.01, + "E10": 0.01, + "Olivia Carleton": 0.01, + "Flora Price": 0.01, + "E2": 0.00, + "E1": 0.00, + "Dorothy Murchison": 0.00, + "E13": 0.00, + "E14": 0.00, + } + for node, value in answer.items(): + assert value == pytest.approx(bet[node], abs=1e-2) + + def test_davis_closeness_centrality(self): + G = self.davis + clos = bipartite.closeness_centrality(G, self.top_nodes) + answer = { + "E8": 0.85, + "E9": 0.79, + "E7": 0.73, + "Nora Fayette": 0.80, + "Evelyn Jefferson": 0.80, + "Theresa Anderson": 0.80, + "E6": 0.69, + "Sylvia Avondale": 0.77, + "Laura Mandeville": 0.73, + "Brenda Rogers": 0.73, + "Katherina Rogers": 0.73, + "E5": 0.59, + "Helen Lloyd": 0.73, + "E3": 0.56, + "Ruth DeSand": 0.71, + "Verne Sanderson": 0.71, + "E12": 0.56, + "Myra Liddel": 0.69, + "E11": 0.54, + "Eleanor Nye": 0.67, + "Frances Anderson": 0.67, + "Pearl Oglethorpe": 0.67, + "E4": 0.54, + "Charlotte McDowd": 0.60, + "E10": 0.55, + "Olivia Carleton": 0.59, + "Flora Price": 0.59, + "E2": 0.52, + "E1": 0.52, + "Dorothy Murchison": 0.65, + "E13": 0.52, + "E14": 0.52, + } + for node, value in answer.items(): + assert value == pytest.approx(clos[node], abs=1e-2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..72e2dbadd64e9e768d1541b2ce742c2b62278929 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py @@ -0,0 +1,84 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.algorithms.bipartite.cluster import cc_dot, cc_max, cc_min + + +def test_pairwise_bipartite_cc_functions(): + # Test functions for different kinds of bipartite clustering coefficients + # between pairs of nodes using 3 example graphs from figure 5 p. 
40 + # Latapy et al (2008) + G1 = nx.Graph([(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7)]) + G2 = nx.Graph([(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (1, 5)]) + G3 = nx.Graph( + [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)] + ) + result = { + 0: [1 / 3.0, 2 / 3.0, 2 / 5.0], + 1: [1 / 2.0, 2 / 3.0, 2 / 3.0], + 2: [2 / 8.0, 2 / 5.0, 2 / 5.0], + } + for i, G in enumerate([G1, G2, G3]): + assert bipartite.is_bipartite(G) + assert cc_dot(set(G[0]), set(G[1])) == result[i][0] + assert cc_min(set(G[0]), set(G[1])) == result[i][1] + assert cc_max(set(G[0]), set(G[1])) == result[i][2] + + +def test_star_graph(): + G = nx.star_graph(3) + # all modes are the same + answer = {0: 0, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="min") == answer + assert bipartite.clustering(G, mode="max") == answer + + +def test_not_bipartite(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.complete_graph(4)) + + +def test_bad_mode(): + with pytest.raises(nx.NetworkXError): + bipartite.clustering(nx.path_graph(4), mode="foo") + + +def test_path_graph(): + G = nx.path_graph(4) + answer = {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5} + assert bipartite.clustering(G, mode="dot") == answer + assert bipartite.clustering(G, mode="max") == answer + answer = {0: 1, 1: 1, 2: 1, 3: 1} + assert bipartite.clustering(G, mode="min") == answer + + +def test_average_path_graph(): + G = nx.path_graph(4) + assert bipartite.average_clustering(G, mode="dot") == 0.5 + assert bipartite.average_clustering(G, mode="max") == 0.5 + assert bipartite.average_clustering(G, mode="min") == 1 + + +def test_ra_clustering_davis(): + G = nx.davis_southern_women_graph() + cc4 = round(bipartite.robins_alexander_clustering(G), 3) + assert cc4 == 0.468 + + +def test_ra_clustering_square(): + G = nx.path_graph(4) + G.add_edge(0, 3) + assert bipartite.robins_alexander_clustering(G) == 1.0 + + +def test_ra_clustering_zero(): + G = nx.Graph() + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_nodes_from(range(4)) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edges_from([(0, 1), (2, 3), (3, 4)]) + assert bipartite.robins_alexander_clustering(G) == 0 + G.add_edge(1, 2) + assert bipartite.robins_alexander_clustering(G) == 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py new file mode 100644 index 0000000000000000000000000000000000000000..9507e13492acbe505aa3394a24dbc41c095a037c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py @@ -0,0 +1,33 @@ +import networkx as nx +from networkx.algorithms import bipartite + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert bipartite.min_edge_cover(G) == set() + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)} + + def test_bipartite_default(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], 
bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py new file mode 100644 index 0000000000000000000000000000000000000000..b388465ef4ba7bea6761ae1622f4f8ce20ba2657 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py @@ -0,0 +1,229 @@ +""" + Unit tests for bipartite edgelists. +""" +import io +import os +import tempfile + +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class TestEdgelist: + @classmethod + def setup_class(cls): + cls.G = nx.Graph(name="test") + e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")] + cls.G.add_edges_from(e) + cls.G.add_nodes_from(["a", "c", "e"], bipartite=0) + cls.G.add_nodes_from(["b", "d", "f"], bipartite=1) + cls.G.add_node("g", bipartite=0) + cls.DG = nx.DiGraph(cls.G) + cls.MG = nx.MultiGraph() + cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)]) + cls.MG.add_node(1, bipartite=0) + cls.MG.add_node(2, bipartite=1) + + def test_read_edgelist_1(self): + s = b"""\ +# comment line +1 2 +# comment line +2 3 +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + def test_read_edgelist_3(self): + s = b"""\ +# comment line +1 2 {'weight':2.0} +# comment line +2 3 {'weight':3.0} +""" + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False) + assert edges_equal(G.edges(), [(1, 2), (2, 3)]) + + bytesIO = io.BytesIO(s) + G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True) + assert edges_equal( + G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})] + ) + + def test_write_edgelist_1(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=False) + fh.seek(0) + assert fh.read() == b"1 2\n3 2\n" + + def test_write_edgelist_2(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {}\n3 2 {}\n" + + def test_write_edgelist_3(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=True) + fh.seek(0) + assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n" + + def test_write_edgelist_4(self): + fh = io.BytesIO() + G = nx.Graph() + G.add_edge(1, 2, weight=2.0) + G.add_edge(2, 3, weight=3.0) + G.add_node(1, bipartite=0) + G.add_node(2, bipartite=1) + G.add_node(3, bipartite=0) + bipartite.write_edgelist(G, fh, data=[("weight")]) + fh.seek(0) + assert fh.read() == b"1 2 2.0\n3 2 3.0\n" + + def test_unicode(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", 
**{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_latin1_issue(self): + G = nx.Graph() + name1 = chr(2344) + chr(123) + chr(6543) + name2 = chr(5543) + chr(1543) + chr(324) + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + pytest.raises( + UnicodeEncodeError, bipartite.write_edgelist, G, fname, encoding="latin-1" + ) + os.close(fd) + os.unlink(fname) + + def test_latin1(self): + G = nx.Graph() + name1 = "Bj" + chr(246) + "rk" + name2 = chr(220) + "ber" + G.add_edge(name1, "Radiohead", **{name2: 3}) + G.add_node(name1, bipartite=0) + G.add_node("Radiohead", bipartite=1) + fd, fname = tempfile.mkstemp() + bipartite.write_edgelist(G, fname, encoding="latin-1") + H = bipartite.read_edgelist(fname, encoding="latin-1") + assert graphs_equal(G, H) + os.close(fd) + os.unlink(fname) + + def test_edgelist_graph(self): + G = self.G + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname) + H2 = bipartite.read_edgelist(fname) + assert H is not H2 # they should be different graphs + G.remove_node("g") # isolated nodes are not written in edgelist + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_integers(self): + G = nx.convert_node_labels_to_integers(self.G) + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int) + # isolated nodes are not written in edgelist + G.remove_nodes_from(list(nx.isolates(G))) + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_edgelist_multigraph(self): + G = self.MG + (fd, fname) = tempfile.mkstemp() + bipartite.write_edgelist(G, fname) + H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph()) + assert H is not H2 # they should be different graphs + assert nodes_equal(list(H), list(G)) + assert edges_equal(list(H.edges()), list(G.edges())) + os.close(fd) + os.unlink(fname) + + def test_empty_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + bytesIO = io.BytesIO() + bipartite.write_edgelist(nx.DiGraph(), bytesIO) + + def test_raise_attribute(self): + with pytest.raises(AttributeError): + G = nx.path_graph(4) + bytesIO = io.BytesIO() + bipartite.write_edgelist(G, bytesIO) + + def test_parse_edgelist(self): + """Tests for conditions specific to + parse_edge_list method""" + + # ignore strings of length less than 2 + lines = ["1 2", "2 3", "3 1", "4", " "] + G = bipartite.parse_edgelist(lines, nodetype=int) + assert list(G.nodes) == [1, 2, 3] + + # Exception raised when node is not convertible + # to specified data type + with pytest.raises(TypeError, match=".*Failed to convert nodes"): + lines = ["a b", "b c", "c a"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception raised when format of data is not + # convertible to dictionary object + with pytest.raises(TypeError, match=".*Failed to convert edge data"): + lines = ["1 2 3", "2 3 4", "3 1 2"] + G = bipartite.parse_edgelist(lines, nodetype=int) + + # Exception raised when 
edge data and data + # keys are not of same length + with pytest.raises(IndexError): + lines = ["1 2 3 4", "2 3 4"] + G = bipartite.parse_edgelist( + lines, nodetype=int, data=[("weight", int), ("key", int)] + ) + + # Exception raised when edge data is not + # convertible to specified data type + with pytest.raises(TypeError, match=".*Failed to convert key data"): + lines = ["1 2 3 a", "2 3 4 b"] + G = bipartite.parse_edgelist( + lines, nodetype=int, data=[("weight", int), ("key", int)] + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ae34e4c1b0a04a9929a6468894c7efe6f74c53 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py @@ -0,0 +1,326 @@ +import pytest + +import networkx as nx + + +def test_selfloops_raises(): + G = nx.ladder_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_disconnected_raises(): + G = nx.ladder_graph(3) + G.add_node("a") + with pytest.raises(nx.NetworkXError, match=".*not connected"): + nx.bipartite.maximal_extendability(G) + + +def test_not_bipartite_raises(): + G = nx.complete_graph(5) + with pytest.raises(nx.NetworkXError, match=".*not bipartite"): + nx.bipartite.maximal_extendability(G) + + +def test_no_perfect_matching_raises(): + G = nx.Graph([(0, 1), (0, 2)]) + with pytest.raises(nx.NetworkXError, match=".*not contain a perfect matching"): + nx.bipartite.maximal_extendability(G) + + +def test_ladder_graph_is_1(): + G = nx.ladder_graph(3) + assert nx.bipartite.maximal_extendability(G) == 1 + + +def test_cubical_graph_is_2(): + G = nx.cubical_graph() + assert nx.bipartite.maximal_extendability(G) == 2 + + +def test_k_is_3(): + G = nx.Graph( + [ + (1, 6), + (1, 7), + (1, 8), + (1, 9), + (2, 6), + (2, 7), + (2, 8), + (2, 10), + (3, 6), + (3, 8), + (3, 9), + (3, 10), + (4, 7), + (4, 8), + (4, 9), + (4, 10), + (5, 6), + (5, 7), + (5, 9), + (5, 10), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 3 + + +def test_k_is_4(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 6), + (11, 1), + (11, 2), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 3), + (12, 5), + (12, 6), + (12, 7), + (13, 2), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 4 + + +def test_k_is_5(): + G = nx.Graph( + [ + (8, 1), + (8, 2), + (8, 3), + (8, 4), + (8, 5), + (8, 6), + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 6), + (10, 7), + (11, 1), + (11, 2), + (11, 3), + (11, 5), + (11, 6), + (11, 7), + (12, 1), + (12, 2), + (12, 4), + (12, 5), + (12, 6), + (12, 7), + (13, 1), + (13, 3), + (13, 4), + (13, 5), + (13, 6), + (13, 7), + (14, 2), + (14, 3), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 5 + + +def test_k_is_6(): + G = nx.Graph( + [ + (9, 1), + (9, 2), + (9, 3), + (9, 4), + (9, 5), + (9, 6), + (9, 7), + (10, 1), + (10, 2), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (10, 8), + (11, 1), + (11, 2), + (11, 3), + (11, 4), + (11, 5), + (11, 7), + (11, 8), + (12, 1), + (12, 2), + (12, 3), + (12, 
4), + (12, 6), + (12, 7), + (12, 8), + (13, 1), + (13, 2), + (13, 3), + (13, 5), + (13, 6), + (13, 7), + (13, 8), + (14, 1), + (14, 2), + (14, 4), + (14, 5), + (14, 6), + (14, 7), + (14, 8), + (15, 1), + (15, 3), + (15, 4), + (15, 5), + (15, 6), + (15, 7), + (15, 8), + (16, 2), + (16, 3), + (16, 4), + (16, 5), + (16, 6), + (16, 7), + (16, 8), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 6 + + +def test_k_is_7(): + G = nx.Graph( + [ + (1, 11), + (1, 12), + (1, 13), + (1, 14), + (1, 15), + (1, 16), + (1, 17), + (1, 18), + (2, 11), + (2, 12), + (2, 13), + (2, 14), + (2, 15), + (2, 16), + (2, 17), + (2, 19), + (3, 11), + (3, 12), + (3, 13), + (3, 14), + (3, 15), + (3, 16), + (3, 17), + (3, 20), + (4, 11), + (4, 12), + (4, 13), + (4, 14), + (4, 15), + (4, 16), + (4, 17), + (4, 18), + (4, 19), + (4, 20), + (5, 11), + (5, 12), + (5, 13), + (5, 14), + (5, 15), + (5, 16), + (5, 17), + (5, 18), + (5, 19), + (5, 20), + (6, 11), + (6, 12), + (6, 13), + (6, 14), + (6, 15), + (6, 16), + (6, 17), + (6, 18), + (6, 19), + (6, 20), + (7, 11), + (7, 12), + (7, 13), + (7, 14), + (7, 15), + (7, 16), + (7, 17), + (7, 18), + (7, 19), + (7, 20), + (8, 11), + (8, 12), + (8, 13), + (8, 14), + (8, 15), + (8, 16), + (8, 17), + (8, 18), + (8, 19), + (8, 20), + (9, 11), + (9, 12), + (9, 13), + (9, 14), + (9, 15), + (9, 16), + (9, 17), + (9, 18), + (9, 19), + (9, 20), + (10, 11), + (10, 12), + (10, 13), + (10, 14), + (10, 15), + (10, 16), + (10, 17), + (10, 18), + (10, 19), + (10, 20), + ] + ) + assert nx.bipartite.maximal_extendability(G) == 7 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3b84cece23ba6f3de2a1e454d01548af2e1390 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py @@ -0,0 +1,400 @@ +import numbers + +import pytest + +import networkx as nx + +from ..generators import ( + alternating_havel_hakimi_graph, + complete_bipartite_graph, + configuration_model, + gnmk_random_graph, + havel_hakimi_graph, + preferential_attachment_graph, + random_graph, + reverse_havel_hakimi_graph, +) + +""" +Generators - Bipartite +---------------------- +""" + + +class TestGeneratorsBipartite: + def test_complete_bipartite_graph(self): + G = complete_bipartite_graph(0, 0) + assert nx.is_isomorphic(G, nx.null_graph()) + + for i in [1, 5]: + G = complete_bipartite_graph(i, 0) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + G = complete_bipartite_graph(0, i) + assert nx.is_isomorphic(G, nx.empty_graph(i)) + + G = complete_bipartite_graph(2, 2) + assert nx.is_isomorphic(G, nx.cycle_graph(4)) + + G = complete_bipartite_graph(1, 5) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + G = complete_bipartite_graph(5, 1) + assert nx.is_isomorphic(G, nx.star_graph(5)) + + # complete_bipartite_graph(m1,m2) is a connected graph with + # m1+m2 nodes and m1*m2 edges + for m1, m2 in [(5, 11), (7, 3)]: + G = complete_bipartite_graph(m1, m2) + assert nx.number_of_nodes(G) == m1 + m2 + assert nx.number_of_edges(G) == m1 * m2 + + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError): + complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph) + + mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = 
complete_bipartite_graph(7, 3, create_using=nx.MultiGraph) + assert mG.is_multigraph() + assert sorted(mG.edges()) == sorted(G.edges()) + + mG = complete_bipartite_graph(7, 3) # default to Graph + assert sorted(mG.edges()) == sorted(G.edges()) + assert not mG.is_multigraph() + assert not mG.is_directed() + + # specify nodes rather than number of nodes + for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]: + G = complete_bipartite_graph(n1, n2) + if isinstance(n1, numbers.Integral): + if isinstance(n2, numbers.Integral): + n2 = range(n1, n1 + n2) + n1 = range(n1) + elif isinstance(n2, numbers.Integral): + n2 = range(n2) + edges = {(u, v) for u in n1 for v in n2} + assert edges == set(G.edges) + assert G.size() == len(edges) + + # raise when node sets are not distinct + for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]: + pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2) + + def test_configuration_model(self): + aseq = [] + bseq = [] + G = configuration_model(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = configuration_model(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq) + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2, 2] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = configuration_model(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = configuration_model(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph() + ) + pytest.raises( + nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + configuration_model, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 4 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + 
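+        # havel_hakimi_graph only constructs undirected (multi)graphs; a
+        # directed ``create_using`` such as nx.DiGraph or nx.MultiDiGraph
+        # is rejected with NetworkXError, which the calls below assert for
+        # the same degree sequences used above.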
pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph + ) + pytest.raises( + nx.NetworkXError, + havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_reverse_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = reverse_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + reverse_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_alternating_havel_hakimi_graph(self): + aseq = [] + bseq = [] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 0 + + aseq = [0, 0] + bseq = [0, 0] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert len(G) == 4 + assert G.number_of_edges() == 0 + + aseq = [3, 3, 3, 3] + bseq = [2, 2, 2, 2, 2] + pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq) + + bseq = [2, 2, 2, 2, 2, 2] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 2, 2, 2] + bseq = [3, 3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3] + + aseq = [2, 2, 2, 1, 1, 1] + bseq = [3, 3, 3] + G = alternating_havel_hakimi_graph(aseq, bseq) + assert G.is_multigraph() + assert not G.is_directed() + assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3] + + GU = nx.projected_graph(nx.Graph(G), range(len(aseq))) + assert GU.number_of_nodes() == 6 + + GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq))) + assert GD.number_of_nodes() == 3 + + G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + aseq, + bseq, + create_using=nx.DiGraph, + ) + pytest.raises( + nx.NetworkXError, + alternating_havel_hakimi_graph, + 
aseq, + bseq, + create_using=nx.MultiDiGraph, + ) + + def test_preferential_attachment(self): + aseq = [3, 2, 1, 1] + G = preferential_attachment_graph(aseq, 0.5) + assert G.is_multigraph() + assert not G.is_directed() + + G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph) + assert not G.is_multigraph() + assert not G.is_directed() + + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + pytest.raises( + nx.NetworkXError, + preferential_attachment_graph, + aseq, + 0.5, + create_using=nx.DiGraph(), + ) + + def test_random_graph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_random_digraph(self): + n = 10 + m = 20 + G = random_graph(n, m, 0.9, directed=True) + assert len(G) == 30 + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + + def test_gnmk_random_graph(self): + n = 10 + m = 20 + edges = 100 + # set seed because sometimes it is not connected + # which raises an error in bipartite.sets(G) below. + G = gnmk_random_graph(n, m, edges, seed=1234) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) + + def test_gnmk_random_graph_complete(self): + n = 10 + m = 20 + edges = 200 + G = gnmk_random_graph(n, m, edges) + assert len(G) == n + m + assert nx.is_bipartite(G) + X, Y = nx.algorithms.bipartite.sets(G) + # print(X) + assert set(range(n)) == X + assert set(range(n, n + m)) == Y + assert edges == len(list(G.edges())) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..7ed7cdcb43429f92c95a8d78da48f5d2771db77b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py @@ -0,0 +1,326 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module.""" +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms.bipartite.matching import ( + eppstein_matching, + hopcroft_karp_matching, + maximum_matching, + minimum_weight_full_matching, + to_vertex_cover, +) + + +class TestMatching: + """Tests for bipartite matching algorithms.""" + + def setup_method(self): + """Creates a bipartite graph for use in testing matching algorithms. + + The bipartite graph has a maximum cardinality matching that leaves + vertex 1 and vertex 10 unmatched. The first six numbers are the left + vertices and the next six numbers are the right vertices. 
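+        Vertices 1 and 10 appear in no edge below, so they can never be
+        matched; since the remaining ten vertices admit a perfect
+        matching, every maximum cardinality matching leaves exactly
+        vertices 1 and 10 unmatched.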
+ + """ + self.simple_graph = nx.complete_bipartite_graph(2, 3) + self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1} + + edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)] + self.top_nodes = set(range(6)) + self.graph = nx.Graph() + self.graph.add_nodes_from(range(12)) + self.graph.add_edges_from(edges) + + # Example bipartite graph from issue 2127 + G = nx.Graph() + G.add_nodes_from( + [ + (1, "C"), + (1, "B"), + (0, "G"), + (1, "F"), + (1, "E"), + (0, "C"), + (1, "D"), + (1, "I"), + (0, "A"), + (0, "D"), + (0, "F"), + (0, "E"), + (0, "H"), + (1, "G"), + (1, "A"), + (0, "I"), + (0, "B"), + (1, "H"), + ] + ) + G.add_edge((1, "C"), (0, "A")) + G.add_edge((1, "B"), (0, "A")) + G.add_edge((0, "G"), (1, "I")) + G.add_edge((0, "G"), (1, "H")) + G.add_edge((1, "F"), (0, "A")) + G.add_edge((1, "F"), (0, "C")) + G.add_edge((1, "F"), (0, "E")) + G.add_edge((1, "E"), (0, "A")) + G.add_edge((1, "E"), (0, "C")) + G.add_edge((0, "C"), (1, "D")) + G.add_edge((0, "C"), (1, "I")) + G.add_edge((0, "C"), (1, "G")) + G.add_edge((0, "C"), (1, "H")) + G.add_edge((1, "D"), (0, "A")) + G.add_edge((1, "I"), (0, "A")) + G.add_edge((1, "I"), (0, "E")) + G.add_edge((0, "A"), (1, "G")) + G.add_edge((0, "A"), (1, "H")) + G.add_edge((0, "E"), (1, "G")) + G.add_edge((0, "E"), (1, "H")) + self.disconnected_graph = G + + def check_match(self, matching): + """Asserts that the matching is what we expect from the bipartite graph + constructed in the :meth:`setup` fixture. + + """ + # For the sake of brevity, rename `matching` to `M`. + M = matching + matched_vertices = frozenset(itertools.chain(*M.items())) + # Assert that the maximum number of vertices (10) is matched. + assert matched_vertices == frozenset(range(12)) - {1, 10} + # Assert that no vertex appears in two edges, or in other words, that + # the matching (u, v) and (v, u) both appear in the matching + # dictionary. + assert all(u == M[M[u]] for u in range(12) if u in M) + + def check_vertex_cover(self, vertices): + """Asserts that the given set of vertices is the vertex cover we + expected from the bipartite graph constructed in the :meth:`setup` + fixture. + + """ + # By Konig's theorem, the number of edges in a maximum matching equals + # the number of vertices in a minimum vertex cover. + assert len(vertices) == 5 + # Assert that the set is truly a vertex cover. + for u, v in self.graph.edges(): + assert u in vertices or v in vertices + # TODO Assert that the vertices are the correct ones. + + def test_eppstein_matching(self): + """Tests that David Eppstein's implementation of the Hopcroft--Karp + algorithm produces a maximum cardinality matching. + + """ + self.check_match(eppstein_matching(self.graph, self.top_nodes)) + + def test_hopcroft_karp_matching(self): + """Tests that the Hopcroft--Karp algorithm produces a maximum + cardinality matching in a bipartite graph. 
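+        The algorithm runs in O(sqrt(n) * m) time on a graph with n
+        nodes and m edges; :func:`maximum_matching` is an alias for
+        :func:`hopcroft_karp_matching`, so this also exercises the
+        default maximum matching routine.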
+ + """ + self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes)) + + def test_to_vertex_cover(self): + """Test for converting a maximum matching to a minimum vertex cover.""" + matching = maximum_matching(self.graph, self.top_nodes) + vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes) + self.check_vertex_cover(vertex_cover) + + def test_eppstein_matching_simple(self): + match = eppstein_matching(self.simple_graph) + assert match == self.simple_solution + + def test_hopcroft_karp_matching_simple(self): + match = hopcroft_karp_matching(self.simple_graph) + assert match == self.simple_solution + + def test_eppstein_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = eppstein_matching(self.disconnected_graph) + + def test_hopcroft_karp_matching_disconnected(self): + with pytest.raises(nx.AmbiguousSolution): + match = hopcroft_karp_matching(self.disconnected_graph) + + def test_issue_2127(self): + """Test from issue 2127""" + # Build the example DAG + G = nx.DiGraph() + G.add_edge("A", "C") + G.add_edge("A", "B") + G.add_edge("C", "E") + G.add_edge("C", "D") + G.add_edge("E", "G") + G.add_edge("E", "F") + G.add_edge("G", "I") + G.add_edge("G", "H") + + tc = nx.transitive_closure(G) + btc = nx.Graph() + + # Create a bipartite graph based on the transitive closure of G + for v in tc.nodes(): + btc.add_node((0, v)) + btc.add_node((1, v)) + + for u, v in tc.edges(): + btc.add_edge((0, u), (1, v)) + + top_nodes = {n for n in btc if n[0] == 0} + matching = hopcroft_karp_matching(btc, top_nodes) + vertex_cover = to_vertex_cover(btc, matching, top_nodes) + independent_set = set(G) - {v for _, v in vertex_cover} + assert {"B", "D", "F", "I", "H"} == independent_set + + def test_vertex_cover_issue_2384(self): + G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_vertex_cover_issue_3306(self): + G = nx.Graph() + edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)] + G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges]) + + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + def test_unorderable_nodes(self): + a = object() + b = object() + c = object() + d = object() + e = object() + G = nx.Graph([(a, d), (b, d), (b, e), (c, d)]) + matching = maximum_matching(G) + vertex_cover = to_vertex_cover(G, matching) + for u, v in G.edges(): + assert u in vertex_cover or v in vertex_cover + + +def test_eppstein_matching(): + """Test in accordance to issue #1927""" + G = nx.Graph() + G.add_nodes_from(["a", 2, 3, 4], bipartite=0) + G.add_nodes_from([1, "b", "c"], bipartite=1) + G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)]) + matching = eppstein_matching(G) + assert len(matching) == len(maximum_matching(G)) + assert all(x in set(matching.keys()) for x in set(matching.values())) + + +class TestMinimumWeightFullMatching: + @classmethod + def setup_class(cls): + pytest.importorskip("scipy") + + def test_minimum_weight_full_matching_incomplete_graph(self): + B = nx.Graph() + B.add_nodes_from([1, 2], bipartite=0) + B.add_nodes_from([3, 4], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 3, weight=100) + B.add_edge(2, 4, weight=50) + matching = minimum_weight_full_matching(B) + assert matching == {1: 4, 2: 3, 4: 1, 3: 2} + + def 
test_minimum_weight_full_matching_with_no_full_matching(self): + B = nx.Graph() + B.add_nodes_from([1, 2, 3], bipartite=0) + B.add_nodes_from([4, 5, 6], bipartite=1) + B.add_edge(1, 4, weight=100) + B.add_edge(2, 4, weight=100) + B.add_edge(3, 4, weight=50) + B.add_edge(3, 5, weight=50) + B.add_edge(3, 6, weight=50) + with pytest.raises(ValueError): + minimum_weight_full_matching(B) + + def test_minimum_weight_full_matching_square(self): + G = nx.complete_bipartite_graph(3, 3) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=300) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2} + + def test_minimum_weight_full_matching_smaller_left(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_top_nodes_right(self): + G = nx.complete_bipartite_graph(3, 4) + G.add_edge(0, 3, weight=400) + G.add_edge(0, 4, weight=150) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=1) + G.add_edge(1, 3, weight=400) + G.add_edge(1, 4, weight=450) + G.add_edge(1, 5, weight=600) + G.add_edge(1, 6, weight=2) + G.add_edge(2, 3, weight=300) + G.add_edge(2, 4, weight=225) + G.add_edge(2, 5, weight=290) + G.add_edge(2, 6, weight=3) + matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6]) + assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1} + + def test_minimum_weight_full_matching_smaller_right(self): + G = nx.complete_bipartite_graph(4, 3) + G.add_edge(0, 4, weight=400) + G.add_edge(0, 5, weight=400) + G.add_edge(0, 6, weight=300) + G.add_edge(1, 4, weight=150) + G.add_edge(1, 5, weight=450) + G.add_edge(1, 6, weight=225) + G.add_edge(2, 4, weight=400) + G.add_edge(2, 5, weight=600) + G.add_edge(2, 6, weight=290) + G.add_edge(3, 4, weight=1) + G.add_edge(3, 5, weight=2) + G.add_edge(3, 6, weight=3) + matching = minimum_weight_full_matching(G) + assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2} + + def test_minimum_weight_full_matching_negative_weights(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, weight=-2) + G.add_edge(0, 3, weight=0.2) + G.add_edge(1, 2, weight=-2) + G.add_edge(1, 3, weight=0.3) + matching = minimum_weight_full_matching(G) + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} + + def test_minimum_weight_full_matching_different_weight_key(self): + G = nx.complete_bipartite_graph(2, 2) + G.add_edge(0, 2, mass=2) + G.add_edge(0, 3, mass=0.2) + G.add_edge(1, 2, mass=1) + G.add_edge(1, 3, mass=2) + matching = minimum_weight_full_matching(G, weight="mass") + assert matching == {0: 3, 1: 2, 2: 1, 3: 0} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..393b71e7ca29aa9385ed312788ec038acdee9390 --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py @@ -0,0 +1,79 @@ +import pytest + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") +sparse = pytest.importorskip("scipy.sparse") + + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal + + +class TestBiadjacencyMatrix: + def test_biadjacency_matrix_weight(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2, other=4) + X = [1, 3] + Y = [0, 2, 4] + M = bipartite.biadjacency_matrix(G, X, weight="weight") + assert M[0, 0] == 2 + M = bipartite.biadjacency_matrix(G, X, weight="other") + assert M[0, 0] == 4 + + def test_biadjacency_matrix(self): + tops = [2, 5, 10] + bots = [5, 10, 15] + for i in range(len(tops)): + G = bipartite.random_graph(tops[i], bots[i], 0.2) + top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0] + M = bipartite.biadjacency_matrix(G, top) + assert M.shape[0] == tops[i] + assert M.shape[1] == bots[i] + + def test_biadjacency_matrix_order(self): + G = nx.path_graph(5) + G.add_edge(0, 1, weight=2) + X = [3, 1] + Y = [4, 2, 0] + M = bipartite.biadjacency_matrix(G, X, Y, weight="weight") + assert M[1, 2] == 2 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph(), []) + + def test_empty_graph(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), []) + + def test_duplicate_row(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [1, 1]) + + def test_duplicate_col(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], [1, 1]) + + def test_format_keyword(self): + with pytest.raises(nx.NetworkXError): + bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], format="foo") + + def test_from_biadjacency_roundtrip(self): + B1 = nx.path_graph(5) + M = bipartite.biadjacency_matrix(B1, [0, 2, 4]) + B2 = bipartite.from_biadjacency_matrix(M) + assert nx.is_isomorphic(B1, B2) + + def test_from_biadjacency_weight(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (1, 3)]) + B = bipartite.from_biadjacency_matrix(M, edge_attribute="weight") + e = [(0, 2, {"weight": 1}), (0, 3, {"weight": 2}), (1, 3, {"weight": 3})] + assert edges_equal(B.edges(data=True), e) + + def test_from_biadjacency_multigraph(self): + M = sparse.csc_matrix([[1, 2], [0, 3]]) + B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph()) + assert edges_equal(B.edges(), [(0, 2), (0, 3), (0, 3), (1, 3), (1, 3), (1, 3)]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py new file mode 100644 index 0000000000000000000000000000000000000000..076bb42b668657cad51f6423e5aacf23a2a1cd28 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py @@ -0,0 +1,407 @@ +import pytest + +import networkx as nx +from networkx.algorithms import bipartite +from networkx.utils import edges_equal, nodes_equal + + +class TestBipartiteProject: + def test_path_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), 
[(0, 2)]) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"): + bipartite.projected_graph(G, [0]) + + def test_path_projected_properties_graph(self): + G = nx.path_graph(4) + G.add_node(1, name="one") + G.add_node(2, name="two") + P = bipartite.projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + assert P.nodes[1]["name"] == G.nodes[1]["name"] + P = bipartite.projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + assert P.nodes[2]["name"] == G.nodes[2]["name"] + + def test_path_collaboration_projected_graph(self): + G = nx.path_graph(4) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_directed_path_collaboration_projected_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.collaboration_weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.collaboration_weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_path_weighted_projected_graph(self): + G = nx.path_graph(4) + + with pytest.raises(nx.NetworkXAlgorithmError): + bipartite.weighted_projected_graph(G, [1, 2, 3, 3]) + + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_digraph_weighted_projection(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) + assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} + assert len(P) == 2 + + def test_path_weighted_projected_directed_graph(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + P = bipartite.weighted_projected_graph(G, [1, 3]) + assert nodes_equal(list(P), [1, 3]) + assert edges_equal(list(P.edges()), [(1, 3)]) + P[1][3]["weight"] = 1 + P = bipartite.weighted_projected_graph(G, [0, 2]) + assert nodes_equal(list(P), [0, 2]) + assert edges_equal(list(P.edges()), [(0, 2)]) + P[0][2]["weight"] = 1 + + def test_star_projected_graph(self): + G = nx.star_graph(3) + P = bipartite.projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + P = bipartite.weighted_projected_graph(G, [1, 2, 3]) + assert nodes_equal(list(P), [1, 2, 3]) + assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)]) + + P = bipartite.projected_graph(G, [0]) + assert nodes_equal(list(P), [0]) + assert edges_equal(list(P.edges()), []) + + def test_project_multigraph(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("a", 2) + G.add_edge("b", 2) + P = bipartite.projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = bipartite.weighted_projected_graph(G, "ab") + assert edges_equal(list(P.edges()), [("a", "b")]) + P = 
bipartite.projected_graph(G, "ab", multigraph=True) + assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")]) + + def test_project_collaboration(self): + G = nx.Graph() + G.add_edge("a", 1) + G.add_edge("b", 1) + G.add_edge("b", 2) + G.add_edge("c", 2) + G.add_edge("c", 3) + G.add_edge("c", 4) + G.add_edge("b", 4) + P = bipartite.collaboration_weighted_projected_graph(G, "abc") + assert P["a"]["b"]["weight"] == 1 + assert P["b"]["c"]["weight"] == 2 + + def test_directed_projection(self): + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge("B", 2) + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 1 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B")]) + + G = nx.DiGraph() + G.add_edge("A", 1) + G.add_edge(1, "B") + G.add_edge("A", 2) + G.add_edge(2, "B") + P = bipartite.projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + P = bipartite.weighted_projected_graph(G, "AB") + assert edges_equal(list(P.edges()), [("A", "B")]) + assert P["A"]["B"]["weight"] == 2 + + P = bipartite.projected_graph(G, "AB", multigraph=True) + assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")]) + + +class TestBipartiteWeightedProjection: + @classmethod + def setup_class(cls): + # Tore Opsahl's example + # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/ + cls.G = nx.Graph() + cls.G.add_edge("A", 1) + cls.G.add_edge("A", 2) + cls.G.add_edge("B", 1) + cls.G.add_edge("B", 2) + cls.G.add_edge("B", 3) + cls.G.add_edge("B", 4) + cls.G.add_edge("B", 5) + cls.G.add_edge("C", 1) + cls.G.add_edge("D", 3) + cls.G.add_edge("E", 4) + cls.G.add_edge("E", 5) + cls.G.add_edge("E", 6) + cls.G.add_edge("F", 6) + # Graph based on figure 6 from Newman (2001) + cls.N = nx.Graph() + cls.N.add_edge("A", 1) + cls.N.add_edge("A", 2) + cls.N.add_edge("A", 3) + cls.N.add_edge("B", 1) + cls.N.add_edge("B", 2) + cls.N.add_edge("B", 3) + cls.N.add_edge("C", 1) + cls.N.add_edge("D", 1) + cls.N.add_edge("E", 3) + + def test_project_weighted_shared(self): + edges = [ + ("A", "B", 2), + ("A", "C", 1), + ("B", "C", 1), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3), + ("A", "E", 1), + ("A", "C", 1), + ("A", "D", 1), + ("B", "E", 1), + ("B", "C", 1), + ("B", "D", 1), + ("C", "D", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_newman(self): + edges = [ + ("A", "B", 1.5), + ("A", "C", 0.5), + ("B", "C", 0.5), + ("B", "D", 1), + ("B", "E", 2), + ("E", "F", 1), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + 
("A", "B", 11 / 6.0), + ("A", "E", 1 / 2.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 2.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_ratio(self): + edges = [ + ("A", "B", 2 / 6.0), + ("A", "C", 1 / 6.0), + ("B", "C", 1 / 6.0), + ("B", "D", 1 / 6.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 6.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_overlap(self): + edges = [ + ("A", "B", 2 / 2.0), + ("A", "C", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("B", "E", 2 / 3.0), + ("E", "F", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 1.0), + ("A", "C", 1 / 1.0), + ("A", "D", 1 / 1.0), + ("B", "E", 1 / 1.0), + ("B", "C", 1 / 1.0), + ("B", "D", 1 / 1.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False) + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_project_weighted_jaccard(self): + edges = [ + ("A", "B", 2 / 5.0), + ("A", "C", 1 / 2.0), + ("B", "C", 1 / 5.0), + ("B", "D", 1 / 5.0), + ("B", "E", 2 / 6.0), + ("E", "F", 1 / 3.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in list(P.edges()): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + edges = [ + ("A", "B", 3 / 3.0), + ("A", "E", 1 / 3.0), + ("A", "C", 1 / 3.0), + ("A", "D", 1 / 3.0), + ("B", "E", 1 / 3.0), + ("B", "C", 1 / 3.0), + ("B", "D", 1 / 3.0), + ("C", "D", 1 / 1.0), + ] + Panswer = nx.Graph() + Panswer.add_weighted_edges_from(edges) + P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE") + assert edges_equal(list(P.edges()), Panswer.edges()) + for u, v in P.edges(): + assert P[u][v]["weight"] == Panswer[u][v]["weight"] + + def test_generic_weighted_projected_graph_simple(self): + def shared(G, u, v): + return len(set(G[u]) & set(G[v])) + + B = nx.path_graph(5) + G = bipartite.generic_weighted_projected_graph( + B, [0, 2, 4], weight_function=shared + 
) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), + [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})], + ) + B = nx.DiGraph() + nx.add_path(B, range(5)) + G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4]) + assert nodes_equal(list(G), [0, 2, 4]) + assert edges_equal( + list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})] + ) + + def test_generic_weighted_projected_graph_custom(self): + def jaccard(G, u, v): + unbrs = set(G[u]) + vnbrs = set(G[v]) + return len(unbrs & vnbrs) / len(unbrs | vnbrs) + + def my_weight(G, u, v, weight="weight"): + w = 0 + for nbr in set(G[u]) & set(G[v]): + w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1) + return w + + B = nx.bipartite.complete_bipartite_graph(2, 2) + for i, (u, v) in enumerate(B.edges()): + B.edges[u, v]["weight"] = i + 1 + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=jaccard + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})]) + G = bipartite.generic_weighted_projected_graph( + B, [0, 1], weight_function=my_weight + ) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})]) + G = bipartite.generic_weighted_projected_graph(B, [0, 1]) + assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py new file mode 100644 index 0000000000000000000000000000000000000000..7ab7813d5facd2953e1d661d3b64a2223b38e48b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py @@ -0,0 +1,37 @@ +"""Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module. + +""" + +import pytest + +from networkx import NetworkXError, cycle_graph +from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy + + +def test_no_redundant_nodes(): + G = complete_bipartite_graph(2, 2) + + # when nodes is None + rc = node_redundancy(G) + assert all(redundancy == 1 for redundancy in rc.values()) + + # when set of nodes is specified + rc = node_redundancy(G, (2, 3)) + assert rc == {2: 1.0, 3: 1.0} + + +def test_redundant_nodes(): + G = cycle_graph(6) + edge = {0, 3} + G.add_edge(*edge) + redundancy = node_redundancy(G) + for v in edge: + assert redundancy[v] == 2 / 3 + for v in set(G) - edge: + assert redundancy[v] == 1 + + +def test_not_enough_neighbors(): + with pytest.raises(NetworkXError): + G = complete_bipartite_graph(1, 2) + node_redundancy(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py new file mode 100644 index 0000000000000000000000000000000000000000..b940649793d40aa73606914f3d48348761c329df --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py @@ -0,0 +1,80 @@ +import pytest + +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.bipartite import spectral_bipartivity as sb + +# Examples from Figure 1 +# E. Estrada and J. A. 
Rodríguez-Velázquez, "Spectral measures of +# bipartivity in complex networks", PhysRev E 72, 046105 (2005) + + +class TestSpectralBipartivity: + def test_star_like(self): + # star-like + + G = nx.star_graph(2) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.843, abs=1e-3) + + G = nx.star_graph(3) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.871, abs=1e-3) + + G = nx.star_graph(4) + G.add_edge(1, 2) + assert sb(G) == pytest.approx(0.890, abs=1e-3) + + def test_k23_like(self): + # K2,3-like + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.769, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.829, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + assert sb(G) == pytest.approx(0.731, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + G.add_edge(2, 4) + assert sb(G) == pytest.approx(0.692, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + assert sb(G) == pytest.approx(0.645, abs=1e-3) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + G.add_edge(3, 4) + G.add_edge(2, 3) + G.add_edge(0, 1) + assert sb(G) == pytest.approx(0.597, abs=1e-3) + + def test_single_nodes(self): + # single nodes + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(2, 4) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.85, abs=1e-2) + assert sbn[2] == pytest.approx(0.77, abs=1e-2) + + G = nx.complete_bipartite_graph(2, 3) + G.add_edge(0, 1) + sbn = sb(G, nodes=[1, 2]) + assert sbn[1] == pytest.approx(0.73, abs=1e-2) + assert sbn[2] == pytest.approx(0.82, abs=1e-2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/boundary.py b/MLPY/Lib/site-packages/networkx/algorithms/boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..ea97cee6efb10e1e29735184c7e8b5c328943c42 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/boundary.py @@ -0,0 +1,167 @@ +"""Routines to find the boundary of a set of nodes. + +An edge boundary is a set of edges, each of which has exactly one +endpoint in a given set of nodes (or, in the case of directed graphs, +the set of edges whose source node is in the set). + +A node boundary of a set *S* of nodes is the set of (out-)neighbors of +nodes in *S* that are outside *S*. + +""" +from itertools import chain + +import networkx as nx + +__all__ = ["edge_boundary", "node_boundary"] + + +@nx._dispatch(edge_attrs={"data": "default"}, preserve_edge_attrs="data") +def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None): + """Returns the edge boundary of `nbunch1`. + + The *edge boundary* of a set *S* with respect to a set *T* is the + set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*. + If *T* is not specified, it is assumed to be the set of all nodes + not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose edge boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. 
+ + keys : bool + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + data : bool or object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + default : object + This parameter has the same meaning as in + :meth:`MultiGraph.edges`. + + Returns + ------- + iterator + An iterator over the edges in the boundary of `nbunch1` with + respect to `nbunch2`. If `keys`, `data`, or `default` + are specified and `G` is a multigraph, then edges are returned + with keys and/or data, as in :meth:`MultiGraph.edges`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.edge_boundary(G, (1, 3))) + [(1, 0), (1, 2), (1, 5), (3, 0), (3, 2), (3, 4)] + + When nbunch2 is given: + + >>> list(nx.edge_boundary(G, (1, 3), (2, 0))) + [(1, 0), (1, 2), (3, 0), (3, 2)] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. + + """ + nset1 = {n for n in nbunch1 if n in G} + # Here we create an iterator over edges incident to nodes in the set + # `nset1`. The `Graph.edges()` method does not provide a guarantee + # on the orientation of the edges, so our algorithm below must + # handle the case in which exactly one orientation, either (u, v) or + # (v, u), appears in this iterable. + if G.is_multigraph(): + edges = G.edges(nset1, data=data, keys=keys, default=default) + else: + edges = G.edges(nset1, data=data, default=default) + # If `nbunch2` is not provided, then it is assumed to be the set + # complement of `nbunch1`. For the sake of efficiency, this is + # implemented by using the `not in` operator, instead of by creating + # an additional set and using the `in` operator. + if nbunch2 is None: + return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1)) + nset2 = set(nbunch2) + return ( + e + for e in edges + if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2) + ) + + +@nx._dispatch +def node_boundary(G, nbunch1, nbunch2=None): + """Returns the node boundary of `nbunch1`. + + The *node boundary* of a set *S* with respect to a set *T* is the + set of nodes *v* in *T* such that for some *u* in *S*, there is an + edge joining *u* to *v*. If *T* is not specified, it is assumed to + be the set of all nodes not in *S*. + + Parameters + ---------- + G : NetworkX graph + + nbunch1 : iterable + Iterable of nodes in the graph representing the set of nodes + whose node boundary will be returned. (This is the set *S* from + the definition above.) + + nbunch2 : iterable + Iterable of nodes representing the target (or "exterior") set of + nodes. (This is the set *T* from the definition above.) If not + specified, this is assumed to be the set of all nodes in `G` + not in `nbunch1`. + + Returns + ------- + set + The node boundary of `nbunch1` with respect to `nbunch2`. + + Examples + -------- + >>> G = nx.wheel_graph(6) + + When nbunch2=None: + + >>> list(nx.node_boundary(G, (3, 4))) + [0, 2, 5] + + When nbunch2 is given: + + >>> list(nx.node_boundary(G, (3, 4), (0, 1, 5))) + [0, 5] + + Notes + ----- + Any element of `nbunch` that is not in the graph `G` will be + ignored. + + `nbunch1` and `nbunch2` are usually meant to be disjoint, but in + the interest of speed and generality, that is not required here. 
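+    The boundary is computed in one pass by taking the union of the
+    (out-)neighbor sets of the nodes in `nbunch1` and then discarding
+    `nbunch1` itself, so the running time is linear in the total degree
+    of `nbunch1`.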
+ + """ + nset1 = {n for n in nbunch1 if n in G} + bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1 + # If `nbunch2` is not specified, it is assumed to be the set + # complement of `nbunch1`. + if nbunch2 is not None: + bdy &= set(nbunch2) + return bdy diff --git a/MLPY/Lib/site-packages/networkx/algorithms/bridges.py b/MLPY/Lib/site-packages/networkx/algorithms/bridges.py new file mode 100644 index 0000000000000000000000000000000000000000..106120e2fde435e82fa6186a835f3fedabd21a0a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/bridges.py @@ -0,0 +1,205 @@ +"""Bridge-finding algorithms.""" +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["bridges", "has_bridges", "local_bridges"] + + +@not_implemented_for("directed") +@nx._dispatch +def bridges(G, root=None): + """Generate all bridges in a graph. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. Equivalently, a bridge is an + edge that does not belong to any cycle. Bridges are also known as cut-edges, + isthmuses, or cut arcs. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be returned. + + Yields + ------ + e : edge + An edge in the graph whose removal disconnects the graph (or + causes the number of connected components to increase). + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge: + + >>> G = nx.barbell_graph(10, 0) + >>> list(nx.bridges(G)) + [(9, 10)] + + Notes + ----- + This is an implementation of the algorithm described in [1]_. An edge is a + bridge if and only if it is not contained in any chain. Chains are found + using the :func:`networkx.chain_decomposition` function. + + The algorithm described in [1]_ requires a simple graph. If the provided + graph is a multigraph, we convert it to a simple graph and verify that any + bridges discovered by the chain decomposition algorithm are not multi-edges. + + Ignoring polylogarithmic factors, the worst-case time complexity is the + same as the :func:`networkx.chain_decomposition` function, + $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is + the number of edges. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions + """ + multigraph = G.is_multigraph() + H = nx.Graph(G) if multigraph else G + chains = nx.chain_decomposition(H, root=root) + chain_edges = set(chain.from_iterable(chains)) + H_copy = H.copy() + if root is not None: + H = H.subgraph(nx.node_connected_component(H, root)).copy() + for u, v in H.edges(): + if (u, v) not in chain_edges and (v, u) not in chain_edges: + if multigraph and len(G[u][v]) > 1: + continue + yield u, v + + +@not_implemented_for("directed") +@nx._dispatch +def has_bridges(G, root=None): + """Decide whether a graph has any bridges. + + A *bridge* in a graph is an edge whose removal causes the number of + connected components of the graph to increase. + + Parameters + ---------- + G : undirected graph + + root : node (optional) + A node in the graph `G`. If specified, only the bridges in the + connected component containing this node will be considered. 
+ + Returns + ------- + bool + Whether the graph (or the connected component containing `root`) + has any bridges. + + Raises + ------ + NodeNotFound + If `root` is not in the graph `G`. + + NetworkXNotImplemented + If `G` is a directed graph. + + Examples + -------- + The barbell graph with parameter zero has a single bridge:: + + >>> G = nx.barbell_graph(10, 0) + >>> nx.has_bridges(G) + True + + On the other hand, the cycle graph has no bridges:: + + >>> G = nx.cycle_graph(5) + >>> nx.has_bridges(G) + False + + Notes + ----- + This implementation uses the :func:`networkx.bridges` function, so + it shares its worst-case time complexity, $O(m + n)$, ignoring + polylogarithmic factors, where $n$ is the number of nodes in the + graph and $m$ is the number of edges. + + """ + try: + next(bridges(G, root=root)) + except StopIteration: + return False + else: + return True + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def local_bridges(G, with_span=True, weight=None): + """Iterate over local bridges of `G`, optionally computing the span. + + A *local bridge* is an edge whose endpoints have no common neighbors. + That is, the edge is not part of a triangle in the graph. + + The *span* of a *local bridge* is the shortest path length between + the endpoints if the local bridge is removed. + + Parameters + ---------- + G : undirected graph + + with_span : bool + If True, yield a 3-tuple `(u, v, span)` + + weight : function, string or None (default: None) + If function, used to compute edge weights for the span. + If string, the edge data attribute used in calculating span. + If None, all edges have weight 1. + + Yields + ------ + e : edge + The local bridges as an edge 2-tuple of nodes `(u, v)` or + as a 3-tuple `(u, v, span)` when `with_span is True`. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph or multigraph. + + Examples + -------- + In a cycle graph, every edge is a local bridge with span N-1.
+ + >>> G = nx.cycle_graph(9) + >>> (0, 8, 8) in set(nx.local_bridges(G)) + True + """ + if with_span is not True: + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + yield u, v + else: + wt = nx.weighted._weight_function(G, weight) + for u, v in G.edges: + if not (set(G[u]) & set(G[v])): + enodes = {u, v} + + def hide_edge(n, nbr, d): + if n not in enodes or nbr not in enodes: + return wt(n, nbr, d) + return None + + try: + span = nx.shortest_path_length(G, u, v, weight=hide_edge) + yield u, v, span + except nx.NetworkXNoPath: + yield u, v, float("inf") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c91a904a13496ecab5a3a6c8caa026970d99a540 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__init__.py @@ -0,0 +1,20 @@ +from .betweenness import * +from .betweenness_subset import * +from .closeness import * +from .current_flow_betweenness import * +from .current_flow_betweenness_subset import * +from .current_flow_closeness import * +from .degree_alg import * +from .dispersion import * +from .eigenvector import * +from .group import * +from .harmonic import * +from .katz import * +from .load import * +from .percolation import * +from .reaching import * +from .second_order import * +from .subgraph_alg import * +from .trophic import * +from .voterank_alg import * +from .laplacian import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fefaf544dbd345713306828aa8da08b5a04123ad Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de58dfc2c22040a3d8dae590998405659d61ac2a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f11ddd7b4ad2309e369363aab48de0ad6fd6e802 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e073dd304a27c4744be9cbdb77095dc864fa11f0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/closeness.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..28baf3e535949aac870df12ec88a6065c96cd918 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d58e7f3e0da157cb44426fb94e837da7b8c4b5e4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b83eb31500aab56655453aba346d45555cbeb6d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3ae0952f56cfcbf7369edebeaf705c4901e204c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/degree_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f271985a13c6503935b58bfb8b1074799bb020e2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/dispersion.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f971d94ee3411eb4db75ec40692b8368e0d7911 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/eigenvector.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9310ef4f60754c646945e465f0792966fb0a4cd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e3310c8d440ac79ef682a849da65ba6afd96b3a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/group.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52beff017fa8afceffd6a98fa4233b356dbaa315 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/harmonic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8682343967b6a5c5459cc55edf4f76acbdb79c89 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/katz.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8510e384ad874ce45e2325c60d179be6563002b5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/laplacian.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb02e04968654e3e93239b55206adb2d06a26c4d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/load.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5521deee09de1df8da174909b1f6136c3543ceff Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/percolation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b282ae077969ee9422eabc877d1974e8485522 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/reaching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..992d1f9dfa78ceb46b875ff5262daccbeaf1f819 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/second_order.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ec2dde85c97fd508bb15875ab9608c2fdee118 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..e6ed04ef4d25a8aeb4877168f3b20e0bd1dcc692 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/trophic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa5657deac4f6a54f252dfe4ebf26bd7ed2d9f83 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b1f3963b00e451258b9838723b71a2d9f799fe --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness.py @@ -0,0 +1,435 @@ +"""Betweenness centrality measures.""" +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import py_random_state +from networkx.utils.decorators import not_implemented_for + +__all__ = ["betweenness_centrality", "edge_betweenness_centrality"] + + +@py_random_state(5) +@nx._dispatch(edge_attrs="weight") +def betweenness_centrality( + G, k=None, normalized=True, weight=None, endpoints=False, seed=None +): + r"""Compute the shortest-path betweenness centrality for nodes. + + Betweenness centrality of a node $v$ is the sum of the + fraction of all-pairs shortest paths that pass through $v$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of + those paths passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$, + $\sigma(s, t|v) = 0$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by `2/((n-1)(n-2))` + for graphs, and `1/((n-1)(n-2))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + endpoints : bool, optional + If True include the endpoints in the shortest path counts. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + See [4]_ for the original first published version and [2]_ for details on + algorithms for variations and related metrics. 
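+
+    As a small concrete illustration (a three-node path graph, chosen for
+    brevity), the single interior node lies on the only shortest path
+    between the two endpoints, and normalization maps its score to 1:
+
+    >>> G = nx.path_graph(3)
+    >>> nx.betweenness_centrality(G)
+    {0: 0.0, 1: 1.0, 2: 0.0}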
+ + For approximate betweenness calculations set k=#samples to use + k nodes ("pivots") to estimate the betweenness values. For an estimate + of the number of pivots needed see [3]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. + If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + This algorithm is not guaranteed to be correct if edge weights + are floating point numbers. As a workaround you can use integer + numbers by multiplying the relevant edge attributes by a convenient + constant factor (eg 100) and converting to integers. + + References + ---------- + .. [1] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + .. [3] Ulrik Brandes and Christian Pich: + Centrality Estimation in Large Networks. + International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007. + https://dx.doi.org/10.1142/S0218127407018403 + .. [4] Linton C. Freeman: + A set of measures of centrality based on betweenness. + Sociometry 40: 35–41, 1977 + https://doi.org/10.2307/3033543 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + if endpoints: + betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s) + else: + betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s) + # rescaling + betweenness = _rescale( + betweenness, + len(G), + normalized=normalized, + directed=G.is_directed(), + k=k, + endpoints=endpoints, + ) + return betweenness + + +@py_random_state(4) +@nx._dispatch(edge_attrs="weight") +def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None): + r"""Compute betweenness centrality for edges. + + Betweenness centrality of an edge $e$ is the sum of the + fraction of all-pairs shortest paths that pass through $e$ + + .. math:: + + c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of + those paths passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph. 
+ + k : int, optional (default=None) + If k is not None use k node samples to estimate betweenness. + The value of k <= n where n is the number of nodes in the graph. + Higher values give better approximation. + + normalized : bool, optional + If True the betweenness values are normalized by $2/(n(n-1))$ + for graphs, and $1/(n(n-1))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Note that this is only used if k is not None. + + Returns + ------- + edges : dictionary + Dictionary of edges with betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The algorithm is from Ulrik Brandes [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes, + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + # b[e]=0 for e in G.edges() + betweenness.update(dict.fromkeys(G.edges(), 0.0)) + if k is None: + nodes = G + else: + nodes = seed.sample(list(G.nodes()), k) + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = _single_source_shortest_path_basic(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight) + # accumulation + betweenness = _accumulate_edges(betweenness, S, P, sigma, s) + # rescaling + for n in G: # remove nodes to only return edges + del betweenness[n] + betweenness = _rescale_e( + betweenness, len(G), normalized=normalized, directed=G.is_directed() + ) + if G.is_multigraph(): + betweenness = _add_edge_keys(G, betweenness, weight=weight) + return betweenness + + +# helpers for betweenness centrality + + +def _single_source_shortest_path_basic(G, s): + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + D[s] = 0 + Q = deque([s]) + while Q: # use BFS to find shortest paths + v = Q.popleft() + S.append(v) + Dv = D[v] + sigmav = sigma[v] + for w in G[v]: + if w not in D: + Q.append(w) + D[w] = Dv + 1 + if D[w] == Dv + 1: # this is a shortest path, count paths + sigma[w] += sigmav + P[w].append(v) # predecessors + return S, P, sigma, D + + +def _single_source_dijkstra_path_basic(G, s, weight): + weight = _weight_function(G, weight) + # modified from Eppstein + S = [] + P = {} + for v in G: + P[v] = [] + sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G + D = {} + sigma[s] = 1.0 + push = heappush + pop = heappop + seen = {s: 0} + c = count() + Q = [] # use Q as heap with (distance,node id) tuples + push(Q, (0, next(c), s, s)) + while Q: + (dist, _, pred, v) = pop(Q) + if v in D: + continue # already searched this node. 
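+        # v is settled here for the first time, so `dist` is its final
+        # shortest-path distance from s. Fold in the path count carried by
+        # `pred`, the predecessor that pushed this heap entry; counts from
+        # other equal-length predecessors are accumulated in the `elif`
+        # branch below.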
+ sigma[v] += sigma[pred] # count paths + S.append(v) + D[v] = dist + for w, edgedata in G[v].items(): + vw_dist = dist + weight(v, w, edgedata) + if w not in D and (w not in seen or vw_dist < seen[w]): + seen[w] = vw_dist + push(Q, (vw_dist, next(c), v, w)) + sigma[w] = 0.0 + P[w] = [v] + elif vw_dist == seen[w]: # handle equal paths + sigma[w] += sigma[v] + P[w].append(v) + return S, P, sigma, D + + +def _accumulate_basic(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness, delta + + +def _accumulate_endpoints(betweenness, S, P, sigma, s): + betweenness[s] += len(S) - 1 + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + 1 + return betweenness, delta + + +def _accumulate_edges(betweenness, S, P, sigma, s): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + c = sigma[v] * coeff + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False): + if normalized: + if endpoints: + if n < 2: + scale = None # no normalization + else: + # Scale factor should include endpoint nodes + scale = 1 / (n * (n - 1)) + elif n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False, k=None): + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + if k is not None: + scale = scale * n / k + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +@not_implemented_for("graph") +def _add_edge_keys(G, betweenness, weight=None): + r"""Adds the corrected betweenness centrality (BC) values for multigraphs. + + Parameters + ---------- + G : NetworkX graph. + + betweenness : dictionary + Dictionary mapping adjacent node tuples to betweenness centrality values. + + weight : string or function + See `_weight_function` for details. Defaults to `None`. + + Returns + ------- + edges : dictionary + The parameter `betweenness` including edges with keys and their + betweenness centrality values. + + The BC value is divided among edges of equal weight. 
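+
+    A sketch of the effect on a minimal multigraph whose two parallel edges
+    tie on the (default) weight, so each key receives half of the undivided
+    value:
+
+    >>> G = nx.MultiGraph([(0, 1), (0, 1)])
+    >>> _add_edge_keys(G, {(0, 1): 0.6})
+    {(0, 1, 0): 0.3, (0, 1, 1): 0.3}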
+ """ + _weight = _weight_function(G, weight) + + edge_bc = dict.fromkeys(G.edges, 0.0) + for u, v in betweenness: + d = G[u][v] + wt = _weight(u, v, d) + keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt] + bc = betweenness[(u, v)] / len(keys) + for k in keys: + edge_bc[(u, v, k)] = bc + + return edge_bc diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..e6c1acdf4ffe4d7423a49bcdf8c340886c998b3b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py @@ -0,0 +1,274 @@ +"""Betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _add_edge_keys, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = [ + "betweenness_centrality_subset", + "edge_betweenness_centrality_subset", +] + + +@nx._dispatch(edge_attrs="weight") +def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None): + r"""Compute betweenness centrality for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|v)$ is the number of those paths + passing through some node $v$ other than $s, t$. + If $s = t$, $\sigma(s, t) = 1$, + and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_. + + + Parameters + ---------- + G : graph + A NetworkX graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by $2/((n-1)(n-2))$ + for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$ + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + edge_betweenness_centrality + load_centrality + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is + designed to make betweenness_centrality(G) be the same as + betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + are easy to count. Undirected paths are tricky: should a path + from "u" to "v" count as 1 undirected path or as 2 directed paths? + + For betweenness_centrality we report the number of undirected + paths when G is undirected. + + For betweenness_centrality_subset the reporting is different. 
+ If the source and target subsets are the same, then we want + to count undirected paths. But if the source and target subsets + differ -- for example, if sources is {0} and targets is {1}, + then we are only counting the paths in one direction. They are + undirected paths but we are counting them in a directed way. + To count them as undirected paths, each should count as half a path. + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_subset(b, S, P, sigma, s, targets) + b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed()) + return b + + +@nx._dispatch(edge_attrs="weight") +def edge_betweenness_centrality_subset( + G, sources, targets, normalized=False, weight=None +): + r"""Compute betweenness centrality for edges for a subset of nodes. + + .. math:: + + c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)} + + where $S$ is the set of sources, $T$ is the set of targets, + $\sigma(s, t)$ is the number of shortest $(s, t)$-paths, + and $\sigma(s, t|e)$ is the number of those paths + passing through edge $e$ [2]_. + + Parameters + ---------- + G : graph + A networkx graph. + + sources: list of nodes + Nodes to use as sources for shortest paths in betweenness + + targets: list of nodes + Nodes to use as targets for shortest paths in betweenness + + normalized : bool, optional + If True the betweenness values are normalized by `2/(n(n-1))` + for graphs, and `1/(n(n-1))` for directed graphs where `n` + is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + Weights are used to calculate weighted shortest paths, so they are + interpreted as distances. + + Returns + ------- + edges : dictionary + Dictionary of edges with Betweenness centrality as the value. + + See Also + -------- + betweenness_centrality + edge_load + + Notes + ----- + The basic algorithm is from [1]_. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The normalization might seem a little strange but it is the same + as in edge_betweenness_centrality() and is designed to make + edge_betweenness_centrality(G) be the same as + edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()). + + References + ---------- + .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. 
+ https://doi.org/10.1016/j.socnet.2007.11.001 + """ + b = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + b.update(dict.fromkeys(G.edges(), 0.0)) # b[e] for e in G.edges() + for s in sources: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + b = _accumulate_edges_subset(b, S, P, sigma, s, targets) + for n in G: # remove nodes to only return edges + del b[n] + b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed()) + if G.is_multigraph(): + b = _add_edge_keys(G, b, weight=weight) + return b + + +def _accumulate_subset(betweenness, S, P, sigma, s, targets): + delta = dict.fromkeys(S, 0.0) + target_set = set(targets) - {s} + while S: + w = S.pop() + if w in target_set: + coeff = (delta[w] + 1.0) / sigma[w] + else: + coeff = delta[w] / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets): + """edge_betweenness_centrality_subset helper.""" + delta = dict.fromkeys(S, 0) + target_set = set(targets) + while S: + w = S.pop() + for v in P[w]: + if w in target_set: + c = (sigma[v] / sigma[w]) * (1.0 + delta[w]) + else: + c = delta[w] / len(P[w]) + if (v, w) not in betweenness: + betweenness[(w, v)] += c + else: + betweenness[(v, w)] += c + delta[v] += c + if w != s: + betweenness[w] += delta[w] + return betweenness + + +def _rescale(betweenness, n, normalized, directed=False): + """betweenness_centrality_subset helper.""" + if normalized: + if n <= 2: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / ((n - 1) * (n - 2)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness + + +def _rescale_e(betweenness, n, normalized, directed=False): + """edge_betweenness_centrality_subset helper.""" + if normalized: + if n <= 1: + scale = None # no normalization b=0 for all nodes + else: + scale = 1.0 / (n * (n - 1)) + else: # rescale by 2 for undirected graphs + if not directed: + scale = 0.5 + else: + scale = None + if scale is not None: + for v in betweenness: + betweenness[v] *= scale + return betweenness diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/closeness.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..6a95ac14ef848c8d146dee8ceba01d3b2de5eeee --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/closeness.py @@ -0,0 +1,281 @@ +""" +Closeness centrality measures. +""" +import functools + +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils.decorators import not_implemented_for + +__all__ = ["closeness_centrality", "incremental_closeness_centrality"] + + +@nx._dispatch(edge_attrs="distance") +def closeness_centrality(G, u=None, distance=None, wf_improved=True): + r"""Compute closeness centrality for nodes. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + average shortest path distance to `u` over all `n-1` reachable nodes. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n-1` is the number of nodes reachable from `u`. 
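+    For instance, a node at distance 1 from two other nodes and distance 2
+    from a third (so three reachable nodes) gets
+
+    .. math::
+
+        C(u) = \frac{3}{1 + 1 + 2} = 0.75.
+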
Notice that the + closeness distance function computes the incoming distance to `u` + for directed graphs. To use outward distance, act on `G.reverse()`. + + Notice that higher values of closeness indicate higher centrality. + + Wasserman and Faust propose an improved formula for graphs with + more than one connected component. The result is "a ratio of the + fraction of actors in the group who are reachable, to the average + distance" from the reachable actors [2]_. You might think this + scale factor is inverted but it is not. As is, nodes from small + components receive a smaller closeness value. Letting `N` denote + the number of nodes in the graph, + + .. math:: + + C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + Parameters + ---------- + G : graph + A NetworkX graph + + u : node, optional + Return only the value for node u + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None` (the default) all edges have a distance of 1. + Absent edge attributes are assigned a distance of 1. Note that no check + is performed to ensure that edges have the provided attribute. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.closeness_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, incremental_closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately, scaled by that part's size. + + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + The closeness centrality uses *inward* distance to a node, not outward. + If you want to use outward distances, apply the function to `G.reverse()`. + + In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the + outward distance rather than the inward distance. If you use a 'distance' + keyword and a DiGraph, your results will change between v2.2 and v2.3. + + References + ---------- + .. [1] Linton C. Freeman: Centrality in networks: I. + Conceptual clarification. Social Networks 1:215-239, 1979. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] pg. 201 of Wasserman, S. and Faust, K., + Social Network Analysis: Methods and Applications, 1994, + Cambridge University Press.
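+
+    As a further illustration of `wf_improved` (a two-node component plus an
+    isolated node, chosen for brevity), disabling the improved formula leaves
+    the isolated node at 0 and the connected pair at 1:
+
+    >>> G = nx.Graph([(0, 1)])
+    >>> G.add_node(2)
+    >>> nx.closeness_centrality(G, wf_improved=False)
+    {0: 1.0, 1: 1.0, 2: 0.0}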
+ """ + if G.is_directed(): + G = G.reverse() # create a reversed graph view + + if distance is not None: + # use Dijkstra's algorithm with specified attribute as edge weight + path_length = functools.partial( + nx.single_source_dijkstra_path_length, weight=distance + ) + else: + path_length = nx.single_source_shortest_path_length + + if u is None: + nodes = G.nodes + else: + nodes = [u] + closeness_dict = {} + for n in nodes: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + if u is not None: + return closeness_dict[u] + return closeness_dict + + +@not_implemented_for("directed") +@nx._dispatch +def incremental_closeness_centrality( + G, edge, prev_cc=None, insertion=True, wf_improved=True +): + r"""Incremental closeness centrality for nodes. + + Compute closeness centrality for nodes using level-based work filtering + as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al. + + Level-based work filtering detects unnecessary updates to the closeness + centrality and filters them out. + + --- + From "Incremental Algorithms for Closeness Centrality": + + Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V + such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)` + Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. + + Where :math:`dG(u, v)` denotes the length of the shortest path between + two vertices u, v in a graph G, cc[s] is the closeness centrality for a + vertex s in V, and cc'[s] is the closeness centrality for a + vertex s in V, with the (u, v) edge added. + --- + + We use Theorem 1 to filter out updates when adding or removing an edge. + When adding an edge (u, v), we compute the shortest path lengths from all + other nodes to u and to v before the node is added. When removing an edge, + we compute the shortest path lengths after the edge is removed. Then we + apply Theorem 1 to use previously computed closeness centrality for nodes + where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for + undirected, unweighted graphs; the distance argument is not supported. + + Closeness centrality [1]_ of a node `u` is the reciprocal of the + sum of the shortest path distances from `u` to all `n-1` other nodes. + Since the sum of distances depends on the number of nodes in the + graph, closeness is normalized by the sum of minimum possible + distances `n-1`. + + .. math:: + + C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)}, + + where `d(v, u)` is the shortest-path distance between `v` and `u`, + and `n` is the number of nodes in the graph. + + Notice that higher values of closeness indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + edge : tuple + The modified edge (u, v) in the graph. + + prev_cc : dictionary + The previous closeness centrality for all nodes in the graph. + + insertion : bool, optional + If True (default) the edge was inserted, otherwise it was deleted from the graph. + + wf_improved : bool, optional (default=True) + If True, scale by the fraction of nodes reachable. This gives the + Wasserman and Faust improved formula. For single component graphs + it is the same as the original formula. 
+ + Returns + ------- + nodes : dictionary + Dictionary of nodes with closeness centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + The closeness centrality is normalized to `(n-1)/(|G|-1)` where + `n` is the number of nodes in the connected part of graph + containing the node. If the graph is not completely connected, + this algorithm computes the closeness centrality for each + connected part separately. + + References + ---------- + .. [1] Freeman, L.C., 1979. Centrality in networks: I. + Conceptual clarification. Social Networks 1, 215--239. + https://doi.org/10.1016/0378-8733(78)90021-7 + .. [2] Sariyuce, A.E.; Kaya, K.; Saule, E.; Çatalyürek, Ü.V. Incremental + Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data. + http://sariyuce.com/papers/bigdata13.pdf + """ + if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()): + raise NetworkXError("prev_cc and G do not have the same nodes") + + # Unpack edge + (u, v) = edge + path_length = nx.single_source_shortest_path_length + + if insertion: + # For edge insertion, we want shortest paths before the edge is inserted + du = path_length(G, u) + dv = path_length(G, v) + + G.add_edge(u, v) + else: + G.remove_edge(u, v) + + # For edge removal, we want shortest paths after the edge is removed + du = path_length(G, u) + dv = path_length(G, v) + + if prev_cc is None: + return nx.closeness_centrality(G) + + nodes = G.nodes() + closeness_dict = {} + for n in nodes: + if n in du and n in dv and abs(du[n] - dv[n]) <= 1: + closeness_dict[n] = prev_cc[n] + else: + sp = path_length(G, n) + totsp = sum(sp.values()) + len_G = len(G) + _closeness_centrality = 0.0 + if totsp > 0.0 and len_G > 1: + _closeness_centrality = (len(sp) - 1.0) / totsp + # normalize to number of nodes-1 in connected part + if wf_improved: + s = (len(sp) - 1.0) / (len_G - 1) + _closeness_centrality *= s + closeness_dict[n] = _closeness_centrality + + # Leave the graph as we found it + if insertion: + G.remove_edge(u, v) + else: + G.add_edge(u, v) + + return closeness_dict diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py new file mode 100644 index 0000000000000000000000000000000000000000..ea1b2c8f2f49f97020adf100b495f20ec3f19ce1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py @@ -0,0 +1,343 @@ +"""Current-flow betweenness centrality measures.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import ( + CGInverseLaplacian, + FullInverseLaplacian, + SuperLUInverseLaplacian, + flow_matrix_row, +) +from networkx.utils import ( + not_implemented_for, + py_random_state, + reverse_cuthill_mckee_ordering, +) + +__all__ = [ + "current_flow_betweenness_centrality", + "approximate_current_flow_betweenness_centrality", + "edge_current_flow_betweenness_centrality", +] + + +@not_implemented_for("directed") +@py_random_state(7) +@nx._dispatch(edge_attrs="weight") +def approximate_current_flow_betweenness_centrality( + G, + normalized=True, + weight=None, + dtype=float, + solver="full", + epsilon=0.5, + kmax=10000, + seed=None, +): + r"""Compute the approximate current-flow betweenness centrality for nodes. + + Approximates the current-flow betweenness centrality within absolute + error of epsilon with high probability [1]_.
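+
+    Roughly, the estimate repeatedly samples a random source/sink pair,
+    solves a Laplacian system for the induced node potentials, and averages
+    the resulting current flows, so values vary with `seed`. An illustrative
+    call (skipped in doctests because it requires SciPy):
+
+    >>> G = nx.grid_2d_graph(3, 3)  # doctest: +SKIP
+    >>> bc = nx.approximate_current_flow_betweenness_centrality(
+    ...     G, epsilon=0.5, seed=42
+    ... )  # doctest: +SKIP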
+ + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + epsilon: float + Absolute error tolerance. + + kmax: int + Maximum number of sample node pairs to use for approximation. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + current_flow_betweenness_centrality + + Notes + ----- + The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$ + and the space required is $O(m)$ for $n$ nodes and $m$ edges. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer: + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + betweenness = dict.fromkeys(H, 0.0) + nb = (n - 1.0) * (n - 2.0) # normalization factor + cstar = n * (n - 1) / nb + l = 1 # parameter in approximation, adjustable + k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n))) + if k > kmax: + msg = f"Number random pairs k>kmax ({k}>{kmax}) " + raise nx.NetworkXError(msg, "Increase kmax or epsilon") + cstar2k = cstar / (2 * k) + for _ in range(k): + s, t = pair = seed.sample(range(n), 2) + b = np.zeros(n, dtype=dtype) + b[s] = 1 + b[t] = -1 + p = C.solve(b) + for v in H: + if v in pair: + continue + for nbr in H[v]: + w = H[v][nbr].get(weight, 1.0) + betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k + if normalized: + factor = 1.0 + else: + factor = nb / 2.0 + # remap to original node names and "unnormalize" if required + return {ordering[k]: v * factor for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for nodes. 
+ + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by 2/[(n-1)(n-2)] where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype : data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver : string (default='full') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worst case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(n))) + for i in range(n): + betweenness[s] += (i - pos[i]) * row[i] + betweenness[t] += (n - i - 1 - pos[i]) * row[i] + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = float((betweenness[v] - v) * 2.0 / nb) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def edge_current_flow_betweenness_centrality( + G, normalized=True, weight=None, dtype=float, solver="full" +): + r"""Compute current-flow betweenness centrality for edges. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (default=float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+      Type of linear solver to use for computing the flow matrix.
+      Options are "full" (uses most memory), "lu" (recommended), and
+      "cg" (uses least memory).
+
+    Returns
+    -------
+    edges : dictionary
+       Dictionary of edge tuples with betweenness centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+       If the input graph is an instance of the DiGraph class; the
+       algorithm does not support directed graphs.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix. Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm. Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
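+
+    Examples
+    --------
+    A minimal illustrative run (the current-flow solvers require SciPy);
+    on a cycle every edge is equivalent by symmetry, so all edges share
+    one value:
+
+    >>> G = nx.cycle_graph(4)
+    >>> centrality = nx.edge_current_flow_betweenness_centrality(G)
+    >>> len(centrality) == G.number_of_edges()
+    True
+    >>> len({round(v, 6) for v in centrality.values()})
+    1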
+ """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + edges = (tuple(sorted((u, v))) for u, v in H.edges()) + betweenness = dict.fromkeys(edges, 0.0) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + pos = dict(zip(row.argsort()[::-1], range(1, n + 1))) + for i in range(n): + betweenness[e] += (i + 1 - pos[i]) * row[i] + betweenness[e] += (n - i - pos[i]) * row[i] + betweenness[e] /= nb + return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..debfca27f55d84d7e40eff227ccdb6e5dd236c6c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py @@ -0,0 +1,226 @@ +"""Current-flow betweenness centrality measures for subsets of nodes.""" +import networkx as nx +from networkx.algorithms.centrality.flow_matrix import flow_matrix_row +from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering + +__all__ = [ + "current_flow_betweenness_centrality_subset", + "edge_current_flow_betweenness_centrality_subset", +] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for subsets of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with betweenness centrality as the value. + + See Also + -------- + approximate_current_flow_betweenness_centrality + betweenness_centrality + edge_betweenness_centrality + edge_current_flow_betweenness_centrality + + Notes + ----- + Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$ + time [1]_, where $I(n-1)$ is the time needed to compute the + inverse Laplacian. 
For a full matrix this is $O(n^3)$ but using + sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the + Laplacian matrix condition number. + + The space required is $O(nw)$ where $w$ is the width of the sparse + Laplacian matrix. Worse case is $w=n$ for $O(n^2)$. + + If the edges have a 'weight' attribute they will be used as + weights in this algorithm. Unspecified weights are set to 1. + + References + ---------- + .. [1] Centrality Measures Based on Current Flow. + Ulrik Brandes and Daniel Fleischer, + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] A measure of betweenness centrality based on random walks, + M. E. J. Newman, Social Networks 27, 39-54 (2005). + """ + import numpy as np + + from networkx.utils import reverse_cuthill_mckee_ordering + + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + mapping = dict(zip(ordering, range(n))) + H = nx.relabel_nodes(G, mapping) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver): + for ss in sources: + i = mapping[ss] + for tt in targets: + j = mapping[tt] + betweenness[s] += 0.5 * np.abs(row[i] - row[j]) + betweenness[t] += 0.5 * np.abs(row[i] - row[j]) + if normalized: + nb = (n - 1.0) * (n - 2.0) # normalization factor + else: + nb = 2.0 + for v in H: + betweenness[v] = betweenness[v] / nb + 1.0 / (2 - n) + return {ordering[k]: v for k, v in betweenness.items()} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def edge_current_flow_betweenness_centrality_subset( + G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu" +): + r"""Compute current-flow betweenness centrality for edges using subsets + of nodes. + + Current-flow betweenness centrality uses an electrical current + model for information spreading in contrast to betweenness + centrality which uses shortest paths. + + Current-flow betweenness centrality is also known as + random-walk betweenness centrality [2]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + sources: list of nodes + Nodes to use as sources for current + + targets: list of nodes + Nodes to use as sinks for current + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b=b/(n-1)(n-2) where + n is the number of nodes in G. + + weight : string or None, optional (default=None) + Key for edge data used as the edge weight. + If None, then use 1 as each edge weight. + The weight reflects the capacity or the strength of the + edge. + + dtype: data type (float) + Default data type for internal matrices. + Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dict + Dictionary of edge tuples with betweenness centrality as the value. 
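+
+    Raises
+    ------
+    NetworkXError
+       If the input graph is not connected.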
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix. Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm. Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    mapping = dict(zip(ordering, range(n)))
+    H = nx.relabel_nodes(G, mapping)
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        for ss in sources:
+            i = mapping[ss]
+            for tt in targets:
+                j = mapping[tt]
+                betweenness[e] += 0.5 * np.abs(row[i] - row[j])
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): v for (s, t), v in betweenness.items()}
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
new file mode 100644
index 0000000000000000000000000000000000000000..daefbae902ba7f513f3f7b979c4b4918053eaf9a
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
@@ -0,0 +1,97 @@
+"""Current-flow closeness centrality measures."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+    CGInverseLaplacian,
+    FullInverseLaplacian,
+    SuperLUInverseLaplacian,
+)
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = ["current_flow_closeness_centrality", "information_centrality"]
+
+
+@not_implemented_for("directed")
+@nx._dispatch(edge_attrs="weight")
+def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
+    """Compute current-flow closeness centrality for nodes.
+
+    Current-flow closeness centrality is a variant of closeness
+    centrality based on effective resistance between nodes in
+    a network. This metric is also known as information centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (default=float)
+      Default data type for internal matrices.
+ Set to np.float32 for lower memory consumption. + + solver: string (default='lu') + Type of linear solver to use for computing the flow matrix. + Options are "full" (uses most memory), "lu" (recommended), and + "cg" (uses least memory). + + Returns + ------- + nodes : dictionary + Dictionary of nodes with current flow closeness centrality as the value. + + See Also + -------- + closeness_centrality + + Notes + ----- + The algorithm is from Brandes [1]_. + + See also [2]_ for the original definition of information centrality. + + References + ---------- + .. [1] Ulrik Brandes and Daniel Fleischer, + Centrality Measures Based on Current Flow. + Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05). + LNCS 3404, pp. 533-544. Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31856-9_44 + + .. [2] Karen Stephenson and Marvin Zelen: + Rethinking centrality: Methods and examples. + Social Networks 11(1):1-37, 1989. + https://doi.org/10.1016/0378-8733(89)90016-6 + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Graph not connected.") + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + ordering = list(reverse_cuthill_mckee_ordering(G)) + # make a copy with integer labels according to rcm ordering + # this could be done without a copy if we really wanted to + H = nx.relabel_nodes(G, dict(zip(ordering, range(n)))) + betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H + n = H.number_of_nodes() + L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver + for v in H: + col = C2.get_row(v) + for w in H: + betweenness[v] += col[v] - 2 * col[w] + betweenness[w] += col[v] + for v in H: + betweenness[v] = 1 / (betweenness[v]) + return {ordering[k]: v for k, v in betweenness.items()} + + +information_centrality = current_flow_closeness_centrality diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..2631730dbc0d63e1c907a0accc3170af00c50dfa --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py @@ -0,0 +1,149 @@ +"""Degree centrality measures.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"] + + +@nx._dispatch +def degree_centrality(G): + """Compute the degree centrality for nodes. + + The degree centrality for a node v is the fraction of nodes it + is connected to. + + Parameters + ---------- + G : graph + A networkx graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with degree centrality as the value. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.degree_centrality(G) + {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. 
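+
+    For example, each node of a two-node multigraph with two parallel
+    edges has degree 2, giving values above 1 (a quick sketch of this
+    edge case):
+
+    >>> MG = nx.MultiGraph([(0, 1), (0, 1)])
+    >>> nx.degree_centrality(MG)
+    {0: 2.0, 1: 2.0}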
+ """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def in_degree_centrality(G): + """Compute the in-degree centrality for nodes. + + The in-degree centrality for a node v is the fraction of nodes its + incoming edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with in-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.in_degree_centrality(G) + {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666} + + See Also + -------- + degree_centrality, out_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.in_degree()} + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def out_degree_centrality(G): + """Compute the out-degree centrality for nodes. + + The out-degree centrality for a node v is the fraction of nodes its + outgoing edges are connected to. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with out-degree centrality as values. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.out_degree_centrality(G) + {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0} + + See Also + -------- + degree_centrality, in_degree_centrality + + Notes + ----- + The degree centrality values are normalized by dividing by the maximum + possible degree in a simple graph n-1 where n is the number of nodes in G. + + For multigraphs or graphs with self loops the maximum degree might + be higher than n-1 and values of degree centrality greater than 1 + are possible. + """ + if len(G) <= 1: + return {n: 1 for n in G} + + s = 1.0 / (len(G) - 1.0) + centrality = {n: d * s for n, d in G.out_degree()} + return centrality diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/dispersion.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..a551c387d88b0c69f4763678b5cc88567455d98b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/dispersion.py @@ -0,0 +1,107 @@ +from itertools import combinations + +import networkx as nx + +__all__ = ["dispersion"] + + +@nx._dispatch +def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): + r"""Calculate dispersion between `u` and `v` in `G`. + + A link between two actors (`u` and `v`) has a high dispersion when their + mutual ties (`s` and `t`) are not well connected with each other. + + Parameters + ---------- + G : graph + A NetworkX graph. + u : node, optional + The source for the dispersion score (e.g. ego node of the network). + v : node, optional + The target of the dispersion score if specified. 
+ normalized : bool + If True (default) normalize by the embeddedness of the nodes (u and v). + alpha, b, c : float + Parameters for the normalization procedure. When `normalized` is True, + the dispersion value is normalized by:: + + result = ((dispersion + b) ** alpha) / (embeddedness + c) + + as long as the denominator is nonzero. + + Returns + ------- + nodes : dictionary + If u (v) is specified, returns a dictionary of nodes with dispersion + score for all "target" ("source") nodes. If neither u nor v is + specified, returns a dictionary of dictionaries for all nodes 'u' in the + graph with a dispersion score for each node 'v'. + + Notes + ----- + This implementation follows Lars Backstrom and Jon Kleinberg [1]_. Typical + usage would be to run dispersion on the ego network $G_u$ if $u$ were + specified. Running :func:`dispersion` with neither $u$ nor $v$ specified + can take some time to complete. + + References + ---------- + .. [1] Romantic Partnerships and the Dispersion of Social Ties: + A Network Analysis of Relationship Status on Facebook. + Lars Backstrom, Jon Kleinberg. + https://arxiv.org/pdf/1310.6753v1.pdf + + """ + + def _dispersion(G_u, u, v): + """dispersion for all nodes 'v' in a ego network G_u of node 'u'""" + u_nbrs = set(G_u[u]) + ST = {n for n in G_u[v] if n in u_nbrs} + set_uv = {u, v} + # all possible ties of connections that u and b share + possib = combinations(ST, 2) + total = 0 + for s, t in possib: + # neighbors of s that are in G_u, not including u and v + nbrs_s = u_nbrs.intersection(G_u[s]) - set_uv + # s and t are not directly connected + if t not in nbrs_s: + # s and t do not share a connection + if nbrs_s.isdisjoint(G_u[t]): + # tick for disp(u, v) + total += 1 + # neighbors that u and v share + embeddedness = len(ST) + + dispersion_val = total + if normalized: + dispersion_val = (total + b) ** alpha + if embeddedness + c != 0: + dispersion_val /= embeddedness + c + + return dispersion_val + + if u is None: + # v and u are not specified + if v is None: + results = {n: {} for n in G} + for u in G: + for v in G[u]: + results[u][v] = _dispersion(G, u, v) + # u is not specified, but v is + else: + results = dict.fromkeys(G[v], {}) + for u in G[v]: + results[u] = _dispersion(G, v, u) + else: + # u is specified with no target v + if v is None: + results = dict.fromkeys(G[u], {}) + for v in G[u]: + results[v] = _dispersion(G, u, v) + # both u and v are specified + else: + results = _dispersion(G, u, v) + + return results diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py new file mode 100644 index 0000000000000000000000000000000000000000..267e7b5102734d0d28f310ad019325d483c931cb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/eigenvector.py @@ -0,0 +1,341 @@ +"""Functions for computing eigenvector centrality.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["eigenvector_centrality", "eigenvector_centrality_numpy"] + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None, weight=None): + r"""Compute the eigenvector centrality for the graph G. + + Eigenvector centrality computes the centrality for a node by adding + the centrality of its predecessors. 
The centrality for node $i$ is the + $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$ + of maximum modulus that is positive. Such an eigenvector $x$ is + defined up to a multiplicative constant by the equation + + .. math:: + + \lambda x^T = x^T A, + + where $A$ is the adjacency matrix of the graph G. By definition of + row-column product, the equation above is equivalent to + + .. math:: + + \lambda x_i = \sum_{j\to i}x_j. + + That is, adding the eigenvector centralities of the predecessors of + $i$ one obtains the eigenvector centrality of $i$ multiplied by + $\lambda$. In the case of undirected graphs, $x$ also solves the familiar + right-eigenvector equation $Ax = \lambda x$. + + By virtue of the Perron–Frobenius theorem [1]_, if G is strongly + connected there is a unique eigenvector $x$, and all its entries + are strictly positive. + + If G is not strongly connected there might be several left + eigenvectors associated with $\lambda$, and some of their elements + might be zero. + + Parameters + ---------- + G : graph + A networkx graph. + + max_iter : integer, optional (default=100) + Maximum number of power iterations. + + tol : float, optional (default=1.0e-6) + Error tolerance (in Euclidean norm) used to check convergence in + power iteration. + + nstart : dictionary, optional (default=None) + Starting value of power iteration for each node. Must have a nonzero + projection on the desired eigenvector for the power method to converge. + If None, this implementation uses an all-ones vector, which is a safe + choice. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. In this measure the + weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality(G) + >>> sorted((v, f"{c:0.2f}") for v, c in centrality.items()) + [(0, '0.37'), (1, '0.60'), (2, '0.60'), (3, '0.37')] + + Raises + ------ + NetworkXPointlessConcept + If the graph G is the null graph. + + NetworkXError + If each value in `nstart` is zero. + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + See Also + -------- + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + The implementation uses power iteration [7]_ to compute a dominant + eigenvector starting from the provided vector `nstart`. 
Convergence is + guaranteed as long as `nstart` has a nonzero projection on a dominant + eigenvector, which certainly happens using the default value. + + The method stops when the change in the computed vector between two + iterations is smaller than an error tolerance of ``G.number_of_nodes() + * tol`` or after ``max_iter`` iterations, but in the second case it + raises an exception. + + This implementation uses $(A + I)$ rather than the adjacency matrix + $A$ because the change preserves eigenvectors, but it shifts the + spectrum, thus guaranteeing convergence even for networks with + negative eigenvalues of maximum modulus. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences." + Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate." + Deutsches Wochenschach, 11:366–369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory." + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons." + Biometrics, 11(1):43–62, 1955. + https://www.jstor.org/stable/3001479 + + .. [5] Claude Berge + "Théorie des graphes et ses applications." + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships." + Sociological Methodology, 4:176–185, 1972. + https://www.jstor.org/stable/270732 + + .. [7] Power iteration:: https://en.wikipedia.org/wiki/Power_iteration + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + # If no initial vector is provided, start with the all-ones vector. + if nstart is None: + nstart = {v: 1 for v in G} + if all(v == 0 for v in nstart.values()): + raise nx.NetworkXError("initial vector cannot have all zero values") + # Normalize the initial vector so that each entry is in [0, 1]. This is + # guaranteed to never have a divide-by-zero error by the previous line. + nstart_sum = sum(nstart.values()) + x = {k: v / nstart_sum for k, v in nstart.items()} + nnodes = G.number_of_nodes() + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = xlast.copy() # Start with xlast times I to iterate with (A+I) + # do the multiplication y^T = x^T A (left eigenvector) + for n in x: + for nbr in G[n]: + w = G[n][nbr].get(weight, 1) if weight else 1 + x[nbr] += xlast[n] * w + # Normalize the vector. The normalization denominator `norm` + # should never be zero by the Perron--Frobenius + # theorem. However, in case it is due to numerical error, we + # assume the norm to be one instead. + norm = math.hypot(*x.values()) or 1 + x = {k: v / norm for k, v in x.items()} + # Check for convergence (in the L_1 norm). + if sum(abs(x[n] - xlast[n]) for n in x) < nnodes * tol: + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@nx._dispatch(edge_attrs="weight") +def eigenvector_centrality_numpy(G, weight=None, max_iter=50, tol=0): + r"""Compute the eigenvector centrality for the graph G. + + Eigenvector centrality computes the centrality for a node by adding + the centrality of its predecessors. The centrality for node $i$ is the + $i$-th element of a left eigenvector associated with the eigenvalue $\lambda$ + of maximum modulus that is positive. Such an eigenvector $x$ is + defined up to a multiplicative constant by the equation + + .. 
math:: + + \lambda x^T = x^T A, + + where $A$ is the adjacency matrix of the graph G. By definition of + row-column product, the equation above is equivalent to + + .. math:: + + \lambda x_i = \sum_{j\to i}x_j. + + That is, adding the eigenvector centralities of the predecessors of + $i$ one obtains the eigenvector centrality of $i$ multiplied by + $\lambda$. In the case of undirected graphs, $x$ also solves the familiar + right-eigenvector equation $Ax = \lambda x$. + + By virtue of the Perron–Frobenius theorem [1]_, if G is strongly + connected there is a unique eigenvector $x$, and all its entries + are strictly positive. + + If G is not strongly connected there might be several left + eigenvectors associated with $\lambda$, and some of their elements + might be zero. + + Parameters + ---------- + G : graph + A networkx graph. + + max_iter : integer, optional (default=50) + Maximum number of Arnoldi update iterations allowed. + + tol : float, optional (default=0) + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. Otherwise holds the + name of the edge attribute used as weight. In this measure the + weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with eigenvector centrality as the value. The + associated vector has unit Euclidean norm and the values are + nonegative. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> centrality = nx.eigenvector_centrality_numpy(G) + >>> print([f"{node} {centrality[node]:0.2f}" for node in centrality]) + ['0 0.37', '1 0.60', '2 0.60', '3 0.37'] + + Raises + ------ + NetworkXPointlessConcept + If the graph G is the null graph. + + ArpackNoConvergence + When the requested convergence is not obtained. The currently + converged eigenvalues and eigenvectors can be found as + eigenvalues and eigenvectors attributes of the exception object. + + See Also + -------- + :func:`scipy.sparse.linalg.eigs` + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Eigenvector centrality was introduced by Landau [2]_ for chess + tournaments. It was later rediscovered by Wei [3]_ and then + popularized by Kendall [4]_ in the context of sport ranking. Berge + introduced a general definition for graphs based on social connections + [5]_. Bonacich [6]_ reintroduced again eigenvector centrality and made + it popular in link analysis. + + This function computes the left dominant eigenvector, which corresponds + to adding the centrality of predecessors: this is the usual approach. + To add the centrality of successors first reverse the graph with + ``G.reverse()``. + + This implementation uses the + :func:`SciPy sparse eigenvalue solver` (ARPACK) + to find the largest eigenvalue/eigenvector pair using Arnoldi iterations + [7]_. + + References + ---------- + .. [1] Abraham Berman and Robert J. Plemmons. + "Nonnegative Matrices in the Mathematical Sciences." + Classics in Applied Mathematics. SIAM, 1994. + + .. [2] Edmund Landau. + "Zur relativen Wertbemessung der Turnierresultate." + Deutsches Wochenschach, 11:366–369, 1895. + + .. [3] Teh-Hsing Wei. + "The Algebraic Foundations of Ranking Theory." + PhD thesis, University of Cambridge, 1952. + + .. [4] Maurice G. Kendall. + "Further contributions to the theory of paired comparisons." 
+ Biometrics, 11(1):43–62, 1955. + https://www.jstor.org/stable/3001479 + + .. [5] Claude Berge + "Théorie des graphes et ses applications." + Dunod, Paris, France, 1958. + + .. [6] Phillip Bonacich. + "Technique for analyzing overlapping memberships." + Sociological Methodology, 4:176–185, 1972. + https://www.jstor.org/stable/270732 + + .. [7] Arnoldi iteration:: https://en.wikipedia.org/wiki/Arnoldi_iteration + + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "cannot compute centrality for the null graph" + ) + M = nx.to_scipy_sparse_array(G, nodelist=list(G), weight=weight, dtype=float) + _, eigenvector = sp.sparse.linalg.eigs( + M.T, k=1, which="LR", maxiter=max_iter, tol=tol + ) + largest = eigenvector.flatten().real + norm = np.sign(largest.sum()) * sp.linalg.norm(largest) + return dict(zip(G, largest / norm)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..e9cd7e26016e1be01c8d096b0404a33144799eb6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/flow_matrix.py @@ -0,0 +1,130 @@ +# Helpers for current-flow betweenness and current-flow closeness +# Lazy computations for inverse Laplacian and flow-matrix rows. +import networkx as nx + + +@nx._dispatch(edge_attrs="weight") +def flow_matrix_row(G, weight=None, dtype=float, solver="lu"): + # Generate a row of the current-flow matrix + import numpy as np + + solvername = { + "full": FullInverseLaplacian, + "lu": SuperLUInverseLaplacian, + "cg": CGInverseLaplacian, + } + n = G.number_of_nodes() + L = nx.laplacian_matrix(G, nodelist=range(n), weight=weight).asformat("csc") + L = L.astype(dtype) + C = solvername[solver](L, dtype=dtype) # initialize solver + w = C.w # w is the Laplacian matrix width + # row-by-row flow matrix + for u, v in sorted(sorted((u, v)) for u, v in G.edges()): + B = np.zeros(w, dtype=dtype) + c = G[u][v].get(weight, 1.0) + B[u % w] = c + B[v % w] = -c + # get only the rows needed in the inverse laplacian + # and multiply to get the flow matrix row + row = B @ C.get_rows(u, v) + yield row, (u, v) + + +# Class to compute the inverse laplacian only for specified rows +# Allows computation of the current-flow matrix without storing entire +# inverse laplacian matrix +class InverseLaplacian: + def __init__(self, L, width=None, dtype=None): + global np + import numpy as np + + (n, n) = L.shape + self.dtype = dtype + self.n = n + if width is None: + self.w = self.width(L) + else: + self.w = width + self.C = np.zeros((self.w, n), dtype=dtype) + self.L1 = L[1:, 1:] + self.init_solver(L) + + def init_solver(self, L): + pass + + def solve(self, r): + raise nx.NetworkXError("Implement solver") + + def solve_inverse(self, r): + raise nx.NetworkXError("Implement solver") + + def get_rows(self, r1, r2): + for r in range(r1, r2 + 1): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C + + def get_row(self, r): + self.C[r % self.w, 1:] = self.solve_inverse(r) + return self.C[r % self.w] + + def width(self, L): + m = 0 + for i, row in enumerate(L): + w = 0 + x, y = np.nonzero(row) + if len(y) > 0: + v = y - i + w = v.max() - v.min() + 1 + m = max(w, m) + return m + + +class FullInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + self.IL = np.zeros(L.shape, dtype=self.dtype) + self.IL[1:, 1:] = np.linalg.inv(self.L1.todense()) + + def solve(self, rhs): + s = 
np.zeros(rhs.shape, dtype=self.dtype) + s = self.IL @ rhs + return s + + def solve_inverse(self, r): + return self.IL[r, 1:] + + +class SuperLUInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + import scipy as sp + + self.lusolve = sp.sparse.linalg.factorized(self.L1.tocsc()) + + def solve_inverse(self, r): + rhs = np.zeros(self.n, dtype=self.dtype) + rhs[r] = 1 + return self.lusolve(rhs[1:]) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = self.lusolve(rhs[1:]) + return s + + +class CGInverseLaplacian(InverseLaplacian): + def init_solver(self, L): + global sp + import scipy as sp + + ilu = sp.sparse.linalg.spilu(self.L1.tocsc()) + n = self.n - 1 + self.M = sp.sparse.linalg.LinearOperator(shape=(n, n), matvec=ilu.solve) + + def solve(self, rhs): + s = np.zeros(rhs.shape, dtype=self.dtype) + s[1:] = sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] + return s + + def solve_inverse(self, r): + rhs = np.zeros(self.n, self.dtype) + rhs[r] = 1 + return sp.sparse.linalg.cg(self.L1, rhs[1:], M=self.M, atol=0)[0] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/group.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/group.py new file mode 100644 index 0000000000000000000000000000000000000000..8207a71a5ae3bd0acfe0d90370ec10304d8a8c67 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/group.py @@ -0,0 +1,785 @@ +"""Group centrality measures.""" +from copy import deepcopy + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _accumulate_endpoints, + _single_source_dijkstra_path_basic, + _single_source_shortest_path_basic, +) +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "group_betweenness_centrality", + "group_closeness_centrality", + "group_degree_centrality", + "group_in_degree_centrality", + "group_out_degree_centrality", + "prominent_group", +] + + +@nx._dispatch(edge_attrs="weight") +def group_betweenness_centrality(G, C, normalized=True, weight=None, endpoints=False): + r"""Compute the group betweenness centrality for a group of nodes. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + C : list or set or list of lists or list of sets + A group or a list of groups containing nodes which belong to G, for which group betweenness + centrality is to be calculated. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by `1/((|V|-|C|)(|V|-|C|-1))` + where `|V|` is the number of nodes in G and `|C|` is the number of nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. 
+ + Returns + ------- + betweenness : list of floats or float + If C is a single group then return a float. If C is a list with + several groups then return a list of group betweenness centralities. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The initial implementation of the algorithm is mentioned in [2]_. This function uses + an improved algorithm presented in [4]_. + + The number of nodes in the group must be a maximum of n - 2 where `n` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Ulrik Brandes: + On Variants of Shortest-Path Betweenness + Centrality and their Generic Computation. + Social Networks 30(2):136-145, 2008. + http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.72.9610&rep=rep1&type=pdf + .. [3] Sourav Medya et. al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." 
+
+       https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709
+
+    """
+    GBC = []  # initialize betweenness
+    list_of_groups = True
+    # check whether C contains one or many groups
+    if any(el in G for el in C):
+        C = [C]
+        list_of_groups = False
+    set_v = {node for group in C for node in group}
+    if set_v - G.nodes:  # element(s) of C not in G
+        raise nx.NodeNotFound(f"The node(s) {set_v - G.nodes} are in C but not in G.")
+
+    # pre-processing
+    PB, sigma, D = _group_preprocessing(G, set_v, weight)
+
+    # the algorithm for each group
+    for group in C:
+        group = set(group)  # set of nodes in group
+        # initialize the matrices of the sigma and the PB
+        GBC_group = 0
+        sigma_m = deepcopy(sigma)
+        PB_m = deepcopy(PB)
+        sigma_m_v = deepcopy(sigma_m)
+        PB_m_v = deepcopy(PB_m)
+        for v in group:
+            GBC_group += PB_m[v][v]
+            for x in group:
+                for y in group:
+                    dxvy = 0
+                    dxyv = 0
+                    dvxy = 0
+                    if not (
+                        sigma_m[x][y] == 0 or sigma_m[x][v] == 0 or sigma_m[v][y] == 0
+                    ):
+                        if D[x][v] == D[x][y] + D[y][v]:
+                            dxyv = sigma_m[x][y] * sigma_m[y][v] / sigma_m[x][v]
+                        if D[x][y] == D[x][v] + D[v][y]:
+                            dxvy = sigma_m[x][v] * sigma_m[v][y] / sigma_m[x][y]
+                        if D[v][y] == D[v][x] + D[x][y]:
+                            dvxy = sigma_m[v][x] * sigma_m[x][y] / sigma_m[v][y]
+                    sigma_m_v[x][y] = sigma_m[x][y] * (1 - dxvy)
+                    PB_m_v[x][y] = PB_m[x][y] - PB_m[x][y] * dxvy
+                    if y != v:
+                        PB_m_v[x][y] -= PB_m[x][v] * dxyv
+                    if x != v:
+                        PB_m_v[x][y] -= PB_m[v][y] * dvxy
+            sigma_m, sigma_m_v = sigma_m_v, sigma_m
+            PB_m, PB_m_v = PB_m_v, PB_m
+
+        # endpoints
+        v, c = len(G), len(group)
+        if not endpoints:
+            scale = 0
+            # if the graph is connected then subtract the endpoints from
+            # the count for all the nodes in the graph. else count how many
+            # nodes are connected to the group's nodes and subtract that.
+            if nx.is_directed(G):
+                if nx.is_strongly_connected(G):
+                    scale = c * (2 * v - c - 1)
+            elif nx.is_connected(G):
+                scale = c * (2 * v - c - 1)
+            if scale == 0:
+                for group_node1 in group:
+                    for node in D[group_node1]:
+                        if node != group_node1:
+                            if node in group:
+                                scale += 1
+                            else:
+                                scale += 2
+            GBC_group -= scale
+
+        # normalized
+        if normalized:
+            scale = 1 / ((v - c) * (v - c - 1))
+            GBC_group *= scale
+
+        # If undirected then count only the undirected edges
+        elif not G.is_directed():
+            GBC_group /= 2
+
+        GBC.append(GBC_group)
+    if list_of_groups:
+        return GBC
+    return GBC[0]
+
+
+def _group_preprocessing(G, set_v, weight):
+    sigma = {}
+    delta = {}
+    D = {}
+    betweenness = dict.fromkeys(G, 0)
+    for s in G:
+        if weight is None:  # use BFS
+            S, P, sigma[s], D[s] = _single_source_shortest_path_basic(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma[s], D[s] = _single_source_dijkstra_path_basic(G, s, weight)
+        betweenness, delta[s] = _accumulate_endpoints(betweenness, S, P, sigma[s], s)
+        for i in delta[s]:  # add the paths from s to i and rescale sigma
+            if s != i:
+                delta[s][i] += 1
+            if weight is not None:
+                sigma[s][i] = sigma[s][i] / 2
+    # building the path betweenness matrix only for nodes that appear in the group
+    PB = dict.fromkeys(G)
+    for group_node1 in set_v:
+        PB[group_node1] = dict.fromkeys(G, 0.0)
+        for group_node2 in set_v:
+            if group_node2 not in D[group_node1]:
+                continue
+            for node in G:
+                # only consider nodes that can reach both group nodes
+                if group_node2 in D[node] and group_node1 in D[node]:
+                    if (
+                        D[node][group_node2]
+                        == D[node][group_node1] + D[group_node1][group_node2]
+                    ):
+                        PB[group_node1][group_node2] += (
+                            delta[node][group_node2]
+                            * sigma[node][group_node1]
+                            * sigma[group_node1][group_node2]
+                            /
sigma[node][group_node2] + ) + return PB, sigma, D + + +@nx._dispatch(edge_attrs="weight") +def prominent_group( + G, k, weight=None, C=None, endpoints=False, normalized=True, greedy=False +): + r"""Find the prominent group of size $k$ in graph $G$. The prominence of the + group is evaluated by the group betweenness centrality. + + Group betweenness centrality of a group of nodes $C$ is the sum of the + fraction of all-pairs shortest paths that pass through any vertex in $C$ + + .. math:: + + c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)} + + where $V$ is the set of nodes, $\sigma(s, t)$ is the number of + shortest $(s, t)$-paths, and $\sigma(s, t|C)$ is the number of + those paths passing through some node in group $C$. Note that + $(s, t)$ are not members of the group ($V-C$ is the set of nodes + in $V$ that are not in $C$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + k : int + The number of nodes in the group. + + normalized : bool, optional (default=True) + If True, group betweenness is normalized by ``1/((|V|-|C|)(|V|-|C|-1))`` + where ``|V|`` is the number of nodes in G and ``|C|`` is the number of + nodes in C. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + endpoints : bool, optional (default=False) + If True include the endpoints in the shortest path counts. + + C : list or set, optional (default=None) + list of nodes which won't be candidates of the prominent group. + + greedy : bool, optional (default=False) + Using a naive greedy algorithm in order to find non-optimal prominent + group. For scale free networks the results are negligibly below the optimal + results. + + Raises + ------ + NodeNotFound + If node(s) in C are not present in G. + + Returns + ------- + max_GBC : float + The group betweenness centrality of the prominent group. + + max_group : list + The list of nodes in the prominent group. + + See Also + -------- + betweenness_centrality, group_betweenness_centrality + + Notes + ----- + Group betweenness centrality is described in [1]_ and its importance discussed in [3]_. + The algorithm is described in [2]_ and is based on techniques mentioned in [4]_. + + The number of nodes in the group must be a maximum of ``n - 2`` where ``n`` + is the total number of nodes in the graph. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + The total number of paths between source and target is counted + differently for directed and undirected graphs. Directed paths + between "u" and "v" are counted as two possible paths (one each + direction) while undirected paths between "u" and "v" are counted + as one path. Said another way, the sum in the expression above is + over all ``s != t`` for directed graphs and for ``s < t`` for undirected graphs. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] Rami Puzis, Yuval Elovici, and Shlomi Dolev: + "Finding the Most Prominent Group in Complex Networks" + AI communications 20(4): 287-296, 2007. + https://www.researchgate.net/profile/Rami_Puzis2/publication/220308855 + .. [3] Sourav Medya et. 
al.: + Group Centrality Maximization via Network Design. + SIAM International Conference on Data Mining, SDM 2018, 126–134. + https://sites.cs.ucsb.edu/~arlei/pubs/sdm18.pdf + .. [4] Rami Puzis, Yuval Elovici, and Shlomi Dolev. + "Fast algorithm for successive computation of group betweenness centrality." + https://journals.aps.org/pre/pdf/10.1103/PhysRevE.76.056709 + """ + import numpy as np + import pandas as pd + + if C is not None: + C = set(C) + if C - G.nodes: # element(s) of C not in G + raise nx.NodeNotFound(f"The node(s) {C - G.nodes} are in C but not in G.") + nodes = list(G.nodes - C) + else: + nodes = list(G.nodes) + DF_tree = nx.Graph() + PB, sigma, D = _group_preprocessing(G, nodes, weight) + betweenness = pd.DataFrame.from_dict(PB) + if C is not None: + for node in C: + # remove from the betweenness all the nodes not part of the group + betweenness.drop(index=node, inplace=True) + betweenness.drop(columns=node, inplace=True) + CL = [node for _, node in sorted(zip(np.diag(betweenness), nodes), reverse=True)] + max_GBC = 0 + max_group = [] + DF_tree.add_node( + 1, + CL=CL, + betweenness=betweenness, + GBC=0, + GM=[], + sigma=sigma, + cont=dict(zip(nodes, np.diag(betweenness))), + ) + + # the algorithm + DF_tree.nodes[1]["heu"] = 0 + for i in range(k): + DF_tree.nodes[1]["heu"] += DF_tree.nodes[1]["cont"][DF_tree.nodes[1]["CL"][i]] + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, 1, D, max_group, nodes, greedy + ) + + v = len(G) + if not endpoints: + scale = 0 + # if the graph is connected then subtract the endpoints from + # the count for all the nodes in the graph. else count how many + # nodes are connected to the group's nodes and subtract that. + if nx.is_directed(G): + if nx.is_strongly_connected(G): + scale = k * (2 * v - k - 1) + elif nx.is_connected(G): + scale = k * (2 * v - k - 1) + if scale == 0: + for group_node1 in max_group: + for node in D[group_node1]: + if node != group_node1: + if node in max_group: + scale += 1 + else: + scale += 2 + max_GBC -= scale + + # normalized + if normalized: + scale = 1 / ((v - k) * (v - k - 1)) + max_GBC *= scale + + # If undirected then count only the undirected edges + elif not G.is_directed(): + max_GBC /= 2 + max_GBC = float("%.2f" % max_GBC) + return max_GBC, max_group + + +def _dfbnb(G, k, DF_tree, max_GBC, root, D, max_group, nodes, greedy): + # stopping condition - if we found a group of size k and with higher GBC then prune + if len(DF_tree.nodes[root]["GM"]) == k and DF_tree.nodes[root]["GBC"] > max_GBC: + return DF_tree.nodes[root]["GBC"], DF_tree, DF_tree.nodes[root]["GM"] + # stopping condition - if the size of group members equal to k or there are less than + # k - |GM| in the candidate list or the heuristic function plus the GBC is below the + # maximal GBC found then prune + if ( + len(DF_tree.nodes[root]["GM"]) == k + or len(DF_tree.nodes[root]["CL"]) <= k - len(DF_tree.nodes[root]["GM"]) + or DF_tree.nodes[root]["GBC"] + DF_tree.nodes[root]["heu"] <= max_GBC + ): + return max_GBC, DF_tree, max_group + + # finding the heuristic of both children + node_p, node_m, DF_tree = _heuristic(k, root, DF_tree, D, nodes, greedy) + + # finding the child with the bigger heuristic + GBC and expand + # that node first if greedy then only expand the plus node + if greedy: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + + elif ( + DF_tree.nodes[node_p]["GBC"] + DF_tree.nodes[node_p]["heu"] + > DF_tree.nodes[node_m]["GBC"] + DF_tree.nodes[node_m]["heu"] + 
): + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + else: + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_m, D, max_group, nodes, greedy + ) + max_GBC, DF_tree, max_group = _dfbnb( + G, k, DF_tree, max_GBC, node_p, D, max_group, nodes, greedy + ) + return max_GBC, DF_tree, max_group + + +def _heuristic(k, root, DF_tree, D, nodes, greedy): + import numpy as np + + # This helper function add two nodes to DF_tree - one left son and the + # other right son, finds their heuristic, CL, GBC, and GM + node_p = DF_tree.number_of_nodes() + 1 + node_m = DF_tree.number_of_nodes() + 2 + added_node = DF_tree.nodes[root]["CL"][0] + + # adding the plus node + DF_tree.add_nodes_from([(node_p, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_p]["GM"].append(added_node) + DF_tree.nodes[node_p]["GBC"] += DF_tree.nodes[node_p]["cont"][added_node] + root_node = DF_tree.nodes[root] + for x in nodes: + for y in nodes: + dxvy = 0 + dxyv = 0 + dvxy = 0 + if not ( + root_node["sigma"][x][y] == 0 + or root_node["sigma"][x][added_node] == 0 + or root_node["sigma"][added_node][y] == 0 + ): + if D[x][added_node] == D[x][y] + D[y][added_node]: + dxyv = ( + root_node["sigma"][x][y] + * root_node["sigma"][y][added_node] + / root_node["sigma"][x][added_node] + ) + if D[x][y] == D[x][added_node] + D[added_node][y]: + dxvy = ( + root_node["sigma"][x][added_node] + * root_node["sigma"][added_node][y] + / root_node["sigma"][x][y] + ) + if D[added_node][y] == D[added_node][x] + D[x][y]: + dvxy = ( + root_node["sigma"][added_node][x] + * root_node["sigma"][x][y] + / root_node["sigma"][added_node][y] + ) + DF_tree.nodes[node_p]["sigma"][x][y] = root_node["sigma"][x][y] * (1 - dxvy) + DF_tree.nodes[node_p]["betweenness"][x][y] = ( + root_node["betweenness"][x][y] - root_node["betweenness"][x][y] * dxvy + ) + if y != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][x][added_node] * dxyv + ) + if x != added_node: + DF_tree.nodes[node_p]["betweenness"][x][y] -= ( + root_node["betweenness"][added_node][y] * dvxy + ) + + DF_tree.nodes[node_p]["CL"] = [ + node + for _, node in sorted( + zip(np.diag(DF_tree.nodes[node_p]["betweenness"]), nodes), reverse=True + ) + if node not in DF_tree.nodes[node_p]["GM"] + ] + DF_tree.nodes[node_p]["cont"] = dict( + zip(nodes, np.diag(DF_tree.nodes[node_p]["betweenness"])) + ) + DF_tree.nodes[node_p]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_p]["GM"])): + DF_tree.nodes[node_p]["heu"] += DF_tree.nodes[node_p]["cont"][ + DF_tree.nodes[node_p]["CL"][i] + ] + + # adding the minus node - don't insert the first node in the CL to GM + # Insert minus node only if isn't greedy type algorithm + if not greedy: + DF_tree.add_nodes_from([(node_m, deepcopy(DF_tree.nodes[root]))]) + DF_tree.nodes[node_m]["CL"].pop(0) + DF_tree.nodes[node_m]["cont"].pop(added_node) + DF_tree.nodes[node_m]["heu"] = 0 + for i in range(k - len(DF_tree.nodes[node_m]["GM"])): + DF_tree.nodes[node_m]["heu"] += DF_tree.nodes[node_m]["cont"][ + DF_tree.nodes[node_m]["CL"][i] + ] + else: + node_m = None + + return node_p, node_m, DF_tree + + +@nx._dispatch(edge_attrs="weight") +def group_closeness_centrality(G, S, weight=None): + r"""Compute the group closeness centrality for a group of nodes. 
+ + Group closeness centrality of a group of nodes $S$ is a measure + of how close the group is to the other nodes in the graph. + + .. math:: + + c_{close}(S) = \frac{|V-S|}{\sum_{v \in V-S} d_{S, v}} + + d_{S, v} = min_{u \in S} (d_{u, v}) + + where $V$ is the set of nodes, $d_{S, v}$ is the distance of + the group $S$ from $v$ defined as above. ($V-S$ is the set of nodes + in $V$ that are not in $S$). + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group closeness + centrality is to be calculated. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + Raises + ------ + NodeNotFound + If node(s) in S are not present in G. + + Returns + ------- + closeness : float + Group closeness centrality of the group S. + + See Also + -------- + closeness_centrality + + Notes + ----- + The measure was introduced in [1]_. + The formula implemented here is described in [2]_. + + Higher values of closeness indicate greater centrality. + + It is assumed that 1 / 0 is 0 (required in the case of directed graphs, + or when a shortest path length is 0). + + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + For directed graphs, the incoming distance is utilized here. To use the + outward distance, act on `G.reverse()`. + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + .. [2] J. Zhao et. al.: + Measuring and Maximizing Group Closeness Centrality over + Disk Resident Graphs. + WWWConference Proceedings, 2014. 689-694. + https://doi.org/10.1145/2567948.2579356 + """ + if G.is_directed(): + G = G.reverse() # reverse view + closeness = 0 # initialize to 0 + V = set(G) # set of nodes in G + S = set(S) # set of nodes in group S + V_S = V - S # set of nodes in V but not S + shortest_path_lengths = nx.multi_source_dijkstra_path_length(G, S, weight=weight) + # accumulation + for v in V_S: + try: + closeness += shortest_path_lengths[v] + except KeyError: # no path exists + closeness += 0 + try: + closeness = len(V_S) / closeness + except ZeroDivisionError: # 1 / 0 assumed as 0 + closeness = 0 + return closeness + + +@nx._dispatch +def group_degree_centrality(G, S): + """Compute the group degree centrality for a group of nodes. + + Group degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group degree + centrality is to be calculated. + + Raises + ------ + NetworkXError + If node(s) in S are not in G. + + Returns + ------- + centrality : float + Group degree centrality of the group S. + + See Also + -------- + degree_centrality + group_in_degree_centrality + group_out_degree_centrality + + Notes + ----- + The measure was introduced in [1]_. 
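+
+    For example, the center of a star graph reaches every other node by
+    itself, so a group containing only the center attains the maximal
+    value of 1 (a small illustrative sketch):
+
+    >>> G = nx.star_graph(4)
+    >>> nx.group_degree_centrality(G, [0])
+    1.0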
+ + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + References + ---------- + .. [1] M G Everett and S P Borgatti: + The Centrality of Groups and Classes. + Journal of Mathematical Sociology. 23(3): 181-201. 1999. + http://www.analytictech.com/borgatti/group_centrality.htm + """ + centrality = len(set().union(*[set(G.neighbors(i)) for i in S]) - set(S)) + centrality /= len(G.nodes()) - len(S) + return centrality + + +@not_implemented_for("undirected") +@nx._dispatch +def group_in_degree_centrality(G, S): + """Compute the group in-degree centrality for a group of nodes. + + Group in-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by incoming edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group in-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group in-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_out_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group in-degree centrality, the reverse graph is used. + """ + return group_degree_centrality(G.reverse(), S) + + +@not_implemented_for("undirected") +@nx._dispatch +def group_out_degree_centrality(G, S): + """Compute the group out-degree centrality for a group of nodes. + + Group out-degree centrality of a group of nodes $S$ is the fraction + of non-group members connected to group members by outgoing edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + S : list or set + S is a group of nodes which belong to G, for which group out-degree + centrality is to be calculated. + + Returns + ------- + centrality : float + Group out-degree centrality of the group S. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + NodeNotFound + If node(s) in S are not in G. + + See Also + -------- + degree_centrality + group_degree_centrality + group_in_degree_centrality + + Notes + ----- + The number of nodes in the group must be a maximum of n - 1 where `n` + is the total number of nodes in the graph. + + `G.neighbors(i)` gives nodes with an outward edge from i, in a DiGraph, + so for group out-degree centrality, the graph itself is used. + """ + return group_degree_centrality(G, S) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/harmonic.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/harmonic.py new file mode 100644 index 0000000000000000000000000000000000000000..86b5020f96c49f4d647bea5d1624b862ee54c849 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/harmonic.py @@ -0,0 +1,80 @@ +"""Functions for computing the harmonic centrality of a graph.""" +from functools import partial + +import networkx as nx + +__all__ = ["harmonic_centrality"] + + +@nx._dispatch(edge_attrs="distance") +def harmonic_centrality(G, nbunch=None, distance=None, sources=None): + r"""Compute harmonic centrality for nodes. + + Harmonic centrality [1]_ of a node `u` is the sum of the reciprocal + of the shortest path distances from all other nodes to `u` + + ..
math:: + + C(u) = \sum_{v \neq u} \frac{1}{d(v, u)} + + where `d(v, u)` is the shortest-path distance between `v` and `u`. + + If `sources` is given as an argument, the returned harmonic centrality + values are calculated as the sum of the reciprocals of the shortest + path distances from the nodes specified in `sources` to `u` instead + of from all nodes to `u`. + + Notice that higher values indicate higher centrality. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : container (default: all nodes in G) + Container of nodes for which harmonic centrality values are calculated. + + sources : container (default: all nodes in G) + Container of nodes `v` over which reciprocal distances are computed. + Nodes not in `G` are silently ignored. + + distance : edge attribute key, optional (default=None) + Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with harmonic centrality as the value. + + See Also + -------- + betweenness_centrality, load_centrality, eigenvector_centrality, + degree_centrality, closeness_centrality + + Notes + ----- + If the 'distance' keyword is set to an edge attribute key then the + shortest-path length will be computed using Dijkstra's algorithm with + that edge attribute as the edge weight. + + References + ---------- + .. [1] Boldi, Paolo, and Sebastiano Vigna. "Axioms for centrality." + Internet Mathematics 10.3-4 (2014): 222-262. + """ + + nbunch = set(G.nbunch_iter(nbunch)) if nbunch is not None else set(G.nodes) + sources = set(G.nbunch_iter(sources)) if sources is not None else G.nodes + + spl = partial(nx.shortest_path_length, G, weight=distance) + centrality = {u: 0 for u in nbunch} + for v in sources: + dist = spl(v) + for u in nbunch.intersection(dist): + d = dist[u] + if d == 0: # handle u == v and edges with 0 weight + continue + centrality[u] += 1 / d + + return centrality diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/katz.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/katz.py new file mode 100644 index 0000000000000000000000000000000000000000..3c18e5aa2b25fbefb88411e691de3380bc94f012 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/katz.py @@ -0,0 +1,331 @@ +"""Katz centrality.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["katz_centrality", "katz_centrality_numpy"] + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def katz_centrality( + G, + alpha=0.1, + beta=1.0, + max_iter=1000, + tol=1.0e-6, + nstart=None, + normalized=True, + weight=None, +): + r"""Compute the Katz centrality for the nodes of the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. 
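With ``harmonic_centrality`` complete above, a brief illustrative sketch (graph chosen here, not from the library docs) shows the measure's main selling point: it stays well defined on disconnected graphs, since unreachable pairs contribute zero instead of breaking the sum.

```python
import networkx as nx

# Two components: harmonic centrality degrades gracefully, because an
# unreachable pair contributes 1/inf = 0 to the sum.
G = nx.Graph([(0, 1), (1, 2), (3, 4)])

hc = nx.harmonic_centrality(G)
print(hc[1])  # 2.0: nodes 0 and 2 each reach node 1 at distance 1
print(hc[3])  # 1.0: only node 4 can reach node 3
```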
+ + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph. + + alpha : float, optional (default=0.1) + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have a value for every node. + + max_iter : integer, optional (default=1000) + Maximum number of iterations in power method. + + tol : float, optional (default=1.0e-6) + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of Katz iteration for each node. + + normalized : bool, optional (default=True) + If True normalize the resulting values. + + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality(G, 1 / phi - 0.01) + >>> for n, c in sorted(centrality.items()): + ... print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality_numpy + eigenvector_centrality + eigenvector_centrality_numpy + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses the power method to find the eigenvector + corresponding to the largest eigenvalue of the adjacency matrix of ``G``. + The parameter ``alpha`` should be strictly less than the inverse of the largest + eigenvalue of the adjacency matrix for the algorithm to converge. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest + eigenvalue of the adjacency matrix. + The iteration will stop after ``max_iter`` iterations or when an error tolerance of + ``number_of_nodes(G) * tol`` has been reached. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which correspond + to the in-edges in the graph. For out-edges Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 720. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index.
Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + if len(G) == 0: + return {} + + nnodes = G.number_of_nodes() + + if nstart is None: + # choose starting vector with entries of 0 + x = {n: 0 for n in G} + else: + x = nstart + + try: + b = dict.fromkeys(G, float(beta)) + except (TypeError, ValueError, AttributeError) as err: + b = beta + if set(beta) != set(G): + raise nx.NetworkXError( + "beta dictionary must have a value for every node" + ) from err + + # make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = dict.fromkeys(xlast, 0) + # do the multiplication y^T = Alpha * x^T A + Beta + for n in x: + for nbr in G[n]: + x[nbr] += xlast[n] * G[n][nbr].get(weight, 1) + for n in x: + x[n] = alpha * x[n] + b[n] + + # check convergence + error = sum(abs(x[n] - xlast[n]) for n in x) + if error < nnodes * tol: + if normalized: + # normalize vector + try: + s = 1.0 / math.hypot(*x.values()) + # this should never be zero? + except ZeroDivisionError: + s = 1.0 + else: + s = 1 + for n in x: + x[n] *= s + return x + raise nx.PowerIterationFailedConvergence(max_iter) + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def katz_centrality_numpy(G, alpha=0.1, beta=1.0, normalized=True, weight=None): + r"""Compute the Katz centrality for the graph G. + + Katz centrality computes the centrality for a node based on the centrality + of its neighbors. It is a generalization of the eigenvector centrality. The + Katz centrality for node $i$ is + + .. math:: + + x_i = \alpha \sum_{j} A_{ij} x_j + \beta, + + where $A$ is the adjacency matrix of graph G with eigenvalues $\lambda$. + + The parameter $\beta$ controls the initial centrality and + + .. math:: + + \alpha < \frac{1}{\lambda_{\max}}. + + Katz centrality computes the relative influence of a node within a + network by measuring the number of the immediate neighbors (first + degree nodes) and also all other nodes in the network that connect + to the node under consideration through these immediate neighbors. + + Extra weight can be provided to immediate neighbors through the + parameter $\beta$. Connections made with distant neighbors + are, however, penalized by an attenuation factor $\alpha$ which + should be strictly less than the inverse largest eigenvalue of the + adjacency matrix in order for the Katz centrality to be computed + correctly. More information is provided in [1]_. + + Parameters + ---------- + G : graph + A NetworkX graph + + alpha : float + Attenuation factor + + beta : scalar or dictionary, optional (default=1.0) + Weight attributed to the immediate neighborhood. If not a scalar, the + dictionary must have a value for every node. + + normalized : bool + If True normalize the resulting values. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + In this measure the weight is interpreted as the connection strength. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Katz centrality as the value. + + Raises + ------ + NetworkXError + If the parameter `beta` is not a scalar but lacks a value for at least + one node + + Examples + -------- + >>> import math + >>> G = nx.path_graph(4) + >>> phi = (1 + math.sqrt(5)) / 2.0 # largest eigenvalue of adj matrix + >>> centrality = nx.katz_centrality_numpy(G, 1 / phi) + >>> for n, c in sorted(centrality.items()): + ...
print(f"{n} {c:.2f}") + 0 0.37 + 1 0.60 + 2 0.60 + 3 0.37 + + See Also + -------- + katz_centrality + eigenvector_centrality_numpy + eigenvector_centrality + :func:`~networkx.algorithms.link_analysis.pagerank_alg.pagerank` + :func:`~networkx.algorithms.link_analysis.hits_alg.hits` + + Notes + ----- + Katz centrality was introduced by [2]_. + + This algorithm uses a direct linear solver to solve the above equation. + The parameter ``alpha`` should be strictly less than the inverse of the largest + eigenvalue of the adjacency matrix for there to be a solution. + You can use ``max(nx.adjacency_spectrum(G))`` to get $\lambda_{\max}$, the largest + eigenvalue of the adjacency matrix. + + When $\alpha = 1/\lambda_{\max}$ and $\beta=0$, Katz centrality is the same + as eigenvector centrality. + + For directed graphs this finds "left" eigenvectors which correspond + to the in-edges in the graph. For out-edges Katz centrality, + first reverse the graph with ``G.reverse()``. + + References + ---------- + .. [1] Mark E. J. Newman: + Networks: An Introduction. + Oxford University Press, USA, 2010, p. 173. + .. [2] Leo Katz: + A New Status Index Derived from Sociometric Index. + Psychometrika 18(1):39–43, 1953 + https://link.springer.com/content/pdf/10.1007/BF02289026.pdf + """ + import numpy as np + + if len(G) == 0: + return {} + try: + nodelist = beta.keys() + if set(nodelist) != set(G): + raise nx.NetworkXError("beta dictionary must have a value for every node") + b = np.array(list(beta.values()), dtype=float) + except AttributeError: + nodelist = list(G) + try: + b = np.ones((len(nodelist), 1)) * beta + except (TypeError, ValueError, AttributeError) as err: + raise nx.NetworkXError("beta must be a number") from err + + A = nx.adjacency_matrix(G, nodelist=nodelist, weight=weight).todense().T + n = A.shape[0] + centrality = np.linalg.solve(np.eye(n, n) - (alpha * A), b).squeeze() + + # Normalize: rely on truediv to cast to float + norm = np.sign(sum(centrality)) * np.linalg.norm(centrality) if normalized else 1 + return dict(zip(nodelist, centrality / norm)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/laplacian.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a9a6d517254b72b130474577688ceb3d02ad8c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/laplacian.py @@ -0,0 +1,146 @@ +""" +Laplacian centrality measures. +""" +import networkx as nx + +__all__ = ["laplacian_centrality"] + + +@nx._dispatch(edge_attrs="weight") +def laplacian_centrality( + G, normalized=True, nodelist=None, weight="weight", walk_type=None, alpha=0.95 +): + r"""Compute the Laplacian centrality for nodes in the graph `G`. + + The Laplacian Centrality of a node ``i`` is measured by the drop in the + Laplacian Energy after deleting node ``i`` from the graph. The Laplacian Energy + is the sum of the squared eigenvalues of a graph's Laplacian matrix. + + .. math:: + + C_L(u_i,G) = \frac{(\Delta E)_i}{E_L (G)} = \frac{E_L (G)-E_L (G_i)}{E_L (G)} + + E_L (G) = \sum_{i=0}^n \lambda_i^2 + + Where $E_L (G)$ is the Laplacian energy of graph `G`, + $E_L (G_i)$ is the Laplacian energy of graph `G` after deleting node ``i``, + and $\lambda_i$ are the eigenvalues of `G`'s Laplacian matrix. + This formula shows the normalized value. Without normalization, + the numerator on the right side is returned.
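Both Katz variants are now in place. The sketch below is an assumed, illustrative setup (graph and tolerances chosen here): it shows the usual workflow of bounding ``alpha`` by the spectral radius and then cross-checking the power-iteration and direct-solver implementations against each other.

```python
import math

import networkx as nx

# Pick alpha safely inside the convergence region, alpha < 1 / lambda_max.
G = nx.karate_club_graph()
lambda_max = max(abs(ev) for ev in nx.adjacency_spectrum(G))
alpha = 0.9 / lambda_max

# The two implementations should agree up to the iteration tolerance.
iterative = nx.katz_centrality(G, alpha=alpha)
direct = nx.katz_centrality_numpy(G, alpha=alpha)

assert all(math.isclose(iterative[n], direct[n], abs_tol=1e-4) for n in G)
```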
+ + Parameters + ---------- + G : graph + A networkx graph + + normalized : bool (default = True) + If True the centrality score is scaled so the sum over all nodes is 1. + If False the centrality score for each node is the drop in Laplacian + energy when that node is removed. + + nodelist : list, optional (default = None) + The rows and columns are ordered according to the nodes in nodelist. + If nodelist is None, then the ordering is produced by G.nodes(). + + weight: string or None, optional (default=`weight`) + Optional parameter `weight` to compute the Laplacian matrix. + The edge data key used to compute each value in the matrix. + If None, then each edge has weight 1. + + walk_type : string or None, optional (default=None) + Optional parameter `walk_type` used when calling + :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`. + If None, the transition matrix is selected depending on the properties + of the graph. Otherwise can be `random`, `lazy`, or `pagerank`. + + alpha : real (default = 0.95) + Optional parameter `alpha` used when calling + :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix`. + (1 - alpha) is the teleportation probability used with pagerank. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with Laplacian centrality as the value. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(0, 1, 4), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 2), (4, 5, 1)] + >>> G.add_weighted_edges_from(edges) + >>> sorted((v, f"{c:0.2f}") for v, c in laplacian_centrality(G).items()) + [(0, '0.70'), (1, '0.90'), (2, '0.28'), (3, '0.22'), (4, '0.26'), (5, '0.04')] + + Notes + ----- + The algorithm is implemented based on [1]_ with an extension to directed graphs + using the ``directed_laplacian_matrix`` function. + + Raises + ------ + NetworkXPointlessConcept + If the graph `G` is the null graph. + ZeroDivisionError + If the graph `G` has no edges (is empty) and normalization is requested. + + References + ---------- + .. [1] Qi, X., Fuller, E., Wu, Q., Wu, Y., and Zhang, C.-Q. (2012). + Laplacian centrality: A new centrality measure for weighted networks. + Information Sciences, 194:240-253.
+ https://math.wvu.edu/~cqzhang/Publication-files/my-paper/INS-2012-Laplacian-W.pdf + + See Also + -------- + :func:`~networkx.linalg.laplacianmatrix.directed_laplacian_matrix` + :func:`~networkx.linalg.laplacianmatrix.laplacian_matrix` + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXPointlessConcept("null graph has no centrality defined") + if G.size(weight=weight) == 0: + if normalized: + raise ZeroDivisionError("graph with no edges has zero full energy") + return {n: 0 for n in G} + + if nodelist is not None: + nodeset = set(G.nbunch_iter(nodelist)) + if len(nodeset) != len(nodelist): + raise nx.NetworkXError("nodelist has duplicate nodes or nodes not in G") + nodes = nodelist + [n for n in G if n not in nodeset] + else: + nodelist = nodes = list(G) + + if G.is_directed(): + lap_matrix = nx.directed_laplacian_matrix(G, nodes, weight, walk_type, alpha) + else: + lap_matrix = nx.laplacian_matrix(G, nodes, weight).toarray() + + full_energy = np.power(sp.linalg.eigh(lap_matrix, eigvals_only=True), 2).sum() + + # calculate laplacian centrality + laplace_centralities_dict = {} + for i, node in enumerate(nodelist): + # remove row and col i from lap_matrix + all_but_i = list(np.arange(lap_matrix.shape[0])) + all_but_i.remove(i) + A_2 = lap_matrix[all_but_i, :][:, all_but_i] + + # Adjust diagonal for removed row + new_diag = lap_matrix.diagonal() - abs(lap_matrix[:, i]) + np.fill_diagonal(A_2, new_diag[all_but_i]) + + if len(all_but_i) > 0: # catches degenerate case of single node + new_energy = np.power(sp.linalg.eigh(A_2, eigvals_only=True), 2).sum() + else: + new_energy = 0.0 + + lapl_cent = full_energy - new_energy + if normalized: + lapl_cent = lapl_cent / full_energy + + laplace_centralities_dict[node] = lapl_cent + + return laplace_centralities_dict diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/load.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/load.py new file mode 100644 index 0000000000000000000000000000000000000000..9a81cc43282d2cdd19fb365d6265c3d128faddc9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/load.py @@ -0,0 +1,199 @@ +"""Load centrality.""" +from operator import itemgetter + +import networkx as nx + +__all__ = ["load_centrality", "edge_load_centrality"] + + +@nx._dispatch(edge_attrs="weight") +def newman_betweenness_centrality(G, v=None, cutoff=None, normalized=True, weight=None): + """Compute load centrality for nodes. + + The load centrality of a node is the fraction of all shortest + paths that pass through that node. + + Parameters + ---------- + G : graph + A networkx graph. + + normalized : bool, optional (default=True) + If True the betweenness values are normalized by b = b / ((n - 1)(n - 2)) + where n is the number of nodes in G. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + cutoff : int, optional (default=None) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + nodes : dictionary + Dictionary of nodes with centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Load centrality is slightly different from betweenness. It was originally + introduced by [2]_. For this load algorithm see [1]_. + + References + ---------- + .. [1] Mark E. J. Newman: + Scientific collaboration networks. II.
+ Shortest paths, weighted networks, and centrality. + Physical Review E 64, 016132, 2001. + http://journals.aps.org/pre/abstract/10.1103/PhysRevE.64.016132 + .. [2] Kwang-Il Goh, Byungnam Kahng and Doochul Kim + Universal behavior of Load Distribution in Scale-Free Networks. + Physical Review Letters 87(27):1–4, 2001. + https://doi.org/10.1103/PhysRevLett.87.278701 + """ + if v is not None: # only one node + betweenness = 0.0 + for source in G: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + betweenness += ubetween[v] if v in ubetween else 0 + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + betweenness *= 1.0 / ((order - 1) * (order - 2)) + else: + betweenness = {}.fromkeys(G, 0.0) + for source in betweenness: + ubetween = _node_betweenness(G, source, cutoff, False, weight) + for vk in ubetween: + betweenness[vk] += ubetween[vk] + if normalized: + order = G.order() + if order <= 2: + return betweenness # no normalization b=0 for all nodes + scale = 1.0 / ((order - 1) * (order - 2)) + for v in betweenness: + betweenness[v] *= scale + return betweenness # all nodes + + +def _node_betweenness(G, source, cutoff=False, normalized=True, weight=None): + """Node betweenness_centrality helper: + + See betweenness_centrality for what you probably want. + This actually computes "load" and not betweenness. + See https://networkx.lanl.gov/ticket/103 + + This calculates the load of each node for paths from a single source. + (The fraction of the number of shortest paths from the source that go + through each node.) + + To get the load for a node you need to do all-pairs shortest paths. + + If weight is not None then use Dijkstra for finding shortest paths. + """ + # get the predecessor and path length data + if weight is None: + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + else: + (pred, length) = nx.dijkstra_predecessor_and_distance(G, source, cutoff, weight) + + # order the nodes by path length + onodes = [(l, vert) for (vert, l) in length.items()] + onodes.sort() + onodes[:] = [vert for (l, vert) in onodes if l > 0] + + # initialize betweenness + between = {}.fromkeys(length, 1.0) + + while onodes: + v = onodes.pop() + if v in pred: + num_paths = len(pred[v]) # Discount betweenness if more than + for x in pred[v]: # one shortest path. + if x == source: # stop if hit source because all remaining v + break # also have pred[v]==[source] + between[x] += between[v] / num_paths + # remove source + for v in between: + between[v] -= 1 + # rescale to be between 0 and 1 + if normalized: + l = len(between) + if l > 2: + # scale by 1/the number of possible paths + scale = 1 / ((l - 1) * (l - 2)) + for v in between: + between[v] *= scale + return between + + +load_centrality = newman_betweenness_centrality + + +@nx._dispatch +def edge_load_centrality(G, cutoff=False): + """Compute edge load. + + WARNING: This concept of edge load has not been analysed + or discussed outside of NetworkX that we know of. + It is based loosely on load_centrality in the sense that + it counts the number of shortest paths which cross each edge. + This function is for demonstration and testing purposes. + + Parameters + ---------- + G : graph + A networkx graph + + cutoff : int, optional (default=False) + If specified, only consider paths of length <= cutoff. + + Returns + ------- + A dict keyed by edge 2-tuple to the number of shortest paths + which use that edge.
Where more than one path is shortest + the count is divided equally among paths. + """ + betweenness = {} + for u, v in G.edges(): + betweenness[(u, v)] = 0.0 + betweenness[(v, u)] = 0.0 + + for source in G: + ubetween = _edge_betweenness(G, source, cutoff=cutoff) + for e, ubetweenv in ubetween.items(): + betweenness[e] += ubetweenv # cumulative total + return betweenness + + +def _edge_betweenness(G, source, nodes=None, cutoff=False): + """Edge betweenness helper.""" + # get the predecessor data + (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True) + # order the nodes by path length + onodes = [n for n, d in sorted(length.items(), key=itemgetter(1))] + # initialize betweenness, doesn't account for any edge weights + between = {} + for u, v in G.edges(nodes): + between[(u, v)] = 1.0 + between[(v, u)] = 1.0 + + while onodes: # work through all paths + v = onodes.pop() + if v in pred: + # Discount betweenness if more than one shortest path. + num_paths = len(pred[v]) + for w in pred[v]: + if w in pred: + # Discount betweenness for multiple shortest paths. + num_paths = len(pred[w]) + for x in pred[w]: + between[(w, x)] += between[(v, w)] / num_paths + between[(x, w)] += between[(w, v)] / num_paths + return between diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/percolation.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/percolation.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5d5ce6d7ddc0f81fbbebc7512901dc82d858ef --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/percolation.py @@ -0,0 +1,128 @@ +"""Percolation centrality measures.""" + +import networkx as nx +from networkx.algorithms.centrality.betweenness import ( + _single_source_dijkstra_path_basic as dijkstra, +) +from networkx.algorithms.centrality.betweenness import ( + _single_source_shortest_path_basic as shortest_path, +) + +__all__ = ["percolation_centrality"] + + +@nx._dispatch(node_attrs="attribute", edge_attrs="weight") +def percolation_centrality(G, attribute="percolation", states=None, weight=None): + r"""Compute the percolation centrality for nodes. + + Percolation centrality of a node $v$, at a given time, is defined + as the proportion of ‘percolated paths’ that go through that node. + + This measure quantifies the relative impact of nodes based on their + topological connectivity, as well as their percolation states. + + Percolation states of nodes are used to depict network percolation + scenarios (such as during infection transmission in a social network + of individuals, spreading of computer viruses on computer networks, or + transmission of disease over a network of towns) over time. In this + measure, the percolation state is usually expressed as a decimal + between 0.0 and 1.0. + + When all nodes are in the same percolated state this measure is + equivalent to betweenness centrality. + + Parameters + ---------- + G : graph + A NetworkX graph. + + attribute : None or string, optional (default='percolation') + Name of the node attribute to use for percolation state, used + if `states` is None. If a node does not have the attribute, its + state is set to the default value of 1. + If no node has the attribute, all states default to 1 and the + centrality measure is equivalent to betweenness centrality. + + states : None or dict, optional (default=None) + Specify percolation states for the nodes, nodes as keys, states + as values.
+ + weight : None or string, optional (default=None) + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + The weight of an edge is treated as the length or distance between the two sides. + + + Returns + ------- + nodes : dictionary + Dictionary of nodes with percolation centrality as the value. + + See Also + -------- + betweenness_centrality + + Notes + ----- + The algorithm is from Mahendra Piraveenan, Mikhail Prokopenko, and + Liaquat Hossain [1]_ + Pair dependencies are calculated and accumulated using [2]_ + + For weighted graphs the edge weights must be greater than zero. + Zero edge weights can produce an infinite number of equal length + paths between pairs of nodes. + + References + ---------- + .. [1] Mahendra Piraveenan, Mikhail Prokopenko, Liaquat Hossain + Percolation Centrality: Quantifying Graph-Theoretic Impact of Nodes + during Percolation in Networks + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0053095 + .. [2] Ulrik Brandes: + A Faster Algorithm for Betweenness Centrality. + Journal of Mathematical Sociology 25(2):163-177, 2001. + https://doi.org/10.1080/0022250X.2001.9990249 + """ + percolation = dict.fromkeys(G, 0.0) # b[v]=0 for v in G + + nodes = G + + if states is None: + states = nx.get_node_attributes(nodes, attribute, default=1) + + # sum of all percolation states + p_sigma_x_t = 0.0 + for v in states.values(): + p_sigma_x_t += v + + for s in nodes: + # single source shortest paths + if weight is None: # use BFS + S, P, sigma, _ = shortest_path(G, s) + else: # use Dijkstra's algorithm + S, P, sigma, _ = dijkstra(G, s, weight) + # accumulation + percolation = _accumulate_percolation( + percolation, S, P, sigma, s, states, p_sigma_x_t + ) + + n = len(G) + + for v in percolation: + percolation[v] *= 1 / (n - 2) + + return percolation + + +def _accumulate_percolation(percolation, S, P, sigma, s, states, p_sigma_x_t): + delta = dict.fromkeys(S, 0) + while S: + w = S.pop() + coeff = (1 + delta[w]) / sigma[w] + for v in P[w]: + delta[v] += sigma[v] * coeff + if w != s: + # percolation weight + pw_s_w = states[s] / (p_sigma_x_t - states[w]) + percolation[w] += delta[w] * pw_s_w + return percolation diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/reaching.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/reaching.py new file mode 100644 index 0000000000000000000000000000000000000000..7b9eac564acc0dcde38409007e9df38863ee24de --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/reaching.py @@ -0,0 +1,206 @@ +"""Functions for computing reaching centrality of a node or a graph.""" + +import networkx as nx +from networkx.utils import pairwise + +__all__ = ["global_reaching_centrality", "local_reaching_centrality"] + + +def _average_weight(G, path, weight=None): + """Returns the average weight of an edge in a weighted path. + + Parameters + ---------- + G : graph + A networkx graph. + + path: list + A list of vertices that define the path. + + weight : None or string, optional (default=None) + If None, edge weights are ignored. Then the average weight of an edge + is assumed to be the multiplicative inverse of the length of the path. + Otherwise holds the name of the edge attribute used as weight. 
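Stepping back to ``percolation_centrality``, which is complete above: the docstring's claim that uniform states reduce the measure to betweenness centrality is easy to check numerically. The graph and state values below are illustrative choices.

```python
import networkx as nx

# With identical states everywhere, percolation centrality collapses
# to ordinary (normalized) betweenness centrality.
G = nx.path_graph(5)
states = {n: 0.5 for n in G}

pc = nx.percolation_centrality(G, states=states)
bc = nx.betweenness_centrality(G)

for n in G:
    assert abs(pc[n] - bc[n]) < 1e-10
```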
+ """ + path_length = len(path) - 1 + if path_length <= 0: + return 0 + if weight is None: + return 1 / path_length + total_weight = sum(G.edges[i, j][weight] for i, j in pairwise(path)) + return total_weight / path_length + + +@nx._dispatch(edge_attrs="weight") +def global_reaching_centrality(G, weight=None, normalized=True): + """Returns the global reaching centrality of a directed graph. + + The *global reaching centrality* of a weighted directed graph is the + average over all nodes of the difference between the local reaching + centrality of the node and the greatest local reaching centrality of + any node in the graph [1]_. For more information on the local + reaching centrality, see :func:`local_reaching_centrality`. + Informally, the local reaching centrality is the proportion of the + graph that is reachable from the neighbors of the node. + + Parameters + ---------- + G : DiGraph + A networkx DiGraph. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If ``None``, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The global reaching centrality of the graph. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(1, 3) + >>> nx.global_reaching_centrality(G) + 1.0 + >>> G.add_edge(3, 2) + >>> nx.global_reaching_centrality(G) + 0.75 + + See also + -------- + local_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + + # If provided, weights must be interpreted as connection strength + # (so higher weights are more likely to be chosen). However, the + # shortest path algorithms in NetworkX assume the provided "weight" + # is actually a distance (so edges with higher weight are less + # likely to be chosen). Therefore we need to invert the weights when + # computing shortest paths. + # + # If weight is None, we leave it as-is so that the shortest path + # algorithm can use a faster, unweighted algorithm. + if weight is not None: + + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + shortest_paths = nx.shortest_path(G, weight=as_distance) + else: + shortest_paths = nx.shortest_path(G) + + centrality = local_reaching_centrality + # TODO This can be trivially parallelized. + lrc = [ + centrality(G, node, paths=paths, weight=weight, normalized=normalized) + for node, paths in shortest_paths.items() + ] + + max_lrc = max(lrc) + return sum(max_lrc - c for c in lrc) / (len(G) - 1) + + +@nx._dispatch(edge_attrs="weight") +def local_reaching_centrality(G, v, paths=None, weight=None, normalized=True): + """Returns the local reaching centrality of a node in a directed + graph. + + The *local reaching centrality* of a node in a directed graph is the + proportion of other nodes reachable from that node [1]_. + + Parameters + ---------- + G : DiGraph + A NetworkX DiGraph. + + v : node + A node in the directed graph `G`. 
+ + paths : dictionary (default=None) + If this is not `None` it must be a dictionary representation + of single-source shortest paths, as computed by, for example, + :func:`networkx.shortest_path` with source node `v`. Use this + keyword argument if you intend to invoke this function many + times but don't want the paths to be recomputed each time. + + weight : None or string, optional (default=None) + Attribute to use for edge weights. If `None`, each edge weight + is assumed to be one. A higher weight implies a stronger + connection between nodes and a *shorter* path length. + + normalized : bool, optional (default=True) + Whether to normalize the edge weights by the total sum of edge + weights. + + Returns + ------- + h : float + The local reaching centrality of the node ``v`` in the graph + ``G``. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (1, 3)]) + >>> nx.local_reaching_centrality(G, 3) + 0.0 + >>> G.add_edge(3, 2) + >>> nx.local_reaching_centrality(G, 3) + 0.5 + + See also + -------- + global_reaching_centrality + + References + ---------- + .. [1] Mones, Enys, Lilla Vicsek, and Tamás Vicsek. + "Hierarchy Measure for Complex Networks." + *PLoS ONE* 7.3 (2012): e33799. + https://doi.org/10.1371/journal.pone.0033799 + """ + if paths is None: + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("edge weights must be positive") + total_weight = G.size(weight=weight) + if total_weight <= 0: + raise nx.NetworkXError("Size of G must be positive") + if weight is not None: + # Interpret weights as lengths. + def as_distance(u, v, d): + return total_weight / d.get(weight, 1) + + paths = nx.shortest_path(G, source=v, weight=as_distance) + else: + paths = nx.shortest_path(G, source=v) + # If the graph is unweighted, simply return the proportion of nodes + # reachable from the source node ``v``. + if weight is None and G.is_directed(): + return (len(paths) - 1) / (len(G) - 1) + if normalized and weight is not None: + norm = G.size(weight=weight) / G.size() + else: + norm = 1 + # TODO This can be trivially parallelized. + avgw = (_average_weight(G, path, weight=weight) for path in paths.values()) + sum_avg_weight = sum(avgw) / norm + return sum_avg_weight / (len(G) - 1) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/second_order.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/second_order.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdb1f52141223f43d8e5eb6c0a4fddbd7e58e08 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/second_order.py @@ -0,0 +1,138 @@ +"""Copyright (c) 2015 – Thomson Licensing, SAS + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +* Neither the name of Thomson Licensing, or Technicolor, nor the names +of its contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. 
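Both reaching measures are now defined, so a small sketch tying them together may help; the directed star below is an illustrative choice, picked because a one-level hierarchy is the maximal case.

```python
import networkx as nx

# A directed star is a perfect one-level hierarchy: only the hub
# reaches anything, so the global measure hits its maximum.
hub = nx.DiGraph([(0, 1), (0, 2), (0, 3)])

print(nx.local_reaching_centrality(hub, 0))  # 1.0 - the hub reaches all
print(nx.local_reaching_centrality(hub, 1))  # 0.0 - leaves reach nothing
print(nx.global_reaching_centrality(hub))    # 1.0 - maximal hierarchy
```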
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +# Authors: Erwan Le Merrer (erwan.lemerrer@technicolor.com) + +__all__ = ["second_order_centrality"] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def second_order_centrality(G, weight="weight"): + """Compute the second order centrality for nodes of G. + + The second order centrality of a given node is the standard deviation of + the return times to that node of a perpetual random walk on G. + + Parameters + ---------- + G : graph + A connected and undirected NetworkX graph. + + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + nodes : dictionary + Dictionary keyed by node with second order centrality as the value. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> soc = nx.second_order_centrality(G) + >>> print(sorted(soc.items(), key=lambda x: x[1])[0][0]) # pick first id + 0 + + Raises + ------ + NetworkXException + If the graph G is empty, not connected, or has negative weights. + + See Also + -------- + betweenness_centrality + + Notes + ----- + Lower values of second order centrality indicate higher centrality. + + The algorithm is from Kermarrec, Le Merrer, Sericola and Trédan [1]_. + + This code implements the analytical version of the algorithm, i.e., + there is no simulation of a random walk process involved. The random walk + is here unbiased (corresponding to eq 6 of the paper [1]_), thus the + centrality values are the standard deviations for random walk return times + on the transformed input graph G (equal in-degree at each node by adding + self-loops). + + Complexity of this implementation, made to run locally on a single machine, + is O(n^3), with n the size of G, which makes it viable only for small + graphs. + + References + ---------- + .. [1] Anne-Marie Kermarrec, Erwan Le Merrer, Bruno Sericola, Gilles Trédan + "Second order centrality: Distributed assessment of nodes criticity in + complex networks", Elsevier Computer Communications 34(5):619-628, 2011.
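Anticipating the implementation that follows, the docstring's "lower is more central" convention can be sanity-checked on a star graph (an illustrative choice, not from the library docs):

```python
import networkx as nx

# Lower second order centrality means more central: the star hub
# should have the smallest value of all nodes.
G = nx.star_graph(10)
soc = nx.second_order_centrality(G)

assert soc[0] < min(soc[n] for n in G if n != 0)
```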
+ """ + import numpy as np + + n = len(G) + + if n == 0: + raise nx.NetworkXException("Empty graph.") + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if any(d.get(weight, 0) < 0 for u, v, d in G.edges(data=True)): + raise nx.NetworkXException("Graph has negative edge weights.") + + # balancing G for Metropolis-Hastings random walks + G = nx.DiGraph(G) + in_deg = dict(G.in_degree(weight=weight)) + d_max = max(in_deg.values()) + for i, deg in in_deg.items(): + if deg < d_max: + G.add_edge(i, i, weight=d_max - deg) + + P = nx.to_numpy_array(G) + P /= P.sum(axis=1)[:, np.newaxis] # to transition probability matrix + + def _Qj(P, j): + P = P.copy() + P[:, j] = 0 + return P + + M = np.empty([n, n]) + + for i in range(n): + M[:, i] = np.linalg.solve( + np.identity(n) - _Qj(P, i), np.ones([n, 1])[:, 0] + ) # eq 3 + + return dict( + zip(G.nodes, [np.sqrt(2 * np.sum(M[:, i]) - n * (n + 1)) for i in range(n)]) + ) # eq 6 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..c615b4892014e9a180eb6ff6028988a571284bc4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/subgraph_alg.py @@ -0,0 +1,340 @@ +""" +Subraph centrality and communicability betweenness. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "subgraph_centrality_exp", + "subgraph_centrality", + "communicability_betweenness_centrality", + "estrada_index", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def subgraph_centrality_exp(G): + r"""Returns the subgraph centrality for each node of G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes:dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm exponentiates the adjacency matrix. + + The subgraph centrality of a node `u` in G can be found using + the matrix exponential of the adjacency matrix of G [1]_, + + .. math:: + + SC(u)=(e^A)_{uu} . + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... 
) + >>> sc = nx.subgraph_centrality_exp(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + """ + # alternative implementation that calculates the matrix exponential + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + expA = sp.linalg.expm(A) + # convert diagonal to dictionary keyed by node + sc = dict(zip(nodelist, map(float, expA.diagonal()))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def subgraph_centrality(G): + r"""Returns subgraph centrality for each node in G. + + Subgraph centrality of a node `n` is the sum of weighted closed + walks of all lengths starting and ending at node `n`. The weights + decrease with path length. Each closed walk is associated with a + connected subgraph ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with subgraph centrality as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + subgraph_centrality_exp: + Alternative algorithm of the subgraph centrality for each node of G. + + Notes + ----- + This version of the algorithm computes eigenvalues and eigenvectors + of the adjacency matrix. + + Subgraph centrality of a node `u` in G can be found using + a spectral decomposition of the adjacency matrix [1]_, + + .. math:: + + SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, + + where `v_j` is an eigenvector of the adjacency matrix `A` of G + corresponding to the eigenvalue `\lambda_j`. + + Examples + -------- + (Example from [1]_) + >>> G = nx.Graph( + ... [ + ... (1, 2), + ... (1, 5), + ... (1, 8), + ... (2, 3), + ... (2, 8), + ... (3, 4), + ... (3, 6), + ... (4, 5), + ... (4, 7), + ... (5, 6), + ... (6, 7), + ... (7, 8), + ... ] + ... ) + >>> sc = nx.subgraph_centrality(G) + >>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)]) + ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] + + References + ---------- + .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, + "Subgraph centrality in complex networks", + Physical Review E 71, 056103 (2005). + https://arxiv.org/abs/cond-mat/0504730 + + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + w, v = np.linalg.eigh(A) + vsquare = np.array(v) ** 2 + expw = np.exp(w) + xg = vsquare @ expw + # convert vector to dictionary keyed by node + sc = dict(zip(nodelist, map(float, xg))) + return sc + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability_betweenness_centrality(G): + r"""Returns communicability betweenness for all nodes of G. + + The communicability betweenness measure makes use of the number of walks + connecting every pair of nodes as the basis of a betweenness centrality + measure. + + Parameters + ---------- + G: graph + + Returns + ------- + nodes : dictionary + Dictionary of nodes with communicability betweenness as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges, + and `A` denote the adjacency matrix of `G`.
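With both subgraph-centrality variants defined above, a short consistency sketch (illustrative graph and tolerance chosen here) confirms that the matrix-exponential and spectral-decomposition versions compute the same quantity:

```python
import math

import networkx as nx

# The two implementations differ only in how they evaluate e^A, so
# their results should match to numerical precision.
G = nx.cycle_graph(6)

sc_exp = nx.subgraph_centrality_exp(G)
sc_eig = nx.subgraph_centrality(G)

assert all(math.isclose(sc_exp[n], sc_eig[n], rel_tol=1e-8) for n in G)
```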
+ + Let `G(r)=(V,E(r))` be the graph resulting from + removing all edges connected to node `r` but not the node itself. + + The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros + only in row and column `r`. + + The subgraph betweenness of a node `r` is [1]_ + + .. math:: + + \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}}, + p\neq q, q\neq r, + + where + `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks + involving node r, + `G_{pq}=(e^{A})_{pq}` is the number of walks starting + at node `p` and ending at node `q`, + and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the + number of terms in the sum. + + The resulting `\omega_{r}` takes values between zero and one. + The lower bound cannot be attained for a connected + graph, and the upper bound is attained in the star graph. + + References + ---------- + .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano, + "Communicability Betweenness in Complex Networks" + Physica A 388 (2009) 764-774. + https://arxiv.org/abs/0905.4102 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> cbc = nx.communicability_betweenness_centrality(G) + >>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)]) + ['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03'] + """ + import numpy as np + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + n = len(nodelist) + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[np.nonzero(A)] = 1 + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(n))) + cbc = {} + for v in G: + # remove row and col of node v + i = mapping[v] + row = A[i, :].copy() + col = A[:, i].copy() + A[i, :] = 0 + A[:, i] = 0 + B = (expA - sp.linalg.expm(A)) / expA + # sum with row/col of node v and diag set to zero + B[i, :] = 0 + B[:, i] = 0 + B -= np.diag(np.diag(B)) + cbc[v] = B.sum() + # put row and col back + A[i, :] = row + A[:, i] = col + # rescale when more than two nodes + order = len(cbc) + if order > 2: + scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0)) + for v in cbc: + cbc[v] *= scale + return cbc + + +@nx._dispatch +def estrada_index(G): + r"""Returns the Estrada index of the graph G. + + The Estrada Index is a topological index of folding or 3D "compactness" ([1]_). + + Parameters + ---------- + G: graph + + Returns + ------- + estrada index: float + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + Notes + ----- + Let `G=(V,E)` be a simple undirected graph with `n` nodes and let + `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}` + be a non-decreasing ordering of the eigenvalues of its adjacency + matrix `A`. The Estrada index is ([1]_, [2]_) + + .. math:: + EE(G)=\sum_{j=1}^n e^{\lambda _j}. + + References + ---------- + .. [1] E. Estrada, "Characterization of 3D molecular structure", + Chem. Phys. Lett. 319, 713 (2000). + https://doi.org/10.1016/S0009-2614(00)00158-5 + .. [2] José Antonio de la Peña, Ivan Gutman, Juan Rada, + "Estimating the Estrada index", + Linear Algebra and its Applications. 427, 1 (2007).
+ https://doi.org/10.1016/j.laa.2007.06.020 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> ei = nx.estrada_index(G) + >>> print(f"{ei:0.5}") + 20.55 + """ + return sum(subgraph_centrality(G).values()) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5724581ed71bfa6322e1d115df75506d811bf18 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3346afdf490d80deffb9924890a6a216484c507e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9539874fd89d7d27bbaff1fb99499c5408ecb6a2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba46f2ffceaed1943e6e0327ea0ec69e47f9bb7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1e695ef096a39b5db0936ea3011f24751dc8517 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42b31f661f78f73279686660b8468635250ca092 Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24cc61ebf48c02d6dfcefcc2ffc4f4e5b51cb2ce
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37422e5a2c080b873324aadfc8a3a75ba3aeb1ea
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c78ee15c993fb27747992a8738628f45e1f873e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddd072c3e201e4cacf4dded7cd7d944b7df3eb9b
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f71bde33542eb3253df51588eec46f2998f599e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..245c13ac0be6c63641e012b4cfe9fc1f52bec185
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6572dd50b3788ecaca50e750adc36cf4c4617dd0
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df6d5807e4bded331f35234b7967d88664e8e78e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7ac05613e302bacdab6b0064acd7219ab92c5b1
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ce07924d74e52ddeb308f100112c1f0ff7cb30e
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..053809b4ef73bda390da21922ea76a942b0a3945
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..934771ff02eb08820327b57fb60f4ea2fbf22515
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc05a56395a2efdce8d0de5f9f47410c5eca29b9
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12f962b879923cba672fac7f7897057866856533
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-39.pyc
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e19e0444b0e166a46ac6ac25b8ff2e3397edaf3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..4c059cf980666f7e14a80929f84c80bc38749432 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality.py @@ -0,0 +1,780 @@ +import pytest + +import networkx as nx + + +def weighted_G(): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + return G + + +class TestBetweennessCentrality: + def test_K5(self): + """Betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K5_endpoints(self): + """Betweenness centrality: K5 endpoints""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + b_answer = {0: 4.0, 1: 4.0, 2: 4.0, 3: 4.0, 4: 4.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + b_answer = {0: 0.4, 1: 0.4, 2: 0.4, 3: 0.4, 4: 0.4} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_sample_from_P3(self): + """Betweenness centrality: P3 sample""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, k=3, weight=None, normalized=False, seed=1) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.betweenness_centrality(G, k=2, weight=None, normalized=False, seed=1) + # python versions give different results with same seed + b_approx1 = {0: 0.0, 1: 1.5, 2: 0.0} + b_approx2 = {0: 0.0, 1: 0.75, 2: 0.0} + for n in sorted(G): + assert b[n] in (b_approx1[n], b_approx2[n]) + + def test_P3_endpoints(self): + """Betweenness centrality: P3 endpoints""" + G = nx.path_graph(3) + b_answer = {0: 2.0, 1: 3.0, 2: 2.0} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b_answer = {0: 2 / 3, 1: 1.0, 2: 2 / 3} + b = 
nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Betweenness centrality: Krackhardt kite graph normalized""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Betweenness centrality: Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.570, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.041, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.130, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.011, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.165, + "Gillenormand": 0.020, + "Magnon": 0.000, + "MlleGillenormand": 0.048, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.132, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.043, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.005, + "Bahorel": 0.002, + "Bossuet": 0.031, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + + b = nx.betweenness_centrality(G, weight=None, normalized=True) + for n in 
sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_disconnected_path(self): + """Betweenness centrality: disconnected path""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 0, 1: 1, 2: 0, 3: 0, 4: 2, 5: 2, 6: 0} + b = nx.betweenness_centrality(G, weight=None, normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_disconnected_path_endpoints(self): + """Betweenness centrality: disconnected path endpoints""" + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5, 6]) + b_answer = {0: 2, 1: 3, 2: 2, 3: 3, 4: 5, 5: 5, 6: 3} + b = nx.betweenness_centrality(G, weight=None, normalized=False, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # normalized = True case + b = nx.betweenness_centrality(G, weight=None, normalized=True, endpoints=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n] / 21, abs=1e-7) + + def test_directed_path(self): + """Betweenness centrality: directed path""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=False) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_directed_path_normalized(self): + """Betweenness centrality: directed path normalized""" + G = nx.DiGraph() + nx.add_path(G, [0, 1, 2]) + b = nx.betweenness_centrality(G, weight=None, normalized=True) + b_answer = {0: 0.0, 1: 0.5, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestWeightedBetweennessCentrality: + def test_K5(self): + """Weighted betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3_normalized(self): + """Weighted betweenness centrality: P3 normalized""" + G = nx.path_graph(3) + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Weighted betweenness centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.0, 1: 1.0, 2: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_krackhardt_kite_graph(self): + """Weighted betweenness centrality: Krackhardt kite graph""" + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + for b in b_answer: + b_answer[b] /= 2 + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_krackhardt_kite_graph_normalized(self): + """Weighted betweenness 
centrality: + Krackhardt kite graph normalized + """ + G = nx.krackhardt_kite_graph() + b_answer = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_florentine_families_graph(self): + """Weighted betweenness centrality: + Florentine families graph""" + G = nx.florentine_families_graph() + b_answer = { + "Acciaiuoli": 0.000, + "Albizzi": 0.212, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.255, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.114, + "Salviati": 0.143, + "Strozzi": 0.103, + "Tornabuoni": 0.092, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_les_miserables_graph(self): + """Weighted betweenness centrality: Les Miserables graph""" + G = nx.les_miserables_graph() + b_answer = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.454, + "Labarre": 0.000, + "Marguerite": 0.009, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.066, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.114, + "MmeThenardier": 0.046, + "Thenardier": 0.129, + "Cosette": 0.075, + "Javert": 0.193, + "Fauchelevent": 0.026, + "Bamatabois": 0.080, + "Perpetue": 0.000, + "Simplice": 0.001, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.023, + "Boulatruelle": 0.000, + "Eponine": 0.023, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.285, + "Gillenormand": 0.024, + "Magnon": 0.005, + "MlleGillenormand": 0.036, + "MmePontmercy": 0.005, + "MlleVaubois": 0.000, + "LtGillenormand": 0.015, + "Marius": 0.072, + "BaronessT": 0.004, + "Mabeuf": 0.089, + "Enjolras": 0.003, + "Combeferre": 0.000, + "Prouvaire": 0.000, + "Feuilly": 0.004, + "Courfeyrac": 0.001, + "Bahorel": 0.007, + "Bossuet": 0.028, + "Joly": 0.000, + "Grantaire": 0.036, + "MotherPlutarch": 0.000, + "Gueulemer": 0.025, + "Babet": 0.015, + "Claquesous": 0.042, + "Montparnasse": 0.050, + "Toussaint": 0.011, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.002, + "MmeHucheloup": 0.034, + } + + b = nx.betweenness_centrality(G, weight="weight", normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_ladder_graph(self): + """Weighted betweenness centrality: Ladder graph""" + G = nx.Graph() # ladder_graph(3) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + b_answer = {0: 1.667, 1: 1.667, 2: 6.667, 3: 6.667, 4: 1.667, 5: 1.667} + for b in b_answer: + b_answer[b] /= 2 + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_G(self): + """Weighted betweenness centrality: G""" + G = weighted_G() + b_answer = {0: 2.0, 1: 0.0, 
2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G2(self): + """Weighted betweenness centrality: G2""" + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G3(self): + """Weighted betweenness centrality: G3""" + G = nx.MultiGraph(weighted_G()) + es = list(G.edges(data=True))[::2] # duplicate every other edge + G.add_edges_from(es) + b_answer = {0: 2.0, 1: 0.0, 2: 4.0, 3: 3.0, 4: 4.0, 5: 0.0} + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_G4(self): + """Weighted betweenness centrality: G4""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("s", "x", 6), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("x", "y", 3), + ("y", "s", 7), + ("y", "v", 6), + ("y", "v", 6), + ] + ) + + b_answer = {"y": 5.0, "x": 5.0, "s": 4.0, "u": 2.0, "v": 2.0} + + b = nx.betweenness_centrality(G, weight="weight", normalized=False) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = dict.fromkeys(G.edges(), 1 / 10) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=True) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / 6, abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight=None, normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + 
+class TestWeightedEdgeBetweennessCentrality: + def test_K5(self): + """Edge betweenness centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = dict.fromkeys(G.edges(), 1) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 2, (0, 3): 2, (1, 2): 2, (2, 3): 2} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 3, (1, 2): 4, (2, 3): 3} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_balanced_tree(self): + """Edge betweenness centrality: balanced tree""" + G = nx.balanced_tree(r=2, h=2) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = {(0, 1): 12, (0, 2): 12, (1, 3): 6, (1, 4): 6, (2, 5): 6, (2, 6): 6} + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness centrality: weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_graph(self): + """Edge betweenness centrality: normalized weighted""" + eList = [ + (0, 1, 5), + (0, 2, 4), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 4, 3), + (2, 4, 5), + (3, 4, 4), + ] + G = nx.Graph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1): 0.0, + (0, 2): 1.0, + (0, 3): 2.0, + (0, 4): 1.0, + (1, 2): 2.0, + (1, 3): 3.5, + (1, 4): 1.5, + (2, 4): 1.0, + (3, 4): 0.5, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) + + def test_weighted_multigraph(self): + """Edge betweenness centrality: weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + (1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=False) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_weighted_multigraph(self): + """Edge betweenness centrality: normalized weighted multigraph""" + eList = [ + (0, 1, 5), + (0, 1, 4), + (0, 2, 4), + (0, 3, 3), + (0, 3, 3), + (0, 4, 2), + (1, 2, 4), + (1, 3, 1), + (1, 3, 2), + 
(1, 4, 3), + (1, 4, 4), + (2, 4, 5), + (3, 4, 4), + (3, 4, 4), + ] + G = nx.MultiGraph() + G.add_weighted_edges_from(eList) + b = nx.edge_betweenness_centrality(G, weight="weight", normalized=True) + b_answer = { + (0, 1, 0): 0.0, + (0, 1, 1): 0.5, + (0, 2, 0): 1.0, + (0, 3, 0): 0.75, + (0, 3, 1): 0.75, + (0, 4, 0): 1.0, + (1, 2, 0): 2.0, + (1, 3, 0): 3.0, + (1, 3, 1): 0.0, + (1, 4, 0): 1.5, + (1, 4, 1): 0.0, + (2, 4, 0): 1.0, + (3, 4, 0): 0.25, + (3, 4, 1): 0.25, + } + norm = len(G) * (len(G) - 1) / 2 + for n in sorted(G.edges(keys=True)): + assert b[n] == pytest.approx(b_answer[n] / norm, abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..a35a401a28e31d279c0d715f79f8a7cc5738050f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py @@ -0,0 +1,340 @@ +import pytest + +import networkx as nx + + +class TestSubsetBetweennessCentrality: + def test_K5(self): + """Betweenness Centrality Subset: K5""" + G = nx.complete_graph(5) + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Betweenness Centrality Subset: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Betweenness Centrality Subset: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Betweenness Centrality Subset: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1, 2: 1, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Betweenness Centrality Subset: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = {0: 0, 1: 0.25, 2: 0.25, 3: 0} + b = nx.betweenness_centrality_subset(G, sources=[0], targets=[3], weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Betweenness Centrality Subset: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = {0: 0, 1: 0.5, 2: 0.5, 3: 0.5, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Betweenness Centrality Subset: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = {0: 0, 1: 1.0, 2: 0.5, 20: 0.5, 3: 0.5, 4: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in 
sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Betweenness Centrality Subset: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b = nx.betweenness_centrality_subset(G, sources=[1], targets=[9], weight=None) + + expected_b = { + 1: 0, + 2: 1.0 / 10, + 3: 1.0 / 10, + 4: 1.0 / 10, + 5: 1.0 / 10, + 6: 3.0 / 10, + 7: 1.0 / 10, + 8: 4.0 / 10, + 9: 0, + 10: 1.0 / 10, + 11: 1.0 / 10, + 12: 1.0 / 10, + } + + for n in sorted(G): + assert b[n] == pytest.approx(expected_b[n], abs=1e-7) + + def test_normalized_p2(self): + """ + Betweenness Centrality Subset: Normalized P2 + if n <= 2: no normalization, betweenness centrality should be 0 for all nodes. + """ + G = nx.Graph() + nx.add_path(G, range(2)) + b_answer = {0: 0, 1: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[1], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Betweenness Centrality Subset: Normalized Directed P5""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Betweenness Centrality Subset: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = {0: 0.0, 1: 0.0, 2: 0.5, 3: 0.5, 4: 0.5, 5: 0.0} + b = nx.betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestEdgeSubsetBetweennessCentrality: + def test_K5(self): + """Edge betweenness subset centrality: K5""" + G = nx.complete_graph(5) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[1, 3], weight=None + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 3)] = b_answer[(0, 1)] = 0.5 + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_directed(self): + """Edge betweenness subset centrality: P5 directed""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5(self): + """Edge betweenness subset centrality: P5""" + G = nx.Graph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P5_multiple_target(self): + """Edge betweenness subset centrality: P5 multiple target""" + G = nx.Graph() + nx.add_path(G, range(5)) + 
b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 1 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box(self): + """Edge betweenness subset centrality: box""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.25 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.25 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path(self): + """Edge betweenness subset centrality: box and path""" + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4), (4, 5)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(0, 2)] = 0.5 + b_answer[(1, 3)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_box_and_path2(self): + """Edge betweenness subset centrality: box and path multiple target""" + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 20), (20, 3), (3, 4)]) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = 1.0 + b_answer[(1, 20)] = b_answer[(3, 20)] = 0.5 + b_answer[(1, 2)] = b_answer[(2, 3)] = 0.5 + b_answer[(3, 4)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3, 4], weight=None + ) + for n in sorted(G.edges()): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_diamond_multi_path(self): + """Edge betweenness subset centrality: Diamond Multi Path""" + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 10), + (10, 11), + (11, 12), + (12, 9), + (2, 6), + (3, 6), + (4, 6), + (5, 7), + (7, 8), + (6, 8), + (8, 9), + ] + ) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(8, 9)] = 0.4 + b_answer[(6, 8)] = b_answer[(7, 8)] = 0.2 + b_answer[(2, 6)] = b_answer[(3, 6)] = b_answer[(4, 6)] = 0.2 / 3.0 + b_answer[(1, 2)] = b_answer[(1, 3)] = b_answer[(1, 4)] = 0.2 / 3.0 + b_answer[(5, 7)] = 0.2 + b_answer[(1, 5)] = 0.2 + b_answer[(9, 12)] = 0.1 + b_answer[(11, 12)] = b_answer[(10, 11)] = b_answer[(1, 10)] = 0.1 + b = nx.edge_betweenness_centrality_subset( + G, sources=[1], targets=[9], weight=None + ) + for n in G.edges(): + sort_n = tuple(sorted(n)) + assert b[n] == pytest.approx(b_answer[sort_n], abs=1e-7) + + def test_normalized_p1(self): + """ + Edge betweenness subset centrality: P1 + if n <= 1: no normalization b=0 for all nodes + """ + G = nx.Graph() + nx.add_path(G, range(1)) + b_answer = dict.fromkeys(G.edges(), 0) + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[0], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_normalized_P5_directed(self): + """Edge betweenness subset centrality: Normalized Directed P5""" + G = nx.DiGraph() + nx.add_path(G, range(5)) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 1)] = b_answer[(1, 2)] = b_answer[(2, 3)] = 0.05 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[3], normalized=True, weight=None + ) + for n in G.edges(): + assert b[n] == 
pytest.approx(b_answer[n], abs=1e-7) + + def test_weighted_graph(self): + """Edge betweenness subset centrality: Weighted Graph""" + G = nx.DiGraph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + b_answer = dict.fromkeys(G.edges(), 0) + b_answer[(0, 2)] = b_answer[(2, 4)] = b_answer[(4, 5)] = 0.5 + b_answer[(0, 3)] = b_answer[(3, 5)] = 0.5 + b = nx.edge_betweenness_centrality_subset( + G, sources=[0], targets=[5], normalized=False, weight="weight" + ) + for n in G.edges(): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..d274206a7d6dffb6d9101378370210dd0cb8e01f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_closeness_centrality.py @@ -0,0 +1,306 @@ +""" +Tests for closeness centrality. +""" +import pytest + +import networkx as nx + + +class TestClosenessCentrality: + @classmethod + def setup_class(cls): + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + + F = nx.florentine_families_graph() + cls.F = F + + cls.LM = nx.les_miserables_graph() + + # Create random undirected, unweighted graph for testing incremental version + cls.undirected_G = nx.fast_gnp_random_graph(n=100, p=0.6, seed=123) + cls.undirected_G_cc = nx.closeness_centrality(cls.undirected_G) + + def test_wf_improved(self): + G = nx.union(self.P4, nx.path_graph([4, 5, 6])) + c = nx.closeness_centrality(G) + cwf = nx.closeness_centrality(G, wf_improved=False) + res = {0: 0.25, 1: 0.375, 2: 0.375, 3: 0.25, 4: 0.222, 5: 0.333, 6: 0.222} + wf_res = {0: 0.5, 1: 0.75, 2: 0.75, 3: 0.5, 4: 0.667, 5: 1.0, 6: 0.667} + for n in G: + assert c[n] == pytest.approx(res[n], abs=1e-3) + assert cwf[n] == pytest.approx(wf_res[n], abs=1e-3) + + def test_digraph(self): + G = nx.path_graph(3, create_using=nx.DiGraph()) + c = nx.closeness_centrality(G) + cr = nx.closeness_centrality(G.reverse()) + d = {0: 0.0, 1: 0.500, 2: 0.667} + dr = {0: 0.667, 1: 0.500, 2: 0.0} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + assert cr[n] == pytest.approx(dr[n], abs=1e-3) + + def test_k5_closeness(self): + c = nx.closeness_centrality(self.K5) + d = {0: 1.000, 1: 1.000, 2: 1.000, 3: 1.000, 4: 1.000} + for n in sorted(self.K5): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_closeness(self): + c = nx.closeness_centrality(self.P3) + d = {0: 0.667, 1: 1.000, 2: 0.667} + for n in sorted(self.P3): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_closeness(self): + c = nx.closeness_centrality(self.K) + d = { + 0: 0.529, + 1: 0.529, + 2: 0.500, + 3: 0.600, + 4: 0.500, + 5: 0.643, + 6: 0.643, + 7: 0.600, + 8: 0.429, + 9: 0.310, + } + for n in sorted(self.K): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_florentine_families_closeness(self): + c = nx.closeness_centrality(self.F) + d = { + "Acciaiuoli": 0.368, + 
"Albizzi": 0.483, + "Barbadori": 0.4375, + "Bischeri": 0.400, + "Castellani": 0.389, + "Ginori": 0.333, + "Guadagni": 0.467, + "Lamberteschi": 0.326, + "Medici": 0.560, + "Pazzi": 0.286, + "Peruzzi": 0.368, + "Ridolfi": 0.500, + "Salviati": 0.389, + "Strozzi": 0.4375, + "Tornabuoni": 0.483, + } + for n in sorted(self.F): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_closeness(self): + c = nx.closeness_centrality(self.LM) + d = { + "Napoleon": 0.302, + "Myriel": 0.429, + "MlleBaptistine": 0.413, + "MmeMagloire": 0.413, + "CountessDeLo": 0.302, + "Geborand": 0.302, + "Champtercier": 0.302, + "Cravatte": 0.302, + "Count": 0.302, + "OldMan": 0.302, + "Valjean": 0.644, + "Labarre": 0.394, + "Marguerite": 0.413, + "MmeDeR": 0.394, + "Isabeau": 0.394, + "Gervais": 0.394, + "Listolier": 0.341, + "Tholomyes": 0.392, + "Fameuil": 0.341, + "Blacheville": 0.341, + "Favourite": 0.341, + "Dahlia": 0.341, + "Zephine": 0.341, + "Fantine": 0.461, + "MmeThenardier": 0.461, + "Thenardier": 0.517, + "Cosette": 0.478, + "Javert": 0.517, + "Fauchelevent": 0.402, + "Bamatabois": 0.427, + "Perpetue": 0.318, + "Simplice": 0.418, + "Scaufflaire": 0.394, + "Woman1": 0.396, + "Judge": 0.404, + "Champmathieu": 0.404, + "Brevet": 0.404, + "Chenildieu": 0.404, + "Cochepaille": 0.404, + "Pontmercy": 0.373, + "Boulatruelle": 0.342, + "Eponine": 0.396, + "Anzelma": 0.352, + "Woman2": 0.402, + "MotherInnocent": 0.398, + "Gribier": 0.288, + "MmeBurgon": 0.344, + "Jondrette": 0.257, + "Gavroche": 0.514, + "Gillenormand": 0.442, + "Magnon": 0.335, + "MlleGillenormand": 0.442, + "MmePontmercy": 0.315, + "MlleVaubois": 0.308, + "LtGillenormand": 0.365, + "Marius": 0.531, + "BaronessT": 0.352, + "Mabeuf": 0.396, + "Enjolras": 0.481, + "Combeferre": 0.392, + "Prouvaire": 0.357, + "Feuilly": 0.392, + "Courfeyrac": 0.400, + "Bahorel": 0.394, + "Bossuet": 0.475, + "Joly": 0.394, + "Grantaire": 0.358, + "MotherPlutarch": 0.285, + "Gueulemer": 0.463, + "Babet": 0.463, + "Claquesous": 0.452, + "Montparnasse": 0.458, + "Toussaint": 0.402, + "Child1": 0.342, + "Child2": 0.342, + "Brujon": 0.380, + "MmeHucheloup": 0.353, + } + for n in sorted(self.LM): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_closeness(self): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + XG = nx.Graph() + XG.add_weighted_edges_from(edges) + c = nx.closeness_centrality(XG, distance="weight") + d = {"y": 0.200, "x": 0.286, "s": 0.138, "u": 0.235, "v": 0.200} + for n in sorted(XG): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + # + # Tests for incremental closeness centrality. 
+ # + @staticmethod + def pick_add_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = set(g.nodes()) + neighbors = list(g.neighbors(u)) + [u] + possible_nodes.difference_update(neighbors) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + @staticmethod + def pick_remove_edge(g): + u = nx.utils.arbitrary_element(g) + possible_nodes = list(g.neighbors(u)) + v = nx.utils.arbitrary_element(possible_nodes) + return (u, v) + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + dir_G = nx.gn_graph(n=5) + prev_cc = None + edge = self.pick_add_edge(dir_G) + insert = True + nx.incremental_closeness_centrality(dir_G, edge, prev_cc, insert) + + def test_wrong_size_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + prev_cc.pop(0) + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_wrong_nodes_prev_cc_raises(self): + with pytest.raises(nx.NetworkXError): + G = self.undirected_G.copy() + edge = self.pick_add_edge(G) + insert = True + prev_cc = self.undirected_G_cc.copy() + num_nodes = len(prev_cc) + prev_cc.pop(0) + prev_cc[num_nodes] = 0.5 + nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + + def test_zero_centrality(self): + G = nx.path_graph(3) + prev_cc = nx.closeness_centrality(G) + edge = self.pick_remove_edge(G) + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insertion=False) + G.remove_edges_from([edge]) + real_cc = nx.closeness_centrality(G) + shared_items = set(test_cc.items()) & set(real_cc.items()) + assert len(shared_items) == len(real_cc) + assert 0 in test_cc.values() + + def test_incremental(self): + # Check that incremental and regular give same output + G = self.undirected_G.copy() + prev_cc = None + for i in range(5): + if i % 2 == 0: + # Remove an edge + insert = False + edge = self.pick_remove_edge(G) + else: + # Add an edge + insert = True + edge = self.pick_add_edge(G) + + # start = timeit.default_timer() + test_cc = nx.incremental_closeness_centrality(G, edge, prev_cc, insert) + # inc_elapsed = (timeit.default_timer() - start) + # print(f"incremental time: {inc_elapsed}") + + if insert: + G.add_edges_from([edge]) + else: + G.remove_edges_from([edge]) + + # start = timeit.default_timer() + real_cc = nx.closeness_centrality(G) + # reg_elapsed = (timeit.default_timer() - start) + # print(f"regular time: {reg_elapsed}") + # Example output: + # incremental time: 0.208 + # regular time: 0.276 + # incremental time: 0.00683 + # regular time: 0.260 + # incremental time: 0.0224 + # regular time: 0.278 + # incremental time: 0.00804 + # regular time: 0.208 + # incremental time: 0.00947 + # regular time: 0.188 + + assert set(test_cc.items()) == set(real_cc.items()) + + prev_cc = test_cc diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..4e3d4385c9b266975140d49b739d09fbd449d8a6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py @@ -0,0 +1,197 @@ +import pytest + +import networkx as nx +from networkx import approximate_current_flow_betweenness_centrality as approximate_cfbc +from networkx import edge_current_flow_betweenness_centrality as 
edge_current_flow + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight=None) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + wb_answer = {0: 0.2222222, 1: 0.2222222, 2: 0.30555555, 3: 0.30555555} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="weight") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + wb_answer = {0: 0.2051282, 1: 0.2051282, 2: 0.33974358, 3: 0.33974358} + b = nx.current_flow_betweenness_centrality(G, normalized=True, weight="other") + for n in sorted(G): + assert b[n] == pytest.approx(wb_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {0: 0, 1: 2.0 / 3, 2: 2.0 / 3, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + b_answer = {0: 0, 1: 2, 2: 2, 3: 0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + b_answer = {"a": 1.0, "b": 0.0, "c": 0.0, "d": 0.0} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_solvers2(self): + """Betweenness centrality: alternate solvers""" + G = nx.complete_graph(4) + for solver in ["full", "lu", "cg"]: + b = nx.current_flow_betweenness_centrality( + G, normalized=False, solver=solver + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +class TestApproximateFlowBetweennessCentrality: + def test_K4_normalized(self): + "Approximate current-flow betweenness centrality: K4 normalized" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_K4(self): + "Approximate current-flow betweenness centrality: K4" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality(G, normalized=False) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2) + + def test_star(self): + "Approximate current-flow betweenness centrality: star" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = 
nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_grid(self): + "Approximate current-flow betweenness centrality: 2d grid" + G = nx.grid_2d_graph(4, 4) + b = nx.current_flow_betweenness_centrality(G, normalized=True) + epsilon = 0.1 + ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon) + for n in sorted(G): + np.testing.assert_allclose(b[n], ba[n], atol=epsilon) + + def test_seed(self): + G = nx.complete_graph(4) + b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=0.1) + + def test_solvers(self): + "Approximate current-flow betweenness centrality: solvers" + G = nx.complete_graph(4) + epsilon = 0.1 + for solver in ["full", "lu", "cg"]: + b = approximate_cfbc( + G, normalized=False, solver=solver, epsilon=0.5 * epsilon + ) + b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75} + for n in sorted(G): + np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon) + + def test_lower_kmax(self): + G = nx.complete_graph(4) + with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"): + nx.approximate_current_flow_betweenness_centrality(G, kmax=4) + + +class TestWeightedFlowBetweennessCentrality: + pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=True) + b_answer = dict.fromkeys(G.edges(), 0.25) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4_normalized(self): + """Edge flow betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = dict.fromkeys(G.edges(), 0.75) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge flow betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.25, (0, 3): 1.25, (1, 2): 1.25, (2, 3): 1.25} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow(G, normalized=False) + b_answer = {(0, 1): 1.5, (1, 2): 2.0, (2, 3): 1.5} + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + +@pytest.mark.parametrize( + "centrality_func", + ( + nx.current_flow_betweenness_centrality, + nx.edge_current_flow_betweenness_centrality, + nx.approximate_current_flow_betweenness_centrality, + ), +) +def test_unconnected_graphs_betweenness_centrality(centrality_func): + G = nx.Graph([(1, 2), (3, 4)]) + G.add_node(5) + with pytest.raises(nx.NetworkXError, match="Graph not connected"): + centrality_func(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..7b1611b07bbf890f5e45bba7a42c298bd8f4e749 --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py @@ -0,0 +1,147 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx import edge_current_flow_betweenness_centrality as edge_current_flow +from networkx import ( + edge_current_flow_betweenness_centrality_subset as edge_current_flow_subset, +) + + +class TestFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight=None + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True, weight="other" + ) + b_answer = nx.current_flow_betweenness_centrality( + G, normalized=True, weight="other" + ) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4_normalized(self): + """Betweenness centrality: P4 normalized""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Betweenness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Betweenness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_betweenness_centrality_subset( + G, list(G), list(G), normalized=True + ) + b_answer = nx.current_flow_betweenness_centrality(G, normalized=True) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + +# class TestWeightedFlowBetweennessCentrality(): +# pass + + +class TestEdgeFlowBetweennessCentrality: + def test_K4_normalized(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_K4(self): + """Betweenness centrality: K4""" + G = nx.complete_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = 
edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + # test weighted network + G.add_edge(0, 1, weight=0.5, other=0.3) + b = edge_current_flow_subset(G, list(G), list(G), normalized=False, weight=None) + # weight is None => same as unweighted network + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset(G, list(G), list(G), normalized=False) + b_answer = edge_current_flow(G, normalized=False) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + b = edge_current_flow_subset( + G, list(G), list(G), normalized=False, weight="other" + ) + b_answer = edge_current_flow(G, normalized=False, weight="other") + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_C4(self): + """Edge betweenness centrality: C4""" + G = nx.cycle_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) + + def test_P4(self): + """Edge betweenness centrality: P4""" + G = nx.path_graph(4) + b = edge_current_flow_subset(G, list(G), list(G), normalized=True) + b_answer = edge_current_flow(G, normalized=True) + for (s, t), v1 in b_answer.items(): + v2 = b.get((s, t), b.get((t, s))) + assert v1 == pytest.approx(v2, abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py new file mode 100644 index 0000000000000000000000000000000000000000..2528d622855938b8f569d4fb33309ebed1dbd7c8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_current_flow_closeness.py @@ -0,0 +1,43 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +class TestFlowClosenessCentrality: + def test_K4(self): + """Closeness centrality: K4""" + G = nx.complete_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 2.0 / 3, 1: 2.0 / 3, 2: 2.0 / 3, 3: 2.0 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P4(self): + """Closeness centrality: P4""" + G = nx.path_graph(4) + b = nx.current_flow_closeness_centrality(G) + b_answer = {0: 1.0 / 6, 1: 1.0 / 4, 2: 1.0 / 4, 3: 1.0 / 6} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_star(self): + """Closeness centrality: star""" + G = nx.Graph() + nx.add_star(G, ["a", "b", "c", "d"]) + b = nx.current_flow_closeness_centrality(G) + b_answer = {"a": 1.0 / 3, "b": 0.6 / 3, "c": 0.6 / 3, "d": 0.6 / 3} + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_current_flow_closeness_centrality_not_connected(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NetworkXError): + nx.current_flow_closeness_centrality(G) + + +class TestWeightedFlowClosenessCentrality: + pass diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py new file mode 100644 index 
0000000000000000000000000000000000000000..f3f6c39d3bd58d243627c9f33a088e4f4e37d3bb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_degree_centrality.py @@ -0,0 +1,144 @@ +""" + Unit tests for degree centrality. +""" + +import pytest + +import networkx as nx + + +class TestDegreeCentrality: + def setup_method(self): + self.K = nx.krackhardt_kite_graph() + self.P3 = nx.path_graph(3) + self.K5 = nx.complete_graph(5) + + F = nx.Graph() # Florentine families + F.add_edge("Acciaiuoli", "Medici") + F.add_edge("Castellani", "Peruzzi") + F.add_edge("Castellani", "Strozzi") + F.add_edge("Castellani", "Barbadori") + F.add_edge("Medici", "Barbadori") + F.add_edge("Medici", "Ridolfi") + F.add_edge("Medici", "Tornabuoni") + F.add_edge("Medici", "Albizzi") + F.add_edge("Medici", "Salviati") + F.add_edge("Salviati", "Pazzi") + F.add_edge("Peruzzi", "Strozzi") + F.add_edge("Peruzzi", "Bischeri") + F.add_edge("Strozzi", "Ridolfi") + F.add_edge("Strozzi", "Bischeri") + F.add_edge("Ridolfi", "Tornabuoni") + F.add_edge("Tornabuoni", "Guadagni") + F.add_edge("Albizzi", "Ginori") + F.add_edge("Albizzi", "Guadagni") + F.add_edge("Bischeri", "Guadagni") + F.add_edge("Guadagni", "Lamberteschi") + self.F = F + + G = nx.DiGraph() + G.add_edge(0, 5) + G.add_edge(1, 5) + G.add_edge(2, 5) + G.add_edge(3, 5) + G.add_edge(4, 5) + G.add_edge(5, 6) + G.add_edge(5, 7) + G.add_edge(5, 8) + self.G = G + + def test_degree_centrality_1(self): + d = nx.degree_centrality(self.K5) + exact = dict(zip(range(5), [1] * 5)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_2(self): + d = nx.degree_centrality(self.P3) + exact = {0: 0.5, 1: 1, 2: 0.5} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_degree_centrality_3(self): + d = nx.degree_centrality(self.K) + exact = { + 0: 0.444, + 1: 0.444, + 2: 0.333, + 3: 0.667, + 4: 0.333, + 5: 0.556, + 6: 0.556, + 7: 0.333, + 8: 0.222, + 9: 0.111, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_degree_centrality_4(self): + d = nx.degree_centrality(self.F) + names = sorted(self.F.nodes()) + dcs = [ + 0.071, + 0.214, + 0.143, + 0.214, + 0.214, + 0.071, + 0.286, + 0.071, + 0.429, + 0.071, + 0.214, + 0.214, + 0.143, + 0.286, + 0.214, + ] + exact = dict(zip(names, dcs)) + for n, dc in d.items(): + assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7) + + def test_indegree_centrality(self): + d = nx.in_degree_centrality(self.G) + exact = { + 0: 0.0, + 1: 0.0, + 2: 0.0, + 3: 0.0, + 4: 0.0, + 5: 0.625, + 6: 0.125, + 7: 0.125, + 8: 0.125, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_outdegree_centrality(self): + d = nx.out_degree_centrality(self.G) + exact = { + 0: 0.125, + 1: 0.125, + 2: 0.125, + 3: 0.125, + 4: 0.125, + 5: 0.375, + 6: 0.0, + 7: 0.0, + 8: 0.0, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + def test_small_graph_centrality(self): + G = nx.empty_graph(create_using=nx.DiGraph) + assert {} == nx.degree_centrality(G) + assert {} == nx.out_degree_centrality(G) + assert {} == nx.in_degree_centrality(G) + + G = nx.empty_graph(1, create_using=nx.DiGraph) + assert {0: 1} == nx.degree_centrality(G) + assert {0: 1} == nx.out_degree_centrality(G) + assert {0: 1} == nx.in_degree_centrality(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py 
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py new file mode 100644 index 0000000000000000000000000000000000000000..05de1c43659a44f2dbf45368bf2ee552dd61dd78 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py @@ -0,0 +1,73 @@ +import networkx as nx + + +def small_ego_G(): + """The sample network from https://arxiv.org/pdf/1310.6753v1.pdf""" + edges = [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("b", "f"), + ("c", "d"), + ("c", "f"), + ("c", "h"), + ("d", "f"), + ("e", "f"), + ("f", "h"), + ("h", "j"), + ("h", "k"), + ("i", "j"), + ("i", "k"), + ("j", "k"), + ("u", "a"), + ("u", "b"), + ("u", "c"), + ("u", "d"), + ("u", "e"), + ("u", "f"), + ("u", "g"), + ("u", "h"), + ("u", "i"), + ("u", "j"), + ("u", "k"), + ] + G = nx.Graph() + G.add_edges_from(edges) + + return G + + +class TestDispersion: + def test_article(self): + """our algorithm matches article's""" + G = small_ego_G() + disp_uh = nx.dispersion(G, "u", "h", normalized=False) + disp_ub = nx.dispersion(G, "u", "b", normalized=False) + assert disp_uh == 4 + assert disp_ub == 1 + + def test_results_length(self): + """there is a result for every node""" + G = small_ego_G() + disp = nx.dispersion(G) + disp_Gu = nx.dispersion(G, "u") + disp_uv = nx.dispersion(G, "u", "h") + assert len(disp) == len(G) + assert len(disp_Gu) == len(G) - 1 + assert isinstance(disp_uv, float) + + def test_dispersion_v_only(self): + G = small_ego_G() + disp_G_h = nx.dispersion(G, v="h", normalized=False) + disp_G_h_normalized = nx.dispersion(G, v="h", normalized=True) + assert disp_G_h == {"c": 0, "f": 0, "j": 0, "k": 0, "u": 4} + assert disp_G_h_normalized == {"c": 0.0, "f": 0.0, "j": 0.0, "k": 0.0, "u": 1.0} + + def test_impossible_things(self): + G = nx.karate_club_graph() + disp = nx.dispersion(G) + for u in disp: + for v in disp[u]: + assert disp[u][v] >= 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..b8620056a94995100fae72a66bb7e0558aae953b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_eigenvector_centrality.py @@ -0,0 +1,175 @@ +import math + +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +import networkx as nx + + +class TestEigenvectorCentrality: + def test_K5(self): + """Eigenvector centrality: K5""" + G = nx.complete_graph(5) + b = nx.eigenvector_centrality(G) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.eigenvector_centrality(G, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-3) + + def test_P3(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = nx.eigenvector_centrality_numpy(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + b = nx.eigenvector_centrality(G) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_P3_unweighted(self): + """Eigenvector centrality: P3""" + G = nx.path_graph(3) + b_answer = {0: 0.5, 1: 0.7071, 2: 0.5} + b = 
nx.eigenvector_centrality_numpy(G, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_maxiter(self):
+        with pytest.raises(nx.PowerIterationFailedConvergence):
+            G = nx.path_graph(3)
+            nx.eigenvector_centrality(G, max_iter=0)
+
+
+class TestEigenvectorCentralityDirected:
+    @classmethod
+    def setup_class(cls):
+        G = nx.DiGraph()
+
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+
+        G.add_edges_from(edges, weight=2.0)
+        cls.G = G.reverse()
+        cls.G.evc = [
+            0.25368793,
+            0.19576478,
+            0.32817092,
+            0.40430835,
+            0.48199885,
+            0.15724483,
+            0.51346196,
+            0.32475403,
+        ]
+
+        H = nx.DiGraph()
+
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+
+        H.add_edges_from(edges)
+        cls.H = H.reverse()
+        cls.H.evc = [
+            0.25368793,
+            0.19576478,
+            0.32817092,
+            0.40430835,
+            0.48199885,
+            0.15724483,
+            0.51346196,
+            0.32475403,
+        ]
+
+    def test_eigenvector_centrality_weighted(self):
+        G = self.G
+        p = nx.eigenvector_centrality(G)
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-4)
+
+    def test_eigenvector_centrality_weighted_numpy(self):
+        G = self.G
+        p = nx.eigenvector_centrality_numpy(G)
+        for a, b in zip(list(p.values()), self.G.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+    def test_eigenvector_centrality_unweighted(self):
+        G = self.H
+        p = nx.eigenvector_centrality(G)
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-4)
+
+    def test_eigenvector_centrality_unweighted_numpy(self):
+        G = self.H
+        p = nx.eigenvector_centrality_numpy(G)
+        for a, b in zip(list(p.values()), self.H.evc):
+            assert a == pytest.approx(b, abs=1e-7)
+
+
+class TestEigenvectorCentralityExceptions:
+    def test_multigraph(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality(nx.MultiGraph())
+
+    def test_multigraph_numpy(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality_numpy(nx.MultiGraph())
+
+    def test_empty(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality(nx.Graph())
+
+    def test_empty_numpy(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.eigenvector_centrality_numpy(nx.Graph())
+
+    def test_zero_nstart(self):
+        G = nx.Graph([(1, 2), (1, 3), (2, 3)])
+        with pytest.raises(
+            nx.NetworkXException, match="initial vector cannot have all zero values"
+        ):
+            nx.eigenvector_centrality(G, nstart={v: 0 for v in G})
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f5559dcd73a268c28b513678b1fe3dd058220cb
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_group.py
@@ -0,0 +1,278 @@
+"""
+Tests for Group Centrality Measures
+"""
+
+
+import pytest
+
+import networkx as nx
+
+
+class TestGroupBetweennessCentrality:
+    def test_group_betweenness_single_node(self):
+        """
+        Group betweenness centrality for single node group
+        """
+        G = nx.path_graph(5)
+        C = [1]
+        b = nx.group_betweenness_centrality(
+            G, C, weight=None, normalized=False, endpoints=False
+        )
+        b_answer = 3.0
+        assert b == b_answer
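+
+    # Added commentary (not part of the upstream networkx file): for the path
+    # graph 0-1-2-3-4, the only shortest paths that pass *through* node 1 as
+    # an interior vertex are those for the pairs (0, 2), (0, 3) and (0, 4),
+    # and each pair has a single shortest path, so the unnormalized group
+    # score above is 3.0.
+
+    def 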
test_group_betweenness_with_endpoints(self): + """ + Group betweenness centrality for single node group + """ + G = nx.path_graph(5) + C = [1] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=False, endpoints=True + ) + b_answer = 7.0 + assert b == b_answer + + def test_group_betweenness_normalized(self): + """ + Group betweenness centrality for group with more than + 1 node and normalized + """ + G = nx.path_graph(5) + C = [1, 3] + b = nx.group_betweenness_centrality( + G, C, weight=None, normalized=True, endpoints=False + ) + b_answer = 1.0 + assert b == b_answer + + def test_two_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(7) + C = [[0, 1, 6], [0, 1, 5]] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = [0.0, 3.0] + assert b == b_answer + + def test_group_betweenness_value_zero(self): + """ + Group betweenness centrality value of 0 + """ + G = nx.cycle_graph(6) + C = [0, 1, 5] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_disconnected_graph(self): + """ + Group betweenness centrality in a disconnected graph + """ + G = nx.path_graph(5) + G.remove_edge(0, 1) + C = [1] + b = nx.group_betweenness_centrality(G, C, weight=None, normalized=False) + b_answer = 0.0 + assert b == b_answer + + def test_group_betweenness_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + nx.group_betweenness_centrality(nx.path_graph(5), [4, 7, 8]) + + def test_group_betweenness_directed_weighted(self): + """ + Group betweenness centrality in a directed and weighted graph + """ + G = nx.DiGraph() + G.add_edge(1, 0, weight=1) + G.add_edge(0, 2, weight=2) + G.add_edge(1, 2, weight=3) + G.add_edge(3, 1, weight=4) + G.add_edge(2, 3, weight=1) + G.add_edge(4, 3, weight=6) + G.add_edge(2, 4, weight=7) + C = [1, 2] + b = nx.group_betweenness_centrality(G, C, weight="weight", normalized=False) + b_answer = 5.0 + assert b == b_answer + + +class TestProminentGroup: + np = pytest.importorskip("numpy") + pd = pytest.importorskip("pandas") + + def test_prominent_group_single_node(self): + """ + Prominent group for single node + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, endpoints=False) + b_answer, g_answer = 4.0, [2] + assert b == b_answer and g == g_answer + + def test_prominent_group_with_c(self): + """ + Prominent group without some nodes + """ + G = nx.path_graph(5) + k = 1 + b, g = nx.prominent_group(G, k, normalized=False, C=[2]) + b_answer, g_answer = 3.0, [1] + assert b == b_answer and g == g_answer + + def test_prominent_group_normalized_endpoints(self): + """ + Prominent group with normalized result, with endpoints + """ + G = nx.cycle_graph(7) + k = 2 + b, g = nx.prominent_group(G, k, normalized=True, endpoints=True) + b_answer, g_answer = 1.7, [2, 5] + assert b == b_answer and g == g_answer + + def test_prominent_group_disconnected_graph(self): + """ + Prominent group of disconnected graph + """ + G = nx.path_graph(6) + G.remove_edge(0, 1) + k = 1 + b, g = nx.prominent_group(G, k, weight=None, normalized=False) + b_answer, g_answer = 4.0, [3] + assert b == b_answer and g == g_answer + + def test_prominent_group_node_not_in_graph(self): + """ + Node(s) in C not in graph, raises NodeNotFound exception + """ + with pytest.raises(nx.NodeNotFound): + 
nx.prominent_group(nx.path_graph(5), 1, C=[10])
+
+    def test_prominent_group_directed_weighted(self):
+        """
+        Prominent group in a directed and weighted graph
+        """
+        G = nx.DiGraph()
+        G.add_edge(1, 0, weight=1)
+        G.add_edge(0, 2, weight=2)
+        G.add_edge(1, 2, weight=3)
+        G.add_edge(3, 1, weight=4)
+        G.add_edge(2, 3, weight=1)
+        G.add_edge(4, 3, weight=6)
+        G.add_edge(2, 4, weight=7)
+        k = 2
+        b, g = nx.prominent_group(G, k, weight="weight", normalized=False)
+        b_answer, g_answer = 5.0, [1, 2]
+        assert b == b_answer and g == g_answer
+
+    def test_prominent_group_greedy_algorithm(self):
+        """
+        Prominent group computed with the greedy algorithm
+        """
+        G = nx.cycle_graph(7)
+        k = 2
+        b, g = nx.prominent_group(G, k, normalized=True, endpoints=True, greedy=True)
+        b_answer, g_answer = 1.7, [6, 3]
+        assert b == b_answer and g == g_answer
+
+
+class TestGroupClosenessCentrality:
+    def test_group_closeness_single_node(self):
+        """
+        Group closeness centrality for a single node group
+        """
+        G = nx.path_graph(5)
+        c = nx.group_closeness_centrality(G, [1])
+        c_answer = nx.closeness_centrality(G, 1)
+        assert c == c_answer
+
+    def test_group_closeness_disconnected(self):
+        """
+        Group closeness centrality for a disconnected graph
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4])
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 0
+        assert c == c_answer
+
+    def test_group_closeness_multiple_node(self):
+        """
+        Group closeness centrality for a group with more than
+        1 node
+        """
+        G = nx.path_graph(4)
+        c = nx.group_closeness_centrality(G, [1, 2])
+        c_answer = 1
+        assert c == c_answer
+
+    def test_group_closeness_node_not_in_graph(self):
+        """
+        Node(s) in S not in graph, raises NodeNotFound exception
+        """
+        with pytest.raises(nx.NodeNotFound):
+            nx.group_closeness_centrality(nx.path_graph(5), [6, 7, 8])
+
+
+class TestGroupDegreeCentrality:
+    def test_group_degree_centrality_single_node(self):
+        """
+        Group degree centrality for a single node group
+        """
+        G = nx.path_graph(4)
+        d = nx.group_degree_centrality(G, [1])
+        d_answer = nx.degree_centrality(G)[1]
+        assert d == d_answer
+
+    def test_group_degree_centrality_multiple_node(self):
+        """
+        Group degree centrality for group with more than
+        1 node
+        """
+        G = nx.Graph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_degree_centrality(G, [1, 2])
+        d_answer = 1
+        assert d == d_answer
+
+    def test_group_in_degree_centrality(self):
+        """
+        Group in-degree centrality in a DiGraph
+        """
+        G = nx.DiGraph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_in_degree_centrality(G, [1, 2])
+        d_answer = 0
+        assert d == d_answer
+
+    def test_group_out_degree_centrality(self):
+        """
+        Group out-degree centrality in a DiGraph
+        """
+        G = nx.DiGraph()
+        G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8])
+        G.add_edges_from(
+            [(1, 2), (1, 3), (1, 6), (1, 7), (1, 8), (2, 3), (2, 4), (2, 5)]
+        )
+        d = nx.group_out_degree_centrality(G, [1, 2])
+        d_answer = 1
+        assert d == d_answer
+
+    def test_group_degree_centrality_node_not_in_graph(self):
+        """
+        Node(s) in S not in graph, raises NetworkXError
+        """
+        with pytest.raises(nx.NetworkXError):
+            nx.group_degree_centrality(nx.path_graph(5), [6, 7, 8])
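+
+
+# Added note (illustrative commentary, not part of the upstream networkx
+# file): group degree centrality is the fraction of non-group nodes adjacent
+# to at least one group member. In the eight-node example above, every node
+# in {3, ..., 8} touches node 1 or node 2, so the group {1, 2} scores
+# 6/6 = 1; in the directed variant no node outside the group points into
+# {1, 2}, giving an in-degree score of 0 and an out-degree score of 1.
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py 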
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
new file mode 100644
index 0000000000000000000000000000000000000000..450356ea970565eaee7612eb4c8c2d5364af50d7
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_harmonic_centrality.py
@@ -0,0 +1,115 @@
+"""
+Tests for harmonic centrality.
+"""
+import pytest
+
+import networkx as nx
+from networkx.algorithms.centrality import harmonic_centrality
+
+
+class TestHarmonicCentrality:
+    @classmethod
+    def setup_class(cls):
+        cls.P3 = nx.path_graph(3)
+        cls.P4 = nx.path_graph(4)
+        cls.K5 = nx.complete_graph(5)
+
+        cls.C4 = nx.cycle_graph(4)
+        cls.C4_directed = nx.cycle_graph(4, create_using=nx.DiGraph)
+
+        cls.C5 = nx.cycle_graph(5)
+
+        cls.T = nx.balanced_tree(r=2, h=2)
+
+        cls.Gb = nx.DiGraph()
+        cls.Gb.add_edges_from([(0, 1), (0, 2), (0, 4), (2, 1), (2, 3), (4, 3)])
+
+    def test_p3_harmonic(self):
+        c = harmonic_centrality(self.P3)
+        d = {0: 1.5, 1: 2, 2: 1.5}
+        for n in sorted(self.P3):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p4_harmonic(self):
+        c = harmonic_centrality(self.P4)
+        d = {0: 1.8333333, 1: 2.5, 2: 2.5, 3: 1.8333333}
+        for n in sorted(self.P4):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_clique_complete(self):
+        c = harmonic_centrality(self.K5)
+        d = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4}
+        for n in sorted(self.K5):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_cycle_C4(self):
+        c = harmonic_centrality(self.C4)
+        d = {0: 2.5, 1: 2.5, 2: 2.5, 3: 2.5}
+        for n in sorted(self.C4):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_cycle_C5(self):
+        c = harmonic_centrality(self.C5)
+        d = {0: 3, 1: 3, 2: 3, 3: 3, 4: 3}
+        for n in sorted(self.C5):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_bal_tree(self):
+        c = harmonic_centrality(self.T)
+        d = {0: 4.0, 1: 4.1666, 2: 4.1666, 3: 2.8333, 4: 2.8333, 5: 2.8333, 6: 2.8333}
+        for n in sorted(self.T):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_exampleGraph(self):
+        c = harmonic_centrality(self.Gb)
+        d = {0: 0, 1: 2, 2: 1, 3: 2.5, 4: 1}
+        for n in sorted(self.Gb):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_weighted_harmonic(self):
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [
+                ("a", "b", 10),
+                ("d", "c", 5),
+                ("a", "c", 1),
+                ("e", "f", 2),
+                ("f", "c", 1),
+                ("a", "f", 3),
+            ]
+        )
+        c = harmonic_centrality(XG, distance="weight")
+        d = {"a": 0, "b": 0.1, "c": 2.533, "d": 0, "e": 0, "f": 0.83333}
+        for n in sorted(XG):
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_empty(self):
+        G = nx.DiGraph()
+        c = harmonic_centrality(G, distance="weight")
+        d = {}
+        assert c == d
+
+    def test_singleton(self):
+        G = nx.DiGraph()
+        G.add_node(0)
+        c = harmonic_centrality(G, distance="weight")
+        d = {0: 0}
+        assert c == d
+
+    def test_cycle_c4_directed(self):
+        c = harmonic_centrality(self.C4_directed, nbunch=[0, 1], sources=[1, 2])
+        d = {0: 0.833, 1: 0.333}
+        for n in [0, 1]:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p3_harmonic_subset(self):
+        c = harmonic_centrality(self.P3, sources=[0, 1])
+        d = {0: 1, 1: 1, 2: 1.5}
+        for n in self.P3:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
+
+    def test_p4_harmonic_subset(self):
+        c = harmonic_centrality(self.P4, nbunch=[2, 3], sources=[0, 1])
+        d = {2: 1.5, 3: 0.8333333}
+        for n in [2, 3]:
+            assert c[n] == pytest.approx(d[n], abs=1e-3)
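+
+
+# Illustrative cross-check (added commentary; not part of the upstream
+# networkx test suite). Harmonic centrality sums reciprocal shortest-path
+# distances, so for the path graph 0-1-2 used in test_p3_harmonic, node 0
+# scores 1/1 + 1/2 = 1.5. The sketch below recomputes that formula directly
+# for unweighted, undirected graphs:
+def _harmonic_by_hand(G):
+    # Sum reciprocal distances to every other reachable node; unreachable
+    # nodes contribute nothing, matching the convention in the tests above.
+    return {
+        v: sum(
+            1 / d
+            for d in nx.single_source_shortest_path_length(G, v).values()
+            if d > 0
+        )
+        for v in G
+    }
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py 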
b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..0927f00bc5c31ad1134dae0c8f59367baed67bb6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_katz_centrality.py @@ -0,0 +1,345 @@ +import math + +import pytest + +import networkx as nx + + +class TestKatzCentrality: + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + nstart = {n: 1 for n in G} + b = nx.katz_centrality(G, alpha, nstart=nstart) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + + def test_P3(self): + """Katz centrality: P3""" + alpha = 0.1 + G = nx.path_graph(3) + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_maxiter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0) + + def test_beta_as_scalar(self): + alpha = 0.1 + beta = 0.1 + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_beta_as_dict(self): + alpha = 0.1 + beta = {0: 1.0, 1: 1.0, 2: 1.0} + b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162} + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha, beta) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-4) + + def test_multiple_alpha(self): + alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6] + for alpha in alpha_list: + b_answer = { + 0.1: { + 0: 0.5598852584152165, + 1: 0.6107839182711449, + 2: 0.5598852584152162, + }, + 0.2: { + 0: 0.5454545454545454, + 1: 0.6363636363636365, + 2: 0.5454545454545454, + }, + 0.3: { + 0: 0.5333964609104419, + 1: 0.6564879518897746, + 2: 0.5333964609104419, + }, + 0.4: { + 0: 0.5232045649263551, + 1: 0.6726915834767423, + 2: 0.5232045649263551, + }, + 0.5: { + 0: 0.5144957746691622, + 1: 0.6859943117075809, + 2: 0.5144957746691622, + }, + 0.6: { + 0: 0.5069794004195823, + 1: 0.6970966755769258, + 2: 0.5069794004195823, + }, + } + G = nx.path_graph(3) + b = nx.katz_centrality(G, alpha) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXException): + nx.katz_centrality(nx.MultiGraph(), 0.1) + + def test_empty(self): + e = nx.katz_centrality(nx.Graph(), 0.1) + assert e == {} + + def test_bad_beta(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + beta = {0: 77} + nx.katz_centrality(G, 0.1, beta=beta) + + def test_bad_beta_number(self): + with pytest.raises(nx.NetworkXException): + G = nx.Graph([(0, 1)]) + nx.katz_centrality(G, 0.1, beta="foo") + + +class TestKatzCentralityNumpy: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_K5(self): + """Katz centrality: K5""" + G = nx.complete_graph(5) + alpha = 0.1 + b = nx.katz_centrality(G, alpha) + v = math.sqrt(1 / 5.0) + b_answer = dict.fromkeys(G, v) + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-7) + 
b = nx.eigenvector_centrality_numpy(G)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_P3(self):
+        """Katz centrality: P3"""
+        alpha = 0.1
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        b = nx.katz_centrality_numpy(G, alpha)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_beta_as_scalar(self):
+        alpha = 0.1
+        beta = 0.1
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality_numpy(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_beta_as_dict(self):
+        alpha = 0.1
+        beta = {0: 1.0, 1: 1.0, 2: 1.0}
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        G = nx.path_graph(3)
+        b = nx.katz_centrality_numpy(G, alpha, beta)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+    def test_multiple_alpha(self):
+        alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
+        for alpha in alpha_list:
+            b_answer = {
+                0.1: {
+                    0: 0.5598852584152165,
+                    1: 0.6107839182711449,
+                    2: 0.5598852584152162,
+                },
+                0.2: {
+                    0: 0.5454545454545454,
+                    1: 0.6363636363636365,
+                    2: 0.5454545454545454,
+                },
+                0.3: {
+                    0: 0.5333964609104419,
+                    1: 0.6564879518897746,
+                    2: 0.5333964609104419,
+                },
+                0.4: {
+                    0: 0.5232045649263551,
+                    1: 0.6726915834767423,
+                    2: 0.5232045649263551,
+                },
+                0.5: {
+                    0: 0.5144957746691622,
+                    1: 0.6859943117075809,
+                    2: 0.5144957746691622,
+                },
+                0.6: {
+                    0: 0.5069794004195823,
+                    1: 0.6970966755769258,
+                    2: 0.5069794004195823,
+                },
+            }
+            G = nx.path_graph(3)
+            b = nx.katz_centrality_numpy(G, alpha)
+            for n in sorted(G):
+                assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4)
+
+    def test_multigraph(self):
+        with pytest.raises(nx.NetworkXException):
+            nx.katz_centrality(nx.MultiGraph(), 0.1)
+
+    def test_empty(self):
+        e = nx.katz_centrality(nx.Graph(), 0.1)
+        assert e == {}
+
+    def test_bad_beta(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            beta = {0: 77}
+            nx.katz_centrality_numpy(G, 0.1, beta=beta)
+
+    def test_bad_beta_number(self):
+        with pytest.raises(nx.NetworkXException):
+            G = nx.Graph([(0, 1)])
+            nx.katz_centrality_numpy(G, 0.1, beta="foo")
+
+    def test_K5_unweighted(self):
+        """Katz centrality: K5"""
+        G = nx.complete_graph(5)
+        alpha = 0.1
+        b = nx.katz_centrality(G, alpha, weight=None)
+        v = math.sqrt(1 / 5.0)
+        b_answer = dict.fromkeys(G, v)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
+        b = nx.eigenvector_centrality_numpy(G, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-3)
+
+    def test_P3_unweighted(self):
+        """Katz centrality: P3"""
+        alpha = 0.1
+        G = nx.path_graph(3)
+        b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
+        b = nx.katz_centrality_numpy(G, alpha, weight=None)
+        for n in sorted(G):
+            assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
+
+
+class TestKatzCentralityDirected:
+    @classmethod
+    def setup_class(cls):
+        G = nx.DiGraph()
+        edges = [
+            (1, 2),
+            (1, 3),
+            (2, 4),
+            (3, 2),
+            (3, 5),
+            (4, 2),
+            (4, 5),
+            (4, 6),
+            (5, 6),
+            (5, 7),
+            (5, 8),
+            (6, 8),
+            (7, 1),
+            (7, 5),
+            (7, 8),
+            (8, 6),
+            (8, 7),
+        ]
+        G.add_edges_from(edges, weight=2.0)
+        cls.G = G.reverse()
+        cls.G.alpha = 0.1
+        cls.G.evc = [
+            0.3289589783189635,
+            0.2832077296243516,
+            0.3425906003685471,
+            0.3970420865198392,
+            0.41074871061646284,
+ 
0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + H = nx.DiGraph(edges) + cls.H = G.reverse() + cls.H.alpha = 0.1 + cls.H.evc = [ + 0.3289589783189635, + 0.2832077296243516, + 0.3425906003685471, + 0.3970420865198392, + 0.41074871061646284, + 0.272257430756461, + 0.4201989685435462, + 0.34229059218038554, + ] + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzCentralityDirectedNumpy(TestKatzCentralityDirected): + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + super().setup_class() + + def test_katz_centrality_weighted(self): + G = self.G + alpha = self.G.alpha + p = nx.katz_centrality_numpy(G, alpha, weight="weight") + for a, b in zip(list(p.values()), self.G.evc): + assert a == pytest.approx(b, abs=1e-7) + + def test_katz_centrality_unweighted(self): + H = self.H + alpha = self.H.alpha + p = nx.katz_centrality_numpy(H, alpha, weight="weight") + for a, b in zip(list(p.values()), self.H.evc): + assert a == pytest.approx(b, abs=1e-7) + + +class TestKatzEigenvectorVKatz: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_eigenvector_v_katz_random(self): + G = nx.gnp_random_graph(10, 0.5, seed=1234) + l = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense())) + e = nx.eigenvector_centrality_numpy(G) + k = nx.katz_centrality_numpy(G, 1.0 / l) + for n in G: + assert e[n] == pytest.approx(k[n], abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..21aa28b0b7c155078ab9c1a25e14d9aafa65683d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_laplacian_centrality.py @@ -0,0 +1,221 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + + +def test_laplacian_centrality_null_graph(): + G = nx.Graph() + with pytest.raises(nx.NetworkXPointlessConcept): + d = nx.laplacian_centrality(G, normalized=False) + + +def test_laplacian_centrality_single_node(): + """See gh-6571""" + G = nx.empty_graph(1) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0} + with pytest.raises(ZeroDivisionError): + nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_unconnected_nodes(): + """laplacian_centrality on a unconnected node graph should return 0 + + For graphs without edges, the Laplacian energy is 0 and is unchanged with + node removal, so:: + + LC(v) = LE(G) - LE(G - v) = 0 - 0 = 0 + """ + G = nx.empty_graph(3) + assert nx.laplacian_centrality(G, normalized=False) == {0: 0, 1: 0, 2: 0} + + +def test_laplacian_centrality_empty_graph(): + G = nx.empty_graph(3) + with pytest.raises(ZeroDivisionError): + d = nx.laplacian_centrality(G, normalized=True) + + +def test_laplacian_centrality_E(): + E = nx.Graph() + E.add_weighted_edges_from( + [(0, 1, 4), (4, 5, 1), (0, 2, 2), (2, 1, 1), (1, 3, 2), (1, 4, 
2)] + ) + d = nx.laplacian_centrality(E) + exact = { + 0: 0.700000, + 1: 0.900000, + 2: 0.280000, + 3: 0.220000, + 4: 0.260000, + 5: 0.040000, + } + + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 200 + dnn = nx.laplacian_centrality(E, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-7) + + # Check unweighted not-normalized version + duw_nn = nx.laplacian_centrality(E, normalized=False, weight=None) + print(duw_nn) + exact_uw_nn = { + 0: 18, + 1: 34, + 2: 18, + 3: 10, + 4: 16, + 5: 6, + } + for n, dc in duw_nn.items(): + assert exact_uw_nn[n] == pytest.approx(dc, abs=1e-7) + + # Check unweighted version + duw = nx.laplacian_centrality(E, weight=None) + full_energy = 42 + for n, dc in duw.items(): + assert exact_uw_nn[n] / full_energy == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_KC(): + KC = nx.karate_club_graph() + d = nx.laplacian_centrality(KC) + exact = { + 0: 0.2543593, + 1: 0.1724524, + 2: 0.2166053, + 3: 0.0964646, + 4: 0.0350344, + 5: 0.0571109, + 6: 0.0540713, + 7: 0.0788674, + 8: 0.1222204, + 9: 0.0217565, + 10: 0.0308751, + 11: 0.0215965, + 12: 0.0174372, + 13: 0.118861, + 14: 0.0366341, + 15: 0.0548712, + 16: 0.0172772, + 17: 0.0191969, + 18: 0.0225564, + 19: 0.0331147, + 20: 0.0279955, + 21: 0.0246361, + 22: 0.0382339, + 23: 0.1294193, + 24: 0.0227164, + 25: 0.0644697, + 26: 0.0281555, + 27: 0.075188, + 28: 0.0364742, + 29: 0.0707087, + 30: 0.0708687, + 31: 0.131019, + 32: 0.2370821, + 33: 0.3066709, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 12502 + dnn = nx.laplacian_centrality(KC, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_K(): + K = nx.krackhardt_kite_graph() + d = nx.laplacian_centrality(K) + exact = { + 0: 0.3010753, + 1: 0.3010753, + 2: 0.2258065, + 3: 0.483871, + 4: 0.2258065, + 5: 0.3870968, + 6: 0.3870968, + 7: 0.1935484, + 8: 0.0752688, + 9: 0.0322581, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 186 + dnn = nx.laplacian_centrality(K, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-3) + + +def test_laplacian_centrality_P3(): + P3 = nx.path_graph(3) + d = nx.laplacian_centrality(P3) + exact = {0: 0.6, 1: 1.0, 2: 0.6} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_K5(): + K5 = nx.complete_graph(5) + d = nx.laplacian_centrality(K5) + exact = {0: 0.52, 1: 0.52, 2: 0.52, 3: 0.52, 4: 0.52} + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_FF(): + FF = nx.florentine_families_graph() + d = nx.laplacian_centrality(FF) + exact = { + "Acciaiuoli": 0.0804598, + "Medici": 0.4022989, + "Castellani": 0.1724138, + "Peruzzi": 0.183908, + "Strozzi": 0.2528736, + "Barbadori": 0.137931, + "Ridolfi": 0.2183908, + "Tornabuoni": 0.2183908, + "Albizzi": 0.1954023, + "Salviati": 0.1149425, + "Pazzi": 0.0344828, + "Bischeri": 0.1954023, + "Guadagni": 0.2298851, + "Ginori": 0.045977, + "Lamberteschi": 0.0574713, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + +def test_laplacian_centrality_DG(): + DG = nx.DiGraph([(0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 6), (5, 7), (5, 8)]) + d = 
nx.laplacian_centrality(DG) + exact = { + 0: 0.2123352, + 5: 0.515391, + 1: 0.2123352, + 2: 0.2123352, + 3: 0.2123352, + 4: 0.2123352, + 6: 0.2952031, + 7: 0.2952031, + 8: 0.2952031, + } + for n, dc in d.items(): + assert exact[n] == pytest.approx(dc, abs=1e-7) + + # Check not normalized + full_energy = 9.50704 + dnn = nx.laplacian_centrality(DG, normalized=False) + for n, dc in dnn.items(): + assert exact[n] * full_energy == pytest.approx(dc, abs=1e-4) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..bf096039cd76542cc4c963ab896ee8fc4b295224 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_load_centrality.py @@ -0,0 +1,344 @@ +import pytest + +import networkx as nx + + +class TestLoadCentrality: + @classmethod + def setup_class(cls): + G = nx.Graph() + G.add_edge(0, 1, weight=3) + G.add_edge(0, 2, weight=2) + G.add_edge(0, 3, weight=6) + G.add_edge(0, 4, weight=4) + G.add_edge(1, 3, weight=5) + G.add_edge(1, 5, weight=5) + G.add_edge(2, 4, weight=1) + G.add_edge(3, 4, weight=2) + G.add_edge(3, 5, weight=1) + G.add_edge(4, 5, weight=4) + cls.G = G + cls.exact_weighted = {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} + cls.K = nx.krackhardt_kite_graph() + cls.P3 = nx.path_graph(3) + cls.P4 = nx.path_graph(4) + cls.K5 = nx.complete_graph(5) + cls.P2 = nx.path_graph(2) + + cls.C4 = nx.cycle_graph(4) + cls.T = nx.balanced_tree(r=2, h=2) + cls.Gb = nx.Graph() + cls.Gb.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + cls.F = nx.florentine_families_graph() + cls.LM = nx.les_miserables_graph() + cls.D = nx.cycle_graph(3, create_using=nx.DiGraph()) + cls.D.add_edges_from([(3, 0), (4, 3)]) + + def test_not_strongly_connected(self): + b = nx.load_centrality(self.D) + result = {0: 5.0 / 12, 1: 1.0 / 4, 2: 1.0 / 12, 3: 1.0 / 4, 4: 0.000} + for n in sorted(self.D): + assert result[n] == pytest.approx(b[n], abs=1e-3) + assert result[n] == pytest.approx(nx.load_centrality(self.D, n), abs=1e-3) + + def test_P2_normalized_load(self): + G = self.P2 + c = nx.load_centrality(G, normalized=True) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_weighted_load(self): + b = nx.load_centrality(self.G, weight="weight", normalized=False) + for n in sorted(self.G): + assert b[n] == self.exact_weighted[n] + + def test_k5_load(self): + G = self.K5 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p3_load(self): + G = self.P3 + c = nx.load_centrality(G) + d = {0: 0.000, 1: 1.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + c = nx.load_centrality(G, v=1) + assert c == pytest.approx(1.0, abs=1e-7) + c = nx.load_centrality(G, v=1, normalized=True) + assert c == pytest.approx(1.0, abs=1e-7) + + def test_p2_load(self): + G = nx.path_graph(2) + c = nx.load_centrality(G) + d = {0: 0.000, 1: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G) + d = { + 0: 0.023, + 1: 0.023, + 2: 0.000, + 3: 0.102, + 4: 0.000, + 5: 0.231, + 6: 0.231, + 7: 0.389, + 8: 0.222, + 9: 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def 
test_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G) + d = { + "Acciaiuoli": 0.000, + "Albizzi": 0.211, + "Barbadori": 0.093, + "Bischeri": 0.104, + "Castellani": 0.055, + "Ginori": 0.000, + "Guadagni": 0.251, + "Lamberteschi": 0.000, + "Medici": 0.522, + "Pazzi": 0.000, + "Peruzzi": 0.022, + "Ridolfi": 0.117, + "Salviati": 0.143, + "Strozzi": 0.106, + "Tornabuoni": 0.090, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_les_miserables_load(self): + G = self.LM + c = nx.load_centrality(G) + d = { + "Napoleon": 0.000, + "Myriel": 0.177, + "MlleBaptistine": 0.000, + "MmeMagloire": 0.000, + "CountessDeLo": 0.000, + "Geborand": 0.000, + "Champtercier": 0.000, + "Cravatte": 0.000, + "Count": 0.000, + "OldMan": 0.000, + "Valjean": 0.567, + "Labarre": 0.000, + "Marguerite": 0.000, + "MmeDeR": 0.000, + "Isabeau": 0.000, + "Gervais": 0.000, + "Listolier": 0.000, + "Tholomyes": 0.043, + "Fameuil": 0.000, + "Blacheville": 0.000, + "Favourite": 0.000, + "Dahlia": 0.000, + "Zephine": 0.000, + "Fantine": 0.128, + "MmeThenardier": 0.029, + "Thenardier": 0.075, + "Cosette": 0.024, + "Javert": 0.054, + "Fauchelevent": 0.026, + "Bamatabois": 0.008, + "Perpetue": 0.000, + "Simplice": 0.009, + "Scaufflaire": 0.000, + "Woman1": 0.000, + "Judge": 0.000, + "Champmathieu": 0.000, + "Brevet": 0.000, + "Chenildieu": 0.000, + "Cochepaille": 0.000, + "Pontmercy": 0.007, + "Boulatruelle": 0.000, + "Eponine": 0.012, + "Anzelma": 0.000, + "Woman2": 0.000, + "MotherInnocent": 0.000, + "Gribier": 0.000, + "MmeBurgon": 0.026, + "Jondrette": 0.000, + "Gavroche": 0.164, + "Gillenormand": 0.021, + "Magnon": 0.000, + "MlleGillenormand": 0.047, + "MmePontmercy": 0.000, + "MlleVaubois": 0.000, + "LtGillenormand": 0.000, + "Marius": 0.133, + "BaronessT": 0.000, + "Mabeuf": 0.028, + "Enjolras": 0.041, + "Combeferre": 0.001, + "Prouvaire": 0.000, + "Feuilly": 0.001, + "Courfeyrac": 0.006, + "Bahorel": 0.002, + "Bossuet": 0.032, + "Joly": 0.002, + "Grantaire": 0.000, + "MotherPlutarch": 0.000, + "Gueulemer": 0.005, + "Babet": 0.005, + "Claquesous": 0.005, + "Montparnasse": 0.004, + "Toussaint": 0.000, + "Child1": 0.000, + "Child2": 0.000, + "Brujon": 0.000, + "MmeHucheloup": 0.000, + } + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_k5_load(self): + G = self.K5 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 0.000, 2: 0.000, 3: 0.000, 4: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_p3_load(self): + G = self.P3 + c = nx.load_centrality(G, normalized=False) + d = {0: 0.000, 1: 2.000, 2: 0.000} + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_krackhardt_load(self): + G = self.K + c = nx.load_centrality(G, normalized=False) + d = { + 0: 1.667, + 1: 1.667, + 2: 0.000, + 3: 7.333, + 4: 0.000, + 5: 16.667, + 6: 16.667, + 7: 28.000, + 8: 16.000, + 9: 0.000, + } + + for n in sorted(G): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_unnormalized_florentine_families_load(self): + G = self.F + c = nx.load_centrality(G, normalized=False) + + d = { + "Acciaiuoli": 0.000, + "Albizzi": 38.333, + "Barbadori": 17.000, + "Bischeri": 19.000, + "Castellani": 10.000, + "Ginori": 0.000, + "Guadagni": 45.667, + "Lamberteschi": 0.000, + "Medici": 95.000, + "Pazzi": 0.000, + "Peruzzi": 4.000, + "Ridolfi": 21.333, + "Salviati": 26.000, + "Strozzi": 19.333, + "Tornabuoni": 16.333, + } + for n in sorted(G): + assert c[n] 
== pytest.approx(d[n], abs=1e-3) + + def test_load_betweenness_difference(self): + # Difference Between Load and Betweenness + # --------------------------------------- The smallest graph + # that shows the difference between load and betweenness is + # G=ladder_graph(3) (Graph B below) + + # Graph A and B are from Tao Zhou, Jian-Guo Liu, Bing-Hong + # Wang: Comment on "Scientific collaboration + # networks. II. Shortest paths, weighted networks, and + # centrality". https://arxiv.org/pdf/physics/0511084 + + # Notice that unlike here, their calculation adds to 1 to the + # betweenness of every node i for every path from i to every + # other node. This is exactly what it should be, based on + # Eqn. (1) in their paper: the eqn is B(v) = \sum_{s\neq t, + # s\neq v}{\frac{\sigma_{st}(v)}{\sigma_{st}}}, therefore, + # they allow v to be the target node. + + # We follow Brandes 2001, who follows Freeman 1977 that make + # the sum for betweenness of v exclude paths where v is either + # the source or target node. To agree with their numbers, we + # must additionally, remove edge (4,8) from the graph, see AC + # example following (there is a mistake in the figure in their + # paper - personal communication). + + # A = nx.Graph() + # A.add_edges_from([(0,1), (1,2), (1,3), (2,4), + # (3,5), (4,6), (4,7), (4,8), + # (5,8), (6,9), (7,9), (8,9)]) + B = nx.Graph() # ladder_graph(3) + B.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (4, 5), (3, 5)]) + c = nx.load_centrality(B, normalized=False) + d = {0: 1.750, 1: 1.750, 2: 6.500, 3: 6.500, 4: 1.750, 5: 1.750} + for n in sorted(B): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_c4_edge_load(self): + G = self.C4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (0, 3): 6.000, (1, 2): 6.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_p4_edge_load(self): + G = self.P4 + c = nx.edge_load_centrality(G) + d = {(0, 1): 6.000, (1, 2): 8.000, (2, 3): 6.000} + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_k5_edge_load(self): + G = self.K5 + c = nx.edge_load_centrality(G) + d = { + (0, 1): 5.000, + (0, 2): 5.000, + (0, 3): 5.000, + (0, 4): 5.000, + (1, 2): 5.000, + (1, 3): 5.000, + (1, 4): 5.000, + (2, 3): 5.000, + (2, 4): 5.000, + (3, 4): 5.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) + + def test_tree_edge_load(self): + G = self.T + c = nx.edge_load_centrality(G) + d = { + (0, 1): 24.000, + (0, 2): 24.000, + (1, 3): 12.000, + (1, 4): 12.000, + (2, 5): 12.000, + (2, 6): 12.000, + } + for n in G.edges(): + assert c[n] == pytest.approx(d[n], abs=1e-3) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb8f52965c975013d41be7c3de874cd86ee693a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_percolation_centrality.py @@ -0,0 +1,87 @@ +import pytest + +import networkx as nx + + +def example1a_G(): + G = nx.Graph() + G.add_node(1, percolation=0.1) + G.add_node(2, percolation=0.2) + G.add_node(3, percolation=0.2) + G.add_node(4, percolation=0.2) + G.add_node(5, percolation=0.3) + G.add_node(6, percolation=0.2) + G.add_node(7, percolation=0.5) + G.add_node(8, percolation=0.5) + G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)]) + return G + + +def 
example1b_G():
+    G = nx.Graph()
+    G.add_node(1, percolation=0.3)
+    G.add_node(2, percolation=0.5)
+    G.add_node(3, percolation=0.5)
+    G.add_node(4, percolation=0.2)
+    G.add_node(5, percolation=0.3)
+    G.add_node(6, percolation=0.2)
+    G.add_node(7, percolation=0.1)
+    G.add_node(8, percolation=0.1)
+    G.add_edges_from([(1, 4), (2, 4), (3, 4), (4, 5), (5, 6), (6, 7), (6, 8)])
+    return G
+
+
+def test_percolation_example1a():
+    """percolation centrality: example 1a"""
+    G = example1a_G()
+    p = nx.percolation_centrality(G)
+    p_answer = {4: 0.625, 6: 0.667}
+    for n, k in p_answer.items():
+        assert p[n] == pytest.approx(k, abs=1e-3)
+
+
+def test_percolation_example1b():
+    """percolation centrality: example 1b"""
+    G = example1b_G()
+    p = nx.percolation_centrality(G)
+    p_answer = {4: 0.825, 6: 0.4}
+    for n, k in p_answer.items():
+        assert p[n] == pytest.approx(k, abs=1e-3)
+
+
+def test_converge_to_betweenness():
+    """percolation centrality: should converge to betweenness
+    centrality when all nodes are percolated the same"""
+    # taken from betweenness test test_florentine_families_graph
+    G = nx.florentine_families_graph()
+    b_answer = {
+        "Acciaiuoli": 0.000,
+        "Albizzi": 0.212,
+        "Barbadori": 0.093,
+        "Bischeri": 0.104,
+        "Castellani": 0.055,
+        "Ginori": 0.000,
+        "Guadagni": 0.255,
+        "Lamberteschi": 0.000,
+        "Medici": 0.522,
+        "Pazzi": 0.000,
+        "Peruzzi": 0.022,
+        "Ridolfi": 0.114,
+        "Salviati": 0.143,
+        "Strozzi": 0.103,
+        "Tornabuoni": 0.092,
+    }
+
+    # If no initial state is provided, state for
+    # every node defaults to 1
+    p_answer = nx.percolation_centrality(G)
+    assert p_answer == pytest.approx(b_answer, abs=1e-3)
+
+    p_states = {k: 0.3 for k, v in b_answer.items()}
+    p_answer = nx.percolation_centrality(G, states=p_states)
+    assert p_answer == pytest.approx(b_answer, abs=1e-3)
+
+
+def test_default_percolation():
+    G = nx.erdos_renyi_graph(42, 0.42, seed=42)
+    assert nx.percolation_centrality(G) == pytest.approx(nx.betweenness_centrality(G))
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
new file mode 100644
index 0000000000000000000000000000000000000000..02ad8322cb6cf2a550afec0980ead8537abd55d5
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_reaching.py
@@ -0,0 +1,117 @@
+"""Unit tests for the :mod:`networkx.algorithms.centrality.reaching` module."""
+import pytest
+
+import networkx as nx
+
+
+class TestGlobalReachingCentrality:
+    """Unit tests for the global reaching centrality function."""
+
+    def test_non_positive_weights(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.DiGraph()
+            nx.global_reaching_centrality(G, weight="weight")
+
+    def test_negatively_weighted(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)])
+            nx.global_reaching_centrality(G, weight="weight")
+
+    def test_directed_star(self):
+        G = nx.DiGraph()
+        G.add_weighted_edges_from([(1, 2, 0.5), (1, 3, 0.5)])
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight="weight") == 0.5
+        assert grc(G) == 1
+
+    def test_undirected_unweighted_star(self):
+        G = nx.star_graph(2)
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight=None) == 0.25
+
+    def test_undirected_weighted_star(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)])
+        grc = nx.global_reaching_centrality
+        assert grc(G, normalized=False, weight="weight") == 0.375
+
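+    # Added commentary (not in the upstream networkx file): local reaching
+    # centrality averages a node's best-path weights to all other nodes, and
+    # the global value averages each node's gap to the maximum. In
+    # test_directed_star above, node 1 reaches nodes 2 and 3 in one hop of
+    # weight 0.5 each, so C_R(1) = (0.5 + 0.5) / 2 = 0.5 while
+    # C_R(2) = C_R(3) = 0; the unnormalized global score is then
+    # ((0.5 - 0) + (0.5 - 0)) / 2 = 0.5, matching the first assert there.
+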
+ def test_cycle_directed_unweighted(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_undirected_unweighted(self): + G = nx.Graph() + G.add_edge(1, 2) + assert nx.global_reaching_centrality(G, weight=None) == 0 + + def test_cycle_directed_weighted(self): + G = nx.DiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 1, 1)]) + assert nx.global_reaching_centrality(G) == 0 + + def test_cycle_undirected_weighted(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1) + grc = nx.global_reaching_centrality + assert grc(G, normalized=False) == 0 + + def test_directed_weighted(self): + G = nx.DiGraph() + G.add_edge("A", "B", weight=5) + G.add_edge("B", "C", weight=1) + G.add_edge("B", "D", weight=0.25) + G.add_edge("D", "E", weight=1) + + denom = len(G) - 1 + A_local = sum([5, 3, 2.625, 2.0833333333333]) / denom + B_local = sum([1, 0.25, 0.625]) / denom + C_local = 0 + D_local = sum([1]) / denom + E_local = 0 + + local_reach_ctrs = [A_local, C_local, B_local, D_local, E_local] + max_local = max(local_reach_ctrs) + expected = sum(max_local - lrc for lrc in local_reach_ctrs) / denom + grc = nx.global_reaching_centrality + actual = grc(G, normalized=False, weight="weight") + assert expected == pytest.approx(actual, abs=1e-7) + + +class TestLocalReachingCentrality: + """Unit tests for the local reaching centrality function.""" + + def test_non_positive_weights(self): + with pytest.raises(nx.NetworkXError): + G = nx.DiGraph() + G.add_weighted_edges_from([(0, 1, 0)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_negatively_weighted(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, -2), (1, 2, +1)]) + nx.local_reaching_centrality(G, 0, weight="weight") + + def test_undirected_unweighted_star(self): + G = nx.star_graph(2) + grc = nx.local_reaching_centrality + assert grc(G, 1, weight=None, normalized=False) == 0.75 + + def test_undirected_weighted_star(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=False, weight="weight" + ) + assert centrality == 1.5 + + def test_undirected_weighted_normalized(self): + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 1), (1, 3, 2)]) + centrality = nx.local_reaching_centrality( + G, 1, normalized=True, weight="weight" + ) + assert centrality == 1.0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3047866079fd9fe4cf43a6793cf160a0c0cdce --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_second_order_centrality.py @@ -0,0 +1,82 @@ +""" +Tests for second order centrality. 
+""" + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_empty(): + with pytest.raises(nx.NetworkXException): + G = nx.empty_graph() + nx.second_order_centrality(G) + + +def test_non_connected(): + with pytest.raises(nx.NetworkXException): + G = nx.Graph() + G.add_node(0) + G.add_node(1) + nx.second_order_centrality(G) + + +def test_non_negative_edge_weights(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(2) + G.add_edge(0, 1, weight=-1) + nx.second_order_centrality(G) + + +def test_weight_attribute(): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 3.5)], weight="w") + expected = {0: 3.431, 1: 3.082, 2: 5.612} + b = nx.second_order_centrality(G, weight="w") + + for n in sorted(G): + assert b[n] == pytest.approx(expected[n], abs=1e-2) + + +def test_one_node_graph(): + """Second order centrality: single node""" + G = nx.Graph() + G.add_node(0) + G.add_edge(0, 0) + assert nx.second_order_centrality(G)[0] == 0 + + +def test_P3(): + """Second order centrality: line graph, as defined in paper""" + G = nx.path_graph(3) + b_answer = {0: 3.741, 1: 1.414, 2: 3.741} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_K3(): + """Second order centrality: complete graph, as defined in paper""" + G = nx.complete_graph(3) + b_answer = {0: 1.414, 1: 1.414, 2: 1.414} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) + + +def test_ring_graph(): + """Second order centrality: ring graph, as defined in paper""" + G = nx.cycle_graph(5) + b_answer = {0: 4.472, 1: 4.472, 2: 4.472, 3: 4.472, 4: 4.472} + + b = nx.second_order_centrality(G) + + for n in sorted(G): + assert b[n] == pytest.approx(b_answer[n], abs=1e-2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py new file mode 100644 index 0000000000000000000000000000000000000000..710927515baa4786e4be15ddf25ad34e423563d2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_subgraph.py @@ -0,0 +1,110 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.centrality.subgraph_alg import ( + communicability_betweenness_centrality, + estrada_index, + subgraph_centrality, + subgraph_centrality_exp, +) + + +class TestSubgraph: + def test_subgraph_centrality(self): + answer = {0: 1.5430806348152433, 1: 1.5430806348152433} + result = subgraph_centrality(nx.path_graph(2)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 1.6445956054135658, + "Albert": 2.4368257358712189, + "Aric": 2.4368257358712193, + "Dan": 3.1306328496328168, + "Franck": 2.3876142275231915, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = subgraph_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + result1 = subgraph_centrality_exp(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_subgraph_centrality_big_graph(self): + g199 = nx.complete_graph(199) + g200 = nx.complete_graph(200) + + comm199 = nx.subgraph_centrality(g199) + comm199_exp = nx.subgraph_centrality_exp(g199) + + 
comm200 = nx.subgraph_centrality(g200) + comm200_exp = nx.subgraph_centrality_exp(g200) + + def test_communicability_betweenness_centrality_small(self): + result = communicability_betweenness_centrality(nx.path_graph(2)) + assert result == {0: 0, 1: 0} + + result = communicability_betweenness_centrality(nx.path_graph(1)) + assert result == {0: 0} + + result = communicability_betweenness_centrality(nx.path_graph(0)) + assert result == {} + + answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313} + result = communicability_betweenness_centrality(nx.path_graph(3)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + result = communicability_betweenness_centrality(nx.complete_graph(3)) + for k, v in result.items(): + assert 0.49786143366223296 == pytest.approx(v, abs=1e-7) + + def test_communicability_betweenness_centrality(self): + answer = { + 0: 0.07017447951484615, + 1: 0.71565598701107991, + 2: 0.71565598701107991, + 3: 0.07017447951484615, + } + result = communicability_betweenness_centrality(nx.path_graph(4)) + for k, v in result.items(): + assert answer[k] == pytest.approx(v, abs=1e-7) + + answer1 = { + "1": 0.060039074193949521, + "Albert": 0.315470761661372, + "Aric": 0.31547076166137211, + "Dan": 0.68297778678316201, + "Franck": 0.21977926617449497, + } + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + result1 = communicability_betweenness_centrality(G1) + for k, v in result1.items(): + assert answer1[k] == pytest.approx(v, abs=1e-7) + + def test_estrada_index(self): + answer = 1041.2470334195475 + result = estrada_index(nx.karate_club_graph()) + assert answer == pytest.approx(result, abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d6813160eed6da3cd1fd0b254b7352bd1bd4ad --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_trophic.py @@ -0,0 +1,302 @@ +"""Test trophic levels, trophic differences and trophic coherence +""" +import pytest + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx + + +def test_trophic_levels(): + """Trivial example""" + G = nx.DiGraph() + G.add_edge("a", "b") + G.add_edge("b", "c") + + d = nx.trophic_levels(G) + assert d == {"a": 1, "b": 2, "c": 3} + + +def test_trophic_levels_levine(): + """Example from Figure 5 in Stephen Levine (1980) J. theor. Biol. 
83, + 195-207 + """ + S = nx.DiGraph() + S.add_edge(1, 2, weight=1.0) + S.add_edge(1, 3, weight=0.2) + S.add_edge(1, 4, weight=0.8) + S.add_edge(2, 3, weight=0.2) + S.add_edge(2, 5, weight=0.3) + S.add_edge(4, 3, weight=0.6) + S.add_edge(4, 5, weight=0.7) + S.add_edge(5, 4, weight=0.2) + + # save copy for later, test intermediate implementation details first + S2 = S.copy() + + # drop nodes of in-degree zero + z = [nid for nid, d in S.in_degree if d == 0] + for nid in z: + S.remove_node(nid) + + # find adjacency matrix + q = nx.linalg.graphmatrix.adjacency_matrix(S).T + + # fmt: off + expected_q = np.array([ + [0, 0, 0., 0], + [0.2, 0, 0.6, 0], + [0, 0, 0, 0.2], + [0.3, 0, 0.7, 0] + ]) + # fmt: on + assert np.array_equal(q.todense(), expected_q) + + # must be square, size of number of nodes + assert len(q.shape) == 2 + assert q.shape[0] == q.shape[1] + assert q.shape[0] == len(S) + + nn = q.shape[0] + + i = np.eye(nn) + n = np.linalg.inv(i - q) + y = np.asarray(n) @ np.ones(nn) + + expected_y = np.array([1, 2.07906977, 1.46511628, 2.3255814]) + assert np.allclose(y, expected_y) + + expected_d = {1: 1, 2: 2, 3: 3.07906977, 4: 2.46511628, 5: 3.3255814} + + d = nx.trophic_levels(S2) + + for nid, level in d.items(): + expected_level = expected_d[nid] + assert expected_level == pytest.approx(level, abs=1e-7) + + +def test_trophic_levels_simple(): + matrix_a = np.array([[0, 0], [1, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + assert d[0] == pytest.approx(2, abs=1e-7) + assert d[1] == pytest.approx(1, abs=1e-7) + + +def test_trophic_levels_more_complex(): + # fmt: off + matrix = np.array([ + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + expected_result = [1, 2, 3, 4] + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + # fmt: off + matrix = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + d = nx.trophic_levels(G) + + expected_result = [1, 2, 2.5, 3.25] + print("Calculated result: ", d) + print("Expected Result: ", expected_result) + + for ind in range(4): + assert d[ind] == pytest.approx(expected_result[ind], abs=1e-7) + + +def test_trophic_levels_even_more_complex(): + # fmt: off + # Another, bigger matrix + matrix = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # Generated this linear system using pen and paper: + K = np.array([ + [1, 0, -1, 0, 0], + [0, 0.5, 0, -0.5, 0], + [0, 0, 1, 0, 0], + [0, -0.5, 0, 1, -0.5], + [0, 0, 0, 0, 1], + ]) + # fmt: on + result_1 = np.ravel(np.linalg.inv(K) @ np.ones(5)) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + result_2 = nx.trophic_levels(G) + + for ind in range(5): + assert result_1[ind] == pytest.approx(result_2[ind], abs=1e-7) + + +def test_trophic_levels_singular_matrix(): + """Should raise an error with graphs with only non-basal nodes""" + matrix = np.identity(4) + G = nx.from_numpy_array(matrix, create_using=nx.DiGraph) + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." 
+ ) + assert msg in str(e.value) + + +def test_trophic_levels_singular_with_basal(): + """Should fail to compute if there are any parts of the graph which are not + reachable from any basal node (with in-degree zero). + """ + G = nx.DiGraph() + # a has in-degree zero + G.add_edge("a", "b") + + # b is one level above a, c and d + G.add_edge("c", "b") + G.add_edge("d", "b") + + # c and d form a loop, neither are reachable from a + G.add_edge("c", "d") + G.add_edge("d", "c") + + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + # if self-loops are allowed, smaller example: + G = nx.DiGraph() + G.add_edge("a", "b") # a has in-degree zero + G.add_edge("c", "b") # b is one level above a and c + G.add_edge("c", "c") # c has a self-loop + with pytest.raises(nx.NetworkXError) as e: + nx.trophic_levels(G) + msg = ( + "Trophic levels are only defined for graphs where every node " + + "has a path from a basal node (basal nodes are nodes with no " + + "incoming edges)." + ) + assert msg in str(e.value) + + +def test_trophic_differences(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + diffs = nx.trophic_differences(G) + + assert diffs[(0, 1)] == pytest.approx(1, abs=1e-7) + assert diffs[(0, 2)] == pytest.approx(1.5, abs=1e-7) + assert diffs[(1, 2)] == pytest.approx(0.5, abs=1e-7) + assert diffs[(1, 3)] == pytest.approx(1.25, abs=1e-7) + assert diffs[(2, 3)] == pytest.approx(0.75, abs=1e-7) + + +def test_trophic_incoherence_parameter_no_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 1, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 1] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + # no self-loops case + # fmt: off + matrix_d = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_d, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=False) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) + + +def test_trophic_incoherence_parameter_cannibalism(): + matrix_a = np.array([[0, 1], [0, 0]]) + G = nx.from_numpy_array(matrix_a, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(0, abs=1e-7) + + # fmt: off + matrix_b = np.array([ + [0, 0, 0, 0, 0], + [0, 1, 0, 1, 0], + 
[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 0, 1, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_b, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + assert q == pytest.approx(2, abs=1e-7) + + # fmt: off + matrix_c = np.array([ + [0, 1, 1, 0], + [0, 0, 1, 1], + [0, 0, 0, 1], + [0, 0, 0, 0] + ]) + # fmt: on + G = nx.from_numpy_array(matrix_c, create_using=nx.DiGraph) + q = nx.trophic_incoherence_parameter(G, cannibalism=True) + # Ignore the -link + assert q == pytest.approx(np.std([1, 1.5, 0.5, 0.75, 1.25]), abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py new file mode 100644 index 0000000000000000000000000000000000000000..12126818b4387a439493e8f66ba2e06e1a092416 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/tests/test_voterank.py @@ -0,0 +1,65 @@ +""" + Unit tests for VoteRank. +""" + + +import networkx as nx + + +class TestVoteRankCentrality: + # Example Graph present in reference paper + def test_voterank_centrality_1(self): + G = nx.Graph() + G.add_edges_from( + [ + (7, 8), + (7, 5), + (7, 9), + (5, 0), + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (1, 6), + (2, 6), + (3, 6), + (4, 6), + ] + ) + assert [0, 7, 6] == nx.voterank(G) + + def test_voterank_emptygraph(self): + G = nx.Graph() + assert [] == nx.voterank(G) + + # Graph unit test + def test_voterank_centrality_2(self): + G = nx.florentine_families_graph() + d = nx.voterank(G, 4) + exact = ["Medici", "Strozzi", "Guadagni", "Castellani"] + assert exact == d + + # DiGraph unit test + def test_voterank_centrality_3(self): + G = nx.gnc_graph(10, seed=7) + d = nx.voterank(G, 4) + exact = [3, 6, 8] + assert exact == d + + # MultiGraph unit test + def test_voterank_centrality_4(self): + G = nx.MultiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 1, 5, 4] + assert exact == nx.voterank(G) + + # MultiDiGraph unit test + def test_voterank_centrality_5(self): + G = nx.MultiDiGraph() + G.add_edges_from( + [(0, 1), (0, 1), (1, 2), (2, 5), (2, 5), (5, 6), (5, 6), (2, 4), (4, 3)] + ) + exact = [2, 0, 5, 4] + assert exact == nx.voterank(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/trophic.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/trophic.py new file mode 100644 index 0000000000000000000000000000000000000000..cfc7ea4f20677696f7b6a68348c51a913f5de91e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/trophic.py @@ -0,0 +1,162 @@ +"""Trophic levels""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["trophic_levels", "trophic_differences", "trophic_incoherence_parameter"] + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs="weight") +def trophic_levels(G, weight="weight"): + r"""Compute the trophic levels of nodes. + + The trophic level of a node $i$ is + + .. math:: + + s_i = 1 + \frac{1}{k^{in}_i} \sum_{j} a_{ij} s_j + + where $k^{in}_i$ is the in-degree of i + + .. math:: + + k^{in}_i = \sum_{j} a_{ij} + + and nodes with $k^{in}_i = 0$ have $s_i = 1$ by convention. + + These are calculated using the method outlined in Levine [1]_. + + Parameters + ---------- + G : DiGraph + A directed networkx graph + + Returns + ------- + nodes : dict + Dictionary of nodes with trophic level as the value. + + References + ---------- + .. [1] Stephen Levine (1980) J. theor. Biol. 
83, 195-207
+    """
+    import numpy as np
+
+    # find adjacency matrix
+    a = nx.adjacency_matrix(G, weight=weight).T.toarray()
+
+    # drop rows/columns where in-degree is zero
+    rowsum = np.sum(a, axis=1)
+    p = a[rowsum != 0][:, rowsum != 0]
+    # normalise so sum of in-degree weights is 1 along each row
+    p = p / rowsum[rowsum != 0][:, np.newaxis]
+
+    # calculate trophic levels
+    nn = p.shape[0]
+    i = np.eye(nn)
+    try:
+        n = np.linalg.inv(i - p)
+    except np.linalg.LinAlgError as err:
+        # LinAlgError is raised when there is a non-basal node
+        msg = (
+            "Trophic levels are only defined for graphs where every "
+            + "node has a path from a basal node (basal nodes are nodes "
+            + "with no incoming edges)."
+        )
+        raise nx.NetworkXError(msg) from err
+    y = n.sum(axis=1) + 1
+
+    levels = {}
+
+    # all nodes with in-degree zero have trophic level == 1
+    zero_node_ids = (node_id for node_id, degree in G.in_degree if degree == 0)
+    for node_id in zero_node_ids:
+        levels[node_id] = 1
+
+    # all other nodes have levels as calculated
+    nonzero_node_ids = (node_id for node_id, degree in G.in_degree if degree != 0)
+    for i, node_id in enumerate(nonzero_node_ids):
+        levels[node_id] = y[i]
+
+    return levels
+
+
+@not_implemented_for("undirected")
+@nx._dispatch(edge_attrs="weight")
+def trophic_differences(G, weight="weight"):
+    r"""Compute the trophic differences of the edges of a directed graph.
+
+    The trophic difference $x_{ij}$ for each edge is defined in Johnson
+    et al. [1]_ as:
+
+    .. math::
+
+        x_{ij} = s_j - s_i
+
+    where $s_i$ is the trophic level of node $i$.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    weight : string, optional (default="weight")
+        The name of the edge attribute holding the edge weight.
+
+    Returns
+    -------
+    diffs : dict
+        Dictionary of edges with trophic differences as the value.
+
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+        Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    levels = trophic_levels(G, weight=weight)
+    diffs = {}
+    for u, v in G.edges:
+        diffs[(u, v)] = levels[v] - levels[u]
+    return diffs
+
+
+@not_implemented_for("undirected")
+@nx._dispatch(edge_attrs="weight")
+def trophic_incoherence_parameter(G, weight="weight", cannibalism=False):
+    r"""Compute the trophic incoherence parameter of a graph.
+
+    Trophic coherence is defined as the homogeneity of the distribution of
+    trophic distances: the more similar they are, the more coherent the
+    graph. This is measured by the standard deviation of the trophic
+    differences and is referred to as the trophic incoherence parameter
+    $q$ by [1]_.
+
+    Parameters
+    ----------
+    G : DiGraph
+        A directed networkx graph
+
+    weight : string, optional (default="weight")
+        The name of the edge attribute holding the edge weight.
+
+    cannibalism : bool, optional (default=False)
+        If set to False, self edges are not considered in the calculation
+
+    Returns
+    -------
+    trophic_incoherence_parameter : float
+        The trophic coherence of a graph
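+
+    Examples
+    --------
+    A directed path is maximally coherent: every edge spans exactly one
+    trophic level, so the trophic differences have zero spread and $q = 0$
+    (a minimal illustrative example, not taken from the reference paper):
+
+    >>> G = nx.DiGraph([("a", "b"), ("b", "c")])
+    >>> float(nx.trophic_incoherence_parameter(G))
+    0.0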
+
+    References
+    ----------
+    .. [1] Samuel Johnson, Virginia Dominguez-Garcia, Luca Donetti, Miguel A.
+        Munoz (2014) PNAS "Trophic coherence determines food-web stability"
+    """
+    import numpy as np
+
+    if cannibalism:
+        diffs = trophic_differences(G, weight=weight)
+    else:
+        # If no cannibalism, remove self-edges
+        self_loops = list(nx.selfloop_edges(G))
+        if self_loops:
+            # Make a copy so we do not change G's edges in memory
+            G_2 = G.copy()
+            G_2.remove_edges_from(self_loops)
+        else:
+            # Avoid copy otherwise
+            G_2 = G
+        diffs = trophic_differences(G_2, weight=weight)
+    return np.std(list(diffs.values()))
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9cf43c7813b91a53d63f2e15c0ef7a538d42f25
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/centrality/voterank_alg.py
@@ -0,0 +1,94 @@
+"""Algorithm to select influential nodes in a graph using VoteRank."""
+import networkx as nx
+
+__all__ = ["voterank"]
+
+
+@nx._dispatch
+def voterank(G, number_of_nodes=None):
+    """Select a list of influential nodes in a graph using the VoteRank algorithm.
+
+    VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
+    voting scheme. With VoteRank, all nodes vote for each of their
+    in-neighbours, and the node with the highest number of votes is elected
+    iteratively. The voting ability of the out-neighbors of elected nodes
+    is decreased in subsequent turns.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph.
+
+    number_of_nodes : integer, optional
+        Number of ranked nodes to extract (default all nodes).
+
+    Returns
+    -------
+    voterank : list
+        Ordered list of computed seeds.
+        Only nodes with a positive number of votes are returned.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4)])
+    >>> nx.voterank(G)
+    [0, 1]
+
+    The algorithm can be used both for undirected and directed graphs.
+    However, the directed version is different in two ways:
+    (i) nodes only vote for their in-neighbors and
+    (ii) only the voting ability of the elected node and its out-neighbors
+    is updated:
+
+    >>> G = nx.DiGraph([(0, 1), (2, 1), (2, 3), (3, 4)])
+    >>> nx.voterank(G)
+    [2, 3]
+
+    Notes
+    -----
+    Each edge is treated independently in case of multigraphs.
+
+    References
+    ----------
+    .. [1] Zhang, J.-X. et al. (2016).
+        Identifying a set of influential spreaders in complex networks.
+        Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
+    """
+    influential_nodes = []
+    vote_rank = {}
+    if len(G) == 0:
+        return influential_nodes
+    if number_of_nodes is None or number_of_nodes > len(G):
+        number_of_nodes = len(G)
+    if G.is_directed():
+        # For directed graphs compute average out-degree
+        avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
+    else:
+        # For undirected graphs compute average degree
+        avgDegree = sum(deg for _, deg in G.degree()) / len(G)
+    # step 1 - initiate all nodes to (0,1) (score, voting ability)
+    for n in G.nodes():
+        vote_rank[n] = [0, 1]
+    # Repeat steps 1b to 4 until num_seeds are elected.
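+    # Each pass below elects at most one node: votes are recomputed from
+    # scratch, previously elected nodes are excluded from scoring, and the
+    # function returns early once the best remaining candidate has zero votes.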
+    for _ in range(number_of_nodes):
+        # step 1b - reset rank
+        for n in G.nodes():
+            vote_rank[n][0] = 0
+        # step 2 - vote
+        for n, nbr in G.edges():
+            # In directed graphs nodes only vote for their in-neighbors
+            vote_rank[n][0] += vote_rank[nbr][1]
+            if not G.is_directed():
+                vote_rank[nbr][0] += vote_rank[n][1]
+        for n in influential_nodes:
+            vote_rank[n][0] = 0
+        # step 3 - select top node
+        n = max(G.nodes, key=lambda x: vote_rank[x][0])
+        if vote_rank[n][0] == 0:
+            return influential_nodes
+        influential_nodes.append(n)
+        # weaken the selected node
+        vote_rank[n] = [0, 0]
+        # step 4 - update voterank properties
+        for _, nbr in G.edges(n):
+            vote_rank[nbr][1] -= 1 / avgDegree
+            vote_rank[nbr][1] = max(vote_rank[nbr][1], 0)
+    return influential_nodes
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/chains.py b/MLPY/Lib/site-packages/networkx/algorithms/chains.py
new file mode 100644
index 0000000000000000000000000000000000000000..289bc1c3dd4d0ce91f6c6393e91c812b85837f22
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/chains.py
@@ -0,0 +1,172 @@
+"""Functions for finding chains in a graph."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["chain_decomposition"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def chain_decomposition(G, root=None):
+    """Returns the chain decomposition of a graph.
+
+    The *chain decomposition* of a graph with respect to a depth-first
+    search tree is a set of cycles or paths derived from the set of
+    fundamental cycles of the tree in the following manner. Consider
+    each fundamental cycle with respect to the given tree, represented
+    as a list of edges beginning with the nontree edge oriented away
+    from the root of the tree. For each fundamental cycle, if it
+    overlaps with any previous fundamental cycle, just take the initial
+    non-overlapping segment, which is a path instead of a cycle. Each
+    cycle or path is called a *chain*. For more information, see [1]_.
+
+    Parameters
+    ----------
+    G : undirected graph
+
+    root : node (optional)
+        A node in the graph `G`. If specified, only the chain
+        decomposition for the connected component containing this node
+        will be returned. This node indicates the root of the depth-first
+        search tree.
+
+    Yields
+    ------
+    chain : list
+        A list of edges representing a chain. There is no guarantee on
+        the orientation of the edges in each chain (for example, if a
+        chain includes the edge joining nodes 1 and 2, the chain may
+        include either (1, 2) or (2, 1)).
+
+    Raises
+    ------
+    NodeNotFound
+        If `root` is not in the graph `G`.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
+    >>> list(nx.chain_decomposition(G))
+    [[(4, 5), (5, 3), (3, 4)]]
+
+    Notes
+    -----
+    The worst-case running time of this implementation is linear in the
+    number of nodes and number of edges [1]_.
+
+    References
+    ----------
+    .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
+       and 2-edge-connectivity." *Information Processing Letters*,
+       113, 241–244. Elsevier.
+
+    """
+
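+    # Overview of the two phases below: first build a DFS "cycle forest" H
+    # (tree edges oriented toward the root, nontree edges away from it),
+    # then visit the nodes in DFS preorder and peel one chain off per
+    # nontree edge.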
+    def _dfs_cycle_forest(G, root=None):
+        """Builds a directed graph composed of cycles from the given graph.
+
+        `G` is an undirected simple graph. `root` is a node in the graph
+        from which the depth-first search is started.
+
+        This function returns both the depth-first search cycle graph
+        (as a :class:`~networkx.DiGraph`) and the list of nodes in
+        depth-first preorder. The depth-first search cycle graph is a
+        directed graph whose edges are the edges of `G` oriented toward
+        the root if the edge is a tree edge and away from the root if
+        the edge is a non-tree edge. If `root` is not specified, this
+        performs a depth-first search on each connected component of `G`
+        and returns a directed forest instead.
+
+        If `root` is not in the graph, this raises :exc:`KeyError`.
+
+        """
+        # Create a directed graph from the depth-first search tree with
+        # root node `root` in which tree edges are directed toward the
+        # root and nontree edges are directed away from the root. For
+        # each node with an incident nontree edge, this creates a
+        # directed cycle starting with the nontree edge and returning to
+        # that node.
+        #
+        # The `parent` node attribute stores the parent of each node in
+        # the DFS tree. The `nontree` edge attribute indicates whether
+        # the edge is a tree edge or a nontree edge.
+        #
+        # We also store the order of the nodes found in the depth-first
+        # search in the `nodes` list.
+        H = nx.DiGraph()
+        nodes = []
+        for u, v, d in nx.dfs_labeled_edges(G, source=root):
+            if d == "forward":
+                # `dfs_labeled_edges()` yields (root, root, 'forward')
+                # if it is beginning the search on a new connected
+                # component.
+                if u == v:
+                    H.add_node(v, parent=None)
+                    nodes.append(v)
+                else:
+                    H.add_node(v, parent=u)
+                    H.add_edge(v, u, nontree=False)
+                    nodes.append(v)
+            # `dfs_labeled_edges` considers nontree edges in both
+            # orientations, so we need to not add the edge if its
+            # other orientation has already been added.
+            elif d == "nontree" and v not in H[u]:
+                H.add_edge(v, u, nontree=True)
+            else:
+                # Do nothing on 'reverse' edges; we only care about
+                # forward and nontree edges.
+                pass
+        return H, nodes
+
+    def _build_chain(G, u, v, visited):
+        """Generate the chain starting from the given nontree edge.
+
+        `G` is a DFS cycle graph as constructed by
+        :func:`_dfs_cycle_forest`. The edge (`u`, `v`) is a nontree edge
+        that begins a chain. `visited` is a set representing the nodes
+        in `G` that have already been visited.
+
+        This function yields the edges in an initial segment of the
+        fundamental cycle of `G` starting with the nontree edge (`u`,
+        `v`) that includes all the edges up until the first node that
+        appears in `visited`. The tree edges are given by the 'parent'
+        node attribute. The `visited` set is updated to add each node in
+        an edge yielded by this function.
+
+        """
+        while v not in visited:
+            yield u, v
+            visited.add(v)
+            u, v = v, G.nodes[v]["parent"]
+        yield u, v
+
+    # Check if the root is in the graph G. If not, raise NodeNotFound
+    if root is not None and root not in G:
+        raise nx.NodeNotFound(f"Root node {root} is not in graph")
+
+    # Create a directed version of G that has the DFS edges directed
+    # toward the root and the nontree edges directed away from the root
+    # (in each connected component).
+    H, nodes = _dfs_cycle_forest(G, root)
+
+    # Visit the nodes again in DFS order. For each node, and for each
+    # nontree edge leaving that node, compute the fundamental cycle for
+    # that nontree edge starting with that edge. If the fundamental
+    # cycle overlaps with any visited nodes, just take the prefix of the
+    # cycle up to the point of visited nodes.
+    #
+    # We repeat this process for each connected component (implicitly,
+    # since `nodes` already has a list of the nodes grouped by connected
+    # component).
+    visited = set()
+    for u in nodes:
+        visited.add(u)
+        # For each nontree edge going out of node u...
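+        # (tree edges were stored with nontree=False, so the `if d` filter
+        # below keeps only the true nontree edges)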
+ edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d) + for u, v in edges: + # Create the cycle or cycle prefix starting with the + # nontree edge. + chain = list(_build_chain(H, u, v, visited)) + yield chain diff --git a/MLPY/Lib/site-packages/networkx/algorithms/chordal.py b/MLPY/Lib/site-packages/networkx/algorithms/chordal.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa5679831768aad26db2dc53b40f4834186ceae --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/chordal.py @@ -0,0 +1,440 @@ +""" +Algorithms for chordal graphs. + +A graph is chordal if every cycle of length at least 4 has a chord +(an edge joining two nodes not adjacent in the cycle). +https://en.wikipedia.org/wiki/Chordal_graph +""" +import sys + +import networkx as nx +from networkx.algorithms.components import connected_components +from networkx.utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_chordal", + "find_induced_nodes", + "chordal_graph_cliques", + "chordal_graph_treewidth", + "NetworkXTreewidthBoundExceeded", + "complete_to_chordal_graph", +] + + +class NetworkXTreewidthBoundExceeded(nx.NetworkXException): + """Exception raised when a treewidth bound has been provided and it has + been exceeded""" + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def is_chordal(G): + """Checks whether G is a chordal graph. + + A graph is chordal if every cycle of length at least 4 has a chord + (an edge joining two nodes not adjacent in the cycle). + + Parameters + ---------- + G : graph + A NetworkX graph. + + Returns + ------- + chordal : bool + True if G is a chordal graph and False otherwise. + + Raises + ------ + NetworkXNotImplemented + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... ] + >>> G = nx.Graph(e) + >>> nx.is_chordal(G) + True + + Notes + ----- + The routine tries to go through every node following maximum cardinality + search. It returns False when it finds that the separator for any node + is not a clique. Based on the algorithms in [1]_. + + Self loops are ignored. + + References + ---------- + .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms + to test chordality of graphs, test acyclicity of hypergraphs, and + selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984), + pp. 566–579. + """ + if len(G.nodes) <= 3: + return True + return len(_find_chordality_breaker(G)) == 0 + + +@nx._dispatch +def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize): + """Returns the set of induced nodes in the path from s to t. + + Parameters + ---------- + G : graph + A chordal NetworkX graph + s : node + Source node to look for induced nodes + t : node + Destination node to look for induced nodes + treewidth_bound: float + Maximum treewidth acceptable for the graph H. The search + for induced nodes will end as soon as the treewidth_bound is exceeded. + + Returns + ------- + induced_nodes : Set of nodes + The set of induced nodes in the path from s to t in G + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + If the input graph is an instance of one of these classes, a + :exc:`NetworkXError` is raised. + The algorithm can only be applied to chordal graphs. 
If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> G = nx.Graph() + >>> G = nx.generators.classic.path_graph(10) + >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + >>> sorted(induced_nodes) + [1, 2, 3, 4, 5, 6, 7, 8, 9] + + Notes + ----- + G must be a chordal graph and (s,t) an edge that is not in G. + + If a treewidth_bound is provided, the search for induced nodes will end + as soon as the treewidth_bound is exceeded. + + The algorithm is inspired by Algorithm 4 in [1]_. + A formal definition of induced node can also be found on that reference. + + Self Loops are ignored + + References + ---------- + .. [1] Learning Bounded Treewidth Bayesian Networks. + Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008. + http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf + """ + if not is_chordal(G): + raise nx.NetworkXError("Input graph is not chordal.") + + H = nx.Graph(G) + H.add_edge(s, t) + induced_nodes = set() + triplet = _find_chordality_breaker(H, s, treewidth_bound) + while triplet: + (u, v, w) = triplet + induced_nodes.update(triplet) + for n in triplet: + if n != s: + H.add_edge(s, n) + triplet = _find_chordality_breaker(H, s, treewidth_bound) + if induced_nodes: + # Add t and the second node in the induced path from s to t. + induced_nodes.add(t) + for u in G[s]: + if len(induced_nodes & set(G[u])) == 2: + induced_nodes.add(u) + break + return induced_nodes + + +@nx._dispatch +def chordal_graph_cliques(G): + """Returns all maximal cliques of a chordal graph. + + The algorithm breaks the graph in connected components and performs a + maximum cardinality search in each component to get the cliques. + + Parameters + ---------- + G : graph + A NetworkX graph + + Yields + ------ + frozenset of nodes + Maximal cliques, each of which is a frozenset of + nodes in `G`. The order of cliques is arbitrary. + + Raises + ------ + NetworkXError + The algorithm does not support DiGraph, MultiGraph and MultiDiGraph. + The algorithm can only be applied to chordal graphs. If the input + graph is found to be non-chordal, a :exc:`NetworkXError` is raised. + + Examples + -------- + >>> e = [ + ... (1, 2), + ... (1, 3), + ... (2, 3), + ... (2, 4), + ... (3, 4), + ... (3, 5), + ... (3, 6), + ... (4, 5), + ... (4, 6), + ... (5, 6), + ... (7, 8), + ... ] + >>> G = nx.Graph(e) + >>> G.add_node(9) + >>> cliques = [c for c in chordal_graph_cliques(G)] + >>> cliques[0] + frozenset({1, 2, 3}) + """ + for C in (G.subgraph(c).copy() for c in connected_components(G)): + if C.number_of_nodes() == 1: + if nx.number_of_selfloops(C) > 0: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(C.nodes()) + else: + unnumbered = set(C.nodes()) + v = arbitrary_element(C) + unnumbered.remove(v) + numbered = {v} + clique_wanna_be = {v} + while unnumbered: + v = _max_cardinality_node(C, unnumbered, numbered) + unnumbered.remove(v) + numbered.add(v) + new_clique_wanna_be = set(C.neighbors(v)) & numbered + sg = C.subgraph(clique_wanna_be) + if _is_complete_graph(sg): + new_clique_wanna_be.add(v) + if not new_clique_wanna_be >= clique_wanna_be: + yield frozenset(clique_wanna_be) + clique_wanna_be = new_clique_wanna_be + else: + raise nx.NetworkXError("Input graph is not chordal.") + yield frozenset(clique_wanna_be) + + +@nx._dispatch +def chordal_graph_treewidth(G): + """Returns the treewidth of the chordal graph G. 
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX graph
+
+    Returns
+    -------
+    treewidth : int
+        The size of the largest clique in the graph minus one.
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
+        The algorithm can only be applied to chordal graphs. If the input
+        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.
+
+    Examples
+    --------
+    >>> e = [
+    ...     (1, 2),
+    ...     (1, 3),
+    ...     (2, 3),
+    ...     (2, 4),
+    ...     (3, 4),
+    ...     (3, 5),
+    ...     (3, 6),
+    ...     (4, 5),
+    ...     (4, 6),
+    ...     (5, 6),
+    ...     (7, 8),
+    ... ]
+    >>> G = nx.Graph(e)
+    >>> G.add_node(9)
+    >>> nx.chordal_graph_treewidth(G)
+    3
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
+    """
+    if not is_chordal(G):
+        raise nx.NetworkXError("Input graph is not chordal.")
+
+    max_clique = -1
+    for clique in nx.chordal_graph_cliques(G):
+        max_clique = max(max_clique, len(clique))
+    return max_clique - 1
+
+
+def _is_complete_graph(G):
+    """Returns True if G is a complete graph."""
+    if nx.number_of_selfloops(G) > 0:
+        raise nx.NetworkXError("Self loop found in _is_complete_graph()")
+    n = G.number_of_nodes()
+    if n < 2:
+        return True
+    e = G.number_of_edges()
+    max_edges = (n * (n - 1)) / 2
+    return e == max_edges
+
+
+def _find_missing_edge(G):
+    """Given a non-complete graph G, returns a missing edge."""
+    nodes = set(G)
+    for u in G:
+        missing = nodes - set(list(G[u].keys()) + [u])
+        if missing:
+            return (u, missing.pop())
+
+
+def _max_cardinality_node(G, choices, wanna_connect):
+    """Returns the node in choices that has the most connections in G
+    to nodes in wanna_connect.
+    """
+    max_number = -1
+    for x in choices:
+        number = len([y for y in G[x] if y in wanna_connect])
+        if number > max_number:
+            max_number = number
+            max_cardinality_node = x
+    return max_cardinality_node
+
+
+def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):
+    """Given a graph G, starts a max cardinality search
+    (starting from s if s is given and from an arbitrary node otherwise)
+    trying to find a non-chordal cycle.
+
+    If it does find one, it returns (u,v,w) where u,v,w are the three
+    nodes that together with s are involved in the cycle.
+
+    It ignores any self loops.
+    """
+    unnumbered = set(G)
+    if s is None:
+        s = arbitrary_element(G)
+    unnumbered.remove(s)
+    numbered = {s}
+    current_treewidth = -1
+    while unnumbered:  # and current_treewidth <= treewidth_bound:
+        v = _max_cardinality_node(G, unnumbered, numbered)
+        unnumbered.remove(v)
+        numbered.add(v)
+        clique_wanna_be = set(G[v]) & numbered
+        sg = G.subgraph(clique_wanna_be)
+        if _is_complete_graph(sg):
+            # The graph seems to be chordal by now. We update the treewidth
+            current_treewidth = max(current_treewidth, len(clique_wanna_be))
+            if current_treewidth > treewidth_bound:
+                raise nx.NetworkXTreewidthBoundExceeded(
+                    f"treewidth_bound exceeded: {current_treewidth}"
+                )
+        else:
+            # sg is not a clique,
+            # look for an edge that is not included in sg
+            (u, w) = _find_missing_edge(sg)
+            return (u, v, w)
+    return ()
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def complete_to_chordal_graph(G):
+    """Return a copy of G completed to a chordal graph
+
+    Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is
+    called chordal if for each cycle with length bigger than 3, there exist
+    two non-adjacent nodes connected by an edge (called a chord).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    Returns
+    -------
+    H : NetworkX graph
+        The chordal enhancement of G
+    alpha : Dictionary
+        The elimination ordering of nodes of G
+
+    Notes
+    -----
+    There are different approaches to calculate the chordal
+    enhancement of a graph. The algorithm used here is called
+    MCS-M and gives at least a minimal (local) triangulation of the
+    graph. Note that this triangulation is not necessarily a global
+    minimum.
+
+    https://en.wikipedia.org/wiki/Chordal_graph
+
+    References
+    ----------
+    .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004)
+       Maximum Cardinality Search for Computing Minimal Triangulations of
+       Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3.
+
+    Examples
+    --------
+    >>> from networkx.algorithms.chordal import complete_to_chordal_graph
+    >>> G = nx.wheel_graph(10)
+    >>> H, alpha = complete_to_chordal_graph(G)
+    """
+    H = G.copy()
+    alpha = {node: 0 for node in H}
+    if nx.is_chordal(H):
+        return H, alpha
+    chords = set()
+    weight = {node: 0 for node in H.nodes()}
+    unnumbered_nodes = list(H.nodes())
+    for i in range(len(H.nodes()), 0, -1):
+        # get the node in unnumbered_nodes with the maximum weight
+        z = max(unnumbered_nodes, key=lambda node: weight[node])
+        unnumbered_nodes.remove(z)
+        alpha[z] = i
+        update_nodes = []
+        for y in unnumbered_nodes:
+            if G.has_edge(y, z):
+                update_nodes.append(y)
+            else:
+                # y_weight will be bigger than node weights between y and z
+                y_weight = weight[y]
+                lower_nodes = [
+                    node for node in unnumbered_nodes if weight[node] < y_weight
+                ]
+                if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):
+                    update_nodes.append(y)
+                    chords.add((z, y))
+        # during calculation of paths the weights should not be updated
+        for node in update_nodes:
+            weight[node] += 1
+    H.add_edges_from(chords)
+    return H, alpha
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/clique.py b/MLPY/Lib/site-packages/networkx/algorithms/clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fd7e81665a1c4ce5ea060ef6397f35ab2ea848c
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/clique.py
@@ -0,0 +1,753 @@
+"""Functions for finding and manipulating cliques.
+
+Finding the largest clique in a graph is an NP-complete problem, so most of
+these algorithms have an exponential running time; for more information,
+see the Wikipedia article on the clique problem [1]_.
+
+.. [1] clique problem: https://en.wikipedia.org/wiki/Clique_problem
+
+"""
+from collections import defaultdict, deque
+from itertools import chain, combinations, islice
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "find_cliques",
+    "find_cliques_recursive",
+    "make_max_clique_graph",
+    "make_clique_bipartite",
+    "node_clique_number",
+    "number_of_cliques",
+    "enumerate_all_cliques",
+    "max_weight_clique",
+]
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def enumerate_all_cliques(G):
+    """Returns all cliques in an undirected graph.
+
+    This function returns an iterator over cliques, each of which is a
+    list of nodes. The iteration is ordered by cardinality of the
+    cliques: first all cliques of size one, then all cliques of size
+    two, etc.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    Returns
+    -------
+    iterator
+        An iterator over cliques, each of which is a list of nodes in
+        `G`. The cliques are ordered according to size.
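+
+    Examples
+    --------
+    A small illustrative run on the triangle graph: the three single-node
+    cliques come first, then the three edges, then the triangle itself:
+
+    >>> G = nx.complete_graph(3)
+    >>> list(nx.enumerate_all_cliques(G))
+    [[0], [1], [2], [0, 1], [0, 2], [1, 2], [0, 1, 2]]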
+ + Notes + ----- + To obtain a list of all cliques, use + `list(enumerate_all_cliques(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph (for example, when the graph is the complete + graph). This function avoids storing all cliques in memory by only + keeping current candidate node lists in memory during its search. + + The implementation is adapted from the algorithm by Zhang, et + al. (2005) [1]_ to output all cliques discovered. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J., + Langston, M.A., Samatova, N.F., + "Genome-Scale Computational Approaches to Memory-Intensive + Applications in Systems Biology". + *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005 + Conference, pp. 12, 12--18 Nov. 2005. + . + + """ + index = {} + nbrs = {} + for u in G: + index[u] = len(index) + # Neighbors of u that appear after u in the iteration order of G. + nbrs[u] = {v for v in G[u] if v not in index} + + queue = deque(([u], sorted(nbrs[u], key=index.__getitem__)) for u in G) + # Loop invariants: + # 1. len(base) is nondecreasing. + # 2. (base + cnbrs) is sorted with respect to the iteration order of G. + # 3. cnbrs is a set of common neighbors of nodes in base. + while queue: + base, cnbrs = map(list, queue.popleft()) + yield base + for i, u in enumerate(cnbrs): + # Use generators to reduce memory consumption. + queue.append( + ( + chain(base, [u]), + filter(nbrs[u].__contains__, islice(cnbrs, i + 1, None)), + ) + ) + + +@not_implemented_for("directed") +@nx._dispatch +def find_cliques(G, nodes=None): + """Returns all maximal cliques in an undirected graph. + + For each node *n*, a *maximal clique for n* is a largest complete + subgraph containing *n*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is an iterative implementation, so should not + suffer from recursion depth issues. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are returned. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + Examples + -------- + >>> from pprint import pprint # For nice dict formatting + >>> G = nx.karate_club_graph() + >>> sum(1 for c in nx.find_cliques(G)) # The number of maximal cliques in G + 36 + >>> max(nx.find_cliques(G), key=len) # The largest maximal clique in G + [0, 1, 2, 3, 13] + + The size of the largest maximal clique is known as the *clique number* of + the graph, which can be found directly with: + + >>> max(len(c) for c in nx.find_cliques(G)) + 5 + + One can also compute the number of maximal cliques in `G` that contain a given + node. 
The following produces a dictionary keyed by node whose + values are the number of maximal cliques in `G` that contain the node: + + >>> pprint({n: sum(1 for c in nx.find_cliques(G) if n in c) for n in G}) + {0: 13, + 1: 6, + 2: 7, + 3: 3, + 4: 2, + 5: 3, + 6: 3, + 7: 1, + 8: 3, + 9: 2, + 10: 2, + 11: 1, + 12: 1, + 13: 2, + 14: 1, + 15: 1, + 16: 1, + 17: 1, + 18: 1, + 19: 2, + 20: 1, + 21: 1, + 22: 1, + 23: 3, + 24: 2, + 25: 2, + 26: 1, + 27: 3, + 28: 2, + 29: 2, + 30: 2, + 31: 4, + 32: 9, + 33: 14} + + Or, similarly, the maximal cliques in `G` that contain a given node. + For example, the 4 maximal cliques that contain node 31: + + >>> [c for c in nx.find_cliques(G) if 31 in c] + [[0, 31], [33, 32, 31], [33, 28, 31], [24, 25, 31]] + + See Also + -------- + find_cliques_recursive + A recursive version of the same algorithm. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques(G))`. However, be aware that in the worst-case, + the length of this list can be exponential in the number of nodes in + the graph. This function avoids storing all cliques in memory by + only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. It + essentially unrolls the recursion used in the references to avoid + issues of recursion stack depth (for a recursive implementation, see + :func:`find_cliques_recursive`). + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand = set(G) + for node in Q: + if node not in cand: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand &= adj[node] + + if not cand: + yield Q[:] + return + + subg = cand.copy() + stack = [] + Q.append(None) + + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + + try: + while True: + if ext_u: + q = ext_u.pop() + cand.remove(q) + Q[-1] = q + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + stack.append((subg, cand, ext_u)) + Q.append(None) + subg = subg_q + cand = cand_q + u = max(subg, key=lambda u: len(cand & adj[u])) + ext_u = cand - adj[u] + else: + Q.pop() + subg, cand, ext_u = stack.pop() + except IndexError: + pass + + +# TODO Should this also be not implemented for directed graphs? +@nx._dispatch +def find_cliques_recursive(G, nodes=None): + """Returns all maximal cliques in a graph. 
+ + For each node *v*, a *maximal clique for v* is a largest complete + subgraph containing *v*. The largest maximal clique is sometimes + called the *maximum clique*. + + This function returns an iterator over cliques, each of which is a + list of nodes. It is a recursive implementation, so may suffer from + recursion depth issues, but is included for pedagogical reasons. + For a non-recursive implementation, see :func:`find_cliques`. + + This function accepts a list of `nodes` and only the maximal cliques + containing all of these `nodes` are returned. It can considerably speed up + the running time if some specific cliques are desired. + + Parameters + ---------- + G : NetworkX graph + + nodes : list, optional (default=None) + If provided, only yield *maximal cliques* containing all nodes in `nodes`. + If `nodes` isn't a clique itself, a ValueError is raised. + + Returns + ------- + iterator + An iterator over maximal cliques, each of which is a list of + nodes in `G`. If `nodes` is provided, only the maximal cliques + containing all the nodes in `nodes` are yielded. The order of + cliques is arbitrary. + + Raises + ------ + ValueError + If `nodes` is not a clique. + + See Also + -------- + find_cliques + An iterative version of the same algorithm. See docstring for examples. + + Notes + ----- + To obtain a list of all maximal cliques, use + `list(find_cliques_recursive(G))`. However, be aware that in the + worst-case, the length of this list can be exponential in the number + of nodes in the graph. This function avoids storing all cliques in memory + by only keeping current candidate node lists in memory during its search. + + This implementation is based on the algorithm published by Bron and + Kerbosch (1973) [1]_, as adapted by Tomita, Tanaka and Takahashi + (2006) [2]_ and discussed in Cazals and Karande (2008) [3]_. For a + non-recursive implementation, see :func:`find_cliques`. + + This algorithm ignores self-loops and parallel edges, since cliques + are not conventionally defined with such edges. + + References + ---------- + .. [1] Bron, C. and Kerbosch, J. + "Algorithm 457: finding all cliques of an undirected graph". + *Communications of the ACM* 16, 9 (Sep. 1973), 575--577. + + + .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi, + "The worst-case time complexity for generating all maximal + cliques and computational experiments", + *Theoretical Computer Science*, Volume 363, Issue 1, + Computing and Combinatorics, + 10th Annual International Conference on + Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28--42 + + + .. [3] F. Cazals, C. 
Karande, + "A note on the problem of reporting maximal cliques", + *Theoretical Computer Science*, + Volume 407, Issues 1--3, 6 November 2008, Pages 564--568, + + + """ + if len(G) == 0: + return iter([]) + + adj = {u: {v for v in G[u] if v != u} for u in G} + + # Initialize Q with the given nodes and subg, cand with their nbrs + Q = nodes[:] if nodes is not None else [] + cand_init = set(G) + for node in Q: + if node not in cand_init: + raise ValueError(f"The given `nodes` {nodes} do not form a clique") + cand_init &= adj[node] + + if not cand_init: + return iter([Q]) + + subg_init = cand_init.copy() + + def expand(subg, cand): + u = max(subg, key=lambda u: len(cand & adj[u])) + for q in cand - adj[u]: + cand.remove(q) + Q.append(q) + adj_q = adj[q] + subg_q = subg & adj_q + if not subg_q: + yield Q[:] + else: + cand_q = cand & adj_q + if cand_q: + yield from expand(subg_q, cand_q) + Q.pop() + + return expand(subg_init, cand_init) + + +@nx._dispatch +def make_max_clique_graph(G, create_using=None): + """Returns the maximal clique graph of the given graph. + + The nodes of the maximal clique graph of `G` are the cliques of + `G` and an edge joins two cliques if the cliques are not disjoint. + + Parameters + ---------- + G : NetworkX graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A graph whose nodes are the cliques of `G` and whose edges + join two cliques if they are not disjoint. + + Notes + ----- + This function behaves like the following code:: + + import networkx as nx + G = nx.make_clique_bipartite(G) + cliques = [v for v in G.nodes() if G.nodes[v]['bipartite'] == 0] + G = nx.bipartite.projected_graph(G, cliques) + G = nx.relabel_nodes(G, {-v: v - 1 for v in G}) + + It should be faster, though, since it skips all the intermediate + steps. + + """ + if create_using is None: + B = G.__class__() + else: + B = nx.empty_graph(0, create_using) + cliques = list(enumerate(set(c) for c in find_cliques(G))) + # Add a numbered node for each clique. + B.add_nodes_from(i for i, c in cliques) + # Join cliques by an edge if they share a node. + clique_pairs = combinations(cliques, 2) + B.add_edges_from((i, j) for (i, c1), (j, c2) in clique_pairs if c1 & c2) + return B + + +@nx._dispatch +def make_clique_bipartite(G, fpos=None, create_using=None, name=None): + """Returns the bipartite clique graph corresponding to `G`. + + In the returned bipartite graph, the "bottom" nodes are the nodes of + `G` and the "top" nodes represent the maximal cliques of `G`. + There is an edge from node *v* to clique *C* in the returned graph + if and only if *v* is an element of *C*. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + fpos : bool + If True or not None, the returned graph will have an + additional attribute, `pos`, a dictionary mapping node to + position in the Euclidean plane. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + A bipartite graph whose "bottom" set is the nodes of the graph + `G`, whose "top" set is the cliques of `G`, and whose edges + join nodes of `G` to the cliques that contain them. 
+ + The nodes of the graph `G` have the node attribute + 'bipartite' set to 1 and the nodes representing cliques + have the node attribute 'bipartite' set to 0, as is the + convention for bipartite graphs in NetworkX. + + """ + B = nx.empty_graph(0, create_using) + B.clear() + # The "bottom" nodes in the bipartite graph are the nodes of the + # original graph, G. + B.add_nodes_from(G, bipartite=1) + for i, cl in enumerate(find_cliques(G)): + # The "top" nodes in the bipartite graph are the cliques. These + # nodes get negative numbers as labels. + name = -i - 1 + B.add_node(name, bipartite=0) + B.add_edges_from((v, name) for v in cl) + return B + + +@nx._dispatch +def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False): + """Returns the size of the largest maximal clique containing each given node. + + Returns a single or list depending on input nodes. + An optional list of cliques can be input if already computed. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + cliques : list, optional (default=None) + A list of cliques, each of which is itself a list of nodes. + If not specified, the list of all cliques will be computed + using :func:`find_cliques`. + + Returns + ------- + int or dict + If `nodes` is a single node, returns the size of the + largest maximal clique in `G` containing that node. + Otherwise return a dict keyed by node to the size + of the largest maximal clique containing that node. + + See Also + -------- + find_cliques + find_cliques yields the maximal cliques of G. + It accepts a `nodes` argument which restricts consideration to + maximal cliques containing all the given `nodes`. + The search for the cliques is optimized for `nodes`. + """ + if cliques is None: + if nodes is not None: + # Use ego_graph to decrease size of graph + # check for single node + if nodes in G: + return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes))) + # handle multiple nodes + return { + n: max(len(c) for c in find_cliques(nx.ego_graph(G, n))) for n in nodes + } + + # nodes is None--find all cliques + cliques = list(find_cliques(G)) + + # single node requested + if nodes in G: + return max(len(c) for c in cliques if nodes in c) + + # multiple nodes requested + # preprocess all nodes (faster than one at a time for even 2 nodes) + size_for_n = defaultdict(int) + for c in cliques: + size_of_c = len(c) + for n in c: + if size_for_n[n] < size_of_c: + size_for_n[n] = size_of_c + if nodes is None: + return size_for_n + return {n: size_for_n[n] for n in nodes} + + +def number_of_cliques(G, nodes=None, cliques=None): + """Returns the number of maximal cliques for each node. + + Returns a single or list depending on input nodes. + Optional list of cliques can be input if already computed. + """ + if cliques is None: + cliques = list(find_cliques(G)) + + if nodes is None: + nodes = list(G.nodes()) # none, get entire graph + + if not isinstance(nodes, list): # check for a list + v = nodes + # assume it is a single value + numcliq = len([1 for c in cliques if v in c]) + else: + numcliq = {} + for v in nodes: + numcliq[v] = len([1 for c in cliques if v in c]) + return numcliq + + +class MaxWeightClique: + """A class for the maximum weight clique algorithm. + + This class is a helper for the `max_weight_clique` function. The class + should not normally be used directly. 
+ + Parameters + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Attributes + ---------- + G : NetworkX graph + The undirected graph for which a maximum weight clique is sought + node_weights: dict + The weight of each node + incumbent_nodes : list + The nodes of the incumbent clique (the best clique found so far) + incumbent_weight: int + The weight of the incumbent clique + """ + + def __init__(self, G, weight): + self.G = G + self.incumbent_nodes = [] + self.incumbent_weight = 0 + + if weight is None: + self.node_weights = {v: 1 for v in G.nodes()} + else: + for v in G.nodes(): + if weight not in G.nodes[v]: + errmsg = f"Node {v!r} does not have the requested weight field." + raise KeyError(errmsg) + if not isinstance(G.nodes[v][weight], int): + errmsg = f"The {weight!r} field of node {v!r} is not an integer." + raise ValueError(errmsg) + self.node_weights = {v: G.nodes[v][weight] for v in G.nodes()} + + def update_incumbent_if_improved(self, C, C_weight): + """Update the incumbent if the node set C has greater weight. + + C is assumed to be a clique. + """ + if C_weight > self.incumbent_weight: + self.incumbent_nodes = C[:] + self.incumbent_weight = C_weight + + def greedily_find_independent_set(self, P): + """Greedily find an independent set of nodes from a set of + nodes P.""" + independent_set = [] + P = P[:] + while P: + v = P[0] + independent_set.append(v) + P = [w for w in P if v != w and not self.G.has_edge(v, w)] + return independent_set + + def find_branching_nodes(self, P, target): + """Find a set of nodes to branch on.""" + residual_wt = {v: self.node_weights[v] for v in P} + total_wt = 0 + P = P[:] + while P: + independent_set = self.greedily_find_independent_set(P) + min_wt_in_class = min(residual_wt[v] for v in independent_set) + total_wt += min_wt_in_class + if total_wt > target: + break + for v in independent_set: + residual_wt[v] -= min_wt_in_class + P = [v for v in P if residual_wt[v] != 0] + return P + + def expand(self, C, C_weight, P): + """Look for the best clique that contains all the nodes in C and zero or + more of the nodes in P, backtracking if it can be shown that no such + clique has greater weight than the incumbent. + """ + self.update_incumbent_if_improved(C, C_weight) + branching_nodes = self.find_branching_nodes(P, self.incumbent_weight - C_weight) + while branching_nodes: + v = branching_nodes.pop() + P.remove(v) + new_C = C + [v] + new_C_weight = C_weight + self.node_weights[v] + new_P = [w for w in P if self.G.has_edge(v, w)] + self.expand(new_C, new_C_weight, new_P) + + def find_max_weight_clique(self): + """Find a maximum weight clique.""" + # Sort nodes in reverse order of degree for speed + nodes = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True) + nodes = [v for v in nodes if self.node_weights[v] > 0] + self.expand([], 0, nodes) + + +@not_implemented_for("directed") +@nx._dispatch(node_attrs="weight") +def max_weight_clique(G, weight="weight"): + """Find a maximum weight clique in G. + + A *clique* in a graph is a set of nodes such that every two distinct nodes + are adjacent. The *weight* of a clique is the sum of the weights of its + nodes. A *maximum weight clique* of graph G is a clique C in G such that + no clique in G has weight greater than the weight of C. 
+ + Parameters + ---------- + G : NetworkX graph + Undirected graph + weight : string or None, optional (default='weight') + The node attribute that holds the integer value used as a weight. + If None, then each node has weight 1. + + Returns + ------- + clique : list + the nodes of a maximum weight clique + weight : int + the weight of a maximum weight clique + + Notes + ----- + The implementation is recursive, and therefore it may run into recursion + depth issues if G contains a clique whose number of nodes is close to the + recursion depth limit. + + At each search node, the algorithm greedily constructs a weighted + independent set cover of part of the graph in order to find a small set of + nodes on which to branch. The algorithm is very similar to the algorithm + of Tavares et al. [1]_, other than the fact that the NetworkX version does + not use bitsets. This style of algorithm for maximum weight clique (and + maximum weight independent set, which is the same problem but on the + complement graph) has a decades-long history. See Algorithm B of Warren + and Hicks [2]_ and the references in that paper. + + References + ---------- + .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um + algoritmo de branch and bound para o problema da clique máxima + ponderada. Proceedings of XLVII SBPO 1 (2015). + + .. [2] Warren, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound + for the Maximum Weight Independent Set Problem. Technical Report, + Texas A&M University (2016). + """ + + mwc = MaxWeightClique(G, weight) + mwc.find_max_weight_clique() + return mwc.incumbent_nodes, mwc.incumbent_weight diff --git a/MLPY/Lib/site-packages/networkx/algorithms/cluster.py b/MLPY/Lib/site-packages/networkx/algorithms/cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..0500852a7ce87439058cc627d5f6256b4ba09ed1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/cluster.py @@ -0,0 +1,605 @@ +"""Algorithms to characterize the number of triangles in a graph.""" + +from collections import Counter +from itertools import chain, combinations + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "triangles", + "average_clustering", + "clustering", + "transitivity", + "square_clustering", + "generalized_degree", +] + + +@not_implemented_for("directed") +@nx._dispatch +def triangles(G, nodes=None): + """Compute the number of triangles. + + Finds the number of triangles that include a node as one vertex. + + Parameters + ---------- + G : graph + A networkx graph + + nodes : node, iterable of nodes, or None (default=None) + If a singleton node, return the number of triangles for that node. + If an iterable, compute the number of triangles for each of those nodes. + If `None` (the default) compute the number of triangles for all nodes in `G`. + + Returns + ------- + out : dict or int + If `nodes` is a container of nodes, returns number of triangles keyed by node (dict). + If `nodes` is a specific node, returns number of triangles for the node (int). + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.triangles(G, 0)) + 6 + >>> print(nx.triangles(G)) + {0: 6, 1: 6, 2: 6, 3: 6, 4: 6} + >>> print(list(nx.triangles(G, [0, 1]).values())) + [6, 6] + + Notes + ----- + Self loops are ignored. 
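+
+    For instance (illustrative), adding a self loop does not change a
+    node's triangle count:
+
+    >>> G = nx.complete_graph(3)
+    >>> G.add_edge(0, 0)
+    >>> nx.triangles(G, 0)
+    1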
+ + """ + if nodes is not None: + # If `nodes` represents a single node, return only its number of triangles + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[2] // 2 + + # if `nodes` is a container of nodes, then return a + # dictionary mapping node to number of triangles. + return {v: t // 2 for v, d, t, _ in _triangles_and_degree_iter(G, nodes)} + + # if nodes is None, then compute triangles for the complete graph + + # dict used to avoid visiting the same nodes twice + # this allows calculating/counting each triangle only once + later_neighbors = {} + + # iterate over the nodes in a graph + for node, neighbors in G.adjacency(): + later_neighbors[node] = { + n for n in neighbors if n not in later_neighbors and n != node + } + + # instantiate Counter for each node to include isolated nodes + # add 1 to the count if a nodes neighbor's neighbor is also a neighbor + triangle_counts = Counter(dict.fromkeys(G, 0)) + for node1, neighbors in later_neighbors.items(): + for node2 in neighbors: + third_nodes = neighbors & later_neighbors[node2] + m = len(third_nodes) + triangle_counts[node1] += m + triangle_counts[node2] += m + triangle_counts.update(third_nodes) + + return dict(triangle_counts) + + +@not_implemented_for("multigraph") +def _triangles_and_degree_iter(G, nodes=None): + """Return an iterator of (node, degree, triangles, generalized degree). + + This double counts triangles so you may want to divide by 2. + See degree(), triangles() and generalized_degree() for definitions + and details. + + """ + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + for v, v_nbrs in nodes_nbrs: + vs = set(v_nbrs) - {v} + gen_degree = Counter(len(vs & (set(G[w]) - {w})) for w in vs) + ntriangles = sum(k * val for k, val in gen_degree.items()) + yield (v, len(vs), ntriangles, gen_degree) + + +@not_implemented_for("multigraph") +def _weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of (node, degree, weighted_triangles). + + Used for weighted clustering. + Note: this returns the geometric average weight of edges in the triangle. + Also, each triangle is counted twice (each direction). + So you may want to divide by 2. + + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + if nodes is None: + nodes_nbrs = G.adj.items() + else: + nodes_nbrs = ((n, G[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, nbrs in nodes_nbrs: + inbrs = set(nbrs) - {i} + weighted_triangles = 0 + seen = set() + for j in inbrs: + seen.add(j) + # This avoids counting twice -- we double at the end. + jnbrs = set(G[j]) - seen + # Only compute the edge weight once, before the inner inner + # loop. + wij = wt(i, j) + weighted_triangles += sum( + np.cbrt([(wij * wt(j, k) * wt(k, i)) for k in inbrs & jnbrs]) + ) + yield (i, len(inbrs), 2 * weighted_triangles) + + +@not_implemented_for("multigraph") +def _directed_triangles_and_degree_iter(G, nodes=None): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_triangles). + + Used for directed clustering. + Note that unlike `_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. 
+ + """ + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in chain(ipreds, isuccs): + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + 1 + for k in chain( + (ipreds & jpreds), + (ipreds & jsuccs), + (isuccs & jpreds), + (isuccs & jsuccs), + ) + ) + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@not_implemented_for("multigraph") +def _directed_weighted_triangles_and_degree_iter(G, nodes=None, weight="weight"): + """Return an iterator of + (node, total_degree, reciprocal_degree, directed_weighted_triangles). + + Used for directed weighted clustering. + Note that unlike `_weighted_triangles_and_degree_iter()`, this function counts + directed triangles so does not count triangles twice. + + """ + import numpy as np + + if weight is None or G.number_of_edges() == 0: + max_weight = 1 + else: + max_weight = max(d.get(weight, 1) for u, v, d in G.edges(data=True)) + + nodes_nbrs = ((n, G._pred[n], G._succ[n]) for n in G.nbunch_iter(nodes)) + + def wt(u, v): + return G[u][v].get(weight, 1) / max_weight + + for i, preds, succs in nodes_nbrs: + ipreds = set(preds) - {i} + isuccs = set(succs) - {i} + + directed_triangles = 0 + for j in ipreds: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(j, i) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + for j in isuccs: + jpreds = set(G._pred[j]) - {j} + jsuccs = set(G._succ[j]) - {j} + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(k, j)) for k in ipreds & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(k, i) * wt(j, k)) for k in ipreds & jsuccs]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(k, j)) for k in isuccs & jpreds]) + ) + directed_triangles += sum( + np.cbrt([(wt(i, j) * wt(i, k) * wt(j, k)) for k in isuccs & jsuccs]) + ) + + dtotal = len(ipreds) + len(isuccs) + dbidirectional = len(ipreds & isuccs) + yield (i, dtotal, dbidirectional, directed_triangles) + + +@nx._dispatch(edge_attrs="weight") +def average_clustering(G, nodes=None, weight=None, count_zeros=True): + r"""Compute the average clustering coefficient for the graph G. + + The clustering coefficient for the graph is the average, + + .. math:: + + C = \frac{1}{n}\sum_{v \in G} c_v, + + where :math:`n` is the number of nodes in `G`. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute average clustering for nodes in this container. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used as a weight. + If None, then each edge has weight 1. + + count_zeros : bool + If False include only the nodes with nonzero clustering in the average. 
+
+    Returns
+    -------
+    avg : float
+        Average clustering
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.average_clustering(G))
+    1.0
+
+    Notes
+    -----
+    This is a space saving routine; it might be faster
+    to use the clustering function to get a list and then take the average.
+
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+       http://jponnela.com/web_documents/a9.pdf
+    .. [2] Marcus Kaiser, Mean clustering coefficients: the role of isolated
+       nodes and leafs on clustering measures for small-world networks.
+       https://arxiv.org/abs/0802.2512
+    """
+    c = clustering(G, nodes, weight=weight).values()
+    if not count_zeros:
+        c = [v for v in c if abs(v) > 0]
+    return sum(c) / len(c)
+
+
+@nx._dispatch(edge_attrs="weight")
+def clustering(G, nodes=None, weight=None):
+    r"""Compute the clustering coefficient for nodes.
+
+    For unweighted graphs, the clustering of a node :math:`u`
+    is the fraction of possible triangles through that node that exist,
+
+    .. math::
+
+      c_u = \frac{2 T(u)}{deg(u)(deg(u)-1)},
+
+    where :math:`T(u)` is the number of triangles through node :math:`u` and
+    :math:`deg(u)` is the degree of :math:`u`.
+
+    For weighted graphs, there are several ways to define clustering [1]_.
+    The one used here is defined
+    as the geometric average of the subgraph edge weights [2]_,
+
+    .. math::
+
+       c_u = \frac{1}{deg(u)(deg(u)-1)}
+             \sum_{vw} (\hat{w}_{uv} \hat{w}_{uw} \hat{w}_{vw})^{1/3}.
+
+    The edge weights :math:`\hat{w}_{uv}` are normalized by the maximum weight
+    in the network :math:`\hat{w}_{uv} = w_{uv}/\max(w)`.
+
+    The value of :math:`c_u` is assigned to 0 if :math:`deg(u) < 2`.
+
+    Additionally, this weighted definition has been generalized to support negative edge weights [3]_.
+
+    For directed graphs, the clustering is similarly defined as the fraction
+    of all possible directed triangles or geometric average of the subgraph
+    edge weights for unweighted and weighted directed graphs, respectively [4]_.
+
+    .. math::
+
+       c_u = \frac{T(u)}{2(deg^{tot}(u)(deg^{tot}(u)-1) - 2deg^{\leftrightarrow}(u))},
+
+    where :math:`T(u)` is the number of directed triangles through node
+    :math:`u`, :math:`deg^{tot}(u)` is the sum of in degree and out degree of
+    :math:`u` and :math:`deg^{\leftrightarrow}(u)` is the reciprocal degree of
+    :math:`u`.
+
+
+    Parameters
+    ----------
+    G : graph
+
+    nodes : node, iterable of nodes, or None (default=None)
+        If a singleton node, return the clustering coefficient for that node.
+        If an iterable, compute the clustering coefficient for each of those nodes.
+        If `None` (the default) compute the clustering coefficient for all nodes in `G`.
+
+    weight : string or None, optional (default=None)
+        The edge attribute that holds the numerical value used as a weight.
+        If None, then each edge has weight 1.
+
+    Returns
+    -------
+    out : float, or dictionary
+        Clustering coefficient at specified nodes
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> print(nx.clustering(G, 0))
+    1.0
+    >>> print(nx.clustering(G))
+    {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0}
+
+    Notes
+    -----
+    Self loops are ignored.
+
+    References
+    ----------
+    .. [1] Generalizations of the clustering coefficient to weighted
+       complex networks by J. Saramäki, M. Kivelä, J.-P. Onnela,
+       K. Kaski, and J. Kertész, Physical Review E, 75 027105 (2007).
+ http://jponnela.com/web_documents/a9.pdf + .. [2] Intensity and coherence of motifs in weighted complex + networks by J. P. Onnela, J. Saramäki, J. Kertész, and K. Kaski, + Physical Review E, 71(6), 065103 (2005). + .. [3] Generalization of Clustering Coefficients to Signed Correlation Networks + by G. Costantini and M. Perugini, PloS one, 9(2), e88669 (2014). + .. [4] Clustering in complex directed networks by G. Fagiolo, + Physical Review E, 76(2), 026107 (2007). + """ + if G.is_directed(): + if weight is not None: + td_iter = _directed_weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + td_iter = _directed_triangles_and_degree_iter(G, nodes) + clusterc = { + v: 0 if t == 0 else t / ((dt * (dt - 1) - 2 * db) * 2) + for v, dt, db, t in td_iter + } + else: + # The formula 2*T/(d*(d-1)) from docs is t/(d*(d-1)) here b/c t==2*T + if weight is not None: + td_iter = _weighted_triangles_and_degree_iter(G, nodes, weight) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t in td_iter} + else: + td_iter = _triangles_and_degree_iter(G, nodes) + clusterc = {v: 0 if t == 0 else t / (d * (d - 1)) for v, d, t, _ in td_iter} + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clusterc[nodes] + return clusterc + + +@nx._dispatch +def transitivity(G): + r"""Compute graph transitivity, the fraction of all possible triangles + present in G. + + Possible triangles are identified by the number of "triads" + (two edges with a shared vertex). + + The transitivity is + + .. math:: + + T = 3\frac{\#triangles}{\#triads}. + + Parameters + ---------- + G : graph + + Returns + ------- + out : float + Transitivity + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.transitivity(G)) + 1.0 + """ + triangles_contri = [ + (t, d * (d - 1)) for v, d, t, _ in _triangles_and_degree_iter(G) + ] + # If the graph is empty + if len(triangles_contri) == 0: + return 0 + triangles, contri = map(sum, zip(*triangles_contri)) + return 0 if triangles == 0 else triangles / contri + + +@nx._dispatch +def square_clustering(G, nodes=None): + r"""Compute the squares clustering coefficient for nodes. + + For each node return the fraction of possible squares that exist at + the node [1]_ + + .. math:: + C_4(v) = \frac{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} q_v(u,w) }{ \sum_{u=1}^{k_v} + \sum_{w=u+1}^{k_v} [a_v(u,w) + q_v(u,w)]}, + + where :math:`q_v(u,w)` are the number of common neighbors of :math:`u` and + :math:`w` other than :math:`v` (ie squares), and :math:`a_v(u,w) = (k_u - + (1+q_v(u,w)+\theta_{uv})) + (k_w - (1+q_v(u,w)+\theta_{uw}))`, where + :math:`\theta_{uw} = 1` if :math:`u` and :math:`w` are connected and 0 + otherwise. [2]_ + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute clustering for nodes in this container. + + Returns + ------- + c4 : dictionary + A dictionary keyed by node with the square clustering coefficient value. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.square_clustering(G, 0)) + 1.0 + >>> print(nx.square_clustering(G)) + {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0} + + Notes + ----- + While :math:`C_3(v)` (triangle clustering) gives the probability that + two neighbors of node v are connected with each other, :math:`C_4(v)` is + the probability that two neighbors of node v share a common + neighbor different from v. 
This algorithm can be applied to both + bipartite and unipartite networks. + + References + ---------- + .. [1] Pedro G. Lind, Marta C. González, and Hans J. Herrmann. 2005 + Cycles and clustering in bipartite networks. + Physical Review E (72) 056127. + .. [2] Zhang, Peng et al. Clustering Coefficient and Community Structure of + Bipartite Networks. Physica A: Statistical Mechanics and its Applications 387.27 (2008): 6869–6875. + https://arxiv.org/abs/0710.0117v1 + """ + if nodes is None: + node_iter = G + else: + node_iter = G.nbunch_iter(nodes) + clustering = {} + for v in node_iter: + clustering[v] = 0 + potential = 0 + for u, w in combinations(G[v], 2): + squares = len((set(G[u]) & set(G[w])) - {v}) + clustering[v] += squares + degm = squares + 1 + if w in G[u]: + degm += 1 + potential += (len(G[u]) - degm) + (len(G[w]) - degm) + squares + if potential > 0: + clustering[v] /= potential + if nodes in G: + # Return the value of the sole entry in the dictionary. + return clustering[nodes] + return clustering + + +@not_implemented_for("directed") +@nx._dispatch +def generalized_degree(G, nodes=None): + r"""Compute the generalized degree for nodes. + + For each node, the generalized degree shows how many edges of given + triangle multiplicity the node is connected to. The triangle multiplicity + of an edge is the number of triangles an edge participates in. The + generalized degree of node :math:`i` can be written as a vector + :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, k_i^{(N-2)})` where + :math:`k_i^{(j)}` is the number of edges attached to node :math:`i` that + participate in :math:`j` triangles. + + Parameters + ---------- + G : graph + + nodes : container of nodes, optional (default=all nodes in G) + Compute the generalized degree for nodes in this container. + + Returns + ------- + out : Counter, or dictionary of Counters + Generalized degree of specified nodes. The Counter is keyed by edge + triangle multiplicity. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> print(nx.generalized_degree(G, 0)) + Counter({3: 4}) + >>> print(nx.generalized_degree(G)) + {0: Counter({3: 4}), 1: Counter({3: 4}), 2: Counter({3: 4}), 3: Counter({3: 4}), 4: Counter({3: 4})} + + To recover the number of triangles attached to a node: + + >>> k1 = nx.generalized_degree(G, 0) + >>> sum([k * v for k, v in k1.items()]) / 2 == nx.triangles(G, 0) + True + + Notes + ----- + In a network of N nodes, the highest triangle multiplicity an edge can have + is N-2. + + The return value does not include a `zero` entry if no edges of a + particular triangle multiplicity are present. + + The number of triangles node :math:`i` is attached to can be recovered from + the generalized degree :math:`\mathbf{k}_i=(k_i^{(0)}, \dotsc, + k_i^{(N-2)})` by :math:`(k_i^{(1)}+2k_i^{(2)}+\dotsc +(N-2)k_i^{(N-2)})/2`. + + References + ---------- + .. [1] Networks with arbitrary edge multiplicities by V. Zlatić, + D. Garlaschelli and G. Caldarelli, EPL (Europhysics Letters), + Volume 97, Number 2 (2012). 
+ https://iopscience.iop.org/article/10.1209/0295-5075/97/28005 + """ + if nodes in G: + return next(_triangles_and_degree_iter(G, nodes))[3] + return {v: gd for v, d, t, gd in _triangles_and_degree_iter(G, nodes)} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39381d9f163a5400f362b91a89215bfc915a8022 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.coloring.greedy_coloring import * +from networkx.algorithms.coloring.equitable_coloring import equitable_color + +__all__ = ["greedy_color", "equitable_color"] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f01fc17017cf5e45a4f9a908455598983e5985f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba1831fd4a76b644fd6c1d71d269fd7693d5af46 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bcd73203aca857faa2075d50729a0e34dd1ec48 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py b/MLPY/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..af1fb5a7e7c20392a82673406b923e89b6e525f2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/coloring/equitable_coloring.py @@ -0,0 +1,505 @@ +""" +Equitable coloring of graphs with bounded degree. +""" + +from collections import defaultdict + +import networkx as nx + +__all__ = ["equitable_color"] + + +@nx._dispatch +def is_coloring(G, coloring): + """Determine if the coloring is a valid coloring for the graph G.""" + # Verify that the coloring is valid. + return all(coloring[s] != coloring[d] for s, d in G.edges) + + +@nx._dispatch +def is_equitable(G, coloring, num_colors=None): + """Determines if the coloring is valid and equitable for the graph G.""" + + if not is_coloring(G, coloring): + return False + + # Verify whether it is equitable. + color_set_size = defaultdict(int) + for color in coloring.values(): + color_set_size[color] += 1 + + if num_colors is not None: + for color in range(num_colors): + if color not in color_set_size: + # These colors do not have any vertices attached to them. 
+                color_set_size[color] = 0
+
+    # If there are more than 2 distinct values, the coloring cannot be equitable
+    all_set_sizes = set(color_set_size.values())
+    if len(all_set_sizes) == 0 and num_colors is None:  # Was an empty graph
+        return True
+    elif len(all_set_sizes) == 1:
+        return True
+    elif len(all_set_sizes) == 2:
+        a, b = list(all_set_sizes)
+        return abs(a - b) <= 1
+    else:  # len(all_set_sizes) > 2:
+        return False
+
+
+def make_C_from_F(F):
+    C = defaultdict(list)
+    for node, color in F.items():
+        C[color].append(node)
+
+    return C
+
+
+def make_N_from_L_C(L, C):
+    nodes = L.keys()
+    colors = C.keys()
+    return {
+        (node, color): sum(1 for v in L[node] if v in C[color])
+        for node in nodes
+        for color in colors
+    }
+
+
+def make_H_from_C_N(C, N):
+    return {
+        (c1, c2): sum(1 for node in C[c1] if N[(node, c2)] == 0) for c1 in C for c2 in C
+    }
+
+
+def change_color(u, X, Y, N, H, F, C, L):
+    """Change the color of 'u' from X to Y and update N, H, F, C."""
+    assert F[u] == X and X != Y
+
+    # Change the class of 'u' from X to Y
+    F[u] = Y
+
+    for k in C:
+        # 'u' witnesses an edge from k -> Y instead of from k -> X now.
+        if N[u, k] == 0:
+            H[(X, k)] -= 1
+            H[(Y, k)] += 1
+
+    for v in L[u]:
+        # 'v' has lost a neighbor in X and gained one in Y
+        N[(v, X)] -= 1
+        N[(v, Y)] += 1
+
+        if N[(v, X)] == 0:
+            # 'v' witnesses F[v] -> X
+            H[(F[v], X)] += 1
+
+        if N[(v, Y)] == 1:
+            # 'v' no longer witnesses F[v] -> Y
+            H[(F[v], Y)] -= 1
+
+    C[X].remove(u)
+    C[Y].append(u)
+
+
+def move_witnesses(src_color, dst_color, N, H, F, C, T_cal, L):
+    """Move witness along a path from src_color to dst_color."""
+    X = src_color
+    while X != dst_color:
+        Y = T_cal[X]
+        # Move _any_ witness from X to Y = T_cal[X]
+        w = next(x for x in C[X] if N[(x, Y)] == 0)
+        change_color(w, X, Y, N=N, H=H, F=F, C=C, L=L)
+        X = Y
+
+
+@nx._dispatch
+def pad_graph(G, num_colors):
+    """Add a disconnected complete clique K_p such that the number of nodes in
+    the graph becomes a multiple of `num_colors`.
+
+    Assumes that the graph's nodes are labelled using integers.
+
+    Returns the number of nodes with each color.
+    """
+
+    n_ = len(G)
+    r = num_colors - 1
+
+    # Ensure that the number of nodes in G is a multiple of (r + 1)
+    s = n_ // (r + 1)
+    if n_ != s * (r + 1):
+        p = (r + 1) - n_ % (r + 1)
+        s += 1
+
+        # Complete graph K_p between (imaginary) nodes [n_, ... , n_ + p].
+        # Note: `p` is only defined in this branch, so the padding clique is
+        # built here, when padding is actually needed.
+        K = nx.relabel_nodes(nx.complete_graph(p), {idx: idx + n_ for idx in range(p)})
+        G.add_edges_from(K.edges)
+
+    return s
+
+
+def procedure_P(V_minus, V_plus, N, H, F, C, L, excluded_colors=None):
+    """Procedure P as described in the paper."""
+
+    if excluded_colors is None:
+        excluded_colors = set()
+
+    A_cal = set()
+    T_cal = {}
+    R_cal = []
+
+    # BFS to determine A_cal, i.e. colors reachable from V-
+    reachable = [V_minus]
+    marked = set(reachable)
+    idx = 0
+
+    while idx < len(reachable):
+        pop = reachable[idx]
+        idx += 1
+
+        A_cal.add(pop)
+        R_cal.append(pop)
+
+        # TODO: Checking whether a color has been visited can be made faster by
+        #   using a look-up table instead of testing for membership in a set by a
+        #   logarithmic factor.
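+        # A color k joins the next layer when H[(k, pop)] > 0, i.e. some node
+        # of color k has no neighbor colored `pop` and could therefore be
+        # recolored to `pop`.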
+        next_layer = []
+        for k in C:
+            if (
+                H[(k, pop)] > 0
+                and k not in A_cal
+                and k not in excluded_colors
+                and k not in marked
+            ):
+                next_layer.append(k)
+
+        for dst in next_layer:
+            # Record that `dst` can reach `pop`
+            T_cal[dst] = pop
+
+        marked.update(next_layer)
+        reachable.extend(next_layer)
+
+    # Variables for the algorithm
+    b = len(C) - len(A_cal)
+
+    if V_plus in A_cal:
+        # Easy case: V+ is in A_cal
+        # Move one node from V+ to V- using T_cal to find the parents.
+        move_witnesses(V_plus, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L)
+    else:
+        # If there is a solo edge, we can resolve the situation by
+        # moving witnesses from B to A, making G[A] equitable and then
+        # recursively balancing G[B - w] with a different V_minus but
+        # the same V_plus.
+
+        A_0 = set()
+        A_cal_0 = set()
+        num_terminal_sets_found = 0
+        made_equitable = False
+
+        for W_1 in R_cal[::-1]:
+            for v in C[W_1]:
+                X = None
+
+                for U in C:
+                    if N[(v, U)] == 0 and U in A_cal and U != W_1:
+                        X = U
+
+                # v does not witness an edge in H[A_cal]
+                if X is None:
+                    continue
+
+                for U in C:
+                    # Note: Departing from the paper here.
+                    if N[(v, U)] >= 1 and U not in A_cal:
+                        X_prime = U
+                        w = v
+
+                        try:
+                            # Finding the solo neighbor of w in X_prime
+                            y = next(
+                                node
+                                for node in L[w]
+                                if F[node] == X_prime and N[(node, W_1)] == 1
+                            )
+                        except StopIteration:
+                            pass
+                        else:
+                            W = W_1
+
+                            # Move w from W to X, now X has one extra node.
+                            change_color(w, W, X, N=N, H=H, F=F, C=C, L=L)
+
+                            # Move witness from X to V_minus, making the coloring
+                            # equitable.
+                            move_witnesses(
+                                src_color=X,
+                                dst_color=V_minus,
+                                N=N,
+                                H=H,
+                                F=F,
+                                C=C,
+                                T_cal=T_cal,
+                                L=L,
+                            )
+
+                            # Move y from X_prime to W, making W the correct size.
+                            change_color(y, X_prime, W, N=N, H=H, F=F, C=C, L=L)
+
+                            # Then call the procedure on G[B - y]
+                            procedure_P(
+                                V_minus=X_prime,
+                                V_plus=V_plus,
+                                N=N,
+                                H=H,
+                                C=C,
+                                F=F,
+                                L=L,
+                                excluded_colors=excluded_colors.union(A_cal),
+                            )
+                            made_equitable = True
+                            break
+
+                if made_equitable:
+                    break
+            else:
+                # No node in W_1 was found such that
+                # it had a solo-neighbor.
+                A_cal_0.add(W_1)
+                A_0.update(C[W_1])
+                num_terminal_sets_found += 1
+
+            if num_terminal_sets_found == b:
+                # Otherwise, construct the maximal independent set and find
+                # a pair of z_1, z_2 as in Case II.
+
+                # BFS to determine B_cal': the set of colors reachable from V+
+                B_cal_prime = set()
+                T_cal_prime = {}
+
+                reachable = [V_plus]
+                marked = set(reachable)
+                idx = 0
+                while idx < len(reachable):
+                    pop = reachable[idx]
+                    idx += 1
+
+                    B_cal_prime.add(pop)
+
+                    # No need to check for excluded_colors here because
+                    # they only exclude colors from A_cal
+                    next_layer = [
+                        k
+                        for k in C
+                        if H[(pop, k)] > 0 and k not in B_cal_prime and k not in marked
+                    ]
+
+                    for dst in next_layer:
+                        T_cal_prime[pop] = dst
+
+                    marked.update(next_layer)
+                    reachable.extend(next_layer)
+
+                # Construct the independent set of G[B']
+                I_set = set()
+                I_covered = set()
+                W_covering = {}
+
+                B_prime = [node for k in B_cal_prime for node in C[k]]
+
+                # Add the nodes in V_plus to I first.
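+                # Greedy scan: each chosen node z covers itself and its
+                # neighbors. A neighbor w whose color is in a terminal set
+                # (A_cal_0) and that is z's solo neighbor of that color is
+                # recorded in W_covering; a second such z for the same w
+                # yields the pair (z_1, z_2) of Case II below.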
+                for z in C[V_plus] + B_prime:
+                    if z in I_covered or F[z] not in B_cal_prime:
+                        continue
+
+                    I_set.add(z)
+                    I_covered.add(z)
+                    I_covered.update(list(L[z]))
+
+                    for w in L[z]:
+                        if F[w] in A_cal_0 and N[(z, F[w])] == 1:
+                            if w not in W_covering:
+                                W_covering[w] = z
+                            else:
+                                # Found z1, z2 which have the same solo
+                                # neighbor in some W
+                                z_1 = W_covering[w]
+                                # z_2 = z
+
+                                Z = F[z_1]
+                                W = F[w]
+
+                                # shift nodes along W, V-
+                                move_witnesses(
+                                    W, V_minus, N=N, H=H, F=F, C=C, T_cal=T_cal, L=L
+                                )
+
+                                # shift nodes along V+ to Z
+                                move_witnesses(
+                                    V_plus,
+                                    Z,
+                                    N=N,
+                                    H=H,
+                                    F=F,
+                                    C=C,
+                                    T_cal=T_cal_prime,
+                                    L=L,
+                                )
+
+                                # change color of z_1 to W
+                                change_color(z_1, Z, W, N=N, H=H, F=F, C=C, L=L)
+
+                                # change color of w to some color in B_cal
+                                W_plus = next(
+                                    k for k in C if N[(w, k)] == 0 and k not in A_cal
+                                )
+                                change_color(w, W, W_plus, N=N, H=H, F=F, C=C, L=L)
+
+                                # recurse with G[B \cup W*]
+                                excluded_colors.update(
+                                    [k for k in C if k != W and k not in B_cal_prime]
+                                )
+                                procedure_P(
+                                    V_minus=W,
+                                    V_plus=W_plus,
+                                    N=N,
+                                    H=H,
+                                    C=C,
+                                    F=F,
+                                    L=L,
+                                    excluded_colors=excluded_colors,
+                                )
+
+                                made_equitable = True
+                                break
+
+                    if made_equitable:
+                        break
+                else:
+                    assert False, (
+                        "Must find a w which is the solo neighbor "
+                        "of two vertices in B_cal_prime."
+                    )
+
+            if made_equitable:
+                break
+
+
+@nx._dispatch
+def equitable_color(G, num_colors):
+    """Provides an equitable coloring for nodes of `G`.
+
+    Attempts to color a graph using `num_colors` colors, where no neighbor of
+    a node has the same color as the node itself and the numbers of nodes with
+    each color differ by at most 1. `num_colors` must be greater than the
+    maximum degree of `G`. The algorithm is described in [1]_ and has
+    complexity O(num_colors * n**2).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The nodes of this graph will be colored.
+
+    num_colors : int
+        The number of colors to use. This number must be at least one more
+        than the maximum degree of nodes in the graph.
+
+    Returns
+    -------
+    A dictionary with keys representing nodes and values representing
+    corresponding coloring.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> nx.coloring.equitable_color(G, num_colors=3)  # doctest: +SKIP
+    {0: 2, 1: 1, 2: 2, 3: 0}
+
+    Raises
+    ------
+    NetworkXAlgorithmError
+        If `num_colors` is not greater than the maximum degree of the graph `G`
+
+    References
+    ----------
+    .. [1] Kierstead, H. A., Kostochka, A. V., Mydlarz, M., & Szemerédi, E.
+        (2010). A fast algorithm for equitable coloring. Combinatorica, 30(2),
+        217-224.
+    """
+
+    # Map nodes to integers for simplicity later.
+    nodes_to_int = {}
+    int_to_nodes = {}
+
+    for idx, node in enumerate(G.nodes):
+        nodes_to_int[node] = idx
+        int_to_nodes[idx] = node
+
+    G = nx.relabel_nodes(G, nodes_to_int, copy=True)
+
+    # Basic graph statistics and sanity check.
+    if len(G.nodes) > 0:
+        r_ = max(G.degree(node) for node in G.nodes)
+    else:
+        r_ = 0
+
+    if r_ >= num_colors:
+        raise nx.NetworkXAlgorithmError(
+            f"Graph has maximum degree {r_}, needs "
+            f"{r_ + 1} (> {num_colors}) colors for guaranteed coloring."
+        )
+
+    # Ensure that the number of nodes in G is a multiple of (r + 1)
+    pad_graph(G, num_colors)
+
+    # Starting the algorithm.
+    # L = {node: list(G.neighbors(node)) for node in G.nodes}
+    L_ = {node: [] for node in G.nodes}
+
+    # Arbitrary equitable allocation of colors to nodes.
+    F = {node: idx % num_colors for idx, node in enumerate(G.nodes)}
+
+    C = make_C_from_F(F)
+
+    # The neighborhood is empty initially.
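+    # N[(v, c)] counts the neighbors of node v that have color c, and
+    # H[(c1, c2)] counts the nodes of color c1 with no neighbor of color c2.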
+ N = make_N_from_L_C(L_, C) + + # Currently all nodes witness all edges. + H = make_H_from_C_N(C, N) + + # Start of algorithm. + edges_seen = set() + + for u in sorted(G.nodes): + for v in sorted(G.neighbors(u)): + # Do not double count edges if (v, u) has already been seen. + if (v, u) in edges_seen: + continue + + edges_seen.add((u, v)) + + L_[u].append(v) + L_[v].append(u) + + N[(u, F[v])] += 1 + N[(v, F[u])] += 1 + + if F[u] != F[v]: + # Were 'u' and 'v' witnesses for F[u] -> F[v] or F[v] -> F[u]? + if N[(u, F[v])] == 1: + H[F[u], F[v]] -= 1 # u cannot witness an edge between F[u], F[v] + + if N[(v, F[u])] == 1: + H[F[v], F[u]] -= 1 # v cannot witness an edge between F[v], F[u] + + if N[(u, F[u])] != 0: + # Find the first color where 'u' does not have any neighbors. + Y = next(k for k in C if N[(u, k)] == 0) + X = F[u] + change_color(u, X, Y, N=N, H=H, F=F, C=C, L=L_) + + # Procedure P + procedure_P(V_minus=X, V_plus=Y, N=N, H=H, F=F, C=C, L=L_) + + return {int_to_nodes[x]: F[x] for x in int_to_nodes} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py b/MLPY/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..170b2275d2960c2c2b35a29ad162d0f519994df8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/coloring/greedy_coloring.py @@ -0,0 +1,572 @@ +""" +Greedy graph coloring using various strategies. +""" +import itertools +from collections import defaultdict, deque + +import networkx as nx +from networkx.utils import arbitrary_element, py_random_state + +__all__ = [ + "greedy_color", + "strategy_connected_sequential", + "strategy_connected_sequential_bfs", + "strategy_connected_sequential_dfs", + "strategy_independent_set", + "strategy_largest_first", + "strategy_random_sequential", + "strategy_saturation_largest_first", + "strategy_smallest_last", +] + + +@nx._dispatch +def strategy_largest_first(G, colors): + """Returns a list of the nodes of ``G`` in decreasing order by + degree. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + return sorted(G, key=G.degree, reverse=True) + + +@py_random_state(2) +@nx._dispatch +def strategy_random_sequential(G, colors, seed=None): + """Returns a random permutation of the nodes of ``G`` as a list. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + """ + nodes = list(G) + seed.shuffle(nodes) + return nodes + + +@nx._dispatch +def strategy_smallest_last(G, colors): + """Returns a deque of the nodes of ``G``, "smallest" last. + + Specifically, the degrees of each node are tracked in a bucket queue. + From this, the node of minimum degree is repeatedly popped from the + graph, updating its neighbors' degrees. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + This implementation of the strategy runs in $O(n + m)$ time + (ignoring polylogarithmic factors), where $n$ is the number of nodes + and $m$ is the number of edges. + + This strategy is related to :func:`strategy_independent_set`: if we + interpret each node removed as an independent set of size one, then + this strategy chooses an independent set of size one instead of a + maximal independent set. + + """ + H = G.copy() + result = deque() + + # Build initial degree list (i.e. 
the bucket queue data structure)
+    degrees = defaultdict(set)  # set(), for fast random-access removals
+    lbound = float("inf")
+    for node, d in H.degree():
+        degrees[d].add(node)
+        lbound = min(lbound, d)  # Lower bound on min-degree.
+
+    def find_min_degree():
+        # Save time by starting the iterator at `lbound`, not 0.
+        # The value that we find will be our new `lbound`, which we set later.
+        return next(d for d in itertools.count(lbound) if d in degrees)
+
+    for _ in G:
+        # Pop a min-degree node and add it to the list.
+        min_degree = find_min_degree()
+        u = degrees[min_degree].pop()
+        if not degrees[min_degree]:  # Clean up the degree list.
+            del degrees[min_degree]
+        result.appendleft(u)
+
+        # Update degrees of removed node's neighbors.
+        for v in H[u]:
+            degree = H.degree(v)
+            degrees[degree].remove(v)
+            if not degrees[degree]:  # Clean up the degree list.
+                del degrees[degree]
+            degrees[degree - 1].add(v)
+
+        # Finally, remove the node.
+        H.remove_node(u)
+        lbound = min_degree - 1  # Subtract 1 in case of tied neighbors.
+
+    return result
+
+
+def _maximal_independent_set(G):
+    """Returns a maximal independent set of nodes in ``G`` by repeatedly
+    choosing an independent node of minimum degree (with respect to the
+    subgraph of unchosen nodes).
+
+    """
+    result = set()
+    remaining = set(G)
+    while remaining:
+        G = G.subgraph(remaining)
+        v = min(remaining, key=G.degree)
+        result.add(v)
+        remaining -= set(G[v]) | {v}
+    return result
+
+
+@nx._dispatch
+def strategy_independent_set(G, colors):
+    """Uses a greedy independent set removal strategy to determine the
+    colors.
+
+    Unlike most strategy functions in this module, this one is a
+    generator: it yields the nodes of ``G`` one maximal independent set
+    at a time, in the order in which they should be colored. The
+    ``colors`` argument is ignored.
+
+    This algorithm repeatedly finds and removes a maximal independent
+    set, assigning each node in the set an unused color.
+
+    ``G`` is a NetworkX graph.
+
+    This strategy is related to :func:`strategy_smallest_last`: in that
+    strategy, an independent set of size one is chosen at each step
+    instead of a maximal independent set.
+
+    """
+    remaining_nodes = set(G)
+    while len(remaining_nodes) > 0:
+        nodes = _maximal_independent_set(G.subgraph(remaining_nodes))
+        remaining_nodes -= nodes
+        yield from nodes
+
+
+@nx._dispatch
+def strategy_connected_sequential_bfs(G, colors):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    breadth-first traversal.
+
+    The generated sequence has the property that for each node except
+    the first, at least one neighbor appeared earlier in the sequence.
+
+    ``G`` is a NetworkX graph. ``colors`` is ignored.
+
+    """
+    return strategy_connected_sequential(G, colors, "bfs")
+
+
+@nx._dispatch
+def strategy_connected_sequential_dfs(G, colors):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    depth-first traversal.
+
+    The generated sequence has the property that for each node except
+    the first, at least one neighbor appeared earlier in the sequence.
+
+    ``G`` is a NetworkX graph. ``colors`` is ignored.
+
+    """
+    return strategy_connected_sequential(G, colors, "dfs")
+
+
+@nx._dispatch
+def strategy_connected_sequential(G, colors, traversal="bfs"):
+    """Returns an iterable over nodes in ``G`` in the order given by a
+    breadth-first or depth-first traversal.
+
+    ``traversal`` must be one of the strings ``'dfs'`` or ``'bfs'``,
+    representing depth-first traversal or breadth-first traversal,
+    respectively.
+ + The generated sequence has the property that for each node except + the first, at least one neighbor appeared earlier in the sequence. + + ``G`` is a NetworkX graph. ``colors`` is ignored. + + """ + if traversal == "bfs": + traverse = nx.bfs_edges + elif traversal == "dfs": + traverse = nx.dfs_edges + else: + raise nx.NetworkXError( + "Please specify one of the strings 'bfs' or" + " 'dfs' for connected sequential ordering" + ) + for component in nx.connected_components(G): + source = arbitrary_element(component) + # Yield the source node, then all the nodes in the specified + # traversal order. + yield source + for _, end in traverse(G.subgraph(component), source): + yield end + + +@nx._dispatch +def strategy_saturation_largest_first(G, colors): + """Iterates over all the nodes of ``G`` in "saturation order" (also + known as "DSATUR"). + + ``G`` is a NetworkX graph. ``colors`` is a dictionary mapping nodes of + ``G`` to colors, for those nodes that have already been colored. + + """ + distinct_colors = {v: set() for v in G} + + # Add the node color assignments given in colors to the + # distinct colors set for each neighbor of that node + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Check that the color assignments in colors are valid + # i.e. no neighboring nodes have the same color + if len(colors) >= 2: + for node, color in colors.items(): + if color in distinct_colors[node]: + raise nx.NetworkXError("Neighboring nodes must have different colors") + + # If 0 nodes have been colored, simply choose the node of highest degree. + if not colors: + node = max(G, key=G.degree) + yield node + # Add the color 0 to the distinct colors set for each + # neighbor of that node. + for v in G[node]: + distinct_colors[v].add(0) + + while len(G) != len(colors): + # Update the distinct color sets for the neighbors. + for node, color in colors.items(): + for neighbor in G[node]: + distinct_colors[neighbor].add(color) + + # Compute the maximum saturation and the set of nodes that + # achieve that saturation. + saturation = {v: len(c) for v, c in distinct_colors.items() if v not in colors} + # Yield the node with the highest saturation, and break ties by + # degree. + node = max(saturation, key=lambda v: (saturation[v], G.degree(v))) + yield node + + +#: Dictionary mapping name of a strategy as a string to the strategy function. +STRATEGIES = { + "largest_first": strategy_largest_first, + "random_sequential": strategy_random_sequential, + "smallest_last": strategy_smallest_last, + "independent_set": strategy_independent_set, + "connected_sequential_bfs": strategy_connected_sequential_bfs, + "connected_sequential_dfs": strategy_connected_sequential_dfs, + "connected_sequential": strategy_connected_sequential, + "saturation_largest_first": strategy_saturation_largest_first, + "DSATUR": strategy_saturation_largest_first, +} + + +@nx._dispatch +def greedy_color(G, strategy="largest_first", interchange=False): + """Color a graph using various strategies of greedy graph coloring. + + Attempts to color a graph using as few colors as possible, where no + neighbours of a node can have same color as the node itself. The + given strategy determines the order in which nodes are colored. + + The strategies are described in [1]_, and smallest-last is based on + [2]_. 
+ + Parameters + ---------- + G : NetworkX graph + + strategy : string or function(G, colors) + A function (or a string representing a function) that provides + the coloring strategy, by returning nodes in the ordering they + should be colored. ``G`` is the graph, and ``colors`` is a + dictionary of the currently assigned colors, keyed by nodes. The + function must return an iterable over all the nodes in ``G``. + + If the strategy function is an iterator generator (that is, a + function with ``yield`` statements), keep in mind that the + ``colors`` dictionary will be updated after each ``yield``, since + this function chooses colors greedily. + + If ``strategy`` is a string, it must be one of the following, + each of which represents one of the built-in strategy functions. + + * ``'largest_first'`` + * ``'random_sequential'`` + * ``'smallest_last'`` + * ``'independent_set'`` + * ``'connected_sequential_bfs'`` + * ``'connected_sequential_dfs'`` + * ``'connected_sequential'`` (alias for the previous strategy) + * ``'saturation_largest_first'`` + * ``'DSATUR'`` (alias for the previous strategy) + + interchange: bool + Will use the color interchange algorithm described by [3]_ if set + to ``True``. + + Note that ``saturation_largest_first`` and ``independent_set`` + do not work with interchange. Furthermore, if you use + interchange with your own strategy function, you cannot rely + on the values in the ``colors`` argument. + + Returns + ------- + A dictionary with keys representing nodes and values representing + corresponding coloring. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> d = nx.coloring.greedy_color(G, strategy="largest_first") + >>> d in [{0: 0, 1: 1, 2: 0, 3: 1}, {0: 1, 1: 0, 2: 1, 3: 0}] + True + + Raises + ------ + NetworkXPointlessConcept + If ``strategy`` is ``saturation_largest_first`` or + ``independent_set`` and ``interchange`` is ``True``. + + References + ---------- + .. [1] Adrian Kosowski, and Krzysztof Manuszewski, + Classical Coloring of Graphs, Graph Colorings, 2-19, 2004. + ISBN 0-8218-3458-4. + .. [2] David W. Matula, and Leland L. Beck, "Smallest-last + ordering and clustering and graph coloring algorithms." *J. ACM* 30, + 3 (July 1983), 417–427. + .. [3] Maciej M. Sysło, Narsingh Deo, Janusz S. Kowalik, + Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983. + ISBN 0-486-45353-7. + + """ + if len(G) == 0: + return {} + # Determine the strategy provided by the caller. + strategy = STRATEGIES.get(strategy, strategy) + if not callable(strategy): + raise nx.NetworkXError( + "strategy must be callable or a valid string. " f"{strategy} not valid." + ) + # Perform some validation on the arguments before executing any + # strategy functions. + if interchange: + if strategy is strategy_independent_set: + msg = "interchange cannot be used with independent_set" + raise nx.NetworkXPointlessConcept(msg) + if strategy is strategy_saturation_largest_first: + msg = "interchange cannot be used with" " saturation_largest_first" + raise nx.NetworkXPointlessConcept(msg) + colors = {} + nodes = strategy(G, colors) + if interchange: + return _greedy_coloring_with_interchange(G, nodes) + for u in nodes: + # Set to keep track of colors of neighbours + neighbour_colors = {colors[v] for v in G[u] if v in colors} + # Find the first unused color. + for color in itertools.count(): + if color not in neighbour_colors: + break + # Assign the new color to the current node. 
+        colors[u] = color
+    return colors
+
+
+# Tools for coloring with interchanges
+class _Node:
+    __slots__ = ["node_id", "color", "adj_list", "adj_color"]
+
+    def __init__(self, node_id, n):
+        self.node_id = node_id
+        self.color = -1
+        self.adj_list = None
+        self.adj_color = [None for _ in range(n)]
+
+    def __repr__(self):
+        return (
+            f"Node_id: {self.node_id}, Color: {self.color}, "
+            f"Adj_list: ({self.adj_list}), adj_color: ({self.adj_color})"
+        )
+
+    def assign_color(self, adj_entry, color):
+        adj_entry.col_prev = None
+        adj_entry.col_next = self.adj_color[color]
+        self.adj_color[color] = adj_entry
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry
+
+    def clear_color(self, adj_entry, color):
+        if adj_entry.col_prev is None:
+            self.adj_color[color] = adj_entry.col_next
+        else:
+            adj_entry.col_prev.col_next = adj_entry.col_next
+        if adj_entry.col_next is not None:
+            adj_entry.col_next.col_prev = adj_entry.col_prev
+
+    def iter_neighbors(self):
+        adj_node = self.adj_list
+        while adj_node is not None:
+            yield adj_node
+            adj_node = adj_node.next
+
+    def iter_neighbors_color(self, color):
+        adj_color_node = self.adj_color[color]
+        while adj_color_node is not None:
+            yield adj_color_node.node_id
+            adj_color_node = adj_color_node.col_next
+
+
+class _AdjEntry:
+    __slots__ = ["node_id", "next", "mate", "col_next", "col_prev"]
+
+    def __init__(self, node_id):
+        self.node_id = node_id
+        self.next = None
+        self.mate = None
+        self.col_next = None
+        self.col_prev = None
+
+    def __repr__(self):
+        col_next = None if self.col_next is None else self.col_next.node_id
+        col_prev = None if self.col_prev is None else self.col_prev.node_id
+        return (
+            f"Node_id: {self.node_id}, Next: ({self.next}), "
+            f"Mate: ({self.mate.node_id}), "
+            f"col_next: ({col_next}), col_prev: ({col_prev})"
+        )
+
+
+def _greedy_coloring_with_interchange(G, nodes):
+    """Return a coloring for ``G`` using the interchange approach.
+
+    This procedure is an adaptation of the algorithm described by [1]_,
+    and is an implementation of coloring with interchange. Be advised
+    that the data structures used are rather complex because they are
+    optimized to minimize the time spent identifying subcomponents of
+    the graph, which are possible candidates for color interchange.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph to be colored
+
+    nodes : list
+        nodes ordered using the strategy of choice
+
+    Returns
+    -------
+    dict :
+        A dictionary keyed by node to a color value
+
+    References
+    ----------
+    .. [1] Maciej M. Syslo, Narsingh Deo, Janusz S. Kowalik,
+       Discrete Optimization Algorithms with Pascal Programs, 415-424, 1983.
+       ISBN 0-486-45353-7.
+ """ + n = len(G) + + graph = {node: _Node(node, n) for node in G} + + for node1, node2 in G.edges(): + adj_entry1 = _AdjEntry(node2) + adj_entry2 = _AdjEntry(node1) + adj_entry1.mate = adj_entry2 + adj_entry2.mate = adj_entry1 + node1_head = graph[node1].adj_list + adj_entry1.next = node1_head + graph[node1].adj_list = adj_entry1 + node2_head = graph[node2].adj_list + adj_entry2.next = node2_head + graph[node2].adj_list = adj_entry2 + + k = 0 + for node in nodes: + # Find the smallest possible, unused color + neighbors = graph[node].iter_neighbors() + col_used = {graph[adj_node.node_id].color for adj_node in neighbors} + col_used.discard(-1) + k1 = next(itertools.dropwhile(lambda x: x in col_used, itertools.count())) + + # k1 is now the lowest available color + if k1 > k: + connected = True + visited = set() + col1 = -1 + col2 = -1 + while connected and col1 < k: + col1 += 1 + neighbor_cols = graph[node].iter_neighbors_color(col1) + col1_adj = list(neighbor_cols) + + col2 = col1 + while connected and col2 < k: + col2 += 1 + visited = set(col1_adj) + frontier = list(col1_adj) + i = 0 + while i < len(frontier): + search_node = frontier[i] + i += 1 + col_opp = col2 if graph[search_node].color == col1 else col1 + neighbor_cols = graph[search_node].iter_neighbors_color(col_opp) + + for neighbor in neighbor_cols: + if neighbor not in visited: + visited.add(neighbor) + frontier.append(neighbor) + + # Search if node is not adj to any col2 vertex + connected = ( + len( + visited.intersection(graph[node].iter_neighbors_color(col2)) + ) + > 0 + ) + + # If connected is false then we can swap !!! + if not connected: + # Update all the nodes in the component + for search_node in visited: + graph[search_node].color = ( + col2 if graph[search_node].color == col1 else col1 + ) + col2_adj = graph[search_node].adj_color[col2] + graph[search_node].adj_color[col2] = graph[search_node].adj_color[ + col1 + ] + graph[search_node].adj_color[col1] = col2_adj + + # Update all the neighboring nodes + for search_node in visited: + col = graph[search_node].color + col_opp = col1 if col == col2 else col2 + for adj_node in graph[search_node].iter_neighbors(): + if graph[adj_node.node_id].color != col_opp: + # Direct reference to entry + adj_mate = adj_node.mate + graph[adj_node.node_id].clear_color(adj_mate, col_opp) + graph[adj_node.node_id].assign_color(adj_mate, col) + k1 = col1 + + # We can color this node color k1 + graph[node].color = k1 + k = max(k1, k) + + # Update the neighbors of this node + for adj_node in graph[node].iter_neighbors(): + adj_mate = adj_node.mate + graph[adj_node.node_id].assign_color(adj_mate, k1) + + return {node.node_id: node.color for node in graph.values()} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2216ac45c5d21b9d255c9a0e8351331503036062 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a55abd770344c60e10838cb8c58bf2c06e36b7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py new file mode 100644 index 0000000000000000000000000000000000000000..a2a4e39589ea981445f6e9e222087714ef88e141 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/coloring/tests/test_coloring.py @@ -0,0 +1,865 @@ +"""Greedy coloring test suite. + +""" + +import itertools + +import pytest + +import networkx as nx + +is_coloring = nx.algorithms.coloring.equitable_coloring.is_coloring +is_equitable = nx.algorithms.coloring.equitable_coloring.is_equitable + + +ALL_STRATEGIES = [ + "largest_first", + "random_sequential", + "smallest_last", + "independent_set", + "connected_sequential_bfs", + "connected_sequential_dfs", + "connected_sequential", + "saturation_largest_first", + "DSATUR", +] + +# List of strategies where interchange=True results in an error +INTERCHANGE_INVALID = ["independent_set", "saturation_largest_first", "DSATUR"] + + +class TestColoring: + def test_basic_cases(self): + def check_basic_case(graph_func, n_nodes, strategy, interchange): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + assert verify_length(coloring, n_nodes) + assert verify_coloring(graph, coloring) + + for graph_func, n_nodes in BASIC_TEST_CASES.items(): + for interchange in [True, False]: + for strategy in ALL_STRATEGIES: + check_basic_case(graph_func, n_nodes, strategy, False) + if strategy not in INTERCHANGE_INVALID: + check_basic_case(graph_func, n_nodes, strategy, True) + + def test_special_cases(self): + def check_special_case(strategy, graph_func, interchange, colors): + graph = graph_func() + coloring = nx.coloring.greedy_color( + graph, strategy=strategy, interchange=interchange + ) + if not hasattr(colors, "__len__"): + colors = [colors] + assert any(verify_length(coloring, n_colors) for n_colors in colors) + assert verify_coloring(graph, coloring) + + for strategy, arglist in SPECIAL_TEST_CASES.items(): + for args in arglist: + check_special_case(strategy, args[0], args[1], args[2]) + + def test_interchange_invalid(self): + graph = one_node_graph() + for strategy in INTERCHANGE_INVALID: + pytest.raises( + nx.NetworkXPointlessConcept, + nx.coloring.greedy_color, + graph, + strategy=strategy, + interchange=True, + ) + + def test_bad_inputs(self): + graph = one_node_graph() + pytest.raises( + nx.NetworkXError, + nx.coloring.greedy_color, + graph, + strategy="invalid strategy", + ) + + def test_strategy_as_function(self): + graph = lf_shc() + colors_1 = nx.coloring.greedy_color(graph, "largest_first") + colors_2 = nx.coloring.greedy_color(graph, nx.coloring.strategy_largest_first) + assert colors_1 == colors_2 + + def test_seed_argument(self): + graph = lf_shc() + rs = nx.coloring.strategy_random_sequential + c1 = nx.coloring.greedy_color(graph, lambda g, c: rs(g, c, seed=1)) + for u, v in graph.edges: + assert c1[u] != c1[v] + + def test_is_coloring(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_coloring(G, coloring) + + coloring[0] = 1 + assert not is_coloring(G, 
coloring) + assert not is_equitable(G, coloring) + + def test_is_equitable(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2)]) + coloring = {0: 0, 1: 1, 2: 0} + assert is_equitable(G, coloring) + + G.add_edges_from([(2, 3), (2, 4), (2, 5)]) + coloring[3] = 1 + coloring[4] = 1 + coloring[5] = 1 + assert is_coloring(G, coloring) + assert not is_equitable(G, coloring) + + def test_num_colors(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (0, 3)]) + pytest.raises(nx.NetworkXAlgorithmError, nx.coloring.equitable_color, G, 2) + + def test_equitable_color(self): + G = nx.fast_gnp_random_graph(n=10, p=0.2, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_empty(self): + G = nx.empty_graph() + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring) + + def test_equitable_color_large(self): + G = nx.fast_gnp_random_graph(100, 0.1, seed=42) + coloring = nx.coloring.equitable_color(G, max_degree(G) + 1) + assert is_equitable(G, coloring, num_colors=max_degree(G) + 1) + + def test_case_V_plus_not_in_A_cal(self): + # Hand crafted case to avoid the easy case. + L = { + 0: [2, 5], + 1: [3, 4], + 2: [0, 8], + 3: [1, 7], + 4: [1, 6], + 5: [0, 6], + 6: [4, 5], + 7: [3], + 8: [2], + } + + F = { + # Color 0 + 0: 0, + 1: 0, + # Color 1 + 2: 1, + 3: 1, + 4: 1, + 5: 1, + # Color 2 + 6: 2, + 7: 2, + 8: 2, + } + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_cast_no_solo(self): + L = { + 0: [8, 9], + 1: [10, 11], + 2: [8], + 3: [9], + 4: [10, 11], + 5: [8], + 6: [9], + 7: [10, 11], + 8: [0, 2, 5], + 9: [0, 3, 6], + 10: [1, 4, 7], + 11: [1, 4, 7], + } + + F = {0: 0, 1: 0, 2: 2, 3: 2, 4: 2, 5: 3, 6: 3, 7: 3, 8: 1, 9: 1, 10: 1, 11: 1} + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=1, N=N, H=H, F=F, C=C, L=L + ) + check_state(L=L, N=N, H=H, F=F, C=C) + + def test_hard_prob(self): + # Tests for two levels of recursion. + num_colors, s = 5, 5 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 10), + (0, 11), + (0, 12), + (0, 23), + (10, 4), + (10, 9), + (10, 20), + (11, 4), + (11, 8), + (11, 16), + (12, 9), + (12, 22), + (12, 23), + (23, 7), + (1, 17), + (1, 18), + (1, 19), + (1, 24), + (17, 5), + (17, 13), + (17, 22), + (18, 5), + (19, 5), + (19, 6), + (19, 8), + (24, 7), + (24, 16), + (2, 4), + (2, 13), + (2, 14), + (2, 15), + (4, 6), + (13, 5), + (13, 21), + (14, 6), + (14, 15), + (15, 6), + (15, 21), + (3, 16), + (3, 20), + (3, 21), + (3, 22), + (16, 8), + (20, 8), + (21, 9), + (22, 7), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_hardest_prob(self): + # Tests for two levels of recursion. 
+ num_colors, s = 10, 4 + + G = nx.Graph() + G.add_edges_from( + [ + (0, 19), + (0, 24), + (0, 29), + (0, 30), + (0, 35), + (19, 3), + (19, 7), + (19, 9), + (19, 15), + (19, 21), + (19, 24), + (19, 30), + (19, 38), + (24, 5), + (24, 11), + (24, 13), + (24, 20), + (24, 30), + (24, 37), + (24, 38), + (29, 6), + (29, 10), + (29, 13), + (29, 15), + (29, 16), + (29, 17), + (29, 20), + (29, 26), + (30, 6), + (30, 10), + (30, 15), + (30, 22), + (30, 23), + (30, 39), + (35, 6), + (35, 9), + (35, 14), + (35, 18), + (35, 22), + (35, 23), + (35, 25), + (35, 27), + (1, 20), + (1, 26), + (1, 31), + (1, 34), + (1, 38), + (20, 4), + (20, 8), + (20, 14), + (20, 18), + (20, 28), + (20, 33), + (26, 7), + (26, 10), + (26, 14), + (26, 18), + (26, 21), + (26, 32), + (26, 39), + (31, 5), + (31, 8), + (31, 13), + (31, 16), + (31, 17), + (31, 21), + (31, 25), + (31, 27), + (34, 7), + (34, 8), + (34, 13), + (34, 18), + (34, 22), + (34, 23), + (34, 25), + (34, 27), + (38, 4), + (38, 9), + (38, 12), + (38, 14), + (38, 21), + (38, 27), + (2, 3), + (2, 18), + (2, 21), + (2, 28), + (2, 32), + (2, 33), + (2, 36), + (2, 37), + (2, 39), + (3, 5), + (3, 9), + (3, 13), + (3, 22), + (3, 23), + (3, 25), + (3, 27), + (18, 6), + (18, 11), + (18, 15), + (18, 39), + (21, 4), + (21, 10), + (21, 14), + (21, 36), + (28, 6), + (28, 10), + (28, 14), + (28, 16), + (28, 17), + (28, 25), + (28, 27), + (32, 5), + (32, 10), + (32, 12), + (32, 16), + (32, 17), + (32, 22), + (32, 23), + (33, 7), + (33, 10), + (33, 12), + (33, 16), + (33, 17), + (33, 25), + (33, 27), + (36, 5), + (36, 8), + (36, 15), + (36, 16), + (36, 17), + (36, 25), + (36, 27), + (37, 5), + (37, 11), + (37, 15), + (37, 16), + (37, 17), + (37, 22), + (37, 23), + (39, 7), + (39, 8), + (39, 15), + (39, 22), + (39, 23), + ] + ) + F = {node: node // s for node in range(num_colors * s)} + F[s - 1] = num_colors - 1 # V- = 0, V+ = num_colors - 1 + + params = make_params_from_graph(G=G, F=F) + + nx.algorithms.coloring.equitable_coloring.procedure_P( + V_minus=0, V_plus=num_colors - 1, **params + ) + check_state(**params) + + def test_strategy_saturation_largest_first(self): + def color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=None, + nodes_to_add_between_calls=1, + ): + color_assignments = [] + aux_colored_nodes = colored_nodes.copy() + + node_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first( + G, aux_colored_nodes + ) + + for u in node_iterator: + # Set to keep track of colors of neighbours + neighbour_colors = { + aux_colored_nodes[v] for v in G[u] if v in aux_colored_nodes + } + # Find the first unused color. + for color in itertools.count(): + if color not in neighbour_colors: + break + aux_colored_nodes[u] = color + color_assignments.append((u, color)) + + # Color nodes between iterations + for i in range(nodes_to_add_between_calls - 1): + if not len(color_assignments) + len(colored_nodes) >= len( + full_color_assignment + ): + full_color_assignment_node, color = full_color_assignment[ + len(color_assignments) + len(colored_nodes) + ] + + # Assign the new color to the current node. 
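+                        # (The color comes from the reference run; this
+                        # simulates an external caller coloring nodes while
+                        # the strategy generator is paused between yields.)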
+ aux_colored_nodes[full_color_assignment_node] = color + color_assignments.append((full_color_assignment_node, color)) + + return color_assignments, aux_colored_nodes + + for G, _, _ in SPECIAL_TEST_CASES["saturation_largest_first"]: + G = G() + + # Check that function still works when nodes are colored between iterations + for nodes_to_add_between_calls in range(1, 5): + # Get a full color assignment, (including the order in which nodes were colored) + colored_nodes = {} + full_color_assignment, full_colored_nodes = color_remaining_nodes( + G, colored_nodes + ) + + # For each node in the color assignment, add it to colored_nodes and re-run the function + for ind, (node, color) in enumerate(full_color_assignment): + colored_nodes[node] = color + + ( + partial_color_assignment, + partial_colored_nodes, + ) = color_remaining_nodes( + G, + colored_nodes, + full_color_assignment=full_color_assignment, + nodes_to_add_between_calls=nodes_to_add_between_calls, + ) + + # Check that the color assignment and order of remaining nodes are the same + assert full_color_assignment[ind + 1 :] == partial_color_assignment + assert full_colored_nodes == partial_colored_nodes + + +# ############################ Utility functions ############################ +def verify_coloring(graph, coloring): + for node in graph.nodes(): + if node not in coloring: + return False + + color = coloring[node] + for neighbor in graph.neighbors(node): + if coloring[neighbor] == color: + return False + + return True + + +def verify_length(coloring, expected): + coloring = dict_to_sets(coloring) + return len(coloring) == expected + + +def dict_to_sets(colors): + if len(colors) == 0: + return [] + + k = max(colors.values()) + 1 + sets = [set() for _ in range(k)] + + for node, color in colors.items(): + sets[color].add(node) + + return sets + + +# ############################ Graph Generation ############################ + + +def empty_graph(): + return nx.Graph() + + +def one_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1]) + return graph + + +def two_node_graph(): + graph = nx.Graph() + graph.add_nodes_from([1, 2]) + graph.add_edges_from([(1, 2)]) + return graph + + +def three_node_clique(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3]) + graph.add_edges_from([(1, 2), (1, 3), (2, 3)]) + return graph + + +def disconnected(): + graph = nx.Graph() + graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) + return graph + + +def rs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def slf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def slf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 4), + (2, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 8), + (7, 8), + ] + ) + return graph + + +def lf_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(6, 1), (1, 4), (4, 3), (3, 2), (2, 5)]) + return graph + + +def lf_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 7), + (1, 6), + (1, 3), + (1, 4), + (7, 2), + (2, 6), + (2, 3), + (2, 5), + (5, 3), + (5, 4), + (4, 3), + ] + ) + return graph + + +def sl_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + 
graph.add_edges_from( + [(1, 2), (1, 3), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def sl_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 4), + (2, 8), + (8, 4), + (8, 6), + (8, 7), + (7, 5), + (7, 6), + (3, 4), + (4, 6), + (6, 5), + (5, 3), + ] + ) + return graph + + +def gis_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4]) + graph.add_edges_from([(1, 2), (2, 3), (3, 4)]) + return graph + + +def gis_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from([(1, 5), (2, 5), (3, 6), (4, 6), (5, 6)]) + return graph + + +def cs_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5]) + graph.add_edges_from([(1, 2), (1, 5), (2, 3), (2, 4), (2, 5), (3, 4), (4, 5)]) + return graph + + +def rsi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (3, 4), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [(1, 2), (1, 5), (1, 6), (2, 3), (2, 7), (3, 4), (3, 7), (4, 5), (4, 6), (5, 6)] + ) + return graph + + +def lfi_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 5), + (1, 6), + (1, 7), + (2, 3), + (2, 8), + (2, 9), + (3, 4), + (3, 8), + (3, 9), + (4, 5), + (4, 6), + (4, 7), + (5, 6), + ] + ) + return graph + + +def sli_shc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (2, 3), + (2, 6), + (3, 4), + (4, 5), + (4, 6), + (5, 7), + (6, 7), + ] + ) + return graph + + +def sli_hc(): + graph = nx.Graph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + graph.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 7), + (2, 8), + (2, 9), + (3, 6), + (3, 7), + (3, 9), + (4, 5), + (4, 6), + (4, 8), + (4, 9), + (5, 6), + (5, 7), + (5, 8), + (6, 7), + (6, 9), + (7, 8), + (8, 9), + ] + ) + return graph + + +# -------------------------------------------------------------------------- +# Basic tests for all strategies +# For each basic graph function, specify the number of expected colors. +BASIC_TEST_CASES = { + empty_graph: 0, + one_node_graph: 1, + two_node_graph: 2, + disconnected: 2, + three_node_clique: 3, +} + + +# -------------------------------------------------------------------------- +# Special test cases. 
Each strategy has a list of tuples of the form +# (graph function, interchange, valid # of colors) +SPECIAL_TEST_CASES = { + "random_sequential": [ + (rs_shc, False, (2, 3)), + (rs_shc, True, 2), + (rsi_shc, True, (3, 4)), + ], + "saturation_largest_first": [(slf_shc, False, (3, 4)), (slf_hc, False, 4)], + "largest_first": [ + (lf_shc, False, (2, 3)), + (lf_hc, False, 4), + (lf_shc, True, 2), + (lf_hc, True, 3), + (lfi_shc, True, (3, 4)), + (lfi_hc, True, 4), + ], + "smallest_last": [ + (sl_shc, False, (3, 4)), + (sl_hc, False, 5), + (sl_shc, True, 3), + (sl_hc, True, 4), + (sli_shc, True, (3, 4)), + (sli_hc, True, 5), + ], + "independent_set": [(gis_shc, False, (2, 3)), (gis_hc, False, 3)], + "connected_sequential": [(cs_shc, False, (3, 4)), (cs_shc, True, 3)], + "connected_sequential_dfs": [(cs_shc, False, (3, 4))], +} + + +# -------------------------------------------------------------------------- +# Helper functions to test +# (graph function, interchange, valid # of colors) + + +def check_state(L, N, H, F, C): + s = len(C[0]) + num_colors = len(C.keys()) + + assert all(u in L[v] for u in L for v in L[u]) + assert all(F[u] != F[v] for u in L for v in L[u]) + assert all(len(L[u]) < num_colors for u in L) + assert all(len(C[x]) == s for x in C) + assert all(H[(c1, c2)] >= 0 for c1 in C for c2 in C) + assert all(N[(u, F[u])] == 0 for u in F) + + +def max_degree(G): + """Get the maximum degree of any node in G.""" + return max(G.degree(node) for node in G.nodes) if len(G.nodes) > 0 else 0 + + +def make_params_from_graph(G, F): + """Returns {N, L, H, C} from the given graph.""" + num_nodes = len(G) + L = {u: [] for u in range(num_nodes)} + for u, v in G.edges: + L[u].append(v) + L[v].append(u) + + C = nx.algorithms.coloring.equitable_coloring.make_C_from_F(F) + N = nx.algorithms.coloring.equitable_coloring.make_N_from_L_C(L, C) + H = nx.algorithms.coloring.equitable_coloring.make_H_from_C_N(C, N) + + return {"N": N, "F": F, "C": C, "H": H, "L": L} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/communicability_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/communicability_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..c9144a7b84fd42108e050f47315fee97430a66e6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/communicability_alg.py @@ -0,0 +1,162 @@ +""" +Communicability. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["communicability", "communicability_exp"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability(G): + r"""Returns communicability between all pairs of nodes in G. + + The communicability between pairs of nodes in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability_exp: + Communicability between all pairs of nodes in G using spectral + decomposition. + communicability_betweenness_centrality: + Communicability betweenness centrality for each node in G. + + Notes + ----- + This algorithm uses a spectral decomposition of the adjacency matrix. + Let G=(V,E) be a simple undirected graph. 
Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes `u` and `v` based on the graph spectrum + is [1]_ + + .. math:: + C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}}, + + where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal + eigenvector of the adjacency matrix associated with the eigenvalue + `\lambda_{j}`. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). + https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability(G) + """ + import numpy as np + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + w, vec = np.linalg.eigh(A) + expw = np.exp(w) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + # computing communicabilities + for u in G: + c[u] = {} + for v in G: + s = 0 + p = mapping[u] + q = mapping[v] + for j in range(len(nodelist)): + s += vec[:, j][p] * vec[:, j][q] * expw[j] + c[u][v] = float(s) + return c + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def communicability_exp(G): + r"""Returns communicability between all pairs of nodes in G. + + Communicability between pair of node (u,v) of node in G is the sum of + walks of different lengths starting at node u and ending at node v. + + Parameters + ---------- + G: graph + + Returns + ------- + comm: dictionary of dictionaries + Dictionary of dictionaries keyed by nodes with communicability + as the value. + + Raises + ------ + NetworkXError + If the graph is not undirected and simple. + + See Also + -------- + communicability: + Communicability between pairs of nodes in G. + communicability_betweenness_centrality: + Communicability betweenness centrality for each node in G. + + Notes + ----- + This algorithm uses matrix exponentiation of the adjacency matrix. + + Let G=(V,E) be a simple undirected graph. Using the connection between + the powers of the adjacency matrix and the number of walks in the graph, + the communicability between nodes u and v is [1]_, + + .. math:: + C(u,v) = (e^A)_{uv}, + + where `A` is the adjacency matrix of G. + + References + ---------- + .. [1] Ernesto Estrada, Naomichi Hatano, + "Communicability in complex networks", + Phys. Rev. E 77, 036111 (2008). + https://arxiv.org/abs/0707.0756 + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)]) + >>> c = nx.communicability_exp(G) + """ + import scipy as sp + + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_array(G, nodelist) + # convert to 0-1 matrix + A[A != 0.0] = 1 + # communicability matrix + expA = sp.linalg.expm(A) + mapping = dict(zip(nodelist, range(len(nodelist)))) + c = {} + for u in G: + c[u] = {} + for v in G: + c[u][v] = float(expA[mapping[u], mapping[v]]) + return c diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/community/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa782201ddc7a5ad538d08af45f065ad37ef628b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/__init__.py @@ -0,0 +1,24 @@ +"""Functions for computing and measuring community structure. 
+ +The ``community`` subpackage can be accessed by using :mod:`networkx.community`, then accessing the +functions as attributes of ``community``. For example:: + + >>> import networkx as nx + >>> G = nx.barbell_graph(5, 1) + >>> communities_generator = nx.community.girvan_newman(G) + >>> top_level_communities = next(communities_generator) + >>> next_level_communities = next(communities_generator) + >>> sorted(map(sorted, next_level_communities)) + [[0, 1, 2, 3, 4], [5], [6, 7, 8, 9, 10]] + +""" +from networkx.algorithms.community.asyn_fluid import * +from networkx.algorithms.community.centrality import * +from networkx.algorithms.community.kclique import * +from networkx.algorithms.community.kernighan_lin import * +from networkx.algorithms.community.label_propagation import * +from networkx.algorithms.community.lukes import * +from networkx.algorithms.community.modularity_max import * +from networkx.algorithms.community.quality import * +from networkx.algorithms.community.community_utils import * +from networkx.algorithms.community.louvain import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b2f5faef88334a56cbfc40d46b86a1d6dba1b05 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d093035e56f53c8ec44244dfb9e1bcae98c320bc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/asyn_fluid.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..628d5e86e3f6f311c506022d950f61f1e8aad73e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0224db00c6bd3c4a2634fc3bd7788293b4060880 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/community_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b696a474b7e4aebe6534f9286f42b223ce2faf66 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kclique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0340ea7e051409a2c189d7bc07363c7eeb2eb99 Binary 
files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/kernighan_lin.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..403b224c04f100f1dbdbe13820a2d13861293e72 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/label_propagation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94aa7a0a8d5b0ffa7be99aee8472c47420a68058 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/louvain.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..873845a1126be2816ce5ce230447b70e1a3c2fc4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/lukes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf372198e54b038c9cceab67447c2fc32bbf319e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/modularity_max.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ec8766071fa4d1b5e3ad8598176fd1e763349b0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/__pycache__/quality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py b/MLPY/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..1a0029ae7ff6c52331f16d6f20b97743ddd12509 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/asyn_fluid.py @@ -0,0 +1,150 @@ +"""Asynchronous Fluid Communities algorithm for community detection.""" + +from collections import Counter + +import networkx as nx +from networkx.algorithms.components import is_connected +from networkx.exception import NetworkXError +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = ["asyn_fluidc"] + + +@not_implemented_for("directed", "multigraph") +@py_random_state(3) +@nx._dispatch +def asyn_fluidc(G, k, max_iter=100, seed=None): + """Returns communities in `G` as detected by Fluid Communities algorithm. + + The asynchronous fluid communities algorithm is described in + [1]_. The algorithm is based on the simple idea of fluids interacting + in an environment, expanding and pushing each other. Its initialization is + random, so found communities may vary on different executions. + + The algorithm proceeds as follows. 
First, each of the initial k communities
+    is initialized in a random vertex in the graph. Then the algorithm iterates
+    over all vertices in a random order, updating the community of each vertex
+    based on its own community and the communities of its neighbours. This
+    process is performed several times until convergence.
+    At all times, each community has a total density of 1, which is equally
+    distributed among the vertices it contains. If a vertex changes
+    community, the vertex densities of the affected communities are adjusted
+    immediately. When a complete iteration over all vertices is done, such that
+    no vertex changes the community it belongs to, the algorithm has converged
+    and returns.
+
+    This is the original version of the algorithm described in [1]_.
+    Unfortunately, it does not support weighted graphs yet.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Graph must be simple and undirected.
+
+    k : integer
+        The number of communities to be found.
+
+    max_iter : integer
+        The maximum number of iterations allowed. By default 100.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    communities : iterable
+        Iterable of communities given as sets of nodes.
+
+    Notes
+    -----
+    The ``k`` argument is required; it has no default value.
+
+    References
+    ----------
+    .. [1] Parés F., Garcia-Gasulla D. et al. "Fluid Communities: A
+       Competitive and Highly Scalable Community Detection Algorithm".
+       [https://arxiv.org/pdf/1703.09307.pdf].
+    """
+    # Initial checks
+    if not isinstance(k, int):
+        raise NetworkXError("k must be an integer.")
+    if not k > 0:
+        raise NetworkXError("k must be greater than 0.")
+    if not is_connected(G):
+        raise NetworkXError("Fluid Communities require connected Graphs.")
+    if len(G) < k:
+        raise NetworkXError("k cannot be bigger than the number of nodes.")
+    # Initialization
+    max_density = 1.0
+    vertices = list(G)
+    seed.shuffle(vertices)
+    communities = {n: i for i, n in enumerate(vertices[:k])}
+    density = {}
+    com_to_numvertices = {}
+    for vertex in communities:
+        com_to_numvertices[communities[vertex]] = 1
+        density[communities[vertex]] = max_density
+    # Set up control variables and start iterating
+    iter_count = 0
+    cont = True
+    while cont:
+        cont = False
+        iter_count += 1
+        # Loop over all vertices in graph in a random order
+        vertices = list(G)
+        seed.shuffle(vertices)
+        for vertex in vertices:
+            # Updating rule
+            com_counter = Counter()
+            # Take into account self vertex community
+            try:
+                com_counter.update({communities[vertex]: density[communities[vertex]]})
+            except KeyError:
+                pass
+            # Gather neighbour vertex communities
+            for v in G[vertex]:
+                try:
+                    com_counter.update({communities[v]: density[communities[v]]})
+                except KeyError:
+                    continue
+            # Check which is the community with highest density
+            new_com = -1
+            if len(com_counter.keys()) > 0:
+                max_freq = max(com_counter.values())
+                best_communities = [
+                    com
+                    for com, freq in com_counter.items()
+                    if (max_freq - freq) < 0.0001
+                ]
+                # If actual vertex com in best communities, it is preserved
+                try:
+                    if communities[vertex] in best_communities:
+                        new_com = communities[vertex]
+                except KeyError:
+                    pass
+                # If the vertex community changes (new_com is still -1 here
+                # only when the vertex is unlabeled or its current community
+                # is not among the highest-density candidates)...
+                if new_com == -1:
+                    # Set flag of non-convergence
+                    cont = True
+                    # Randomly choose a new community from the candidates
+                    new_com = seed.choice(best_communities)
+                    # Update previous community status
+                    try:
+                        com_to_numvertices[communities[vertex]] -= 1
+                        density[communities[vertex]] = (
+                            max_density / com_to_numvertices[communities[vertex]]
+                        )
+                    except KeyError:
+                        pass
+                    # Update new community status
+                    communities[vertex] = new_com
+                    com_to_numvertices[communities[vertex]] += 1
+                    density[communities[vertex]] = (
+                        max_density / com_to_numvertices[communities[vertex]]
+                    )
+        # If the maximum number of iterations is reached, output current results
+        if iter_count > max_iter:
+            break
+    # Return results by grouping communities as list of vertices
+    return iter(groups(communities).values())
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/community/centrality.py
new file mode 100644
index 0000000000000000000000000000000000000000..efdc98460e4db7ccbbb988c77b125525e3efd18a
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/community/centrality.py
@@ -0,0 +1,171 @@
+"""Functions for computing communities based on centrality notions."""
+
+import networkx as nx
+
+__all__ = ["girvan_newman"]
+
+
+@nx._dispatch(preserve_edge_attrs="most_valuable_edge")
+def girvan_newman(G, most_valuable_edge=None):
+    """Finds communities in a graph using the Girvan–Newman method.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    most_valuable_edge : function
+        Function that takes a graph as input and outputs an edge. The
+        edge returned by this function will be recomputed and removed at
+        each iteration of the algorithm.
+
+        If not specified, the edge with the highest
+        :func:`networkx.edge_betweenness_centrality` will be used.
+
+    Returns
+    -------
+    iterator
+        Iterator over tuples of sets of nodes in `G`. Each set of nodes
+        is a community, each tuple is a sequence of communities at a
+        particular level of the algorithm.
+
+    Examples
+    --------
+    To get the first pair of communities::
+
+        >>> G = nx.path_graph(10)
+        >>> comp = nx.community.girvan_newman(G)
+        >>> tuple(sorted(c) for c in next(comp))
+        ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9])
+
+    To get only the first *k* tuples of communities, use
+    :func:`itertools.islice`::
+
+        >>> import itertools
+        >>> G = nx.path_graph(8)
+        >>> k = 2
+        >>> comp = nx.community.girvan_newman(G)
+        >>> for communities in itertools.islice(comp, k):
+        ...     print(tuple(sorted(c) for c in communities))
+        ...
+        ([0, 1, 2, 3], [4, 5, 6, 7])
+        ([0, 1], [2, 3], [4, 5, 6, 7])
+
+    To stop getting tuples of communities once the number of communities
+    is greater than *k*, use :func:`itertools.takewhile`::
+
+        >>> import itertools
+        >>> G = nx.path_graph(8)
+        >>> k = 4
+        >>> comp = nx.community.girvan_newman(G)
+        >>> limited = itertools.takewhile(lambda c: len(c) <= k, comp)
+        >>> for communities in limited:
+        ...     print(tuple(sorted(c) for c in communities))
+        ...
+        ([0, 1, 2, 3], [4, 5, 6, 7])
+        ([0, 1], [2, 3], [4, 5, 6, 7])
+        ([0, 1], [2, 3], [4, 5], [6, 7])
+
+    To just choose an edge to remove based on the weight::
+
+        >>> from operator import itemgetter
+        >>> G = nx.path_graph(10)
+        >>> edges = G.edges()
+        >>> nx.set_edge_attributes(G, {(u, v): v for u, v in edges}, "weight")
+        >>> def heaviest(G):
+        ...     u, v, w = max(G.edges(data="weight"), key=itemgetter(2))
+        ...     return (u, v)
+        ...
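+        >>> # The weights above equal the larger endpoint of each edge, so the
+        >>> # heaviest edge is (8, 9) and the first split peels off node 9.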
+ >>> comp = nx.community.girvan_newman(G, most_valuable_edge=heaviest) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4, 5, 6, 7, 8], [9]) + + To utilize edge weights when choosing an edge with, for example, the + highest betweenness centrality:: + + >>> from networkx import edge_betweenness_centrality as betweenness + >>> def most_central_edge(G): + ... centrality = betweenness(G, weight="weight") + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + >>> tuple(sorted(c) for c in next(comp)) + ([0, 1, 2, 3, 4], [5, 6, 7, 8, 9]) + + To specify a different ranking algorithm for edges, use the + `most_valuable_edge` keyword argument:: + + >>> from networkx import edge_betweenness_centrality + >>> from random import random + >>> def most_central_edge(G): + ... centrality = edge_betweenness_centrality(G) + ... max_cent = max(centrality.values()) + ... # Scale the centrality values so they are between 0 and 1, + ... # and add some random noise. + ... centrality = {e: c / max_cent for e, c in centrality.items()} + ... # Add some random noise. + ... centrality = {e: c + random() for e, c in centrality.items()} + ... return max(centrality, key=centrality.get) + ... + >>> G = nx.path_graph(10) + >>> comp = nx.community.girvan_newman(G, most_valuable_edge=most_central_edge) + + Notes + ----- + The Girvan–Newman algorithm detects communities by progressively + removing edges from the original graph. The algorithm removes the + "most valuable" edge, traditionally the edge with the highest + betweenness centrality, at each step. As the graph breaks down into + pieces, the tightly knit community structure is exposed and the + result can be depicted as a dendrogram. + + """ + # If the graph is already empty, simply return its connected + # components. + if G.number_of_edges() == 0: + yield tuple(nx.connected_components(G)) + return + # If no function is provided for computing the most valuable edge, + # use the edge betweenness centrality. + if most_valuable_edge is None: + + def most_valuable_edge(G): + """Returns the edge with the highest betweenness centrality + in the graph `G`. + + """ + # We have guaranteed that the graph is non-empty, so this + # dictionary will never be empty. + betweenness = nx.edge_betweenness_centrality(G) + return max(betweenness, key=betweenness.get) + + # The copy of G here must include the edge weight data. + g = G.copy().to_undirected() + # Self-loops must be removed because their removal has no effect on + # the connected components of the graph. + g.remove_edges_from(nx.selfloop_edges(g)) + while g.number_of_edges() > 0: + yield _without_most_central_edges(g, most_valuable_edge) + + +def _without_most_central_edges(G, most_valuable_edge): + """Returns the connected components of the graph that results from + repeatedly removing the most "valuable" edge in the graph. + + `G` must be a non-empty graph. This function modifies the graph `G` + in-place; that is, it removes edges on the graph `G`. + + `most_valuable_edge` is a function that takes the graph `G` as input + (or a subgraph with one or more edges of `G` removed) and returns an + edge. That edge will be removed and this process will be repeated + until the number of connected components in the graph increases. 
+ + """ + original_num_components = nx.number_connected_components(G) + num_new_components = original_num_components + while num_new_components <= original_num_components: + edge = most_valuable_edge(G) + G.remove_edge(*edge) + new_components = tuple(nx.connected_components(G)) + num_new_components = len(new_components) + return new_components diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/community_utils.py b/MLPY/Lib/site-packages/networkx/algorithms/community/community_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5e4727eec42c5e4c7b25060a18d9894a9b8c4514 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/community_utils.py @@ -0,0 +1,29 @@ +"""Helper functions for community-finding algorithms.""" +import networkx as nx + +__all__ = ["is_partition"] + + +@nx._dispatch +def is_partition(G, communities): + """Returns *True* if `communities` is a partition of the nodes of `G`. + + A partition of a universe set is a family of pairwise disjoint sets + whose union is the entire universe set. + + Parameters + ---------- + G : NetworkX graph. + + communities : list or iterable of sets of nodes + If not a list, the iterable is converted internally to a list. + If it is an iterator it is exhausted. + + """ + # Alternate implementation: + # return all(sum(1 if v in c else 0 for c in communities) == 1 for v in G) + if not isinstance(communities, list): + communities = list(communities) + nodes = {n for c in communities for n in c if n in G} + + return len(G) == len(nodes) == sum(len(c) for c in communities) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/kclique.py b/MLPY/Lib/site-packages/networkx/algorithms/community/kclique.py new file mode 100644 index 0000000000000000000000000000000000000000..60433669cee8c9fa38bde0956ccd0c3d319fc715 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/kclique.py @@ -0,0 +1,79 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["k_clique_communities"] + + +@nx._dispatch +def k_clique_communities(G, k, cliques=None): + """Find k-clique communities in graph using the percolation method. + + A k-clique community is the union of all cliques of size k that + can be reached through adjacent (sharing k-1 nodes) k-cliques. + + Parameters + ---------- + G : NetworkX graph + + k : int + Size of smallest clique + + cliques: list or generator + Precomputed cliques (use networkx.find_cliques(G)) + + Returns + ------- + Yields sets of nodes, one for each k-clique community. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> K5 = nx.convert_node_labels_to_integers(G, first_label=2) + >>> G.add_edges_from(K5.edges()) + >>> c = list(nx.community.k_clique_communities(G, 4)) + >>> sorted(list(c[0])) + [0, 1, 2, 3, 4, 5, 6] + >>> list(nx.community.k_clique_communities(G, 6)) + [] + + References + ---------- + .. 
[1] Gergely Palla, Imre Derényi, Illés Farkas, and Tamás Vicsek,
+       Uncovering the overlapping community structure of complex networks
+       in nature and society. Nature 435, 814-818, 2005,
+       doi:10.1038/nature03607
+    """
+    if k < 2:
+        raise nx.NetworkXError(f"k={k}, k must be greater than 1.")
+    if cliques is None:
+        cliques = nx.find_cliques(G)
+    cliques = [frozenset(c) for c in cliques if len(c) >= k]
+
+    # First index which nodes are in which cliques
+    membership_dict = defaultdict(list)
+    for clique in cliques:
+        for node in clique:
+            membership_dict[node].append(clique)
+
+    # For each clique, see which adjacent cliques percolate
+    perc_graph = nx.Graph()
+    perc_graph.add_nodes_from(cliques)
+    for clique in cliques:
+        for adj_clique in _get_adjacent_cliques(clique, membership_dict):
+            if len(clique.intersection(adj_clique)) >= (k - 1):
+                perc_graph.add_edge(clique, adj_clique)
+
+    # Connected components of clique graph with perc edges
+    # are the percolated cliques
+    for component in nx.connected_components(perc_graph):
+        yield (frozenset.union(*component))
+
+
+def _get_adjacent_cliques(clique, membership_dict):
+    adjacent_cliques = set()
+    for n in clique:
+        for adj_clique in membership_dict[n]:
+            if clique != adj_clique:
+                adjacent_cliques.add(adj_clique)
+    return adjacent_cliques
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py b/MLPY/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py
new file mode 100644
index 0000000000000000000000000000000000000000..a18c7779b5be52ed25a526b0e1a02d10d3222aa6
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/community/kernighan_lin.py
@@ -0,0 +1,139 @@
+"""Functions for computing the Kernighan–Lin bipartition algorithm."""
+
+from itertools import count
+
+import networkx as nx
+from networkx.algorithms.community.community_utils import is_partition
+from networkx.utils import BinaryHeap, not_implemented_for, py_random_state
+
+__all__ = ["kernighan_lin_bisection"]
+
+
+def _kernighan_lin_sweep(edges, side):
+    """
+    This is a modified form of Kernighan-Lin, which moves single nodes at a
+    time, alternating between sides to keep the bisection balanced. We keep
+    two min-heaps of swap costs to make optimal-next-move selection fast.
+    """
+    costs0, costs1 = costs = BinaryHeap(), BinaryHeap()
+    for u, side_u, edges_u in zip(count(), side, edges):
+        cost_u = sum(w if side[v] else -w for v, w in edges_u)
+        costs[side_u].insert(u, cost_u if side_u else -cost_u)
+
+    def _update_costs(costs_x, x):
+        for y, w in edges[x]:
+            costs_y = costs[side[y]]
+            cost_y = costs_y.get(y)
+            if cost_y is not None:
+                cost_y += 2 * (-w if costs_x is costs_y else w)
+                costs_y.insert(y, cost_y, True)
+
+    i = 0
+    totcost = 0
+    while costs0 and costs1:
+        u, cost_u = costs0.pop()
+        _update_costs(costs0, u)
+        v, cost_v = costs1.pop()
+        _update_costs(costs1, v)
+        totcost += cost_u + cost_v
+        i += 1
+        yield totcost, i, (u, v)
+
+
+@not_implemented_for("directed")
+@py_random_state(4)
+@nx._dispatch(edge_attrs="weight")
+def kernighan_lin_bisection(G, partition=None, max_iter=10, weight="weight", seed=None):
+    """Partition a graph into two blocks using the Kernighan–Lin
+    algorithm.
+
+    This algorithm partitions a network into two sets by iteratively
+    swapping pairs of nodes to reduce the edge cut between the two sets. The
+    pairs are chosen according to a modified form of Kernighan-Lin [1]_, which
+    moves nodes individually, alternating between sides to keep the bisection
+    balanced.
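+
+    A minimal usage sketch (the example graph and seed are illustrative
+    choices; the two sides are always balanced to within one node, though
+    the exact split depends on ``seed``)::
+
+        >>> G = nx.barbell_graph(3, 0)
+        >>> A, B = nx.community.kernighan_lin_bisection(G, seed=42)
+        >>> len(A), len(B)
+        (3, 3)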
+ + Parameters + ---------- + G : NetworkX graph + Graph must be undirected. + + partition : tuple + Pair of iterables containing an initial partition. If not + specified, a random balanced partition is used. + + max_iter : int + Maximum number of times to attempt swaps to find an + improvement before giving up. + + weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + Only used if partition is None + + Returns + ------- + partition : tuple + A pair of sets of nodes representing the bipartition. + + Raises + ------ + NetworkXError + If partition is not a valid partition of the nodes of the graph. + + References + ---------- + .. [1] Kernighan, B. W.; Lin, Shen (1970). + "An efficient heuristic procedure for partitioning graphs." + *Bell Systems Technical Journal* 49: 291--307. + Oxford University Press 2011. + + """ + n = len(G) + labels = list(G) + seed.shuffle(labels) + index = {v: i for i, v in enumerate(labels)} + + if partition is None: + side = [0] * (n // 2) + [1] * ((n + 1) // 2) + else: + try: + A, B = partition + except (TypeError, ValueError) as err: + raise nx.NetworkXError("partition must be two sets") from err + if not is_partition(G, (A, B)): + raise nx.NetworkXError("partition invalid") + side = [0] * n + for a in A: + side[index[a]] = 1 + + if G.is_multigraph(): + edges = [ + [ + (index[u], sum(e.get(weight, 1) for e in d.values())) + for u, d in G[v].items() + ] + for v in labels + ] + else: + edges = [ + [(index[u], e.get(weight, 1)) for u, e in G[v].items()] for v in labels + ] + + for i in range(max_iter): + costs = list(_kernighan_lin_sweep(edges, side)) + min_cost, min_i, _ = min(costs) + if min_cost >= 0: + break + + for _, _, (u, v) in costs[:min_i]: + side[u] = 1 + side[v] = 0 + + A = {u for u, s in zip(labels, side) if s == 0} + B = {u for u, s in zip(labels, side) if s == 1} + return A, B diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/label_propagation.py b/MLPY/Lib/site-packages/networkx/algorithms/community/label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..c10938d627c23368b040c0e00d19177503029511 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/label_propagation.py @@ -0,0 +1,337 @@ +""" +Label propagation community detection algorithms. +""" +from collections import Counter, defaultdict, deque + +import networkx as nx +from networkx.utils import groups, not_implemented_for, py_random_state + +__all__ = [ + "label_propagation_communities", + "asyn_lpa_communities", + "fast_label_propagation_communities", +] + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def fast_label_propagation_communities(G, *, weight=None, seed=None): + """Returns communities in `G` as detected by fast label propagation. + + The fast label propagation algorithm is described in [1]_. The algorithm is + probabilistic and the found communities may vary in different executions. + + The algorithm operates as follows. First, the community label of each node is + set to a unique label. The algorithm then repeatedly updates the labels of + the nodes to the most frequent label in their neighborhood. In case of ties, + a random label is chosen from the most frequent labels. + + The algorithm maintains a queue of nodes that still need to be processed. + Initially, all nodes are added to the queue in a random order. 
Then the nodes + are removed from the queue one by one and processed. If a node updates its label, + all its neighbors that have a different label are added to the queue (if not + already in the queue). The algorithm stops when the queue is empty. + + Parameters + ---------- + G : Graph, DiGraph, MultiGraph, or MultiDiGraph + Any NetworkX graph. + + weight : string, or None (default) + The edge attribute representing a non-negative weight of an edge. If None, + each edge is assumed to have weight one. The weight of an edge is used in + determining the frequency with which a label appears among the neighbors of + a node (edge with weight `w` is equivalent to `w` unweighted edges). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge directions are ignored for directed graphs. + Edge weights must be non-negative numbers. + + References + ---------- + .. [1] Vincent A. Traag & Lovro Šubelj. "Large network community detection by + fast label propagation." Scientific Reports 13 (2023): 2701. + https://doi.org/10.1038/s41598-023-29610-z + """ + + # Queue of nodes to be processed. + nodes_queue = deque(G) + seed.shuffle(nodes_queue) + + # Set of nodes in the queue. + nodes_set = set(G) + + # Assign unique label to each node. + comms = {node: i for i, node in enumerate(G)} + + while nodes_queue: + # Remove next node from the queue to process. + node = nodes_queue.popleft() + nodes_set.remove(node) + + # Isolated nodes retain their initial label. + if G.degree(node) > 0: + # Compute frequency of labels in node's neighborhood. + label_freqs = _fast_label_count(G, comms, node, weight) + max_freq = max(label_freqs.values()) + + # Always sample new label from most frequent labels. + comm = seed.choice( + [comm for comm in label_freqs if label_freqs[comm] == max_freq] + ) + + if comms[node] != comm: + comms[node] = comm + + # Add neighbors that have different label to the queue. + for nbr in nx.all_neighbors(G, node): + if comms[nbr] != comm and nbr not in nodes_set: + nodes_queue.append(nbr) + nodes_set.add(nbr) + + yield from groups(comms).values() + + +def _fast_label_count(G, comms, node, weight=None): + """Computes the frequency of labels in the neighborhood of a node. + + Returns a dictionary keyed by label to the frequency of that label. + """ + + if weight is None: + # Unweighted (un)directed simple graph. + if not G.is_multigraph(): + label_freqs = Counter(map(comms.get, nx.all_neighbors(G, node))) + + # Unweighted (un)directed multigraph. + else: + label_freqs = defaultdict(int) + for nbr in G[node]: + label_freqs[comms[nbr]] += len(G[node][nbr]) + + if G.is_directed(): + for nbr in G.pred[node]: + label_freqs[comms[nbr]] += len(G.pred[node][nbr]) + + else: + # Weighted undirected simple/multigraph. + label_freqs = defaultdict(float) + for _, nbr, w in G.edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + # Weighted directed simple/multigraph. + if G.is_directed(): + for nbr, _, w in G.in_edges(node, data=weight, default=1): + label_freqs[comms[nbr]] += w + + return label_freqs + + +@py_random_state(2) +@nx._dispatch(edge_attrs="weight") +def asyn_lpa_communities(G, weight=None, seed=None): + """Returns communities in `G` as detected by asynchronous label + propagation. + + The asynchronous label propagation algorithm is described in + [1]_. 
The algorithm is probabilistic and the found communities may + vary on different executions. + + The algorithm proceeds as follows. After initializing each node with + a unique label, the algorithm repeatedly sets the label of a node to + be the label that appears most frequently among that nodes + neighbors. The algorithm halts when each node has the label that + appears most frequently among its neighbors. The algorithm is + asynchronous because each node is updated without waiting for + updates on the remaining nodes. + + This generalized version of the algorithm in [1]_ accepts edge + weights. + + Parameters + ---------- + G : Graph + + weight : string + The edge attribute representing the weight of an edge. + If None, each edge is assumed to have weight one. In this + algorithm, the weight of an edge is used in determining the + frequency with which a label appears among the neighbors of a + node: a higher weight means the label appears more often. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + communities : iterable + Iterable of communities given as sets of nodes. + + Notes + ----- + Edge weight attributes must be numerical. + + References + ---------- + .. [1] Raghavan, Usha Nandini, Réka Albert, and Soundar Kumara. "Near + linear time algorithm to detect community structures in large-scale + networks." Physical Review E 76.3 (2007): 036106. + """ + + labels = {n: i for i, n in enumerate(G)} + cont = True + + while cont: + cont = False + nodes = list(G) + seed.shuffle(nodes) + + for node in nodes: + if not G[node]: + continue + + # Get label frequencies among adjacent nodes. + # Depending on the order they are processed in, + # some nodes will be in iteration t and others in t-1, + # making the algorithm asynchronous. + if weight is None: + # initialising a Counter from an iterator of labels is + # faster for getting unweighted label frequencies + label_freq = Counter(map(labels.get, G[node])) + else: + # updating a defaultdict is substantially faster + # for getting weighted label frequencies + label_freq = defaultdict(float) + for _, v, wt in G.edges(node, data=weight, default=1): + label_freq[labels[v]] += wt + + # Get the labels that appear with maximum frequency. + max_freq = max(label_freq.values()) + best_labels = [ + label for label, freq in label_freq.items() if freq == max_freq + ] + + # If the node does not have one of the maximum frequency labels, + # randomly choose one of them and update the node's label. + # Continue the iteration as long as at least one node + # doesn't have a maximum frequency label. + if labels[node] not in best_labels: + labels[node] = seed.choice(best_labels) + cont = True + + yield from groups(labels).values() + + +@not_implemented_for("directed") +@nx._dispatch +def label_propagation_communities(G): + """Generates community sets determined by label propagation + + Finds communities in `G` using a semi-synchronous label propagation + method [1]_. This method combines the advantages of both the synchronous + and asynchronous models. Not implemented for directed graphs. + + Parameters + ---------- + G : graph + An undirected NetworkX graph. + + Returns + ------- + communities : iterable + A dict_values object that contains a set of nodes for each community. + + Raises + ------ + NetworkXNotImplemented + If the graph is directed + + References + ---------- + .. [1] Cordasco, G., & Gargano, L. (2010, December). 
Community detection + via semi-synchronous label propagation algorithms. In Business + Applications of Social Network Analysis (BASNA), 2010 IEEE International + Workshop on (pp. 1-8). IEEE. + """ + coloring = _color_network(G) + # Create a unique label for each node in the graph + labeling = {v: k for k, v in enumerate(G)} + while not _labeling_complete(labeling, G): + # Update the labels of every node with the same color. + for color, nodes in coloring.items(): + for n in nodes: + _update_label(n, labeling, G) + + clusters = defaultdict(set) + for node, label in labeling.items(): + clusters[label].add(node) + return clusters.values() + + +def _color_network(G): + """Colors the network so that neighboring nodes all have distinct colors. + + Returns a dict keyed by color to a set of nodes with that color. + """ + coloring = {} # color => set(node) + colors = nx.coloring.greedy_color(G) + for node, color in colors.items(): + if color in coloring: + coloring[color].add(node) + else: + coloring[color] = {node} + return coloring + + +def _labeling_complete(labeling, G): + """Determines whether or not LPA is done. + + Label propagation is complete when all nodes have a label that is + in the set of highest frequency labels amongst its neighbors. + + Nodes with no neighbors are considered complete. + """ + return all( + labeling[v] in _most_frequent_labels(v, labeling, G) for v in G if len(G[v]) > 0 + ) + + +def _most_frequent_labels(node, labeling, G): + """Returns a set of all labels with maximum frequency in `labeling`. + + Input `labeling` should be a dict keyed by node to labels. + """ + if not G[node]: + # Nodes with no neighbors are themselves a community and are labeled + # accordingly, hence the immediate if statement. + return {labeling[node]} + + # Compute the frequencies of all neighbours of node + freqs = Counter(labeling[q] for q in G[node]) + max_freq = max(freqs.values()) + return {label for label, freq in freqs.items() if freq == max_freq} + + +def _update_label(node, labeling, G): + """Updates the label of a node using the Prec-Max tie breaking algorithm + + The algorithm is explained in: 'Community Detection via Semi-Synchronous + Label Propagation Algorithms' Cordasco and Gargano, 2011 + """ + high_labels = _most_frequent_labels(node, labeling, G) + if len(high_labels) == 1: + labeling[node] = high_labels.pop() + elif len(high_labels) > 1: + # Prec-Max + if labeling[node] not in high_labels: + labeling[node] = max(high_labels) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/louvain.py b/MLPY/Lib/site-packages/networkx/algorithms/community/louvain.py new file mode 100644 index 0000000000000000000000000000000000000000..772f4d79d69aea7b092565c5d24bc3b66c796d56 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/louvain.py @@ -0,0 +1,373 @@ +"""Function for detecting communities based on Louvain Community Detection +Algorithm""" + +from collections import defaultdict, deque + +import networkx as nx +from networkx.algorithms.community import modularity +from networkx.utils import py_random_state + +__all__ = ["louvain_communities", "louvain_partitions"] + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def louvain_communities( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + r"""Find the best partition of a graph using the Louvain Community Detection + Algorithm. + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. 
This is a heuristic method based on modularity optimization. [1]_ + + The algorithm works in 2 steps. On the first step it assigns every node to be + in its own community and then for each node it tries to find the maximum positive + modularity gain by moving each node to all of its neighbor communities. If no positive + gain is achieved the node remains in its original community. + + The modularity gain obtained by moving an isolated node $i$ into a community $C$ can + easily be calculated by the following formula (combining [1]_ [2]_ and some algebra): + + .. math:: + \Delta Q = \frac{k_{i,in}}{2m} - \gamma\frac{ \Sigma_{tot} \cdot k_i}{2m^2} + + where $m$ is the size of the graph, $k_{i,in}$ is the sum of the weights of the links + from $i$ to nodes in $C$, $k_i$ is the sum of the weights of the links incident to node $i$, + $\Sigma_{tot}$ is the sum of the weights of the links incident to nodes in $C$ and $\gamma$ + is the resolution parameter. + + For the directed case the modularity gain can be computed using this formula according to [3]_ + + .. math:: + \Delta Q = \frac{k_{i,in}}{m} + - \gamma\frac{k_i^{out} \cdot\Sigma_{tot}^{in} + k_i^{in} \cdot \Sigma_{tot}^{out}}{m^2} + + where $k_i^{out}$, $k_i^{in}$ are the outer and inner weighted degrees of node $i$ and + $\Sigma_{tot}^{in}$, $\Sigma_{tot}^{out}$ are the sum of in-going and out-going links incident + to nodes in $C$. + + The first phase continues until no individual move can improve the modularity. + + The second phase consists in building a new network whose nodes are now the communities + found in the first phase. To do so, the weights of the links between the new nodes are given by + the sum of the weight of the links between nodes in the corresponding two communities. Once this + phase is complete it is possible to reapply the first phase creating bigger communities with + increased modularity. + + The above two phases are executed until no modularity gain is achieved (or is less than + the `threshold`). + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. 
+ + Examples + -------- + >>> import networkx as nx + >>> G = nx.petersen_graph() + >>> nx.community.louvain_communities(G, seed=123) + [{0, 4, 5, 7, 9}, {1, 2, 3, 6, 8}] + + Notes + ----- + The order in which the nodes are considered can affect the final output. In the algorithm + the ordering happens using a random shuffle. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. Mech 10008, 1-12(2008). https://doi.org/10.1088/1742-5468/2008/10/P10008 + .. [2] Traag, V.A., Waltman, L. & van Eck, N.J. From Louvain to Leiden: guaranteeing + well-connected communities. Sci Rep 9, 5233 (2019). https://doi.org/10.1038/s41598-019-41695-z + .. [3] Nicolas Dugué, Anthony Perez. Directed Louvain : maximizing modularity in directed networks. + [Research Report] Université d’Orléans. 2015. hal-01231784. https://hal.archives-ouvertes.fr/hal-01231784 + + See Also + -------- + louvain_partitions + """ + + d = louvain_partitions(G, weight, resolution, threshold, seed) + q = deque(d, maxlen=1) + return q.pop() + + +@py_random_state("seed") +@nx._dispatch(edge_attrs="weight") +def louvain_partitions( + G, weight="weight", resolution=1, threshold=0.0000001, seed=None +): + """Yields partitions for each level of the Louvain Community Detection Algorithm + + Louvain Community Detection Algorithm is a simple method to extract the community + structure of a network. This is a heuristic method based on modularity optimization. [1]_ + + The partitions at each level (step of the algorithm) form a dendrogram of communities. + A dendrogram is a diagram representing a tree and each level represents + a partition of the G graph. The top level contains the smallest communities + and as you traverse to the bottom of the tree the communities get bigger + and the overall modularity increases making the partition better. + + Each level is generated by executing the two phases of the Louvain Community + Detection Algorithm. + + Be careful with self-loops in the input graph. These are treated as + previously reduced communities -- as if the process had been started + in the middle of the algorithm. Large self-loop edge weights thus + represent strong communities and in practice may be hard to add + other nodes to. If your input graph edge weights for self-loops + do not represent already reduced communities you may want to remove + the self-loops before inputting that graph. + + Parameters + ---------- + G : NetworkX graph + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + resolution : float, optional (default=1) + If resolution is less than 1, the algorithm favors larger communities. + Greater than 1 favors smaller communities + threshold : float, optional (default=0.0000001) + Modularity gain threshold for each level. If the gain of modularity + between 2 levels of the algorithm is less than the given threshold + then the algorithm stops and returns the resulting communities. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + list + A list of sets (partition of `G`). Each set represents one community and contains + all the nodes that constitute it. + + References + ---------- + .. [1] Blondel, V.D. et al. Fast unfolding of communities in + large networks. J. Stat. 
Mech 10008, 1-12(2008) + + See Also + -------- + louvain_communities + """ + + partition = [{u} for u in G.nodes()] + if nx.is_empty(G): + yield partition + return + mod = modularity(G, partition, resolution=resolution, weight=weight) + is_directed = G.is_directed() + if G.is_multigraph(): + graph = _convert_multigraph(G, weight, is_directed) + else: + graph = G.__class__() + graph.add_nodes_from(G) + graph.add_weighted_edges_from(G.edges(data=weight, default=1)) + + m = graph.size(weight="weight") + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + improvement = True + while improvement: + # gh-5901 protect the sets in the yielded list from further manipulation here + yield [s.copy() for s in partition] + new_mod = modularity( + graph, inner_partition, resolution=resolution, weight="weight" + ) + if new_mod - mod <= threshold: + return + mod = new_mod + graph = _gen_graph(graph, inner_partition) + partition, inner_partition, improvement = _one_level( + graph, m, partition, resolution, is_directed, seed + ) + + +def _one_level(G, m, partition, resolution=1, is_directed=False, seed=None): + """Calculate one level of the Louvain partitions tree + + Parameters + ---------- + G : NetworkX Graph/DiGraph + The graph from which to detect communities + m : number + The size of the graph `G`. + partition : list of sets of nodes + A valid partition of the graph `G` + resolution : positive number + The resolution parameter for computing the modularity of a partition + is_directed : bool + True if `G` is a directed graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + """ + node2com = {u: i for i, u in enumerate(G.nodes())} + inner_partition = [{u} for u in G.nodes()] + if is_directed: + in_degrees = dict(G.in_degree(weight="weight")) + out_degrees = dict(G.out_degree(weight="weight")) + Stot_in = list(in_degrees.values()) + Stot_out = list(out_degrees.values()) + # Calculate weights for both in and out neighbours without considering self-loops + nbrs = {} + for u in G: + nbrs[u] = defaultdict(float) + for _, n, wt in G.out_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + for n, _, wt in G.in_edges(u, data="weight"): + if u != n: + nbrs[u][n] += wt + else: + degrees = dict(G.degree(weight="weight")) + Stot = list(degrees.values()) + nbrs = {u: {v: data["weight"] for v, data in G[u].items() if v != u} for u in G} + rand_nodes = list(G.nodes) + seed.shuffle(rand_nodes) + nb_moves = 1 + improvement = False + while nb_moves > 0: + nb_moves = 0 + for u in rand_nodes: + best_mod = 0 + best_com = node2com[u] + weights2com = _neighbor_weights(nbrs[u], node2com) + if is_directed: + in_degree = in_degrees[u] + out_degree = out_degrees[u] + Stot_in[best_com] -= in_degree + Stot_out[best_com] -= out_degree + remove_cost = ( + -weights2com[best_com] / m + + resolution + * (out_degree * Stot_in[best_com] + in_degree * Stot_out[best_com]) + / m**2 + ) + else: + degree = degrees[u] + Stot[best_com] -= degree + remove_cost = -weights2com[best_com] / m + resolution * ( + Stot[best_com] * degree + ) / (2 * m**2) + for nbr_com, wt in weights2com.items(): + if is_directed: + gain = ( + remove_cost + + wt / m + - resolution + * ( + out_degree * Stot_in[nbr_com] + + in_degree * Stot_out[nbr_com] + ) + / m**2 + ) + else: + gain = ( + remove_cost + + wt / m + - resolution * (Stot[nbr_com] * degree) / (2 * m**2) + ) + if gain > best_mod: + best_mod = gain + best_com = nbr_com + 
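+            # The node's degree was subtracted from its current community's
+            # totals above; re-add it to the totals of the community that won
+            # the gain comparison (best_com may still be the original one).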
if is_directed: + Stot_in[best_com] += in_degree + Stot_out[best_com] += out_degree + else: + Stot[best_com] += degree + if best_com != node2com[u]: + com = G.nodes[u].get("nodes", {u}) + partition[node2com[u]].difference_update(com) + inner_partition[node2com[u]].remove(u) + partition[best_com].update(com) + inner_partition[best_com].add(u) + improvement = True + nb_moves += 1 + node2com[u] = best_com + partition = list(filter(len, partition)) + inner_partition = list(filter(len, inner_partition)) + return partition, inner_partition, improvement + + +def _neighbor_weights(nbrs, node2com): + """Calculate weights between node and its neighbor communities. + + Parameters + ---------- + nbrs : dictionary + Dictionary with nodes' neighbours as keys and their edge weight as value. + node2com : dictionary + Dictionary with all graph's nodes as keys and their community index as value. + + """ + weights = defaultdict(float) + for nbr, wt in nbrs.items(): + weights[node2com[nbr]] += wt + return weights + + +def _gen_graph(G, partition): + """Generate a new graph based on the partitions of a given graph""" + H = G.__class__() + node2com = {} + for i, part in enumerate(partition): + nodes = set() + for node in part: + node2com[node] = i + nodes.update(G.nodes[node].get("nodes", {node})) + H.add_node(i, nodes=nodes) + + for node1, node2, wt in G.edges(data=True): + wt = wt["weight"] + com1 = node2com[node1] + com2 = node2com[node2] + temp = H.get_edge_data(com1, com2, {"weight": 0})["weight"] + H.add_edge(com1, com2, weight=wt + temp) + return H + + +def _convert_multigraph(G, weight, is_directed): + """Convert a Multigraph to normal Graph""" + if is_directed: + H = nx.DiGraph() + else: + H = nx.Graph() + H.add_nodes_from(G) + for u, v, wt in G.edges(data=weight, default=1): + if H.has_edge(u, v): + H[u][v]["weight"] += wt + else: + H.add_edge(u, v, weight=wt) + return H diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/lukes.py b/MLPY/Lib/site-packages/networkx/algorithms/community/lukes.py new file mode 100644 index 0000000000000000000000000000000000000000..600a4db63d6d572dcebbe9d99738d3fecb6c5896 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/lukes.py @@ -0,0 +1,226 @@ +"""Lukes Algorithm for exact optimal weighted tree partitioning.""" + +from copy import deepcopy +from functools import lru_cache +from random import choice + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["lukes_partitioning"] + +D_EDGE_W = "weight" +D_EDGE_VALUE = 1.0 +D_NODE_W = "weight" +D_NODE_VALUE = 1 +PKEY = "partitions" +CLUSTER_EVAL_CACHE_SIZE = 2048 + + +def _split_n_from(n, min_size_of_first_part): + # splits j in two parts of which the first is at least + # the second argument + assert n >= min_size_of_first_part + for p1 in range(min_size_of_first_part, n + 1): + yield p1, n - p1 + + +@nx._dispatch(node_attrs="node_weight", edge_attrs="edge_weight") +def lukes_partitioning(G, max_size, node_weight=None, edge_weight=None): + """Optimal partitioning of a weighted tree using the Lukes algorithm. + + This algorithm partitions a connected, acyclic graph featuring integer + node weights and float edge weights. The resulting clusters are such + that the total weight of the nodes in each cluster does not exceed + max_size and that the weight of the edges that are cut by the partition + is minimum. The algorithm is based on [1]_. 
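+
+    Internally the tree is processed bottom-up: every node stores, for each
+    feasible total node weight, the best partition of the subtree handled so
+    far, which is why the node weights are required to be integers.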
+ + Parameters + ---------- + G : NetworkX graph + + max_size : int + Maximum weight a partition can have in terms of sum of + node_weight for all nodes in the partition + + edge_weight : key + Edge data key to use as weight. If None, the weights are all + set to one. + + node_weight : key + Node data key to use as weight. If None, the weights are all + set to one. The data must be int. + + Returns + ------- + partition : list + A list of sets of nodes representing the clusters of the + partition. + + Raises + ------ + NotATree + If G is not a tree. + TypeError + If any of the values of node_weight is not int. + + References + ---------- + .. [1] Lukes, J. A. (1974). + "Efficient Algorithm for the Partitioning of Trees." + IBM Journal of Research and Development, 18(3), 217–224. + + """ + # First sanity check and tree preparation + if not nx.is_tree(G): + raise nx.NotATree("lukes_partitioning works only on trees") + else: + if nx.is_directed(G): + root = [n for n, d in G.in_degree() if d == 0] + assert len(root) == 1 + root = root[0] + t_G = deepcopy(G) + else: + root = choice(list(G.nodes)) + # this has the desirable side effect of not inheriting attributes + t_G = nx.dfs_tree(G, root) + + # Since we do not want to screw up the original graph, + # if we have a blank attribute, we make a deepcopy + if edge_weight is None or node_weight is None: + safe_G = deepcopy(G) + if edge_weight is None: + nx.set_edge_attributes(safe_G, D_EDGE_VALUE, D_EDGE_W) + edge_weight = D_EDGE_W + if node_weight is None: + nx.set_node_attributes(safe_G, D_NODE_VALUE, D_NODE_W) + node_weight = D_NODE_W + else: + safe_G = G + + # Second sanity check + # The values of node_weight MUST BE int. + # I cannot see any room for duck typing without incurring serious + # danger of subtle bugs. + all_n_attr = nx.get_node_attributes(safe_G, node_weight).values() + for x in all_n_attr: + if not isinstance(x, int): + raise TypeError( + "lukes_partitioning needs integer " + f"values for node_weight ({node_weight})" + ) + + # SUBROUTINES ----------------------- + # these functions are defined here for two reasons: + # - brevity: we can leverage global "safe_G" + # - caching: signatures are hashable + + @not_implemented_for("undirected") + # this is intended to be called only on t_G + def _leaves(gr): + for x in gr.nodes: + if not nx.descendants(gr, x): + yield x + + @not_implemented_for("undirected") + def _a_parent_of_leaves_only(gr): + tleaves = set(_leaves(gr)) + for n in set(gr.nodes) - tleaves: + if all(x in tleaves for x in nx.descendants(gr, n)): + return n + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _value_of_cluster(cluster): + valid_edges = [e for e in safe_G.edges if e[0] in cluster and e[1] in cluster] + return sum(safe_G.edges[e][edge_weight] for e in valid_edges) + + def _value_of_partition(partition): + return sum(_value_of_cluster(frozenset(c)) for c in partition) + + @lru_cache(CLUSTER_EVAL_CACHE_SIZE) + def _weight_of_cluster(cluster): + return sum(safe_G.nodes[n][node_weight] for n in cluster) + + def _pivot(partition, node): + ccx = [c for c in partition if node in c] + assert len(ccx) == 1 + return ccx[0] + + def _concatenate_or_merge(partition_1, partition_2, x, i, ref_weight): + ccx = _pivot(partition_1, x) + cci = _pivot(partition_2, i) + merged_xi = ccx.union(cci) + + # We first check if we can do the merge. 
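+        # (that is, whether the merged cluster's total node weight stays
+        # within ref_weight)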
+ # If so, we do the actual calculations, otherwise we concatenate + if _weight_of_cluster(frozenset(merged_xi)) <= ref_weight: + cp1 = list(filter(lambda x: x != ccx, partition_1)) + cp2 = list(filter(lambda x: x != cci, partition_2)) + + option_2 = [merged_xi] + cp1 + cp2 + return option_2, _value_of_partition(option_2) + else: + option_1 = partition_1 + partition_2 + return option_1, _value_of_partition(option_1) + + # INITIALIZATION ----------------------- + leaves = set(_leaves(t_G)) + for lv in leaves: + t_G.nodes[lv][PKEY] = {} + slot = safe_G.nodes[lv][node_weight] + t_G.nodes[lv][PKEY][slot] = [{lv}] + t_G.nodes[lv][PKEY][0] = [{lv}] + + for inner in [x for x in t_G.nodes if x not in leaves]: + t_G.nodes[inner][PKEY] = {} + slot = safe_G.nodes[inner][node_weight] + t_G.nodes[inner][PKEY][slot] = [{inner}] + + # CORE ALGORITHM ----------------------- + while True: + x_node = _a_parent_of_leaves_only(t_G) + weight_of_x = safe_G.nodes[x_node][node_weight] + best_value = 0 + best_partition = None + bp_buffer = {} + x_descendants = nx.descendants(t_G, x_node) + for i_node in x_descendants: + for j in range(weight_of_x, max_size + 1): + for a, b in _split_n_from(j, weight_of_x): + if ( + a not in t_G.nodes[x_node][PKEY] + or b not in t_G.nodes[i_node][PKEY] + ): + # it's not possible to form this particular weight sum + continue + + part1 = t_G.nodes[x_node][PKEY][a] + part2 = t_G.nodes[i_node][PKEY][b] + part, value = _concatenate_or_merge(part1, part2, x_node, i_node, j) + + if j not in bp_buffer or bp_buffer[j][1] < value: + # we annotate in the buffer the best partition for j + bp_buffer[j] = part, value + + # we also keep track of the overall best partition + if best_value <= value: + best_value = value + best_partition = part + + # as illustrated in Lukes, once we finished a child, we can + # discharge the partitions we found into the graph + # (the key phrase is make all x == x') + # so that they are used by the subsequent children + for w, (best_part_for_vl, vl) in bp_buffer.items(): + t_G.nodes[x_node][PKEY][w] = best_part_for_vl + bp_buffer.clear() + + # the absolute best partition for this node + # across all weights has to be stored at 0 + t_G.nodes[x_node][PKEY][0] = best_partition + t_G.remove_nodes_from(x_descendants) + + if x_node == root: + # the 0-labeled partition of root + # is the optimal one for the whole tree + return t_G.nodes[root][PKEY][0] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/modularity_max.py b/MLPY/Lib/site-packages/networkx/algorithms/community/modularity_max.py new file mode 100644 index 0000000000000000000000000000000000000000..aba3267c33e8dcba06e1fa6ed95d5ec0b6b45dfe --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/modularity_max.py @@ -0,0 +1,448 @@ +"""Functions for detecting communities based on modularity.""" + +from collections import defaultdict + +import networkx as nx +from networkx.algorithms.community.quality import modularity +from networkx.utils import not_implemented_for +from networkx.utils.mapped_queue import MappedQueue + +__all__ = [ + "greedy_modularity_communities", + "naive_greedy_modularity_communities", +] + + +def _greedy_modularity_communities_generator(G, weight=None, resolution=1): + r"""Yield community partitions of G and the modularity change at each step. 
+ + This function performs Clauset-Newman-Moore greedy modularity maximization [2]_ + At each step of the process it yields the change in modularity that will occur in + the next step followed by yielding the new community partition after that step. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until one community contains all nodes (the partition has one set). + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Yields + ------ + Alternating yield statements produce the following two objects: + + communities: dict_values + A dict_values of frozensets of nodes, one for each community. + This represents a partition of the nodes of the graph into communities. + The first yield is the partition with each node in its own community. + + dq: float + The change in modularity when merging the next two communities + that leads to the largest modularity. + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + directed = G.is_directed() + N = G.number_of_nodes() + + # Count edges (or the sum of edge-weights for weighted graphs) + m = G.size(weight) + q0 = 1 / m + + # Calculate degrees (notation from the papers) + # a : the fraction of (weighted) out-degree for each node + # b : the fraction of (weighted) in-degree for each node + if directed: + a = {node: deg_out * q0 for node, deg_out in G.out_degree(weight=weight)} + b = {node: deg_in * q0 for node, deg_in in G.in_degree(weight=weight)} + else: + a = b = {node: deg * q0 * 0.5 for node, deg in G.degree(weight=weight)} + + # this preliminary step collects the edge weights for each node pair + # It handles multigraph and digraph and works fine for graph. 
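+    # After the scaling pass below, dq_dict[u][v] holds the modularity change
+    # of merging the singleton communities {u} and {v}:
+    #     q0 * w(u, v) - resolution * (a[u] * b[v] + b[u] * a[v])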
+ dq_dict = defaultdict(lambda: defaultdict(float)) + for u, v, wt in G.edges(data=weight, default=1): + if u == v: + continue + dq_dict[u][v] += wt + dq_dict[v][u] += wt + + # now scale and subtract the expected edge-weights term + for u, nbrdict in dq_dict.items(): + for v, wt in nbrdict.items(): + dq_dict[u][v] = q0 * wt - resolution * (a[u] * b[v] + b[u] * a[v]) + + # Use -dq to get a max_heap instead of a min_heap + # dq_heap holds a heap for each node's neighbors + dq_heap = {u: MappedQueue({(u, v): -dq for v, dq in dq_dict[u].items()}) for u in G} + # H -> all_dq_heap holds a heap with the best items for each node + H = MappedQueue([dq_heap[n].heap[0] for n in G if len(dq_heap[n]) > 0]) + + # Initialize single-node communities + communities = {n: frozenset([n]) for n in G} + yield communities.values() + + # Merge the two communities that lead to the largest modularity + while len(H) > 1: + # Find best merge + # Remove from heap of row maxes + # Ties will be broken by choosing the pair with lowest min community id + try: + negdq, u, v = H.pop() + except IndexError: + break + dq = -negdq + yield dq + # Remove best merge from row u heap + dq_heap[u].pop() + # Push new row max onto H + if len(dq_heap[u]) > 0: + H.push(dq_heap[u].heap[0]) + # If this element was also at the root of row v, we need to remove the + # duplicate entry from H + if dq_heap[v].heap[0] == (v, u): + H.remove((v, u)) + # Remove best merge from row v heap + dq_heap[v].remove((v, u)) + # Push new row max onto H + if len(dq_heap[v]) > 0: + H.push(dq_heap[v].heap[0]) + else: + # Duplicate wasn't in H, just remove from row v heap + dq_heap[v].remove((v, u)) + + # Perform merge + communities[v] = frozenset(communities[u] | communities[v]) + del communities[u] + + # Get neighbor communities connected to the merged communities + u_nbrs = set(dq_dict[u]) + v_nbrs = set(dq_dict[v]) + all_nbrs = (u_nbrs | v_nbrs) - {u, v} + both_nbrs = u_nbrs & v_nbrs + # Update dq for merge of u into v + for w in all_nbrs: + # Calculate new dq value + if w in both_nbrs: + dq_vw = dq_dict[v][w] + dq_dict[u][w] + elif w in v_nbrs: + dq_vw = dq_dict[v][w] - resolution * (a[u] * b[w] + a[w] * b[u]) + else: # w in u_nbrs + dq_vw = dq_dict[u][w] - resolution * (a[v] * b[w] + a[w] * b[v]) + # Update rows v and w + for row, col in [(v, w), (w, v)]: + dq_heap_row = dq_heap[row] + # Update dict for v,w only (u is removed below) + dq_dict[row][col] = dq_vw + # Save old max of per-row heap + if len(dq_heap_row) > 0: + d_oldmax = dq_heap_row.heap[0] + else: + d_oldmax = None + # Add/update heaps + d = (row, col) + d_negdq = -dq_vw + # Save old value for finding heap index + if w in v_nbrs: + # Update existing element in per-row heap + dq_heap_row.update(d, d, priority=d_negdq) + else: + # We're creating a new nonzero element, add to heap + dq_heap_row.push(d, priority=d_negdq) + # Update heap of row maxes if necessary + if d_oldmax is None: + # No entries previously in this row, push new max + H.push(d, priority=d_negdq) + else: + # We've updated an entry in this row, has the max changed? 
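+                        # H keeps a single entry per row (that row's best
+                        # merge), so it only needs updating if the row's
+                        # maximum actually changed.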
+ row_max = dq_heap_row.heap[0] + if d_oldmax != row_max or d_oldmax.priority != row_max.priority: + H.update(d_oldmax, row_max) + + # Remove row/col u from dq_dict matrix + for w in dq_dict[u]: + # Remove from dict + dq_old = dq_dict[w][u] + del dq_dict[w][u] + # Remove from heaps if we haven't already + if w != v: + # Remove both row and column + for row, col in [(w, u), (u, w)]: + dq_heap_row = dq_heap[row] + # Check if replaced dq is row max + d_old = (row, col) + if dq_heap_row.heap[0] == d_old: + # Update per-row heap and heap of row maxes + dq_heap_row.remove(d_old) + H.remove(d_old) + # Update row max + if len(dq_heap_row) > 0: + H.push(dq_heap_row.heap[0]) + else: + # Only update per-row heap + dq_heap_row.remove(d_old) + + del dq_dict[u] + # Mark row u as deleted, but keep placeholder + dq_heap[u] = MappedQueue() + # Merge u into v and update a + a[v] += a[u] + a[u] = 0 + if directed: + b[v] += b[u] + b[u] = 0 + + yield communities.values() + + +@nx._dispatch(edge_attrs="weight") +def greedy_modularity_communities( + G, + weight=None, + resolution=1, + cutoff=1, + best_n=None, +): + r"""Find communities in G using greedy modularity maximization. + + This function uses Clauset-Newman-Moore greedy modularity maximization [2]_ + to find the community partition with the largest modularity. + + Greedy modularity maximization begins with each node in its own community + and repeatedly joins the pair of communities that lead to the largest + modularity until no further increase in modularity is possible (a maximum). + Two keyword arguments adjust the stopping condition. `cutoff` is a lower + limit on the number of communities so you can stop the process before + reaching a maximum (used to save computation time). `best_n` is an upper + limit on the number of communities so you can make the process continue + until at most n communities remain even if the maximum modularity occurs + for more. To obtain exactly n communities, set both `cutoff` and `best_n` to n. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + resolution : float, optional (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + cutoff : int, optional (default=1) + A minimum number of communities below which the merging process stops. + The process stops at this number of communities even if modularity + is not maximized. The goal is to let the user stop the process early. + The process stops before the cutoff if it finds a maximum of modularity. + + best_n : int or None, optional (default=None) + A maximum number of communities above which the merging process will + not stop. This forces community merging to continue after modularity + starts to decrease until `best_n` communities remain. + If ``None``, don't force it to continue beyond a maximum. + + Raises + ------ + ValueError : If the `cutoff` or `best_n` value is not in the range + ``[1, G.number_of_nodes()]``, or if `best_n` < `cutoff`. + + Returns + ------- + communities: list + A list of frozensets of nodes, one for each community. 
+ Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + modularity + + References + ---------- + .. [1] Newman, M. E. J. "Networks: An Introduction", page 224 + Oxford University Press 2011. + .. [2] Clauset, A., Newman, M. E., & Moore, C. + "Finding community structure in very large networks." + Physical Review E 70(6), 2004. + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community + Detection" Phys. Rev. E74, 2006. + .. [4] Newman, M. E. J."Analysis of weighted networks" + Physical Review E 70(5 Pt 2):056131, 2004. + """ + if (cutoff < 1) or (cutoff > G.number_of_nodes()): + raise ValueError(f"cutoff must be between 1 and {len(G)}. Got {cutoff}.") + if best_n is not None: + if (best_n < 1) or (best_n > G.number_of_nodes()): + raise ValueError(f"best_n must be between 1 and {len(G)}. Got {best_n}.") + if best_n < cutoff: + raise ValueError(f"Must have best_n >= cutoff. Got {best_n} < {cutoff}") + if best_n == 1: + return [set(G)] + else: + best_n = G.number_of_nodes() + + # retrieve generator object to construct output + community_gen = _greedy_modularity_communities_generator( + G, weight=weight, resolution=resolution + ) + + # construct the first best community + communities = next(community_gen) + + # continue merging communities until one of the breaking criteria is satisfied + while len(communities) > cutoff: + try: + dq = next(community_gen) + # StopIteration occurs when communities are the connected components + except StopIteration: + communities = sorted(communities, key=len, reverse=True) + # if best_n requires more merging, merge big sets for highest modularity + while len(communities) > best_n: + comm1, comm2, *rest = communities + communities = [comm1 ^ comm2] + communities.extend(rest) + return communities + + # keep going unless max_mod is reached or best_n says to merge more + if dq < 0 and len(communities) <= best_n: + break + communities = next(community_gen) + + return sorted(communities, key=len, reverse=True) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def naive_greedy_modularity_communities(G, resolution=1, weight=None): + r"""Find communities in G using greedy modularity maximization. + + This implementation is O(n^4), much slower than alternatives, but it is + provided as an easy-to-understand reference implementation. + + Greedy modularity maximization begins with each node in its own community + and joins the pair of communities that most increases modularity until no + such pair exists. + + This function maximizes the generalized modularity, where `resolution` + is the resolution parameter, often expressed as $\gamma$. + See :func:`~networkx.algorithms.community.quality.modularity`. + + Parameters + ---------- + G : NetworkX graph + Graph must be simple and undirected. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + list + A list of sets of nodes, one for each community. 
+ Sorted by length with largest communities first. + + Examples + -------- + >>> G = nx.karate_club_graph() + >>> c = nx.community.naive_greedy_modularity_communities(G) + >>> sorted(c[0]) + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + + See Also + -------- + greedy_modularity_communities + modularity + """ + # First create one community for each node + communities = [frozenset([u]) for u in G.nodes()] + # Track merges + merges = [] + # Greedily merge communities until no improvement is possible + old_modularity = None + new_modularity = modularity(G, communities, resolution=resolution, weight=weight) + while old_modularity is None or new_modularity > old_modularity: + # Save modularity for comparison + old_modularity = new_modularity + # Find best pair to merge + trial_communities = list(communities) + to_merge = None + for i, u in enumerate(communities): + for j, v in enumerate(communities): + # Skip i==j and empty communities + if j <= i or len(u) == 0 or len(v) == 0: + continue + # Merge communities u and v + trial_communities[j] = u | v + trial_communities[i] = frozenset([]) + trial_modularity = modularity( + G, trial_communities, resolution=resolution, weight=weight + ) + if trial_modularity >= new_modularity: + # Check if strictly better or tie + if trial_modularity > new_modularity: + # Found new best, save modularity and group indexes + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + elif to_merge and min(i, j) < min(to_merge[0], to_merge[1]): + # Break ties by choosing pair with lowest min id + new_modularity = trial_modularity + to_merge = (i, j, new_modularity - old_modularity) + # Un-merge + trial_communities[i] = u + trial_communities[j] = v + if to_merge is not None: + # If the best merge improves modularity, use it + merges.append(to_merge) + i, j, dq = to_merge + u, v = communities[i], communities[j] + communities[j] = u | v + communities[i] = frozenset([]) + # Remove empty communities and sort + return sorted((c for c in communities if len(c) > 0), key=len, reverse=True) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/quality.py b/MLPY/Lib/site-packages/networkx/algorithms/community/quality.py new file mode 100644 index 0000000000000000000000000000000000000000..ab86b09113ad078e2ada3cbc294eff46a97543ef --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/quality.py @@ -0,0 +1,346 @@ +"""Functions for measuring the quality of a partition (into +communities). + +""" + +from itertools import combinations + +import networkx as nx +from networkx import NetworkXError +from networkx.algorithms.community.community_utils import is_partition +from networkx.utils.decorators import argmap + +__all__ = ["modularity", "partition_quality"] + + +class NotAPartition(NetworkXError): + """Raised if a given collection is not a partition.""" + + def __init__(self, G, collection): + msg = f"{collection} is not a valid partition of the graph {G}" + super().__init__(msg) + + +def _require_partition(G, partition): + """Decorator to check that a valid partition is input to a function + + Raises :exc:`networkx.NetworkXError` if the partition is not valid. + + This decorator should be used on functions whose first two arguments + are a graph and a partition of the nodes of that graph (in that + order):: + + >>> @require_partition + ... def foo(G, partition): + ... print("partition is valid!") + ... 
+ >>> G = nx.complete_graph(5) + >>> partition = [{0, 1}, {2, 3}, {4}] + >>> foo(G, partition) + partition is valid! + >>> partition = [{0}, {2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + >>> partition = [{0, 1}, {1, 2, 3}, {4}] + >>> foo(G, partition) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: `partition` is not a valid partition of the nodes of G + + """ + if is_partition(G, partition): + return G, partition + raise nx.NetworkXError("`partition` is not a valid partition of the nodes of G") + + +require_partition = argmap(_require_partition, (0, 1)) + + +@nx._dispatch +def intra_community_edges(G, partition): + """Returns the number of intra-community edges for a partition of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The "intra-community edges" are those edges joining a pair of nodes + in the same block of the partition. + + """ + return sum(G.subgraph(block).size() for block in partition) + + +@nx._dispatch +def inter_community_edges(G, partition): + """Returns the number of inter-community edges for a partition of `G`. + according to the given + partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + The *inter-community edges* are those edges joining a pair of nodes + in different blocks of the partition. + + Implementation note: this function creates an intermediate graph + that may require the same amount of memory as that of `G`. + + """ + # Alternate implementation that does not require constructing a new + # graph object (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in G.edges() if aff[u] != aff[v]) + # + MG = nx.MultiDiGraph if G.is_directed() else nx.MultiGraph + return nx.quotient_graph(G, partition, create_using=MG).size() + + +@nx._dispatch +def inter_community_non_edges(G, partition): + """Returns the number of inter-community non-edges according to the + given partition of the nodes of `G`. + + Parameters + ---------- + G : NetworkX graph. + + partition : iterable of sets of nodes + This must be a partition of the nodes of `G`. + + A *non-edge* is a pair of nodes (undirected if `G` is undirected) + that are not adjacent in `G`. The *inter-community non-edges* are + those non-edges on a pair of nodes in different blocks of the + partition. + + Implementation note: this function creates two intermediate graphs, + which may require up to twice the amount of memory as required to + store `G`. + + """ + # Alternate implementation that does not require constructing two + # new graph objects (but does require constructing an affiliation + # dictionary): + # + # aff = dict(chain.from_iterable(((v, block) for v in block) + # for block in partition)) + # return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v]) + # + return inter_community_edges(nx.complement(G), partition) + + +@nx._dispatch(edge_attrs="weight") +def modularity(G, communities, weight="weight", resolution=1): + r"""Returns the modularity of the given partition of the graph. + + Modularity is defined in [1]_ as + + .. 
math:: + Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \gamma\frac{k_ik_j}{2m}\right) + \delta(c_i,c_j) + + where $m$ is the number of edges (or sum of all edge weights as in [5]_), + $A$ is the adjacency matrix of `G`, $k_i$ is the (weighted) degree of $i$, + $\gamma$ is the resolution parameter, and $\delta(c_i, c_j)$ is 1 if $i$ and + $j$ are in the same community else 0. + + According to [2]_ (and verified by some algebra) this can be reduced to + + .. math:: + Q = \sum_{c=1}^{n} + \left[ \frac{L_c}{m} - \gamma\left( \frac{k_c}{2m} \right) ^2 \right] + + where the sum iterates over all communities $c$, $m$ is the number of edges, + $L_c$ is the number of intra-community links for community $c$, + $k_c$ is the sum of degrees of the nodes in community $c$, + and $\gamma$ is the resolution parameter. + + The resolution parameter sets an arbitrary tradeoff between intra-group + edges and inter-group edges. More complex grouping patterns can be + discovered by analyzing the same network with multiple values of gamma + and then combining the results [3]_. That said, it is very common to + simply use gamma=1. More on the choice of gamma is in [4]_. + + The second formula is the one actually used in calculation of the modularity. + For directed graphs the second formula replaces $k_c$ with $k^{in}_c k^{out}_c$. + + Parameters + ---------- + G : NetworkX Graph + + communities : list or iterable of set of nodes + These node sets must represent a partition of G's nodes. + + weight : string or None, optional (default="weight") + The edge attribute that holds the numerical value used + as a weight. If None or an edge does not have that attribute, + then that edge has weight 1. + + resolution : float (default=1) + If resolution is less than 1, modularity favors larger communities. + Greater than 1 favors smaller communities. + + Returns + ------- + Q : float + The modularity of the partition. + + Raises + ------ + NotAPartition + If `communities` is not a partition of the nodes of `G`. + + Examples + -------- + >>> G = nx.barbell_graph(3, 0) + >>> nx.community.modularity(G, [{0, 1, 2}, {3, 4, 5}]) + 0.35714285714285715 + >>> nx.community.modularity(G, nx.community.label_propagation_communities(G)) + 0.35714285714285715 + + References + ---------- + .. [1] M. E. J. Newman "Networks: An Introduction", page 224. + Oxford University Press, 2011. + .. [2] Clauset, Aaron, Mark EJ Newman, and Cristopher Moore. + "Finding community structure in very large networks." + Phys. Rev. E 70.6 (2004). + .. [3] Reichardt and Bornholdt "Statistical Mechanics of Community Detection" + Phys. Rev. E 74, 016110, 2006. https://doi.org/10.1103/PhysRevE.74.016110 + .. [4] M. E. J. Newman, "Equivalence between modularity optimization and + maximum likelihood methods for community detection" + Phys. Rev. E 94, 052315, 2016. https://doi.org/10.1103/PhysRevE.94.052315 + .. [5] Blondel, V.D. et al. "Fast unfolding of communities in large + networks" J. Stat. Mech 10008, 1-12 (2008). 
+ https://doi.org/10.1088/1742-5468/2008/10/P10008 + """ + if not isinstance(communities, list): + communities = list(communities) + if not is_partition(G, communities): + raise NotAPartition(G, communities) + + directed = G.is_directed() + if directed: + out_degree = dict(G.out_degree(weight=weight)) + in_degree = dict(G.in_degree(weight=weight)) + m = sum(out_degree.values()) + norm = 1 / m**2 + else: + out_degree = in_degree = dict(G.degree(weight=weight)) + deg_sum = sum(out_degree.values()) + m = deg_sum / 2 + norm = 1 / deg_sum**2 + + def community_contribution(community): + comm = set(community) + L_c = sum(wt for u, v, wt in G.edges(comm, data=weight, default=1) if v in comm) + + out_degree_sum = sum(out_degree[u] for u in comm) + in_degree_sum = sum(in_degree[u] for u in comm) if directed else out_degree_sum + + return L_c / m - resolution * out_degree_sum * in_degree_sum * norm + + return sum(map(community_contribution, communities)) + + +@require_partition +@nx._dispatch +def partition_quality(G, partition): + """Returns the coverage and performance of a partition of G. + + The *coverage* of a partition is the ratio of the number of + intra-community edges to the total number of edges in the graph. + + The *performance* of a partition is the number of + intra-community edges plus inter-community non-edges divided by the total + number of potential edges. + + This algorithm has complexity $O(C^2 + L)$ where C is the number of communities and L is the number of links. + + Parameters + ---------- + G : NetworkX graph + + partition : sequence + Partition of the nodes of `G`, represented as a sequence of + sets of nodes (blocks). Each block of the partition represents a + community. + + Returns + ------- + (float, float) + The (coverage, performance) tuple of the partition, as defined above. + + Raises + ------ + NetworkXError + If `partition` is not a valid partition of the nodes of `G`. + + Notes + ----- + If `G` is a multigraph; + - for coverage, the multiplicity of edges is counted + - for performance, the result is -1 (total number of possible edges is not defined) + + References + ---------- + .. [1] Santo Fortunato. + "Community Detection in Graphs". + *Physical Reports*, Volume 486, Issue 3--5 pp. 
75--174 + + """ + + node_community = {} + for i, community in enumerate(partition): + for node in community: + node_community[node] = i + + # `performance` is not defined for multigraphs + if not G.is_multigraph(): + # Iterate over the communities, quadratic, to calculate `possible_inter_community_edges` + possible_inter_community_edges = sum( + len(p1) * len(p2) for p1, p2 in combinations(partition, 2) + ) + + if G.is_directed(): + possible_inter_community_edges *= 2 + else: + possible_inter_community_edges = 0 + + # Compute the number of edges in the complete graph -- `n` nodes, + # directed or undirected, depending on `G` + n = len(G) + total_pairs = n * (n - 1) + if not G.is_directed(): + total_pairs //= 2 + + intra_community_edges = 0 + inter_community_non_edges = possible_inter_community_edges + + # Iterate over the links to count `intra_community_edges` and `inter_community_non_edges` + for e in G.edges(): + if node_community[e[0]] == node_community[e[1]]: + intra_community_edges += 1 + else: + inter_community_non_edges -= 1 + + coverage = intra_community_edges / len(G.edges) + + if G.is_multigraph(): + performance = -1.0 + else: + performance = (intra_community_edges + inter_community_non_edges) / total_pairs + + return coverage, performance diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c0aa9583585b7e24ea9f1a0c642a856d3364589 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6655282facc39d97393ee80ee24611df8585367a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1eb4a160aa21dc2b345011a5853282d57eed686 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ea62b4e4c385e14b3acf9e3aff70b6a649d97a7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab4ee1bb93f017fe3935ea6ffa9b6291c2577eba Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee35cf252b3de86aaef311d1f94e28548b5866c7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a626c53ce631fcd51b870dbccc50cf8235c1b7f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b5311fb5e152eb8fec01b7ccba4a21e211a4adf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..282b786ea7249f74a631810d0520d6f881ab203f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e47eabcf65757aefc740637ae957d678edb7799 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_quality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..337343fd44794958b47af6aed8df6bfaf8f61ed5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/__pycache__/test_utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py new file mode 100644 index 0000000000000000000000000000000000000000..cd108dda36949dabe8a1d574bde596335150205e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_asyn_fluid.py @@ -0,0 +1,129 @@ +import 
pytest + +import networkx as nx +from networkx import Graph, NetworkXError +from networkx.algorithms.community import asyn_fluidc + + +def test_exceptions(): + test = Graph() + test.add_node("a") + pytest.raises(NetworkXError, asyn_fluidc, test, "hi") + pytest.raises(NetworkXError, asyn_fluidc, test, -1) + pytest.raises(NetworkXError, asyn_fluidc, test, 3) + test.add_node("b") + pytest.raises(NetworkXError, asyn_fluidc, test, 1) + + +def test_single_node(): + test = Graph() + + test.add_node("a") + + # ground truth + ground_truth = {frozenset(["a"])} + + communities = asyn_fluidc(test, 1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_nodes(): + test = Graph() + + test.add_edge("a", "b") + + # ground truth + ground_truth = {frozenset(["a"]), frozenset(["b"])} + + communities = asyn_fluidc(test, 2) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_two_clique_communities(): + test = Graph() + + # c1 + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "c") + + # connection + test.add_edge("c", "d") + + # c2 + test.add_edge("d", "e") + test.add_edge("d", "f") + test.add_edge("f", "e") + + # ground truth + ground_truth = {frozenset(["a", "c", "b"]), frozenset(["e", "d", "f"])} + + communities = asyn_fluidc(test, 2, seed=7) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_five_clique_ring(): + test = Graph() + + # c1 + test.add_edge("1a", "1b") + test.add_edge("1a", "1c") + test.add_edge("1a", "1d") + test.add_edge("1b", "1c") + test.add_edge("1b", "1d") + test.add_edge("1c", "1d") + + # c2 + test.add_edge("2a", "2b") + test.add_edge("2a", "2c") + test.add_edge("2a", "2d") + test.add_edge("2b", "2c") + test.add_edge("2b", "2d") + test.add_edge("2c", "2d") + + # c3 + test.add_edge("3a", "3b") + test.add_edge("3a", "3c") + test.add_edge("3a", "3d") + test.add_edge("3b", "3c") + test.add_edge("3b", "3d") + test.add_edge("3c", "3d") + + # c4 + test.add_edge("4a", "4b") + test.add_edge("4a", "4c") + test.add_edge("4a", "4d") + test.add_edge("4b", "4c") + test.add_edge("4b", "4d") + test.add_edge("4c", "4d") + + # c5 + test.add_edge("5a", "5b") + test.add_edge("5a", "5c") + test.add_edge("5a", "5d") + test.add_edge("5b", "5c") + test.add_edge("5b", "5d") + test.add_edge("5c", "5d") + + # connections + test.add_edge("1a", "2c") + test.add_edge("2a", "3c") + test.add_edge("3a", "4c") + test.add_edge("4a", "5c") + test.add_edge("5a", "1c") + + # ground truth + ground_truth = { + frozenset(["1a", "1b", "1c", "1d"]), + frozenset(["2a", "2b", "2c", "2d"]), + frozenset(["3a", "3b", "3c", "3d"]), + frozenset(["4a", "4b", "4c", "4d"]), + frozenset(["5a", "5b", "5c", "5d"]), + } + + communities = asyn_fluidc(test, 5, seed=9) + result = {frozenset(c) for c in communities} + assert result == ground_truth diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_centrality.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_centrality.py new file mode 100644 index 0000000000000000000000000000000000000000..a31d9a8051e2e4362d214a0e2ac1d79694eca127 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_centrality.py @@ -0,0 +1,84 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.centrality` +module. 
+ +""" +from operator import itemgetter + +import networkx as nx + + +def set_of_sets(iterable): + return set(map(frozenset, iterable)) + + +def validate_communities(result, expected): + assert set_of_sets(result) == set_of_sets(expected) + + +def validate_possible_communities(result, *expected): + assert any(set_of_sets(result) == set_of_sets(p) for p in expected) + + +class TestGirvanNewman: + """Unit tests for the + :func:`networkx.algorithms.community.centrality.girvan_newman` + function. + + """ + + def test_no_edges(self): + G = nx.empty_graph(3) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 1 + validate_communities(communities[0], [{0}, {1}, {2}]) + + def test_undirected(self): + # Start with the graph .-.-.-. + G = nx.path_graph(4) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + # After one removal, we get the graph .-. .-. + validate_communities(communities[0], [{0, 1}, {2, 3}]) + # After the next, we get the graph .-. . ., but there are two + # symmetric possible versions. + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + # After the last removal, we always get the empty graph. + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_directed(self): + G = nx.DiGraph(nx.path_graph(4)) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_selfloops(self): + G = nx.path_graph(4) + G.add_edge(0, 0) + G.add_edge(2, 2) + communities = list(nx.community.girvan_newman(G)) + assert len(communities) == 3 + validate_communities(communities[0], [{0, 1}, {2, 3}]) + validate_possible_communities( + communities[1], [{0}, {1}, {2, 3}], [{0, 1}, {2}, {3}] + ) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) + + def test_most_valuable_edge(self): + G = nx.Graph() + G.add_weighted_edges_from([(0, 1, 3), (1, 2, 2), (2, 3, 1)]) + # Let the most valuable edge be the one with the highest weight. 
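+        # girvan_newman removes the edge returned by the most_valuable_edge
+        # callable at each step, so removals follow the weights here:
+        # (0, 1) first, then (1, 2), then (2, 3).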
+ + def heaviest(G): + return max(G.edges(data="weight"), key=itemgetter(2))[:2] + + communities = list(nx.community.girvan_newman(G, heaviest)) + assert len(communities) == 3 + validate_communities(communities[0], [{0}, {1, 2, 3}]) + validate_communities(communities[1], [{0}, {1}, {2, 3}]) + validate_communities(communities[2], [{0}, {1}, {2}, {3}]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kclique.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kclique.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0b7e823e2780f734180562fa3fb8ce5a671312 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kclique.py @@ -0,0 +1,91 @@ +from itertools import combinations + +import pytest + +import networkx as nx + + +def test_overlapping_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(2, 7), 2)) # Add another five clique + c = list(nx.community.k_clique_communities(G, 4)) + assert c == [frozenset(range(7))] + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(2, 7))} + + +def test_isolated_K5(): + G = nx.Graph() + G.add_edges_from(combinations(range(5), 2)) # Add a five clique + G.add_edges_from(combinations(range(5, 10), 2)) # Add another five clique + c = set(nx.community.k_clique_communities(G, 5)) + assert c == {frozenset(range(5)), frozenset(range(5, 10))} + + +class TestZacharyKarateClub: + def setup_method(self): + self.G = nx.karate_club_graph() + + def _check_communities(self, k, expected): + communities = set(nx.community.k_clique_communities(self.G, k)) + assert communities == expected + + def test_k2(self): + # clique percolation with k=2 is just connected components + expected = {frozenset(self.G)} + self._check_communities(2, expected) + + def test_k3(self): + comm1 = [ + 0, + 1, + 2, + 3, + 7, + 8, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + ] + comm2 = [0, 4, 5, 6, 10, 16] + comm3 = [24, 25, 31] + expected = {frozenset(comm1), frozenset(comm2), frozenset(comm3)} + self._check_communities(3, expected) + + def test_k4(self): + expected = { + frozenset([0, 1, 2, 3, 7, 13]), + frozenset([8, 32, 30, 33]), + frozenset([32, 33, 29, 23]), + } + self._check_communities(4, expected) + + def test_k5(self): + expected = {frozenset([0, 1, 2, 3, 7, 13])} + self._check_communities(5, expected) + + def test_k6(self): + expected = set() + self._check_communities(6, expected) + + +def test_bad_k(): + with pytest.raises(nx.NetworkXError): + list(nx.community.k_clique_communities(nx.Graph(), 1)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc555475dd55dbe401c2d009ccd4fbdc0cb2d8b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_kernighan_lin.py @@ -0,0 +1,91 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.kernighan_lin` +module. 
+""" +from itertools import permutations + +import pytest + +import networkx as nx +from networkx.algorithms.community import kernighan_lin_bisection + + +def assert_partition_equal(x, y): + assert set(map(frozenset, x)) == set(map(frozenset, y)) + + +def test_partition(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_partition_argument(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_partition_argument_non_integer_nodes(): + G = nx.Graph([("A", "B"), ("A", "C"), ("B", "C"), ("C", "D")]) + partition = ({"A", "B"}, {"C", "D"}) + C = kernighan_lin_bisection(G, partition) + assert_partition_equal(C, partition) + + +def test_seed_argument(): + G = nx.barbell_graph(3, 0) + C = kernighan_lin_bisection(G, seed=1) + assert_partition_equal(C, [{0, 1, 2}, {3, 4, 5}]) + + +def test_non_disjoint_partition(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1, 2}, {2, 3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_too_many_blocks(): + with pytest.raises(nx.NetworkXError): + G = nx.barbell_graph(3, 0) + partition = ({0, 1}, {2}, {3, 4, 5}) + kernighan_lin_bisection(G, partition) + + +def test_multigraph(): + G = nx.cycle_graph(4) + M = nx.MultiGraph(G.edges()) + M.add_edges_from(G.edges()) + M.remove_edge(1, 2) + for labels in permutations(range(4)): + mapping = dict(zip(M, labels)) + A, B = kernighan_lin_bisection(nx.relabel_nodes(M, mapping), seed=0) + assert_partition_equal( + [A, B], [{mapping[0], mapping[1]}, {mapping[2], mapping[3]}] + ) + + +def test_max_iter_argument(): + G = nx.Graph( + [ + ("A", "B", {"weight": 1}), + ("A", "C", {"weight": 2}), + ("A", "D", {"weight": 3}), + ("A", "E", {"weight": 2}), + ("A", "F", {"weight": 4}), + ("B", "C", {"weight": 1}), + ("B", "D", {"weight": 4}), + ("B", "E", {"weight": 2}), + ("B", "F", {"weight": 1}), + ("C", "D", {"weight": 3}), + ("C", "E", {"weight": 2}), + ("C", "F", {"weight": 1}), + ("D", "E", {"weight": 4}), + ("D", "F", {"weight": 3}), + ("E", "F", {"weight": 2}), + ] + ) + partition = ({"A", "B", "C"}, {"D", "E", "F"}) + C = kernighan_lin_bisection(G, partition, max_iter=1) + assert_partition_equal(C, ({"A", "F", "C"}, {"D", "E", "B"})) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_label_propagation.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_label_propagation.py new file mode 100644 index 0000000000000000000000000000000000000000..9a0b3d89b6ea19403e02bad0ba635e7bdaa0a6d5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_label_propagation.py @@ -0,0 +1,240 @@ +from itertools import chain, combinations + +import pytest + +import networkx as nx + + +def test_directed_not_supported(): + with pytest.raises(nx.NetworkXNotImplemented): + # not supported for directed graphs + test = nx.DiGraph() + test.add_edge("a", "b") + test.add_edge("a", "c") + test.add_edge("b", "d") + result = nx.community.label_propagation_communities(test) + + +def test_iterator_vs_iterable(): + G = nx.empty_graph("a") + assert list(nx.community.label_propagation_communities(G)) == [{"a"}] + for community in nx.community.label_propagation_communities(G): + assert community == {"a"} + pytest.raises(TypeError, next, nx.community.label_propagation_communities(G)) + + +def test_one_node(): + test = nx.Graph() + test.add_node("a") + + # The 
expected communities are: + ground_truth = {frozenset(["a"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_unconnected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "c") + test.add_edge("a", "d") + test.add_edge("d", "c") + # community 2 + test.add_edge("b", "e") + test.add_edge("e", "f") + test.add_edge("f", "b") + + # The expected communities are: + ground_truth = {frozenset(["a", "c", "d"]), frozenset(["b", "e", "f"])} + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + +def test_connected_communities(): + test = nx.Graph() + # community 1 + test.add_edge("a", "b") + test.add_edge("c", "a") + test.add_edge("c", "b") + test.add_edge("d", "a") + test.add_edge("d", "b") + test.add_edge("d", "c") + test.add_edge("e", "a") + test.add_edge("e", "b") + test.add_edge("e", "c") + test.add_edge("e", "d") + # community 2 + test.add_edge("1", "2") + test.add_edge("3", "1") + test.add_edge("3", "2") + test.add_edge("4", "1") + test.add_edge("4", "2") + test.add_edge("4", "3") + test.add_edge("5", "1") + test.add_edge("5", "2") + test.add_edge("5", "3") + test.add_edge("5", "4") + # edge between community 1 and 2 + test.add_edge("a", "1") + # community 3 + test.add_edge("x", "y") + # community 4 with only a single node + test.add_node("z") + + # The expected communities are: + ground_truth1 = { + frozenset(["a", "b", "c", "d", "e"]), + frozenset(["1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth2 = { + frozenset(["a", "b", "c", "d", "e", "1", "2", "3", "4", "5"]), + frozenset(["x", "y"]), + frozenset(["z"]), + } + ground_truth = (ground_truth1, ground_truth2) + + communities = nx.community.label_propagation_communities(test) + result = {frozenset(c) for c in communities} + assert result in ground_truth + + +def test_termination(): + # ensure termination of asyn_lpa_communities in two cases + # that led to an endless loop in a previous version + test1 = nx.karate_club_graph() + test2 = nx.caveman_graph(2, 10) + test2.add_edges_from([(0, 20), (20, 10)]) + nx.community.asyn_lpa_communities(test1) + nx.community.asyn_lpa_communities(test2) + + +class TestAsynLpaCommunities: + def _check_communities(self, G, expected): + """Checks that the communities computed from the given graph ``G`` + using the :func:`~networkx.asyn_lpa_communities` function match + the set of nodes given in ``expected``. + + ``expected`` must be a :class:`set` of :class:`frozenset` + instances, each element of which is a node in the graph. + + """ + communities = nx.community.asyn_lpa_communities(G) + result = {frozenset(c) for c in communities} + assert result == expected + + def test_null_graph(self): + G = nx.null_graph() + ground_truth = set() + self._check_communities(G, ground_truth) + + def test_single_node(self): + G = nx.empty_graph(1) + ground_truth = {frozenset([0])} + self._check_communities(G, ground_truth) + + def test_simple_communities(self): + # This graph is the disjoint union of two triangles. 
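+        # ``nx.Graph`` accepts any iterable of node pairs, so the
+        # two-character string "ab" adds the single edge ("a", "b").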
+ G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + self._check_communities(G, ground_truth) + + def test_seed_argument(self): + G = nx.Graph(["ab", "ac", "bc", "de", "df", "fe"]) + ground_truth = {frozenset("abc"), frozenset("def")} + communities = nx.community.asyn_lpa_communities(G, seed=1) + result = {frozenset(c) for c in communities} + assert result == ground_truth + + def test_several_communities(self): + # This graph is the disjoint union of five triangles. + ground_truth = {frozenset(range(3 * i, 3 * (i + 1))) for i in range(5)} + edges = chain.from_iterable(combinations(c, 2) for c in ground_truth) + G = nx.Graph(edges) + self._check_communities(G, ground_truth) + + +class TestFastLabelPropagationCommunities: + N = 100 # number of nodes + K = 15 # average node degree + + def _check_communities(self, G, truth, weight=None, seed=None): + C = nx.community.fast_label_propagation_communities(G, weight=weight, seed=seed) + assert {frozenset(c) for c in C} == truth + + def test_null_graph(self): + G = nx.null_graph() + truth = set() + self._check_communities(G, truth) + + def test_empty_graph(self): + G = nx.empty_graph(self.N) + truth = {frozenset([i]) for i in G} + self._check_communities(G, truth) + + def test_star_graph(self): + G = nx.star_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_complete_graph(self): + G = nx.complete_graph(self.N) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_bipartite_graph(self): + G = nx.complete_bipartite_graph(self.N // 2, self.N // 2) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_random_graph(self): + G = nx.gnm_random_graph(self.N, self.N * self.K // 2) + truth = {frozenset(G)} + self._check_communities(G, truth) + + def test_disjoin_cliques(self): + G = nx.Graph(["ab", "AB", "AC", "BC", "12", "13", "14", "23", "24", "34"]) + truth = {frozenset("ab"), frozenset("ABC"), frozenset("1234")} + self._check_communities(G, truth) + + def test_ring_of_cliques(self): + G = nx.ring_of_cliques(self.N, self.K) + truth = { + frozenset([self.K * i + k for k in range(self.K)]) for i in range(self.N) + } + self._check_communities(G, truth) + + def test_larger_graph(self): + G = nx.gnm_random_graph(100 * self.N, 50 * self.N * self.K) + nx.community.fast_label_propagation_communities(G) + + def test_graph_type(self): + G1 = nx.complete_graph(self.N, nx.MultiDiGraph()) + G2 = nx.MultiGraph(G1) + G3 = nx.DiGraph(G1) + G4 = nx.Graph(G1) + truth = {frozenset(G1)} + self._check_communities(G1, truth) + self._check_communities(G2, truth) + self._check_communities(G3, truth) + self._check_communities(G4, truth) + + def test_weight_argument(self): + G = nx.MultiDiGraph() + G.add_edge(1, 2, weight=1.41) + G.add_edge(2, 1, weight=1.41) + G.add_edge(2, 3) + G.add_edge(3, 4, weight=3.14) + truth = {frozenset({1, 2}), frozenset({3, 4})} + self._check_communities(G, truth, weight="weight") + + def test_seed_argument(self): + G = nx.karate_club_graph() + C = nx.community.fast_label_propagation_communities(G, seed=2023) + truth = {frozenset(c) for c in C} + self._check_communities(G, truth, seed=2023) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_louvain.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_louvain.py new file mode 100644 index 0000000000000000000000000000000000000000..60b95d377837e91e3f68af101055ca3145ae640d --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_louvain.py @@ -0,0 +1,244 @@ +import networkx as nx + + +def test_modularity_increase(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition = [{u} for u in G.nodes()] + mod = nx.community.modularity(G, partition) + partition = nx.community.louvain_communities(G) + + assert nx.community.modularity(G, partition) > mod + + +def test_valid_partition(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = G.to_directed() + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + + assert nx.community.is_partition(G, partition) + assert nx.community.is_partition(H, partition2) + + +def test_karate_club_partition(): + G = nx.karate_club_graph() + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition = nx.community.louvain_communities(G, seed=2, weight=None) + + assert part == partition + + +def test_partition_iterator(): + G = nx.path_graph(15) + parts_iter = nx.community.louvain_partitions(G, seed=42) + first_part = next(parts_iter) + first_copy = [s.copy() for s in first_part] + + # gh-5901 reports sets changing after next partition is yielded + assert first_copy[0] == first_part[0] + second_part = next(parts_iter) + assert first_copy[0] == first_part[0] + + +def test_undirected_selfloops(): + G = nx.karate_club_graph() + expected_partition = nx.community.louvain_communities(G, seed=2, weight=None) + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + assert expected_partition == part + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(9)]) + # large self-loop weight impacts partition + partition = nx.community.louvain_communities(G, seed=2, weight="weight") + assert part != partition + + # small self-loop weights aren't enough to impact partition in this graph + partition = nx.community.louvain_communities(G, seed=2, weight=None) + assert part == partition + + +def test_directed_selfloops(): + G = nx.DiGraph() + G.add_nodes_from(range(11)) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + G.add_edges_from(G_edges) + G_expected_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + G.add_weighted_edges_from([(i, i, i * 1000) for i in range(3)]) + # large self-loop weight impacts partition + G_partition = nx.community.louvain_communities(G, seed=123, weight="weight") + assert G_partition != G_expected_partition + + # small self-loop weights aren't enough to impact partition in this graph + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + assert G_partition == G_expected_partition + + +def test_directed_partition(): + """ + Test 2 cases that were looping infinitely + from issues #5175 and #5704 + """ + G = nx.DiGraph() + H = nx.DiGraph() + G.add_nodes_from(range(10)) + H.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G_edges = [ + (0, 2), + (0, 1), + (1, 0), + (2, 1), + (2, 0), + (3, 4), + (4, 3), + (7, 8), + (8, 7), + (9, 10), + (10, 9), + ] + H_edges = [ + (1, 2), + (1, 6), + (1, 9), + (2, 3), + (2, 4), + (2, 5), + (3, 4), + (4, 3), + (4, 5), + (5, 4), + (6, 7), + (6, 8), + (9, 10), + (9, 11), + (10, 11), + (11, 10), 
+ ] + G.add_edges_from(G_edges) + H.add_edges_from(H_edges) + + G_expected_partition = [{0, 1, 2}, {3, 4}, {5}, {6}, {8, 7}, {9, 10}] + G_partition = nx.community.louvain_communities(G, seed=123, weight=None) + + H_expected_partition = [{2, 3, 4, 5}, {8, 1, 6, 7}, {9, 10, 11}] + H_partition = nx.community.louvain_communities(H, seed=123, weight=None) + + assert G_partition == G_expected_partition + assert H_partition == H_expected_partition + + +def test_none_weight_param(): + G = nx.karate_club_graph() + nx.set_edge_attributes( + G, {edge: i * i for i, edge in enumerate(G.edges)}, name="foo" + ) + + part = [ + {0, 1, 2, 3, 7, 9, 11, 12, 13, 17, 19, 21}, + {16, 4, 5, 6, 10}, + {23, 25, 27, 28, 24, 31}, + {32, 33, 8, 14, 15, 18, 20, 22, 26, 29, 30}, + ] + partition1 = nx.community.louvain_communities(G, weight=None, seed=2) + partition2 = nx.community.louvain_communities(G, weight="foo", seed=2) + partition3 = nx.community.louvain_communities(G, weight="weight", seed=2) + + assert part == partition1 + assert part != partition2 + assert part != partition3 + assert partition2 != partition3 + + +def test_quality(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + H = nx.gn_graph(200, seed=1234) + I = nx.MultiGraph(G) + J = nx.MultiDiGraph(H) + + partition = nx.community.louvain_communities(G) + partition2 = nx.community.louvain_communities(H) + partition3 = nx.community.louvain_communities(I) + partition4 = nx.community.louvain_communities(J) + + quality = nx.community.partition_quality(G, partition)[0] + quality2 = nx.community.partition_quality(H, partition2)[0] + quality3 = nx.community.partition_quality(I, partition3)[0] + quality4 = nx.community.partition_quality(J, partition4)[0] + + assert quality >= 0.65 + assert quality2 >= 0.65 + assert quality3 >= 0.65 + assert quality4 >= 0.65 + + +def test_multigraph(): + G = nx.karate_club_graph() + H = nx.MultiGraph(G) + G.add_edge(0, 1, weight=10) + H.add_edge(0, 1, weight=9) + G.add_edge(0, 9, foo=20) + H.add_edge(0, 9, foo=20) + + partition1 = nx.community.louvain_communities(G, seed=1234) + partition2 = nx.community.louvain_communities(H, seed=1234) + partition3 = nx.community.louvain_communities(H, weight="foo", seed=1234) + + assert partition1 == partition2 != partition3 + + +def test_resolution(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + + partition1 = nx.community.louvain_communities(G, resolution=0.5, seed=12) + partition2 = nx.community.louvain_communities(G, seed=12) + partition3 = nx.community.louvain_communities(G, resolution=2, seed=12) + + assert len(partition1) <= len(partition2) <= len(partition3) + + +def test_threshold(): + G = nx.LFR_benchmark_graph( + 250, 3, 1.5, 0.009, average_degree=5, min_community=20, seed=10 + ) + partition1 = nx.community.louvain_communities(G, threshold=0.3, seed=2) + partition2 = nx.community.louvain_communities(G, seed=2) + mod1 = nx.community.modularity(G, partition1) + mod2 = nx.community.modularity(G, partition2) + + assert mod1 < mod2 + + +def test_empty_graph(): + G = nx.Graph() + G.add_nodes_from(range(5)) + expected = [{0}, {1}, {2}, {3}, {4}] + assert nx.community.louvain_communities(G) == expected diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_lukes.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_lukes.py new file mode 100644 index 0000000000000000000000000000000000000000..cfa48f0f47667ce4c4fa96c175bee4cb95a4852f --- /dev/null 
+++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_lukes.py @@ -0,0 +1,152 @@ +from itertools import product + +import pytest + +import networkx as nx + +EWL = "e_weight" +NWL = "n_weight" + + +# first test from the Lukes original paper +def paper_1_case(float_edge_wt=False, explicit_node_wt=True, directed=False): + # problem-specific constants + limit = 3 + + # configuration + if float_edge_wt: + shift = 0.001 + else: + shift = 0 + + if directed: + example_1 = nx.DiGraph() + else: + example_1 = nx.Graph() + + # graph creation + example_1.add_edge(1, 2, **{EWL: 3 + shift}) + example_1.add_edge(1, 4, **{EWL: 2 + shift}) + example_1.add_edge(2, 3, **{EWL: 4 + shift}) + example_1.add_edge(2, 5, **{EWL: 6 + shift}) + + # node weights + if explicit_node_wt: + nx.set_node_attributes(example_1, 1, NWL) + wtu = NWL + else: + wtu = None + + # partitioning + clusters_1 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_1, limit, node_weight=wtu, edge_weight=EWL + ) + } + + return clusters_1 + + +# second test from the Lukes original paper +def paper_2_case(explicit_edge_wt=True, directed=False): + # problem specific constants + byte_block_size = 32 + + # configuration + if directed: + example_2 = nx.DiGraph() + else: + example_2 = nx.Graph() + + if explicit_edge_wt: + edic = {EWL: 1} + wtu = EWL + else: + edic = {} + wtu = None + + # graph creation + example_2.add_edge("name", "home_address", **edic) + example_2.add_edge("name", "education", **edic) + example_2.add_edge("education", "bs", **edic) + example_2.add_edge("education", "ms", **edic) + example_2.add_edge("education", "phd", **edic) + example_2.add_edge("name", "telephone", **edic) + example_2.add_edge("telephone", "home", **edic) + example_2.add_edge("telephone", "office", **edic) + example_2.add_edge("office", "no1", **edic) + example_2.add_edge("office", "no2", **edic) + + example_2.nodes["name"][NWL] = 20 + example_2.nodes["education"][NWL] = 10 + example_2.nodes["bs"][NWL] = 1 + example_2.nodes["ms"][NWL] = 1 + example_2.nodes["phd"][NWL] = 1 + example_2.nodes["home_address"][NWL] = 8 + example_2.nodes["telephone"][NWL] = 8 + example_2.nodes["home"][NWL] = 8 + example_2.nodes["office"][NWL] = 4 + example_2.nodes["no1"][NWL] = 1 + example_2.nodes["no2"][NWL] = 1 + + # partitioning + clusters_2 = { + frozenset(x) + for x in nx.community.lukes_partitioning( + example_2, byte_block_size, node_weight=NWL, edge_weight=wtu + ) + } + + return clusters_2 + + +def test_paper_1_case(): + ground_truth = {frozenset([1, 4]), frozenset([2, 3, 5])} + + tf = (True, False) + for flt, nwt, drc in product(tf, tf, tf): + part = paper_1_case(flt, nwt, drc) + assert part == ground_truth + + +def test_paper_2_case(): + ground_truth = { + frozenset(["education", "bs", "ms", "phd"]), + frozenset(["name", "home_address"]), + frozenset(["telephone", "home", "office", "no1", "no2"]), + } + + tf = (True, False) + for ewt, drc in product(tf, tf): + part = paper_2_case(ewt, drc) + assert part == ground_truth + + +def test_mandatory_tree(): + not_a_tree = nx.complete_graph(4) + + with pytest.raises(nx.NotATree): + nx.community.lukes_partitioning(not_a_tree, 5) + + +def test_mandatory_integrality(): + byte_block_size = 32 + + ex_1_broken = nx.DiGraph() + + ex_1_broken.add_edge(1, 2, **{EWL: 3.2}) + ex_1_broken.add_edge(1, 4, **{EWL: 2.4}) + ex_1_broken.add_edge(2, 3, **{EWL: 4.0}) + ex_1_broken.add_edge(2, 5, **{EWL: 6.3}) + + ex_1_broken.nodes[1][NWL] = 1.2 # ! 
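+    # lukes_partitioning requires integer node and edge weights; the float
+    # values above are exactly what this test expects to trigger TypeError.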
+ ex_1_broken.nodes[2][NWL] = 1 + ex_1_broken.nodes[3][NWL] = 1 + ex_1_broken.nodes[4][NWL] = 1 + ex_1_broken.nodes[5][NWL] = 2 + + with pytest.raises(TypeError): + nx.community.lukes_partitioning( + ex_1_broken, byte_block_size, node_weight=NWL, edge_weight=EWL + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_modularity_max.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_modularity_max.py new file mode 100644 index 0000000000000000000000000000000000000000..2757f3515a28900047468cfa4e1a39cd9abf88fd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_modularity_max.py @@ -0,0 +1,333 @@ +import pytest + +import networkx as nx +from networkx.algorithms.community import ( + greedy_modularity_communities, + naive_greedy_modularity_communities, +) + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities(func): + G = nx.karate_club_graph() + john_a = frozenset( + [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33] + ) + mr_hi = frozenset([0, 4, 5, 6, 10, 11, 16, 19]) + overlap = frozenset([1, 2, 3, 7, 9, 12, 13, 17, 21]) + expected = {john_a, overlap, mr_hi} + assert set(func(G, weight=None)) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_categorical_labels(func): + # Using other than 0-starting contiguous integers as node-labels. + G = nx.Graph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = {frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})} + assert set(func(G)) == expected + + +def test_greedy_modularity_communities_components(): + # Test for gh-5530 + G = nx.Graph([(0, 1), (2, 3), (4, 5), (5, 6)]) + # usual case with 3 components + assert greedy_modularity_communities(G) == [{4, 5, 6}, {0, 1}, {2, 3}] + # best_n can make the algorithm continue even when modularity goes down + assert greedy_modularity_communities(G, best_n=3) == [{4, 5, 6}, {0, 1}, {2, 3}] + assert greedy_modularity_communities(G, best_n=2) == [{0, 1, 4, 5, 6}, {2, 3}] + assert greedy_modularity_communities(G, best_n=1) == [{0, 1, 2, 3, 4, 5, 6}] + + +def test_greedy_modularity_communities_relabeled(): + # Test for gh-4966 + G = nx.balanced_tree(2, 2) + mapping = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 7: "h"} + G = nx.relabel_nodes(G, mapping) + expected = [frozenset({"e", "d", "a", "b"}), frozenset({"c", "f", "g"})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_directed(): + G = nx.DiGraph( + [ + ("a", "b"), + ("a", "c"), + ("b", "c"), + ("b", "d"), # inter-community edge + ("d", "e"), + ("d", "f"), + ("d", "g"), + ("f", "g"), + ("d", "e"), + ("f", "e"), + ] + ) + expected = [frozenset({"f", "g", "e", "d"}), frozenset({"a", "b", "c"})] + assert greedy_modularity_communities(G) == expected + + # with loops + G = nx.DiGraph() + G.add_edges_from( + [(1, 1), (1, 2), (1, 3), (2, 3), (1, 4), (4, 4), (5, 5), (4, 5), (4, 6), (5, 6)] + ) + expected = [frozenset({1, 2, 3}), frozenset({4, 5, 6})] + assert greedy_modularity_communities(G) == expected + + +@pytest.mark.parametrize( + "func", (greedy_modularity_communities, naive_greedy_modularity_communities) +) +def test_modularity_communities_weighted(func): + G = 
nx.balanced_tree(2, 3) + for a, b in G.edges: + if ((a == 1) or (a == 2)) and (b != 0): + G[a][b]["weight"] = 10.0 + else: + G[a][b]["weight"] = 1.0 + + expected = [{0, 1, 3, 4, 7, 8, 9, 10}, {2, 5, 6, 11, 12, 13, 14}] + + assert func(G, weight="weight") == expected + assert func(G, weight="weight", resolution=0.9) == expected + assert func(G, weight="weight", resolution=0.3) == expected + assert func(G, weight="weight", resolution=1.1) != expected + + +def test_modularity_communities_floating_point(): + # check for floating point error when used as key in the mapped_queue dict. + # Test for gh-4992 and gh-5000 + G = nx.Graph() + G.add_weighted_edges_from( + [(0, 1, 12), (1, 4, 71), (2, 3, 15), (2, 4, 10), (3, 6, 13)] + ) + expected = [{0, 1, 4}, {2, 3, 6}] + assert greedy_modularity_communities(G, weight="weight") == expected + assert ( + greedy_modularity_communities(G, weight="weight", resolution=0.99) == expected + ) + + +def test_modularity_communities_directed_weighted(): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 3, 3), + (2, 3, 6), + (2, 6, 1), + (1, 4, 1), + (4, 5, 3), + (4, 6, 7), + (5, 6, 2), + (5, 7, 5), + (5, 8, 4), + (6, 8, 3), + ] + ) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # A large weight of the edge (2, 6) causes 6 to change group, even if it shares + # only one connection with the new group and 3 with the old one. + G[2][6]["weight"] = 20 + expected = [frozenset({1, 2, 3, 6}), frozenset({4, 5, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greedy_modularity_communities_multigraph(): + G = nx.MultiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (1, 3), + (2, 3), + (1, 4), + (2, 4), + (4, 5), + (5, 6), + (5, 7), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G) == expected + + # Converting (4, 5) into a multi-edge causes node 4 to change group. + G.add_edge(4, 5) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G) == expected + + +def test_greedy_modularity_communities_multigraph_weighted(): + G = nx.MultiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (1, 3, 6), + (1, 3, 6), + (2, 3, 4), + (1, 4, 1), + (1, 4, 1), + (2, 4, 3), + (2, 4, 3), + (4, 5, 1), + (5, 6, 3), + (5, 6, 7), + (5, 6, 4), + (5, 7, 9), + (5, 7, 9), + (6, 7, 8), + (7, 8, 2), + (7, 8, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Adding multi-edge (4, 5, 16) causes node 4 to change group. + G.add_edge(4, 5, weight=16) + expected = [frozenset({4, 5, 6, 7, 8}), frozenset({1, 2, 3})] + assert greedy_modularity_communities(G, weight="weight") == expected + + # Increasing the weight of edge (1, 4) causes node 4 to return to the former group. 
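+    # MultiGraph adjacency is keyed by parallel-edge index: G[1][4][1] is
+    # the second (1, 4) edge, created by the duplicate entry above.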
+ G[1][4][1]["weight"] = 3 + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph(): + G = nx.MultiDiGraph() + G.add_edges_from( + [ + (1, 2), + (1, 2), + (3, 1), + (2, 3), + (2, 3), + (3, 2), + (1, 4), + (2, 4), + (4, 2), + (4, 5), + (5, 6), + (5, 6), + (6, 5), + (5, 7), + (6, 7), + (7, 8), + (5, 8), + (8, 4), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_greed_modularity_communities_multidigraph_weighted(): + G = nx.MultiDiGraph() + G.add_weighted_edges_from( + [ + (1, 2, 5), + (1, 2, 3), + (3, 1, 6), + (1, 3, 6), + (3, 2, 4), + (1, 4, 2), + (1, 4, 5), + (2, 4, 3), + (3, 2, 8), + (4, 2, 3), + (4, 3, 5), + (4, 5, 2), + (5, 6, 3), + (5, 6, 7), + (6, 5, 4), + (5, 7, 9), + (5, 7, 9), + (7, 6, 8), + (7, 8, 2), + (8, 7, 2), + (5, 8, 6), + (5, 8, 6), + ] + ) + expected = [frozenset({1, 2, 3, 4}), frozenset({5, 6, 7, 8})] + assert greedy_modularity_communities(G, weight="weight") == expected + + +def test_resolution_parameter_impact(): + G = nx.barbell_graph(5, 3) + + gamma = 1 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 2.5 + expected = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + gamma = 0.3 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, resolution=gamma) == expected + assert naive_greedy_modularity_communities(G, resolution=gamma) == expected + + +def test_cutoff_parameter(): + G = nx.circular_ladder_graph(4) + + # No aggregation: + expected = [{k} for k in range(8)] + assert greedy_modularity_communities(G, cutoff=8) == expected + + # Aggregation to half order (number of nodes) + expected = [{k, k + 1} for k in range(0, 8, 2)] + assert greedy_modularity_communities(G, cutoff=4) == expected + + # Default aggregation case (here, 2 communities emerge) + expected = [frozenset(range(4)), frozenset(range(4, 8))] + assert greedy_modularity_communities(G, cutoff=1) == expected + + +def test_best_n(): + G = nx.barbell_graph(5, 3) + + # Same result as without enforcing cutoff: + best_n = 3 + expected = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # One additional merging step: + best_n = 2 + expected = [frozenset(range(8)), frozenset(range(8, 13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected + + # Two additional merging steps: + best_n = 1 + expected = [frozenset(range(13))] + assert greedy_modularity_communities(G, best_n=best_n) == expected diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_quality.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_quality.py new file mode 100644 index 0000000000000000000000000000000000000000..3447c9484e6da68e1245e71cbed5762a18827136 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_quality.py @@ -0,0 +1,138 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.quality` +module. 
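+
+The modularity assertions use the resolution-scaled definition
+Q = sum_c (L_c / m - gamma * (d_c / (2 * m)) ** 2), where L_c counts the
+intra-community edges, d_c is the total degree of community c and m the
+number of edges; barbell_graph(5, 3) has m = 24, hence the 48 ** 2 terms.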
+ +""" +import pytest + +import networkx as nx +from networkx import barbell_graph +from networkx.algorithms.community import modularity, partition_quality +from networkx.algorithms.community.quality import inter_community_edges + + +class TestPerformance: + """Unit tests for the :func:`performance` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 8 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high performance measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 14 / 15 == pytest.approx(partition_quality(G, partition)[1], abs=1e-7) + + +class TestCoverage: + """Unit tests for the :func:`coverage` function.""" + + def test_bad_partition(self): + """Tests that a poor partition has a low coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 4}, {2, 3, 5}] + assert 3 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + def test_good_partition(self): + """Tests that a good partition has a high coverage measure.""" + G = barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + assert 6 / 7 == pytest.approx(partition_quality(G, partition)[0], abs=1e-7) + + +def test_modularity(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert (-16 / (14**2)) == pytest.approx(modularity(G, C), abs=1e-7) + C = [{0, 1, 2}, {3, 4, 5}] + assert (35 * 2) / (14**2) == pytest.approx(modularity(G, C), abs=1e-7) + + n = 1000 + G = nx.erdos_renyi_graph(n, 0.09, seed=42, directed=True) + C = [set(range(n // 2)), set(range(n // 2, n))] + assert 0.00017154251389292754 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.margulis_gabber_galil_graph(10) + mid_value = G.number_of_nodes() // 2 + nodes = list(G.nodes) + C = [set(nodes[:mid_value]), set(nodes[mid_value:])] + assert 0.13 == pytest.approx(modularity(G, C), abs=1e-7) + + G = nx.DiGraph() + G.add_edges_from([(2, 1), (2, 3), (3, 4)]) + C = [{1, 2}, {3, 4}] + assert 2 / 9 == pytest.approx(modularity(G, C), abs=1e-7) + + +def test_modularity_resolution(): + G = nx.barbell_graph(3, 0) + C = [{0, 1, 4}, {2, 3, 5}] + assert modularity(G, C) == pytest.approx(3 / 7 - 100 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(3 / 7 - gamma * 100 / 14**2) + + C = [{0, 1, 2}, {3, 4, 5}] + assert modularity(G, C) == pytest.approx(6 / 7 - 98 / 14**2) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx(6 / 7 - gamma * 98 / 14**2) + + G = nx.barbell_graph(5, 3) + C = [frozenset(range(5)), frozenset(range(8, 13)), frozenset(range(5, 8))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=1: modularity = 0.518229 + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((22 / 24) - gamma * (918 / (48**2))) + + C = [{0, 1, 2, 3}, {9, 10, 11, 12}, {5, 6, 7}, {4}, {8}] + gamma = 1 + 
result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 2.5 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=2.5: modularity = -0.06553819 + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + gamma = 0.2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((14 / 24) - gamma * (598 / (48**2))) + + C = [frozenset(range(8)), frozenset(range(8, 13))] + gamma = 1 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 2 + result = modularity(G, C, resolution=gamma) + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + gamma = 0.3 + result = modularity(G, C, resolution=gamma) + # This C is maximal for gamma=0.3: modularity = 0.805990 + assert result == pytest.approx((23 / 24) - gamma * (1170 / (48**2))) + + +def test_inter_community_edges_with_digraphs(): + G = nx.complete_graph(2, create_using=nx.DiGraph()) + partition = [{0}, {1}] + assert inter_community_edges(G, partition) == 2 + + G = nx.complete_graph(10, create_using=nx.DiGraph()) + partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}] + assert inter_community_edges(G, partition) == 70 + + G = nx.cycle_graph(4, create_using=nx.DiGraph()) + partition = [{0, 1}, {2, 3}] + assert inter_community_edges(G, partition) == 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_utils.py b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..329ff66a4229ee67e0e7c58ddaf22aa4f22b290b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/community/tests/test_utils.py @@ -0,0 +1,28 @@ +"""Unit tests for the :mod:`networkx.algorithms.community.utils` module. 
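+
+These checks exercise ``is_partition``, which requires the given blocks to
+cover every node of the graph exactly once (a cover that is also disjoint).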
+ +""" + +import networkx as nx + + +def test_is_partition(): + G = nx.empty_graph(3) + assert nx.community.is_partition(G, [{0, 1}, {2}]) + assert nx.community.is_partition(G, ({0, 1}, {2})) + assert nx.community.is_partition(G, ([0, 1], [2])) + assert nx.community.is_partition(G, [[0, 1], [2]]) + + +def test_not_covering(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0}, {1}]) + + +def test_not_disjoint(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {1, 2}]) + + +def test_not_node(): + G = nx.empty_graph(3) + assert not nx.community.is_partition(G, [{0, 1}, {3}]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/components/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ae2caba856daba534037f4a6f967abfad49552 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/__init__.py @@ -0,0 +1,6 @@ +from .connected import * +from .strongly_connected import * +from .weakly_connected import * +from .attracting import * +from .biconnected import * +from .semiconnected import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02220b878637981a06ea18ab00eb2d9892eb344e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cb05f186327d3649a1210c85d9155bd46fe891a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/attracting.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b819ffc2d8320e5d060fa5d16e7ab8c4024674a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/biconnected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa3a85a0780bf7cc2de348c6994c824ec376659 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db004fadaf7f7291a2c88d19c97bea25005cc35b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/semiconnected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4a59c85a54f08d89102c0c7b8b814b2e86192fe Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/strongly_connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6241e25a9bf45ab1ee885c202de5175c9eba440e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/__pycache__/weakly_connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/attracting.py b/MLPY/Lib/site-packages/networkx/algorithms/components/attracting.py new file mode 100644 index 0000000000000000000000000000000000000000..1cc2e15615cbfa10bc8ea7389b9be2d88a55f127 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/attracting.py @@ -0,0 +1,114 @@ +"""Attracting components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_attracting_components", + "attracting_components", + "is_attracting_component", +] + + +@not_implemented_for("undirected") +@nx._dispatch +def attracting_components(G): + """Generates the attracting components in `G`. + + An attracting component in a directed graph `G` is a strongly connected + component with the property that a random walker on the graph will never + leave the component, once it enters the component. + + The nodes in attracting components can also be thought of as recurrent + nodes. If a random walker enters the attractor containing the node, then + the node will be visited infinitely often. + + To obtain induced subgraphs on each component use: + ``(G.subgraph(c).copy() for c in attracting_components(G))`` + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + attractors : generator of sets + A generator of sets of nodes, one for each attracting component of G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + number_attracting_components + is_attracting_component + + """ + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + for n in cG: + if cG.out_degree(n) == 0: + yield scc[n] + + +@not_implemented_for("undirected") +@nx._dispatch +def number_attracting_components(G): + """Returns the number of attracting components in `G`. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + n : int + The number of attracting components in G. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + See Also + -------- + attracting_components + is_attracting_component + + """ + return sum(1 for ac in attracting_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatch +def is_attracting_component(G): + """Returns True if `G` consists of a single attracting component. + + Parameters + ---------- + G : DiGraph, MultiDiGraph + The graph to be analyzed. + + Returns + ------- + attracting : bool + True if `G` has a single attracting component. Otherwise, False. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. 
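+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3)])
+    >>> nx.is_attracting_component(G)
+    False
+    >>> G.add_edge(3, 2)
+    >>> nx.is_attracting_component(G)
+    True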
+ + See Also + -------- + attracting_components + number_attracting_components + + """ + ac = list(attracting_components(G)) + if len(ac) == 1: + return len(ac[0]) == len(G) + return False diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/biconnected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/biconnected.py new file mode 100644 index 0000000000000000000000000000000000000000..632b2d598d7c8132e37fa7d6647c48c837ab3136 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/biconnected.py @@ -0,0 +1,393 @@ +"""Biconnected components and articulation points.""" +from itertools import chain + +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "biconnected_components", + "biconnected_component_edges", + "is_biconnected", + "articulation_points", +] + + +@not_implemented_for("directed") +@nx._dispatch +def is_biconnected(G): + """Returns True if the graph is biconnected, False otherwise. + + A graph is biconnected if, and only if, it cannot be disconnected by + removing only one node (and all edges incident on that node). If + removing a node increases the number of disconnected components + in the graph, that node is called an articulation point, or cut + vertex. A biconnected graph has no articulation points. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + biconnected : bool + True if the graph is biconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_biconnected(G)) + False + >>> G.add_edge(0, 3) + >>> print(nx.is_biconnected(G)) + True + + See Also + -------- + biconnected_components + articulation_points + biconnected_component_edges + is_strongly_connected + is_weakly_connected + is_connected + is_semiconnected + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + bccs = biconnected_components(G) + try: + bcc = next(bccs) + except StopIteration: + # No bicomponents (empty graph?) + return False + try: + next(bccs) + except StopIteration: + # Only one bicomponent + return len(bcc) == len(G) + else: + # Multiple bicomponents + return False + + +@not_implemented_for("directed") +@nx._dispatch +def biconnected_component_edges(G): + """Returns a generator of lists of edges, one list for each biconnected + component of the input graph. + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. Those nodes are articulation points, or cut vertices. 
+ However, each edge belongs to one, and only one, biconnected component. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + edges : generator of lists + Generator of lists of edges, one list for each bicomponent. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 5 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents_edges = list(nx.biconnected_component_edges(G)) + >>> len(bicomponents_edges) + 1 + + See Also + -------- + is_biconnected, + biconnected_components, + articulation_points, + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + yield from _biconnected_dfs(G, components=True) + + +@not_implemented_for("directed") +@nx._dispatch +def biconnected_components(G): + """Returns a generator of sets of nodes, one set for each biconnected + component of the graph + + Biconnected components are maximal subgraphs such that the removal of a + node (and all edges incident on that node) will not disconnect the + subgraph. Note that nodes may be part of more than one biconnected + component. Those nodes are articulation points, or cut vertices. The + removal of articulation points will increase the number of connected + components of the graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + nodes : generator + Generator of sets of nodes, one set for each biconnected component. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + >>> G = nx.lollipop_graph(5, 1) + >>> print(nx.is_biconnected(G)) + False + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 2 + >>> G.add_edge(0, 5) + >>> print(nx.is_biconnected(G)) + True + >>> bicomponents = list(nx.biconnected_components(G)) + >>> len(bicomponents) + 1 + + You can generate a sorted list of biconnected components, largest + first, using sort. + + >>> G.remove_edge(0, 5) + >>> [len(c) for c in sorted(nx.biconnected_components(G), key=len, reverse=True)] + [5, 2] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. 
+ + >>> Gc = max(nx.biconnected_components(G), key=len) + + To create the components as subgraphs use: + ``(G.subgraph(c).copy() for c in biconnected_components(G))`` + + See Also + -------- + is_biconnected + articulation_points + biconnected_component_edges + k_components : this function is a special case where k=2 + bridge_components : similar to this function, but is defined using + 2-edge-connectivity instead of 2-node-connectivity. + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. doi:10.1145/362248.362272 + + """ + for comp in _biconnected_dfs(G, components=True): + yield set(chain.from_iterable(comp)) + + +@not_implemented_for("directed") +@nx._dispatch +def articulation_points(G): + """Yield the articulation points, or cut vertices, of a graph. + + An articulation point or cut vertex is any node whose removal (along with + all its incident edges) increases the number of connected components of + a graph. An undirected connected graph without articulation points is + biconnected. Articulation points belong to more than one biconnected + component of a graph. + + Notice that by convention a dyad is considered a biconnected component. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Yields + ------ + node + An articulation point in the graph. + + Raises + ------ + NetworkXNotImplemented + If the input graph is not undirected. + + Examples + -------- + + >>> G = nx.barbell_graph(4, 2) + >>> print(nx.is_biconnected(G)) + False + >>> len(list(nx.articulation_points(G))) + 4 + >>> G.add_edge(2, 8) + >>> print(nx.is_biconnected(G)) + True + >>> len(list(nx.articulation_points(G))) + 0 + + See Also + -------- + is_biconnected + biconnected_components + biconnected_component_edges + + Notes + ----- + The algorithm to find articulation points and biconnected + components is implemented using a non-recursive depth-first-search + (DFS) that keeps track of the highest level that back edges reach + in the DFS tree. A node `n` is an articulation point if, and only + if, there exists a subtree rooted at `n` such that there is no + back edge from any successor of `n` that links to a predecessor of + `n` in the DFS tree. By keeping track of all the edges traversed + by the DFS we can obtain the biconnected components because all + edges of a bicomponent will be traversed consecutively between + articulation points. + + References + ---------- + .. [1] Hopcroft, J.; Tarjan, R. (1973). + "Efficient algorithms for graph manipulation". + Communications of the ACM 16: 372–378. 
doi:10.1145/362248.362272 + + """ + seen = set() + for articulation in _biconnected_dfs(G, components=False): + if articulation not in seen: + seen.add(articulation) + yield articulation + + +@not_implemented_for("directed") +def _biconnected_dfs(G, components=True): + # depth-first search algorithm to generate articulation points + # and biconnected components + visited = set() + for start in G: + if start in visited: + continue + discovery = {start: 0} # time of first discovery of node during search + low = {start: 0} + root_children = 0 + visited.add(start) + edge_stack = [] + stack = [(start, start, iter(G[start]))] + edge_index = {} + while stack: + grandparent, parent, children = stack[-1] + try: + child = next(children) + if grandparent == child: + continue + if child in visited: + if discovery[child] <= discovery[parent]: # back edge + low[parent] = min(low[parent], discovery[child]) + if components: + edge_index[parent, child] = len(edge_stack) + edge_stack.append((parent, child)) + else: + low[child] = discovery[child] = len(discovery) + visited.add(child) + stack.append((parent, child, iter(G[child]))) + if components: + edge_index[parent, child] = len(edge_stack) + edge_stack.append((parent, child)) + + except StopIteration: + stack.pop() + if len(stack) > 1: + if low[parent] >= discovery[grandparent]: + if components: + ind = edge_index[grandparent, parent] + yield edge_stack[ind:] + del edge_stack[ind:] + + else: + yield grandparent + low[grandparent] = min(low[parent], low[grandparent]) + elif stack: # length 1 so grandparent is root + root_children += 1 + if components: + ind = edge_index[grandparent, parent] + yield edge_stack[ind:] + del edge_stack[ind:] + if not components: + # root node is articulation point if it has more than 1 child + if root_children > 1: + yield start diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/connected.py new file mode 100644 index 0000000000000000000000000000000000000000..ff4fca5d0c1326d34f1b90cdaf56ab50d6898ab5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/connected.py @@ -0,0 +1,208 @@ +"""Connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +from ...utils import arbitrary_element + +__all__ = [ + "number_connected_components", + "connected_components", + "is_connected", + "node_connected_component", +] + + +@not_implemented_for("directed") +@nx._dispatch +def connected_components(G): + """Generate connected components. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each component of G. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + Generate a sorted list of connected components, largest first. + + >>> G = nx.path_graph(4) + >>> nx.add_path(G, [10, 11, 12]) + >>> [len(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)] + [4, 3] + + If you only want the largest connected component, it's more + efficient to use max instead of sort. + + >>> largest_cc = max(nx.connected_components(G), key=len) + + To create the induced subgraph of each component use: + + >>> S = [G.subgraph(c).copy() for c in nx.connected_components(G)] + + See Also + -------- + strongly_connected_components + weakly_connected_components + + Notes + ----- + For undirected graphs only. 
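+
+    Components are discovered with a plain breadth-first search, so the
+    running time is linear in the number of nodes and edges.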
+ + """ + seen = set() + for v in G: + if v not in seen: + c = _plain_bfs(G, v) + seen.update(c) + yield c + + +@nx._dispatch +def number_connected_components(G): + """Returns the number of connected components. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + n : integer + Number of connected components + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.number_connected_components(G) + 3 + + See Also + -------- + connected_components + number_weakly_connected_components + number_strongly_connected_components + + Notes + ----- + For undirected graphs only. + + """ + return sum(1 for cc in connected_components(G)) + + +@not_implemented_for("directed") +@nx._dispatch +def is_connected(G): + """Returns True if the graph is connected, False otherwise. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + Returns + ------- + connected : bool + True if the graph is connected, false otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> print(nx.is_connected(G)) + True + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_semiconnected + is_biconnected + connected_components + + Notes + ----- + For undirected graphs only. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined ", "for the null graph." + ) + return sum(1 for node in _plain_bfs(G, arbitrary_element(G))) == len(G) + + +@not_implemented_for("directed") +@nx._dispatch +def node_connected_component(G, n): + """Returns the set of nodes in the component of graph containing node n. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph. + + n : node label + A node in G + + Returns + ------- + comp : set + A set of nodes in the component of G containing node n. + + Raises + ------ + NetworkXNotImplemented + If G is directed. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (1, 2), (5, 6), (3, 4)]) + >>> nx.node_connected_component(G, 0) # nodes of component that contains node 0 + {0, 1, 2} + + See Also + -------- + connected_components + + Notes + ----- + For undirected graphs only. + + """ + return _plain_bfs(G, n) + + +def _plain_bfs(G, source): + """A fast BFS node generator""" + adj = G._adj + n = len(adj) + seen = {source} + nextlevel = [source] + while nextlevel: + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in adj[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + if len(seen) == n: + return seen + return seen diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/semiconnected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/semiconnected.py new file mode 100644 index 0000000000000000000000000000000000000000..24a89f34d448e65db9442c773708bb1febd22394 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/semiconnected.py @@ -0,0 +1,70 @@ +"""Semiconnectedness.""" +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = ["is_semiconnected"] + + +@not_implemented_for("undirected") +@nx._dispatch +def is_semiconnected(G): + r"""Returns True if the graph is semiconnected, False otherwise. + + A graph is semiconnected if and only if for any pair of nodes, either one + is reachable from the other, or they are mutually reachable. 
+ + This function uses a theorem that states that a DAG is semiconnected + if for any topological sort, for node $v_n$ in that sort, there is an + edge $(v_n, v_{n+1})$. That allows us to check if a non-DAG `G` is + semiconnected by condensing the graph: i.e. constructing a new graph `H` + with nodes being the strongly connected components of `G`, and edges + (scc_1, scc_2) if there is an edge $(v_1, v_2)$ in `G` for some + $v_1 \in scc_1$ and $v_2 \in scc_2$. That results in a DAG, so we compute + the topological sort of `H` and check if for every $n$ there is an edge + $(scc_n, scc_{n+1})$. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + semiconnected : bool + True if the graph is semiconnected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If the input graph is undirected. + + NetworkXPointlessConcept + If the graph is empty. + + Examples + -------- + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> print(nx.is_semiconnected(G)) + True + >>> G = nx.DiGraph([(1, 2), (3, 2)]) + >>> print(nx.is_semiconnected(G)) + False + + See Also + -------- + is_strongly_connected + is_weakly_connected + is_connected + is_biconnected + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + "Connectivity is undefined for the null graph." + ) + + if not nx.is_weakly_connected(G): + return False + + H = nx.condensation(G) + + return all(H.has_edge(u, v) for u, v in pairwise(nx.topological_sort(H))) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/strongly_connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/strongly_connected.py new file mode 100644 index 0000000000000000000000000000000000000000..5bf5b9947667c3c83416ebb9593144bb07969952 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/strongly_connected.py @@ -0,0 +1,431 @@ +"""Strongly connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_strongly_connected_components", + "strongly_connected_components", + "is_strongly_connected", + "strongly_connected_components_recursive", + "kosaraju_strongly_connected_components", + "condensation", +] + + +@not_implemented_for("undirected") +@nx._dispatch +def strongly_connected_components(G): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.strongly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.strongly_connected_components(G), key=len) + + See Also + -------- + connected_components + weakly_connected_components + kosaraju_strongly_connected_components + + Notes + ----- + Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. + Nonrecursive version of algorithm. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + ..
[2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994).. + + """ + preorder = {} + lowlink = {} + scc_found = set() + scc_queue = [] + i = 0 # Preorder counter + neighbors = {v: iter(G[v]) for v in G} + for source in G: + if source not in scc_found: + queue = [source] + while queue: + v = queue[-1] + if v not in preorder: + i = i + 1 + preorder[v] = i + done = True + for w in neighbors[v]: + if w not in preorder: + queue.append(w) + done = False + break + if done: + lowlink[v] = preorder[v] + for w in G[v]: + if w not in scc_found: + if preorder[w] > preorder[v]: + lowlink[v] = min([lowlink[v], lowlink[w]]) + else: + lowlink[v] = min([lowlink[v], preorder[w]]) + queue.pop() + if lowlink[v] == preorder[v]: + scc = {v} + while scc_queue and preorder[scc_queue[-1]] > preorder[v]: + k = scc_queue.pop() + scc.add(k) + scc_found.update(scc) + yield scc + else: + scc_queue.append(v) + + +@not_implemented_for("undirected") +@nx._dispatch +def kosaraju_strongly_connected_components(G, source=None): + """Generate nodes in strongly connected components of graph. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted( + ... nx.kosaraju_strongly_connected_components(G), key=len, reverse=True + ... ) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. + + >>> largest = max(nx.kosaraju_strongly_connected_components(G), key=len) + + See Also + -------- + strongly_connected_components + + Notes + ----- + Uses Kosaraju's algorithm. + + """ + post = list(nx.dfs_postorder_nodes(G.reverse(copy=False), source=source)) + + seen = set() + while post: + r = post.pop() + if r in seen: + continue + c = nx.dfs_preorder_nodes(G, r) + new = {v for v in c if v not in seen} + seen.update(new) + yield new + + +@not_implemented_for("undirected") +@nx._dispatch +def strongly_connected_components_recursive(G): + """Generate nodes in strongly connected components of graph. + + .. deprecated:: 3.2 + + This function is deprecated and will be removed in a future version of + NetworkX. Use `strongly_connected_components` instead. + + Recursive version of algorithm. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each strongly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of strongly connected components, largest first. + + >>> G = nx.cycle_graph(4, create_using=nx.DiGraph()) + >>> nx.add_cycle(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted( + ... nx.strongly_connected_components_recursive(G), key=len, reverse=True + ... ) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort. 
+ + >>> largest = max(nx.strongly_connected_components_recursive(G), key=len) + + To create the induced subgraph of the components use: + >>> S = [G.subgraph(c).copy() for c in nx.strongly_connected_components(G)] + + See Also + -------- + connected_components + + Notes + ----- + Uses Tarjan's algorithm[1]_ with Nuutila's modifications[2]_. + + References + ---------- + .. [1] Depth-first search and linear graph algorithms, R. Tarjan + SIAM Journal of Computing 1(2):146-160, (1972). + + .. [2] On finding the strongly connected components in a directed graph. + E. Nuutila and E. Soisalon-Soinen + Information Processing Letters 49(1): 9-14, (1994). + + """ + import warnings + + warnings.warn( + ( + "\n\nstrongly_connected_components_recursive is deprecated and will be\n" + "removed in the future. Use strongly_connected_components instead." + ), + category=DeprecationWarning, + stacklevel=2, + ) + + yield from strongly_connected_components(G) + + +@not_implemented_for("undirected") +@nx._dispatch +def number_strongly_connected_components(G): + """Returns the number of strongly connected components in a graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of strongly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 3), (4, 5), (3, 4), (5, 6), (6, 3), (6, 7)]) + >>> nx.number_strongly_connected_components(G) + 3 + + See Also + -------- + strongly_connected_components + number_connected_components + number_weakly_connected_components + + Notes + ----- + For directed graphs only. + """ + return sum(1 for scc in strongly_connected_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatch +def is_strongly_connected(G): + """Test directed graph for strong connectivity. + + A directed graph is strongly connected if and only if every vertex in + the graph is reachable from every other vertex. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is strongly connected, False otherwise. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (2, 4), (4, 2)]) + >>> nx.is_strongly_connected(G) + True + >>> G.remove_edge(2, 3) + >>> nx.is_strongly_connected(G) + False + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + See Also + -------- + is_weakly_connected + is_semiconnected + is_connected + is_biconnected + strongly_connected_components + + Notes + ----- + For directed graphs only. + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(strongly_connected_components(G))) == len(G) + + +@not_implemented_for("undirected") +@nx._dispatch +def condensation(G, scc=None): + """Returns the condensation of G. + + The condensation of G is the graph with each of the strongly connected + components contracted into a single node. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph. + + scc : list or generator (optional, default=None) + Strongly connected components. If provided, the elements in + `scc` must partition the nodes in `G`. If not provided, it will be + calculated as scc=nx.strongly_connected_components(G). + + Returns + ------- + C : NetworkX DiGraph + The condensation graph C of G.
The node labels are integers + corresponding to the index of the component in the list of + strongly connected components of G. C has a graph attribute named + 'mapping' with a dictionary mapping the original nodes to the + nodes in C to which they belong. Each node in C also has a node + attribute 'members' with the set of original nodes in G that + form the SCC that the node in C represents. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Contracting two sets of strongly connected nodes into two distinct SCC + using the barbell graph. + + >>> G = nx.barbell_graph(4, 0) + >>> G.remove_edge(3, 4) + >>> G = nx.DiGraph(G) + >>> H = nx.condensation(G) + >>> H.nodes.data() + NodeDataView({0: {'members': {0, 1, 2, 3}}, 1: {'members': {4, 5, 6, 7}}}) + >>> H.graph['mapping'] + {0: 0, 1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1} + + Contracting a complete graph into one single SCC. + + >>> G = nx.complete_graph(7, create_using=nx.DiGraph) + >>> H = nx.condensation(G) + >>> H.nodes + NodeView((0,)) + >>> H.nodes.data() + NodeDataView({0: {'members': {0, 1, 2, 3, 4, 5, 6}}}) + + Notes + ----- + After contracting all strongly connected components to a single node, + the resulting graph is a directed acyclic graph. + + """ + if scc is None: + scc = nx.strongly_connected_components(G) + mapping = {} + members = {} + C = nx.DiGraph() + # Add mapping dict as graph attribute + C.graph["mapping"] = mapping + if len(G) == 0: + return C + for i, component in enumerate(scc): + members[i] = component + mapping.update((n, i) for n in component) + number_of_components = i + 1 + C.add_nodes_from(range(number_of_components)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. 
+ nx.set_node_attributes(C, members, "members") + return C diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d65c25df965291a172220a7de55ce60520d6c650 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87233cb66841adf564c22f798cfb99775c655c37 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96d9f4e804c4d1edf7d35258267f8257b201580a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1910dc457d132b6653c5b8219ef796c61a0bc163 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1919a3a1078fac83331f09f28328b1e08fd2a750 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c45c2b292a3b6d60323a09a5a46110c32c17dc5d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c6500e54a7b71c47962c55d4bd0079cbf017e4 Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_attracting.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_attracting.py new file mode 100644 index 0000000000000000000000000000000000000000..336c40ddc27162c1c2f5cc245f4fc840311506b5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_attracting.py @@ -0,0 +1,70 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestAttractingComponents: + @classmethod + def setup_class(cls): + cls.G1 = nx.DiGraph() + cls.G1.add_edges_from( + [ + (5, 11), + (11, 2), + (11, 9), + (11, 10), + (7, 11), + (7, 8), + (8, 9), + (3, 8), + (3, 10), + ] + ) + cls.G2 = nx.DiGraph() + cls.G2.add_edges_from([(0, 1), (0, 2), (1, 1), (1, 2), (2, 1)]) + + cls.G3 = nx.DiGraph() + cls.G3.add_edges_from([(0, 1), (1, 2), (2, 1), (0, 3), (3, 4), (4, 3)]) + + cls.G4 = nx.DiGraph() + + def test_attracting_components(self): + ac = list(nx.attracting_components(self.G1)) + assert {2} in ac + assert {9} in ac + assert {10} in ac + + ac = list(nx.attracting_components(self.G2)) + ac = [tuple(sorted(x)) for x in ac] + assert ac == [(1, 2)] + + ac = list(nx.attracting_components(self.G3)) + ac = [tuple(sorted(x)) for x in ac] + assert (1, 2) in ac + assert (3, 4) in ac + assert len(ac) == 2 + + ac = list(nx.attracting_components(self.G4)) + assert ac == [] + + def test_number_attacting_components(self): + assert nx.number_attracting_components(self.G1) == 3 + assert nx.number_attracting_components(self.G2) == 1 + assert nx.number_attracting_components(self.G3) == 2 + assert nx.number_attracting_components(self.G4) == 0 + + def test_is_attracting_component(self): + assert not nx.is_attracting_component(self.G1) + assert not nx.is_attracting_component(self.G2) + assert not nx.is_attracting_component(self.G3) + g2 = self.G3.subgraph([1, 2]) + assert nx.is_attracting_component(g2) + assert not nx.is_attracting_component(self.G4) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.attracting_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_attracting_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_attracting_component, G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_biconnected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_biconnected.py new file mode 100644 index 0000000000000000000000000000000000000000..19d2d8831ced26a516d101e735b6701f39865c1b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_biconnected.py @@ -0,0 +1,248 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +def assert_components_edges_equal(x, y): + sx = {frozenset(frozenset(e) for e in c) for c in x} + sy = {frozenset(frozenset(e) for e in c) for c in y} + assert sx == sy + + +def assert_components_equal(x, y): + sx = {frozenset(c) for c in x} + sy = {frozenset(c) for c in y} + assert sx == sy + + +def test_barbell(): + G = nx.barbell_graph(8, 4) + nx.add_path(G, [7, 20, 21, 22]) + nx.add_cycle(G, [22, 23, 24, 25]) + pts = set(nx.articulation_points(G)) + assert pts == {7, 8, 9, 10, 11, 12, 20, 21, 22} + + answer = [ + {12, 13, 14, 15, 16, 17, 18, 19}, + {0, 1, 2, 3, 4, 5, 6, 7}, + {22, 23, 24, 25}, + {11, 12}, + {10, 11}, + {9, 10}, + {8, 9}, + {7, 8}, + {21, 
22}, + {20, 21}, + {7, 20}, + ] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + G.add_edge(2, 17) + pts = set(nx.articulation_points(G)) + assert pts == {7, 20, 21, 22} + + +def test_articulation_points_repetitions(): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3)]) + assert list(nx.articulation_points(G)) == [1] + + +def test_articulation_points_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + pts = set(nx.articulation_points(G)) + assert pts == {1} + + +def test_is_biconnected(): + G = nx.cycle_graph(3) + assert nx.is_biconnected(G) + nx.add_cycle(G, [1, 3, 4]) + assert not nx.is_biconnected(G) + + +def test_empty_is_biconnected(): + G = nx.empty_graph(5) + assert not nx.is_biconnected(G) + G.add_edge(0, 1) + assert not nx.is_biconnected(G) + + +def test_biconnected_components_cycle(): + G = nx.cycle_graph(3) + nx.add_cycle(G, [1, 3, 4]) + answer = [{0, 1, 2}, {1, 3, 4}] + assert_components_equal(list(nx.biconnected_components(G)), answer) + + +def test_biconnected_components1(): + # graph example from + # https://web.archive.org/web/20121229123447/http://www.ibluemojo.com/school/articul_algorithm.html + edges = [ + (0, 1), + (0, 5), + (0, 6), + (0, 14), + (1, 5), + (1, 6), + (1, 14), + (2, 4), + (2, 10), + (3, 4), + (3, 15), + (4, 6), + (4, 7), + (4, 10), + (5, 14), + (6, 14), + (7, 9), + (8, 9), + (8, 12), + (8, 13), + (10, 15), + (11, 12), + (11, 13), + (12, 13), + ] + G = nx.Graph(edges) + pts = set(nx.articulation_points(G)) + assert pts == {4, 6, 7, 8, 9} + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [(3, 4), (15, 3), (10, 15), (10, 4), (2, 10), (4, 2)], + [(13, 12), (13, 8), (11, 13), (12, 11), (8, 12)], + [(9, 8)], + [(7, 9)], + [(4, 7)], + [(6, 4)], + [(14, 0), (5, 1), (5, 0), (14, 5), (14, 1), (6, 14), (6, 0), (1, 6), (0, 1)], + ] + assert_components_edges_equal(comps, answer) + + +def test_biconnected_components2(): + G = nx.Graph() + nx.add_cycle(G, "ABC") + nx.add_cycle(G, "CDE") + nx.add_cycle(G, "FIJHG") + nx.add_cycle(G, "GIJ") + G.add_edge("E", "G") + comps = list(nx.biconnected_component_edges(G)) + answer = [ + [ + tuple("GF"), + tuple("FI"), + tuple("IG"), + tuple("IJ"), + tuple("JG"), + tuple("JH"), + tuple("HG"), + ], + [tuple("EG")], + [tuple("CD"), tuple("DE"), tuple("CE")], + [tuple("AB"), tuple("BC"), tuple("AC")], + ] + assert_components_edges_equal(comps, answer) + + +def test_biconnected_davis(): + D = nx.davis_southern_women_graph() + bcc = list(nx.biconnected_components(D))[0] + assert set(D) == bcc # All nodes in a giant bicomponent + # So no articulation points + assert len(list(nx.articulation_points(D))) == 0 + + +def test_biconnected_karate(): + K = nx.karate_club_graph() + answer = [ + { + 0, + 1, + 2, + 3, + 7, + 8, + 9, + 12, + 13, + 14, + 15, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + }, + {0, 4, 5, 6, 10, 16}, + {0, 11}, + ] + bcc = list(nx.biconnected_components(K)) + assert_components_equal(bcc, answer) + assert set(nx.articulation_points(K)) == {0} + + +def test_biconnected_eppstein(): + # tests from http://www.ics.uci.edu/~eppstein/PADS/Biconnectivity.py + G1 = nx.Graph( + { + 0: [1, 2, 5], + 1: [0, 5], + 2: [0, 3, 4], + 3: [2, 4, 5, 6], + 4: [2, 3, 5, 6], + 5: [0, 1, 3, 4], + 6: [3, 4], + } + ) + G2 = nx.Graph( + { + 0: [2, 5], + 1: [3, 8], + 2: [0, 3, 5], + 3: [1, 2, 6, 8], + 4: [7], + 5: [0, 2], + 6: [3, 8], + 7: [4], + 8: [1, 3, 6], + } + ) + assert nx.is_biconnected(G1) + assert not nx.is_biconnected(G2) + 
answer_G2 = [{1, 3, 6, 8}, {0, 2, 5}, {2, 3}, {4, 7}] + bcc = list(nx.biconnected_components(G2)) + assert_components_equal(bcc, answer_G2) + + +def test_null_graph(): + G = nx.Graph() + assert not nx.is_biconnected(G) + assert list(nx.biconnected_components(G)) == [] + assert list(nx.biconnected_component_edges(G)) == [] + assert list(nx.articulation_points(G)) == [] + + +def test_connected_raise(): + DG = nx.DiGraph() + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_components(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.biconnected_component_edges(DG)) + with pytest.raises(NetworkXNotImplemented): + next(nx.articulation_points(DG)) + pytest.raises(NetworkXNotImplemented, nx.is_biconnected, DG) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_connected.py new file mode 100644 index 0000000000000000000000000000000000000000..4c9b8d28fd56f55d97e14e9cb8fb07742d055bc2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_connected.py @@ -0,0 +1,117 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented +from networkx import convert_node_labels_to_integers as cnlti +from networkx.classes.tests import dispatch_interface + + +class TestConnected: + @classmethod + def setup_class(cls): + G1 = cnlti(nx.grid_2d_graph(2, 2), first_label=0, ordering="sorted") + G2 = cnlti(nx.lollipop_graph(3, 3), first_label=4, ordering="sorted") + G3 = cnlti(nx.house_graph(), first_label=10, ordering="sorted") + cls.G = nx.union(G1, G2) + cls.G = nx.union(cls.G, G3) + cls.DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1) + + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + C = [] + cls.gc.append((G, C)) + + # This additionally tests the @nx._dispatch mechanism, treating + # nx.connected_components as if it were a re-implementation from another package + @pytest.mark.parametrize("wrapper", [lambda x: x, dispatch_interface.convert]) + def test_connected_components(self, wrapper): + cc = nx.connected_components + G = wrapper(self.G) + C = { + frozenset([0, 1, 2, 3]), + frozenset([4, 5, 6, 7, 8, 9]), + frozenset([10, 11, 12, 13, 14]), + } + assert {frozenset(g) for g in cc(G)} == C + + def test_number_connected_components(self): + ncc = nx.number_connected_components + assert ncc(self.G) == 3 + + def test_number_connected_components2(self): + ncc = nx.number_connected_components + assert ncc(self.grid) == 1 + + def test_connected_components2(self): + cc = nx.connected_components + G = self.grid + C = {frozenset([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])} + assert {frozenset(g) for g in cc(G)} == C + + def 
test_node_connected_components(self): + ncc = nx.node_connected_component + G = self.grid + C = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + assert ncc(G, 1) == C + + def test_is_connected(self): + assert nx.is_connected(self.grid) + G = nx.Graph() + G.add_nodes_from([1, 2]) + assert not nx.is_connected(G) + + def test_connected_raise(self): + with pytest.raises(NetworkXNotImplemented): + next(nx.connected_components(self.DG)) + pytest.raises(NetworkXNotImplemented, nx.number_connected_components, self.DG) + pytest.raises(NetworkXNotImplemented, nx.node_connected_component, self.DG, 1) + pytest.raises(NetworkXNotImplemented, nx.is_connected, self.DG) + pytest.raises(nx.NetworkXPointlessConcept, nx.is_connected, nx.Graph()) + + def test_connected_mutability(self): + G = self.grid + seen = set() + for component in nx.connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_semiconnected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_semiconnected.py new file mode 100644 index 0000000000000000000000000000000000000000..6376bbfb12a061e1724b0c74d2614e116149d8bf --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_semiconnected.py @@ -0,0 +1,55 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +class TestIsSemiconnected: + def test_undirected(self): + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, nx.is_semiconnected, nx.MultiGraph()) + + def test_empty(self): + pytest.raises(nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.DiGraph()) + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_semiconnected, nx.MultiDiGraph() + ) + + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.is_semiconnected(G) + + def test_path(self): + G = nx.path_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G.add_edge(100, 99) + assert not nx.is_semiconnected(G) + + def test_cycle(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + assert nx.is_semiconnected(G) + G = nx.path_graph(100, create_using=nx.DiGraph()) + G.add_edge(0, 99) + assert nx.is_semiconnected(G) + + def test_tree(self): + G = nx.DiGraph() + G.add_edges_from( + chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)] for i in range(100)) + ) + assert not nx.is_semiconnected(G) + + def test_dumbbell(self): + G = nx.cycle_graph(100, create_using=nx.DiGraph()) + G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100)) + assert not nx.is_semiconnected(G) # G is disconnected. 
+ G.add_edge(100, 99) + assert nx.is_semiconnected(G) + + def test_alternating_path(self): + G = nx.DiGraph( + chain.from_iterable([(i, i - 1), (i, i + 1)] for i in range(0, 100, 2)) + ) + assert not nx.is_semiconnected(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py new file mode 100644 index 0000000000000000000000000000000000000000..f1c773026a682bd8edc6bfaf07581f5152c912e8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_strongly_connected.py @@ -0,0 +1,207 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestStronglyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = {frozenset([3, 4, 5, 7]), frozenset([1, 2, 8]), frozenset([6])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = {frozenset([2, 3, 4]), frozenset([1])} + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = {frozenset([1, 2, 3])} + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: [2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = { + frozenset([0]), + frozenset([1]), + frozenset([2]), + frozenset([3]), + frozenset([4]), + frozenset([5]), + frozenset([6]), + } + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = {frozenset([0, 1, 2]), frozenset([3, 4])} + cls.gc.append((G, C)) + + def test_tarjan(self): + scc = nx.strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_tarjan_recursive(self): + scc = nx.strongly_connected_components_recursive + for G, C in self.gc: + with pytest.deprecated_call(): + assert {frozenset(g) for g in scc(G)} == C + + def test_kosaraju(self): + scc = nx.kosaraju_strongly_connected_components + for G, C in self.gc: + assert {frozenset(g) for g in scc(G)} == C + + def test_number_strongly_connected_components(self): + ncc = nx.number_strongly_connected_components + for G, C in self.gc: + assert ncc(G) == len(C) + + def test_is_strongly_connected(self): + for G, C in self.gc: + if len(C) == 1: + assert nx.is_strongly_connected(G) + else: + assert not nx.is_strongly_connected(G) + + def test_contract_scc1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + # DAG + assert nx.is_directed_acyclic_graph(cG) + # nodes + assert sorted(cG.nodes()) == [0, 1, 2, 3] + # edges + mapping = {} + for i, component in enumerate(scc): + for n in component: + mapping[n] = i + edge = (mapping[2], mapping[3]) + assert cG.has_edge(*edge) + edge = (mapping[2], mapping[5]) + assert cG.has_edge(*edge) + edge = (mapping[3], mapping[5]) + assert cG.has_edge(*edge) + + def test_contract_scc_isolate(self): + # Bug found and fixed in [1687]. 
+ G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert list(cG.nodes()) == [0] + assert list(cG.edges()) == [] + + def test_contract_scc_edge(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(2, 1) + G.add_edge(2, 3) + G.add_edge(3, 4) + G.add_edge(4, 3) + scc = list(nx.strongly_connected_components(G)) + cG = nx.condensation(G, scc) + assert sorted(cG.nodes()) == [0, 1] + if 1 in scc[0]: + edge = (0, 1) + else: + edge = (1, 0) + assert list(cG.edges()) == [edge] + + def test_condensation_mapping_and_members(self): + G, C = self.gc[1] + C = sorted(C, key=len, reverse=True) + cG = nx.condensation(G) + mapping = cG.graph["mapping"] + assert all(n in G for n in mapping) + assert all(0 == cN for n, cN in mapping.items() if n in C[0]) + assert all(1 == cN for n, cN in mapping.items() if n in C[1]) + for n, d in cG.nodes(data=True): + assert set(C[n]) == cG.nodes[n]["members"] + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.strongly_connected_components(G)) == [] + assert list(nx.kosaraju_strongly_connected_components(G)) == [] + with pytest.deprecated_call(): + assert list(nx.strongly_connected_components_recursive(G)) == [] + assert len(nx.condensation(G)) == 0 + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph() + ) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.strongly_connected_components(G)) + with pytest.raises(NetworkXNotImplemented): + next(nx.kosaraju_strongly_connected_components(G)) + with pytest.raises(NetworkXNotImplemented): + with pytest.deprecated_call(): + next(nx.strongly_connected_components_recursive(G)) + pytest.raises(NetworkXNotImplemented, nx.is_strongly_connected, G) + pytest.raises( + nx.NetworkXPointlessConcept, nx.is_strongly_connected, nx.DiGraph() + ) + pytest.raises(NetworkXNotImplemented, nx.condensation, G) + + strong_cc_methods = ( + nx.strongly_connected_components, + nx.kosaraju_strongly_connected_components, + ) + + @pytest.mark.parametrize("get_components", strong_cc_methods) + def test_connected_mutability(self, get_components): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in get_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py new file mode 100644 index 0000000000000000000000000000000000000000..e313263668c07ad7b7a3cb2ad8f1b74b08347a14 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/tests/test_weakly_connected.py @@ -0,0 +1,90 @@ +import pytest + +import networkx as nx +from networkx import NetworkXNotImplemented + + +class TestWeaklyConnected: + @classmethod + def setup_class(cls): + cls.gc = [] + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 8), + (3, 4), + (3, 7), + (4, 5), + (5, 3), + (5, 6), + (7, 4), + (7, 6), + (8, 1), + (8, 7), + ] + ) + C = [[3, 4, 5, 7], [1, 2, 8], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (1, 3), (1, 4), (4, 2), (3, 4), (2, 3)]) + C = [[2, 3, 4], [1]] + cls.gc.append((G, C)) + + G = nx.DiGraph() + G.add_edges_from([(1, 2), (2, 3), (3, 2), (2, 1)]) + C = [[1, 2, 3]] + cls.gc.append((G, C)) + + # Eppstein's tests + G = nx.DiGraph({0: [1], 1: 
[2, 3], 2: [4, 5], 3: [4, 5], 4: [6], 5: [], 6: []}) + C = [[0], [1], [2], [3], [4], [5], [6]] + cls.gc.append((G, C)) + + G = nx.DiGraph({0: [1], 1: [2, 3, 4], 2: [0, 3], 3: [4], 4: [3]}) + C = [[0, 1, 2], [3, 4]] + cls.gc.append((G, C)) + + def test_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = {frozenset(g) for g in nx.weakly_connected_components(G)} + c = {frozenset(g) for g in nx.connected_components(U)} + assert w == c + + def test_number_weakly_connected_components(self): + for G, C in self.gc: + U = G.to_undirected() + w = nx.number_weakly_connected_components(G) + c = nx.number_connected_components(U) + assert w == c + + def test_is_weakly_connected(self): + for G, C in self.gc: + U = G.to_undirected() + assert nx.is_weakly_connected(G) == nx.is_connected(U) + + def test_null_graph(self): + G = nx.DiGraph() + assert list(nx.weakly_connected_components(G)) == [] + assert nx.number_weakly_connected_components(G) == 0 + with pytest.raises(nx.NetworkXPointlessConcept): + next(nx.is_weakly_connected(G)) + + def test_connected_raise(self): + G = nx.Graph() + with pytest.raises(NetworkXNotImplemented): + next(nx.weakly_connected_components(G)) + pytest.raises(NetworkXNotImplemented, nx.number_weakly_connected_components, G) + pytest.raises(NetworkXNotImplemented, nx.is_weakly_connected, G) + + def test_connected_mutability(self): + DG = nx.path_graph(5, create_using=nx.DiGraph) + G = nx.disjoint_union(DG, DG) + seen = set() + for component in nx.weakly_connected_components(G): + assert len(seen & component) == 0 + seen.update(component) + component.clear() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/components/weakly_connected.py b/MLPY/Lib/site-packages/networkx/algorithms/components/weakly_connected.py new file mode 100644 index 0000000000000000000000000000000000000000..c8dc2350ef16f01c4b41426fd8d945a05b186511 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/components/weakly_connected.py @@ -0,0 +1,196 @@ +"""Weakly connected components.""" +import networkx as nx +from networkx.utils.decorators import not_implemented_for + +__all__ = [ + "number_weakly_connected_components", + "weakly_connected_components", + "is_weakly_connected", +] + + +@not_implemented_for("undirected") +@nx._dispatch +def weakly_connected_components(G): + """Generate weakly connected components of G. + + Parameters + ---------- + G : NetworkX graph + A directed graph + + Returns + ------- + comp : generator of sets + A generator of sets of nodes, one for each weakly connected + component of G. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + Generate a sorted list of weakly connected components, largest first. + + >>> G = nx.path_graph(4, create_using=nx.DiGraph()) + >>> nx.add_path(G, [10, 11, 12]) + >>> [ + ... len(c) + ... for c in sorted(nx.weakly_connected_components(G), key=len, reverse=True) + ... ] + [4, 3] + + If you only want the largest component, it's more efficient to + use max instead of sort: + + >>> largest_cc = max(nx.weakly_connected_components(G), key=len) + + See Also + -------- + connected_components + strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + seen = set() + for v in G: + if v not in seen: + c = set(_plain_bfs(G, v)) + seen.update(c) + yield c + + +@not_implemented_for("undirected") +@nx._dispatch +def number_weakly_connected_components(G): + """Returns the number of weakly connected components in G. 
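Since weak connectivity simply ignores edge direction, the components produced here coincide with the plain connected components of the undirected copy of the graph, which makes for a quick sanity check:

```python
import networkx as nx

# Weakly connected components of G match the connected components of its
# undirected copy.
G = nx.DiGraph([(0, 1), (2, 1), (3, 4)])
wccs = {frozenset(c) for c in nx.weakly_connected_components(G)}
ccs = {frozenset(c) for c in nx.connected_components(G.to_undirected())}
assert wccs == ccs == {frozenset({0, 1, 2}), frozenset({3, 4})}
assert nx.number_weakly_connected_components(G) == 2
```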
+ + Parameters + ---------- + G : NetworkX graph + A directed graph. + + Returns + ------- + n : integer + Number of weakly connected components + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1), (3, 4)]) + >>> nx.number_weakly_connected_components(G) + 2 + + See Also + -------- + weakly_connected_components + number_connected_components + number_strongly_connected_components + + Notes + ----- + For directed graphs only. + + """ + return sum(1 for wcc in weakly_connected_components(G)) + + +@not_implemented_for("undirected") +@nx._dispatch +def is_weakly_connected(G): + """Test directed graph for weak connectivity. + + A directed graph is weakly connected if and only if the graph + is connected when the direction of the edge between nodes is ignored. + + Note that if a graph is strongly connected (i.e. the graph is connected + even when we account for directionality), it is by definition weakly + connected as well. + + Parameters + ---------- + G : NetworkX Graph + A directed graph. + + Returns + ------- + connected : bool + True if the graph is weakly connected, False otherwise. + + Raises + ------ + NetworkXNotImplemented + If G is undirected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (2, 1)]) + >>> G.add_node(3) + >>> nx.is_weakly_connected(G) # node 3 is not connected to the graph + False + >>> G.add_edge(2, 3) + >>> nx.is_weakly_connected(G) + True + + See Also + -------- + is_strongly_connected + is_semiconnected + is_connected + is_biconnected + weakly_connected_components + + Notes + ----- + For directed graphs only. + + """ + if len(G) == 0: + raise nx.NetworkXPointlessConcept( + """Connectivity is undefined for the null graph.""" + ) + + return len(next(weakly_connected_components(G))) == len(G) + + +def _plain_bfs(G, source): + """A fast BFS node generator + + The direction of the edge between nodes is ignored. + + For directed graphs only. 
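In terms of the public API, the traversal below is an ordinary BFS whose frontier expands through successors and predecessors alike; a minimal sketch:

```python
from itertools import chain

import networkx as nx

def undirected_reachable(G, source):
    """BFS over a digraph as if its arcs were undirected (sketch only)."""
    seen = {source}
    frontier = [source]
    while frontier:
        nextlevel = []
        for v in frontier:
            # Follow arcs out of v and into v, ignoring their direction.
            for w in chain(G.successors(v), G.predecessors(v)):
                if w not in seen:
                    seen.add(w)
                    nextlevel.append(w)
        frontier = nextlevel
    return seen

G = nx.DiGraph([(0, 1), (2, 1)])  # 0 -> 1 <- 2: a single weak component
assert undirected_reachable(G, 0) == {0, 1, 2}
```

The shipped helper reads `G._succ` and `G._pred` directly and yields nodes as they are discovered; the behavior is the same.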
+ + """ + n = len(G) + Gsucc = G._succ + Gpred = G._pred + seen = {source} + nextlevel = [source] + + yield source + while nextlevel: + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in Gsucc[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield w + for w in Gpred[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield w + if len(seen) == n: + return diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..15bc5abe5d0e1e0db9d152ccd39b9bf87f2533ee --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__init__.py @@ -0,0 +1,11 @@ +"""Connectivity and cut algorithms +""" +from .connectivity import * +from .cuts import * +from .edge_augmentation import * +from .edge_kcomponents import * +from .disjoint_paths import * +from .kcomponents import * +from .kcutsets import * +from .stoerwagner import * +from .utils import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdf13e6a050514f4c8dd2a5bbbe351d893dc2254 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a886d677a66c604e0c7dd57b5ba2a5f630fedd9c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..137459479c4f732d1844423c5f570b01c2605886 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/cuts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13f4b6cf487260621d05437a4c37ce5edea0a699 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce2b611d935df0f195f52eb1b9ca2abd13ae86d1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c2cd60cfc2a25fcd95364e4eb095f4e1c7a31e46 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff51f5e1c947e2fe3f971919043615a4b2ceaadc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44eefd3742a32e7fc71eb1c3765689303212fd96 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..000432b29fe979a63c6a03016019d8b087515d8d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72fc49d0433e60dd028abd55f9422f960dbdccc4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb34152bba9b93fcdc2aa5fa2a0774ee4ef197c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/connectivity.py @@ -0,0 +1,826 @@ +""" +Flow based connectivity algorithms +""" + +import itertools +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# connectivity algorithms. +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_residual_network, + dinitz, + edmonds_karp, + shortest_augmenting_path, +) + +default_flow_func = edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = [ + "average_node_connectivity", + "local_node_connectivity", + "node_connectivity", + "local_edge_connectivity", + "edge_connectivity", + "all_pairs_node_connectivity", +] + + +@nx._dispatch( + graphs={"G": 0, "auxiliary?": 4, "residual?": 5}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"auxiliary", "residual"}, +) +def local_node_connectivity( + G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None +): + r"""Computes local node connectivity for nodes s and t. 
+ + Local node connectivity for two non adjacent nodes s and t is the + minimum number of nodes that must be removed (along with their incident + edges) to disconnect them. + + This is a flow based implementation of node connectivity. We compute the + maximum flow on an auxiliary digraph build from the original input + graph (see below for details). + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + s : node + Source node + + t : node + Target node + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + cutoff : integer, float, or None (default: None) + If specified, the maximum flow algorithm will terminate when the + flow value reaches or exceeds the cutoff. This only works for flows + that support the cutoff parameter (most do) and is ignored otherwise. + + Returns + ------- + K : integer + local node connectivity for nodes s and t + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import local_node_connectivity + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> local_node_connectivity(G, 0, 6) + 5 + + If you need to compute local connectivity on several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity, and the residual + network for the underlying maximum flow computation. + + Example of how to compute local node connectivity among + all pairs of nodes of the platonic icosahedral graph reusing + the data structures. + + >>> import itertools + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + ... + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> result = dict.fromkeys(G, dict()) + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as parameters + >>> for u, v in itertools.combinations(G, 2): + ... k = local_node_connectivity(G, u, v, auxiliary=H, residual=R) + ... result[u][v] = k + ... 
+ >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2)) + True + + You can also use alternative flow algorithms for computing node + connectivity. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> local_node_connectivity(G, 0, 6, flow_func=shortest_augmenting_path) + 5 + + Notes + ----- + This is a flow based implementation of node connectivity. We compute the + maximum flow using, by default, the :meth:`edmonds_karp` algorithm (see: + :meth:`maximum_flow`) on an auxiliary digraph build from the original + input graph: + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph H with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc in H. Then for each edge (`u`, `v`) in G we add two arcs + (`u_B`, `v_A`) and (`v_B`, `u_A`) in H. Finally we set the attribute + capacity = 1 for each arc in H [1]_ . + + For a directed graph G having `n` nodes and `m` arcs we derive a + directed graph H with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `v_A`, `v_B` linked by an (internal) + arc (`v_A`, `v_B`) in H. Then for each arc (`u`, `v`) in G we add one arc + (`u_B`, `v_A`) in H. Finally we set the attribute capacity = 1 for + each arc in H. + + This is equal to the local node connectivity because the value of + a maximum s-t-flow is equal to the capacity of a minimum s-t-cut. + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`node_connectivity` + :meth:`minimum_node_cut` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + + kwargs = {"flow_func": flow_func, "residual": residual} + if flow_func is shortest_augmenting_path: + kwargs["cutoff"] = cutoff + kwargs["two_phase"] = True + elif flow_func is edmonds_karp: + kwargs["cutoff"] = cutoff + elif flow_func is dinitz: + kwargs["cutoff"] = cutoff + elif flow_func is boykov_kolmogorov: + kwargs["cutoff"] = cutoff + + return nx.maximum_flow_value(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + + +@nx._dispatch +def node_connectivity(G, s=None, t=None, flow_func=None): + r"""Returns node connectivity for a graph or digraph G. + + Node connectivity is equal to the minimum number of nodes that + must be removed to disconnect G or render it trivial. If source + and target nodes are provided, this function returns the local node + connectivity: the minimum number of nodes that must be removed to break + all paths from source to target in G. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    K : integer
+        Node connectivity of G, or local node connectivity if source
+        and target are provided.
+
+    Examples
+    --------
+    >>> # Platonic icosahedral graph is 5-node-connected
+    >>> G = nx.icosahedral_graph()
+    >>> nx.node_connectivity(G)
+    5
+
+    You can use alternative flow algorithms for the underlying maximum
+    flow computation. In dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better
+    than the default :meth:`edmonds_karp`, which is faster for
+    sparse networks with highly skewed degree distributions. Alternative
+    flow functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> nx.node_connectivity(G, flow_func=shortest_augmenting_path)
+    5
+
+    If you specify a pair of nodes (source and target) as parameters,
+    this function returns the value of local node connectivity.
+
+    >>> nx.node_connectivity(G, 3, 7)
+    5
+
+    If you need to perform several local computations among different
+    pairs of nodes on the same graph, it is recommended that you reuse
+    the data structures used in the maximum flow computations. See
+    :meth:`local_node_connectivity` for details.
+
+    Notes
+    -----
+    This is a flow based implementation of node connectivity. The
+    algorithm works by solving $O(n-\delta-1+\delta(\delta-1)/2)$
+    maximum flow problems on an auxiliary digraph, where $\delta$
+    is the minimum degree of G. For details about the auxiliary
+    digraph and the computation of local node connectivity see
+    :meth:`local_node_connectivity`. This implementation is based
+    on algorithm 11 in [1]_.
+
+    See also
+    --------
+    :meth:`local_node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local node connectivity
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return local_node_connectivity(G, s, t, flow_func=flow_func)
+
+    # Global node connectivity
+    if G.is_directed():
+        if not nx.is_weakly_connected(G):
+            return 0
+        iter_func = itertools.permutations
+        # It is necessary to consider both predecessors
+        # and successors for directed graphs
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        if not nx.is_connected(G):
+            return 0
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    # Reuse the auxiliary digraph and the residual network
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    # Pick a node with minimum degree
+    # Node connectivity is bounded by degree.
+    v, K = min(G.degree(), key=itemgetter(1))
+    # Compute local node connectivity between v and all its non-neighbor nodes
+    for w in set(G) - set(neighbors(v)) - {v}:
+        kwargs["cutoff"] = K
+        K = min(K, local_node_connectivity(G, v, w, **kwargs))
+    # Also for non-adjacent pairs of neighbors of v
+    for x, y in iter_func(neighbors(v), 2):
+        if y in G[x]:
+            continue
+        kwargs["cutoff"] = K
+        K = min(K, local_node_connectivity(G, x, y, **kwargs))
+
+    return K
+
+
+@nx._dispatch
+def average_node_connectivity(G, flow_func=None):
+    r"""Returns the average connectivity of a graph G.
+
+    The average connectivity `\bar{\kappa}` of a graph G is the average
+    of local node connectivity over all pairs of nodes of G [1]_.
+
+    .. math::
+
+        \bar{\kappa}(G) = \frac{\sum_{u,v} \kappa_{G}(u,v)}{{n \choose 2}}
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See :meth:`local_node_connectivity`
+        for details. The choice of the default function may change from
+        version to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    K : float
+        Average node connectivity
+
+    See also
+    --------
+    :meth:`local_node_connectivity`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Beineke, L., O. Oellermann, and R. Pippert (2002). The average
+        connectivity of a graph. Discrete Mathematics 252(1-3), 31-45.
+        http://www.sciencedirect.com/science/article/pii/S0012365X01001807
+
+    """
+    if G.is_directed():
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    # Reuse the auxiliary digraph and the residual network
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    num, den = 0, 0
+    for u, v in iter_func(G, 2):
+        num += local_node_connectivity(G, u, v, **kwargs)
+        den += 1
+
+    if den == 0:  # Null Graph
+        return 0
+    return num / den
+
+
+@nx._dispatch
+def all_pairs_node_connectivity(G, nbunch=None, flow_func=None):
+    """Compute node connectivity between all pairs of nodes of G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    nbunch : container
+        Container of nodes. If provided, node connectivity will be computed
+        only over pairs of nodes in nbunch.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    all_pairs : dict
+        A dictionary with node connectivity between all pairs of nodes
+        in G, or in nbunch if provided.
+
+    See also
+    --------
+    :meth:`local_node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`local_edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    """
+    if nbunch is None:
+        nbunch = G
+    else:
+        nbunch = set(nbunch)
+
+    directed = G.is_directed()
+    if directed:
+        iter_func = itertools.permutations
+    else:
+        iter_func = itertools.combinations
+
+    all_pairs = {n: {} for n in nbunch}
+
+    # Reuse auxiliary digraph and residual network
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    for u, v in iter_func(nbunch, 2):
+        K = local_node_connectivity(G, u, v, **kwargs)
+        all_pairs[u][v] = K
+        if not directed:
+            all_pairs[v][u] = K
+
+    return all_pairs
+
+
+@nx._dispatch(
+    graphs={"G": 0, "auxiliary?": 4, "residual?": 5},
+    preserve_edge_attrs={"residual": {"capacity": float("inf")}},
+    preserve_graph_attrs={"residual"},
+)
+def local_edge_connectivity(
+    G, s, t, flow_func=None, auxiliary=None, residual=None, cutoff=None
+):
+    r"""Returns local edge connectivity for nodes s and t in G.
+
+    Local edge connectivity for two nodes s and t is the minimum number
+    of edges that must be removed to disconnect them.
+
+    This is a flow based implementation of edge connectivity. We compute the
+    maximum flow on an auxiliary digraph built from the original
+    network (see below for details). This is equal to the local edge
+    connectivity because the value of a maximum s-t-flow is equal to the
+    capacity of a minimum s-t-cut (Ford and Fulkerson theorem) [1]_.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected or directed graph
+
+    s : node
+        Source node
+
+    t : node
+        Target node
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph for computing flow based edge connectivity. If
+        provided it will be reused instead of recreated. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    cutoff : integer, float, or None (default: None)
+        If specified, the maximum flow algorithm will terminate when the
+        flow value reaches or exceeds the cutoff. This only works for flows
+        that support the cutoff parameter (most do) and is ignored otherwise.
+
+    Returns
+    -------
+    K : integer
+        local edge connectivity for nodes s and t.
+
+    Examples
+    --------
+    This function is not imported in the base NetworkX namespace, so you
+    have to explicitly import it from the connectivity package:
+
+    >>> from networkx.algorithms.connectivity import local_edge_connectivity
+
+    We use in this example the platonic icosahedral graph, which has edge
+    connectivity 5.
+
+    >>> G = nx.icosahedral_graph()
+    >>> local_edge_connectivity(G, 0, 6)
+    5
+
+    If you need to compute local connectivity on several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for edge connectivity, and the residual
+    network for the underlying maximum flow computation.
+
+    Example of how to compute local edge connectivity among
+    all pairs of nodes of the platonic icosahedral graph reusing
+    the data structures.
+
+    >>> import itertools
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity
+    >>> H = build_auxiliary_edge_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Use a fresh inner dict per node (dict.fromkeys would share one dict)
+    >>> result = {n: {} for n in G}
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> for u, v in itertools.combinations(G, 2):
+    ...     k = local_edge_connectivity(G, u, v, auxiliary=H, residual=R)
+    ...     result[u][v] = k
+    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
+    True
+
+    You can also use alternative flow algorithms for computing edge
+    connectivity. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp`, which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> local_edge_connectivity(G, 0, 6, flow_func=shortest_augmenting_path)
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of edge connectivity. We compute the
+    maximum flow using, by default, the :meth:`edmonds_karp` algorithm on an
+    auxiliary digraph built from the original input graph:
+
+    If the input graph is undirected, we replace each edge (`u`, `v`) with
+    two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute
+    'capacity' for each arc to 1. If the input graph is directed we simply
+    add the 'capacity' attribute. This is an implementation of algorithm 1
+    in [1]_.
+
+    The maximum flow in the auxiliary network is equal to the local edge
+    connectivity because the value of a maximum s-t-flow is equal to the
+    capacity of a minimum s-t-cut (Ford and Fulkerson theorem).
+
+    See also
+    --------
+    :meth:`edge_connectivity`
+    :meth:`local_node_connectivity`
+    :meth:`node_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    kwargs = {"flow_func": flow_func, "residual": residual}
+    if flow_func is shortest_augmenting_path:
+        kwargs["cutoff"] = cutoff
+        kwargs["two_phase"] = True
+    elif flow_func in (edmonds_karp, dinitz, boykov_kolmogorov):
+        # These flow functions all accept cutoff with the same meaning.
+        kwargs["cutoff"] = cutoff
+
+    return nx.maximum_flow_value(H, s, t, **kwargs)
+
+
+@nx._dispatch
+def edge_connectivity(G, s=None, t=None, flow_func=None, cutoff=None):
+    r"""Returns the edge connectivity of the graph or digraph G.
+
+    The edge connectivity is equal to the minimum number of edges that
+    must be removed to disconnect G or render it trivial. If source
+    and target nodes are provided, this function returns the local edge
+    connectivity: the minimum number of edges that must be removed to
+    break all paths from source to target in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected or directed graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    cutoff : integer, float, or None (default: None)
+        If specified, the maximum flow algorithm will terminate when the
+        flow value reaches or exceeds the cutoff. This only works for flows
+        that support the cutoff parameter (most do) and is ignored otherwise.
+
+    Returns
+    -------
+    K : integer
+        Edge connectivity for G, or local edge connectivity if source
+        and target were provided
+
+    Examples
+    --------
+    >>> # Platonic icosahedral graph is 5-edge-connected
+    >>> G = nx.icosahedral_graph()
+    >>> nx.edge_connectivity(G)
+    5
+
+    You can use alternative flow algorithms for the underlying
+    maximum flow computation.
In dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better + than the default :meth:`edmonds_karp`, which is faster for + sparse networks with highly skewed degree distributions. + Alternative flow functions have to be explicitly imported + from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> nx.edge_connectivity(G, flow_func=shortest_augmenting_path) + 5 + + If you specify a pair of nodes (source and target) as parameters, + this function returns the value of local edge connectivity. + + >>> nx.edge_connectivity(G, 3, 7) + 5 + + If you need to perform several local computations among different + pairs of nodes on the same graph, it is recommended that you reuse + the data structures used in the maximum flow computations. See + :meth:`local_edge_connectivity` for details. + + Notes + ----- + This is a flow based implementation of global edge connectivity. + For undirected graphs the algorithm works by finding a 'small' + dominating set of nodes of G (see algorithm 7 in [1]_ ) and + computing local maximum flow (see :meth:`local_edge_connectivity`) + between an arbitrary node in the dominating set and the rest of + nodes in it. This is an implementation of algorithm 6 in [1]_ . + For directed graphs, the algorithm does n calls to the maximum + flow function. This is an implementation of algorithm 8 in [1]_ . + + See also + -------- + :meth:`local_edge_connectivity` + :meth:`local_node_connectivity` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + :meth:`k_edge_components` + :meth:`k_edge_subgraphs` + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + if (s is not None and t is None) or (s is None and t is not None): + raise nx.NetworkXError("Both source and target must be specified.") + + # Local edge connectivity + if s is not None and t is not None: + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + return local_edge_connectivity(G, s, t, flow_func=flow_func, cutoff=cutoff) + + # Global edge connectivity + # reuse auxiliary digraph and residual network + H = build_auxiliary_edge_connectivity(G) + R = build_residual_network(H, "capacity") + kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R} + + if G.is_directed(): + # Algorithm 8 in [1] + if not nx.is_weakly_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + nodes = list(G) + n = len(nodes) + + if cutoff is not None: + L = min(cutoff, L) + + for i in range(n): + kwargs["cutoff"] = L + try: + L = min(L, local_edge_connectivity(G, nodes[i], nodes[i + 1], **kwargs)) + except IndexError: # last node! 
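+                # nodes[i + 1] ran off the end of the list: close the cycle
+                # by pairing the last node with the first.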
+ L = min(L, local_edge_connectivity(G, nodes[i], nodes[0], **kwargs)) + return L + else: # undirected + # Algorithm 6 in [1] + if not nx.is_connected(G): + return 0 + + # initial value for \lambda is minimum degree + L = min(d for n, d in G.degree()) + + if cutoff is not None: + L = min(cutoff, L) + + # A dominating set is \lambda-covering + # We need a dominating set with at least two nodes + for node in G: + D = nx.dominating_set(G, start_with=node) + v = D.pop() + if D: + break + else: + # in complete graphs the dominating sets will always be of one node + # thus we return min degree + return L + + for w in D: + kwargs["cutoff"] = L + L = min(L, local_edge_connectivity(G, v, w, **kwargs)) + + return L diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/cuts.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/cuts.py new file mode 100644 index 0000000000000000000000000000000000000000..d5883ba8f8e08563946565c28a95a36f13ec547b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/cuts.py @@ -0,0 +1,615 @@ +""" +Flow based cut algorithms +""" +import itertools + +import networkx as nx + +# Define the default maximum flow function to use in all flow based +# cut algorithms. +from networkx.algorithms.flow import build_residual_network, edmonds_karp + +default_flow_func = edmonds_karp + +from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity + +__all__ = [ + "minimum_st_node_cut", + "minimum_node_cut", + "minimum_st_edge_cut", + "minimum_edge_cut", +] + + +@nx._dispatch( + graphs={"G": 0, "auxiliary?": 4, "residual?": 5}, + preserve_edge_attrs={ + "auxiliary": {"capacity": float("inf")}, + "residual": {"capacity": float("inf")}, + }, + preserve_graph_attrs={"auxiliary", "residual"}, +) +def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + """Returns the edges of the cut-set of a minimum (s, t)-cut. + + This function returns the set of edges of minimum cardinality that, + if removed, would destroy all paths among source and target in G. + Edge weights are not considered. See :meth:`minimum_cut` for + computing minimum cuts considering edge weights. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See :meth:`node_connectivity` for + details. The choice of the default function may change from version + to version and should not be relied on. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of edges that, if removed from the graph, will disconnect it. 
+
+    See also
+    --------
+    :meth:`minimum_cut`
+    :meth:`minimum_node_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Examples
+    --------
+    This function is not imported in the base NetworkX namespace, so you
+    have to explicitly import it from the connectivity package:
+
+    >>> from networkx.algorithms.connectivity import minimum_st_edge_cut
+
+    We use in this example the platonic icosahedral graph, which has edge
+    connectivity 5.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(minimum_st_edge_cut(G, 0, 6))
+    5
+
+    If you need to compute local edge cuts on several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for edge connectivity, and the residual
+    network for the underlying maximum flow computation.
+
+    Example of how to compute local edge cuts among all pairs of
+    nodes of the platonic icosahedral graph reusing the data
+    structures.
+
+    >>> import itertools
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity
+    >>> H = build_auxiliary_edge_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Use a fresh inner dict per node (dict.fromkeys would share one dict)
+    >>> result = {n: {} for n in G}
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> for u, v in itertools.combinations(G, 2):
+    ...     k = len(minimum_st_edge_cut(G, u, v, auxiliary=H, residual=R))
+    ...     result[u][v] = k
+    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
+    True
+
+    You can also use alternative flow algorithms for computing edge
+    cuts. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp`, which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+ + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(minimum_st_edge_cut(G, 0, 6, flow_func=shortest_augmenting_path)) + 5 + + """ + if flow_func is None: + flow_func = default_flow_func + + if auxiliary is None: + H = build_auxiliary_edge_connectivity(G) + else: + H = auxiliary + + kwargs = {"capacity": "capacity", "flow_func": flow_func, "residual": residual} + + cut_value, partition = nx.minimum_cut(H, s, t, **kwargs) + reachable, non_reachable = partition + # Any edge in the original graph linking the two sets in the + # partition is part of the edge cutset + cutset = set() + for u, nbrs in ((n, G[n]) for n in reachable): + cutset.update((u, v) for v in nbrs if v in non_reachable) + + return cutset + + +@nx._dispatch( + graphs={"G": 0, "auxiliary?": 4, "residual?": 5}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_node_attrs={"auxiliary": {"id": None}}, + preserve_graph_attrs={"auxiliary", "residual"}, +) +def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None): + r"""Returns a set of nodes of minimum cardinality that disconnect source + from target in G. + + This function returns the set of nodes of minimum cardinality that, + if removed, would destroy all paths among source and target in G. + + Parameters + ---------- + G : NetworkX graph + + s : node + Source node. + + t : node + Target node. + + flow_func : function + A function for computing the maximum flow among a pair of nodes. + The function has to accept at least three parameters: a Digraph, + a source node, and a target node. And return a residual network + that follows NetworkX conventions (see :meth:`maximum_flow` for + details). If flow_func is None, the default maximum flow function + (:meth:`edmonds_karp`) is used. See below for details. The choice + of the default function may change from version to version and + should not be relied on. Default value: None. + + auxiliary : NetworkX DiGraph + Auxiliary digraph to compute flow based node connectivity. It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + cutset : set + Set of nodes that, if removed, would destroy all paths between + source and target in G. + + Examples + -------- + This function is not imported in the base NetworkX namespace, so you + have to explicitly import it from the connectivity package: + + >>> from networkx.algorithms.connectivity import minimum_st_node_cut + + We use in this example the platonic icosahedral graph, which has node + connectivity 5. + + >>> G = nx.icosahedral_graph() + >>> len(minimum_st_node_cut(G, 0, 6)) + 5 + + If you need to compute local st cuts between several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity and node cuts, and the + residual network for the underlying maximum flow computation. 
+
+    Example of how to compute local st node cuts reusing the data
+    structures:
+
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity
+    >>> H = build_auxiliary_node_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as parameters
+    >>> len(minimum_st_node_cut(G, 0, 6, auxiliary=H, residual=R))
+    5
+
+    You can also use alternative flow algorithms for computing minimum st
+    node cuts. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp`, which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(minimum_st_node_cut(G, 0, 6, flow_func=shortest_augmenting_path))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of maximum flow computations to determine
+    the capacity of the minimum cut on an auxiliary directed network that
+    corresponds to the minimum node cut of G. It handles both directed
+    and undirected graphs. This implementation is based on algorithm 11
+    in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_node_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if auxiliary is None:
+        H = build_auxiliary_node_connectivity(G)
+    else:
+        H = auxiliary
+
+    mapping = H.graph.get("mapping", None)
+    if mapping is None:
+        raise nx.NetworkXError("Invalid auxiliary digraph.")
+    if G.has_edge(s, t) or G.has_edge(t, s):
+        # Adjacent nodes cannot be disconnected by removing other nodes,
+        # so the minimum st node cut is empty. Return an empty set (not an
+        # empty dict) to match the documented return type.
+        return set()
+    kwargs = {"flow_func": flow_func, "residual": residual, "auxiliary": H}
+
+    # The edge cut in the auxiliary digraph corresponds to the node cut in the
+    # original graph.
+    edge_cut = minimum_st_edge_cut(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs)
+    # Each node in the original graph maps to two nodes of the auxiliary graph
+    node_cut = {H.nodes[node]["id"] for edge in edge_cut for node in edge}
+    return node_cut - {s, t}
+
+
+@nx._dispatch
+def minimum_node_cut(G, s=None, t=None, flow_func=None):
+    r"""Returns a set of nodes of minimum cardinality that disconnects G.
+
+    If source and target nodes are provided, this function returns the
+    set of nodes of minimum cardinality that, if removed, would destroy
+    all paths between source and target in G. If not, it returns a set
+    of nodes of minimum cardinality that disconnects G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    cutset : set
+        Set of nodes that, if removed, would disconnect G. If source
+        and target nodes are provided, the set contains the nodes that,
+        if removed, would destroy all paths between source and target.
+
+    Examples
+    --------
+    >>> # Platonic icosahedral graph has node connectivity 5
+    >>> G = nx.icosahedral_graph()
+    >>> node_cut = nx.minimum_node_cut(G)
+    >>> len(node_cut)
+    5
+
+    You can use alternative flow algorithms for the underlying maximum
+    flow computation. In dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better
+    than the default :meth:`edmonds_karp`, which is faster for
+    sparse networks with highly skewed degree distributions. Alternative
+    flow functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> node_cut == nx.minimum_node_cut(G, flow_func=shortest_augmenting_path)
+    True
+
+    If you specify a pair of nodes (source and target) as parameters,
+    this function returns a local st node cut.
+
+    >>> len(nx.minimum_node_cut(G, 3, 7))
+    5
+
+    If you need to perform several local st cuts among different
+    pairs of nodes on the same graph, it is recommended that you reuse
+    the data structures used in the maximum flow computations. See
+    :meth:`minimum_st_node_cut` for details.
+
+    Notes
+    -----
+    This is a flow based implementation of minimum node cut. The algorithm
+    is based on solving a number of maximum flow computations to determine
+    the capacity of the minimum cut on an auxiliary directed network that
+    corresponds to the minimum node cut of G. It handles both directed
+    and undirected graphs. This implementation is based on algorithm 11
+    in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_st_node_cut`
+    :meth:`minimum_cut`
+    :meth:`minimum_edge_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # Local minimum node cut.
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return minimum_st_node_cut(G, s, t, flow_func=flow_func)
+
+    # Global minimum node cut.
+    # Analogous to algorithm 11 for global node connectivity in [1].
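+    # Correctness sketch: a minimum node cut either excludes some
+    # minimum-degree node v, in which case it is an st cut between v and
+    # one of its non-neighbors, or it contains v, in which case it
+    # separates two non-adjacent neighbors of v. Checking all of those
+    # st cuts is therefore sufficient.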
+    if G.is_directed():
+        if not nx.is_weakly_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+        iter_func = itertools.permutations
+
+        def neighbors(v):
+            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])
+
+    else:
+        if not nx.is_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+        iter_func = itertools.combinations
+        neighbors = G.neighbors
+
+    # Reuse the auxiliary digraph and the residual network.
+    H = build_auxiliary_node_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "auxiliary": H, "residual": R}
+
+    # Choose a node with minimum degree.
+    v = min(G, key=G.degree)
+    # Initial node cutset is all neighbors of the node with minimum degree.
+    min_cut = set(G[v])
+    # Compute st node cuts between v and all its non-neighbor nodes in G.
+    for w in set(G) - set(neighbors(v)) - {v}:
+        this_cut = minimum_st_node_cut(G, v, w, **kwargs)
+        if len(min_cut) >= len(this_cut):
+            min_cut = this_cut
+    # Also for non-adjacent pairs of neighbors of v.
+    for x, y in iter_func(neighbors(v), 2):
+        if y in G[x]:
+            continue
+        this_cut = minimum_st_node_cut(G, x, y, **kwargs)
+        if len(min_cut) >= len(this_cut):
+            min_cut = this_cut
+
+    return min_cut
+
+
+@nx._dispatch
+def minimum_edge_cut(G, s=None, t=None, flow_func=None):
+    r"""Returns a set of edges of minimum cardinality that disconnects G.
+
+    If source and target nodes are provided, this function returns the
+    set of edges of minimum cardinality that, if removed, would break
+    all paths between source and target in G. If not, it returns a set of
+    edges of minimum cardinality that disconnects G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node. Optional. Default value: None.
+
+    t : node
+        Target node. Optional. Default value: None.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The
+        choice of the default function may change from version
+        to version and should not be relied on. Default value: None.
+
+    Returns
+    -------
+    cutset : set
+        Set of edges that, if removed, would disconnect G. If source
+        and target nodes are provided, the set contains the edges that,
+        if removed, would destroy all paths between source and target.
+
+    Examples
+    --------
+    >>> # Platonic icosahedral graph has edge connectivity 5
+    >>> G = nx.icosahedral_graph()
+    >>> len(nx.minimum_edge_cut(G))
+    5
+
+    You can use alternative flow algorithms for the underlying
+    maximum flow computation. In dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better
+    than the default :meth:`edmonds_karp`, which is faster for
+    sparse networks with highly skewed degree distributions.
+    Alternative flow functions have to be explicitly imported
+    from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(nx.minimum_edge_cut(G, flow_func=shortest_augmenting_path))
+    5
+
+    If you specify a pair of nodes (source and target) as parameters,
+    this function returns a local st edge cut.
+
+    >>> len(nx.minimum_edge_cut(G, 3, 7))
+    5
+
+    If you need to perform several local computations among different
+    pairs of nodes on the same graph, it is recommended that you reuse
+    the data structures used in the maximum flow computations. See
+    :meth:`minimum_st_edge_cut` for details.
+
+    Notes
+    -----
+    This is a flow based implementation of minimum edge cut. For
+    undirected graphs the algorithm works by finding a 'small' dominating
+    set of nodes of G (see algorithm 7 in [1]_) and computing the maximum
+    flow between an arbitrary node in the dominating set and the rest of
+    the nodes in it. This is an implementation of algorithm 6 in [1]_. For
+    directed graphs, the algorithm does n calls to the max flow function.
+    The function raises an error if the directed graph is not weakly
+    connected; if it is weakly but not strongly connected, it returns an
+    empty set (some node already cannot reach some other node, so no edges
+    need to be removed). It is an implementation of algorithm 8 in [1]_.
+
+    See also
+    --------
+    :meth:`minimum_st_edge_cut`
+    :meth:`minimum_node_cut`
+    :meth:`stoer_wagner`
+    :meth:`node_connectivity`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    References
+    ----------
+    .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
+        http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
+
+    """
+    if (s is not None and t is None) or (s is None and t is not None):
+        raise nx.NetworkXError("Both source and target must be specified.")
+
+    # reuse auxiliary digraph and residual network
+    H = build_auxiliary_edge_connectivity(G)
+    R = build_residual_network(H, "capacity")
+    kwargs = {"flow_func": flow_func, "residual": R, "auxiliary": H}
+
+    # Local minimum edge cut if s and t are not None
+    if s is not None and t is not None:
+        if s not in G:
+            raise nx.NetworkXError(f"node {s} not in graph")
+        if t not in G:
+            raise nx.NetworkXError(f"node {t} not in graph")
+        return minimum_st_edge_cut(H, s, t, **kwargs)
+
+    # Global minimum edge cut
+    # Analogous to the algorithm for global edge connectivity
+    if G.is_directed():
+        # Based on algorithm 8 in [1]
+        if not nx.is_weakly_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        nodes = list(G)
+        n = len(nodes)
+        for i in range(n):
+            try:
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[i + 1], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+            except IndexError:  # Last node!
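+                # nodes[i + 1] ran off the end of the list: close the cycle
+                # by cutting between the last node and the first.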
+                this_cut = minimum_st_edge_cut(H, nodes[i], nodes[0], **kwargs)
+                if len(this_cut) <= len(min_cut):
+                    min_cut = this_cut
+
+        return min_cut
+
+    else:  # undirected
+        # Based on algorithm 6 in [1]
+        if not nx.is_connected(G):
+            raise nx.NetworkXError("Input graph is not connected")
+
+        # Initial cutset is all edges of a node with minimum degree
+        node = min(G, key=G.degree)
+        min_cut = set(G.edges(node))
+        # A dominating set is \lambda-covering
+        # We need a dominating set with at least two nodes
+        for node in G:
+            D = nx.dominating_set(G, start_with=node)
+            v = D.pop()
+            if D:
+                break
+        else:
+            # In complete graphs the dominating set is always a single node,
+            # so we return min_cut, which now contains the edges of a node
+            # with minimum degree
+            return min_cut
+        for w in D:
+            this_cut = minimum_st_edge_cut(H, v, w, **kwargs)
+            if len(this_cut) <= len(min_cut):
+                min_cut = this_cut
+
+        return min_cut
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/disjoint_paths.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/disjoint_paths.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fe450517d52454b3086175a76eb40795c3565e4
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/disjoint_paths.py
@@ -0,0 +1,412 @@
+"""Flow based node and edge disjoint paths."""
+import networkx as nx
+
+# Define the default maximum flow function to use for the underlying
+# maximum flow computations
+from networkx.algorithms.flow import (
+    edmonds_karp,
+    preflow_push,
+    shortest_augmenting_path,
+)
+from networkx.exception import NetworkXNoPath
+
+default_flow_func = edmonds_karp
+from itertools import filterfalse as _filterfalse
+
+# Functions to build auxiliary data structures.
+from .utils import build_auxiliary_edge_connectivity, build_auxiliary_node_connectivity
+
+__all__ = ["edge_disjoint_paths", "node_disjoint_paths"]
+
+
+@nx._dispatch(
+    graphs={"G": 0, "auxiliary?": 5, "residual?": 6},
+    preserve_edge_attrs={
+        "auxiliary": {"capacity": float("inf")},
+        "residual": {"capacity": float("inf")},
+    },
+    preserve_graph_attrs={"residual"},
+)
+def edge_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
+    """Returns the edge disjoint paths between source and target.
+
+    Edge disjoint paths are paths that do not share any edge. The
+    number of edge disjoint paths between source and target is equal
+    to their edge connectivity.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node for the flow.
+
+    t : node
+        Sink node for the flow.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. The choice of the default function
+        may change from version to version and should not be relied on.
+        Default value: None.
+
+    cutoff : integer or None (default: None)
+        Maximum number of paths to yield. If specified, the maximum flow
+        algorithm will terminate when the flow value reaches or exceeds the
+        cutoff. This only works for flows that support the cutoff parameter
+        (most do) and is ignored otherwise.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based edge connectivity. If
+        provided it will be reused instead of recreated. Default value: None.
+
+    residual : NetworkX DiGraph
+        Residual network to compute maximum flow. If provided it will be
+        reused instead of recreated. Default value: None.
+
+    Returns
+    -------
+    paths : generator
+        A generator of edge independent paths.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If there is no path between source and target.
+
+    NetworkXError
+        If source or target are not in the graph G.
+
+    See also
+    --------
+    :meth:`node_disjoint_paths`
+    :meth:`edge_connectivity`
+    :meth:`maximum_flow`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Examples
+    --------
+    We use in this example the platonic icosahedral graph, which has edge
+    connectivity 5, thus there are 5 edge disjoint paths between any
+    pair of nodes.
+
+    >>> G = nx.icosahedral_graph()
+    >>> len(list(nx.edge_disjoint_paths(G, 0, 6)))
+    5
+
+    If you need to compute edge disjoint paths on several pairs of
+    nodes in the same graph, it is recommended that you reuse the
+    data structures that NetworkX uses in the computation: the
+    auxiliary digraph for edge connectivity, and the residual
+    network for the underlying maximum flow computation.
+
+    Example of how to compute edge disjoint paths among all pairs of
+    nodes of the platonic icosahedral graph reusing the data
+    structures.
+
+    >>> import itertools
+    >>> # You also have to explicitly import the function for
+    >>> # building the auxiliary digraph from the connectivity package
+    >>> from networkx.algorithms.connectivity import build_auxiliary_edge_connectivity
+    >>> H = build_auxiliary_edge_connectivity(G)
+    >>> # And the function for building the residual network from the
+    >>> # flow package
+    >>> from networkx.algorithms.flow import build_residual_network
+    >>> # Note that the auxiliary digraph has an edge attribute named capacity
+    >>> R = build_residual_network(H, "capacity")
+    >>> result = {n: {} for n in G}
+    >>> # Reuse the auxiliary digraph and the residual network by passing them
+    >>> # as arguments
+    >>> for u, v in itertools.combinations(G, 2):
+    ...     k = len(list(nx.edge_disjoint_paths(G, u, v, auxiliary=H, residual=R)))
+    ...     result[u][v] = k
+    >>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
+    True
+
+    You can also use alternative flow algorithms for computing edge disjoint
+    paths. For instance, in dense networks the algorithm
+    :meth:`shortest_augmenting_path` will usually perform better than
+    the default :meth:`edmonds_karp`, which is faster for sparse
+    networks with highly skewed degree distributions. Alternative flow
+    functions have to be explicitly imported from the flow package.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> len(list(nx.edge_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path)))
+    5
+
+    Notes
+    -----
+    This is a flow based implementation of edge disjoint paths. We compute
+    the maximum flow between source and target on an auxiliary directed
+    network. The saturated edges in the residual network after running the
+    maximum flow algorithm correspond to edge disjoint paths between source
+    and target in the original network. This function handles both directed
+    and undirected graphs, and can use all flow algorithms from NetworkX flow
+    package.
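+
+    As a small illustration of this correspondence, a 4-cycle admits exactly
+    two edge disjoint paths between opposite nodes, one going each way
+    around the cycle:
+
+    >>> sorted(nx.edge_disjoint_paths(nx.cycle_graph(4), 0, 2))
+    [[0, 1, 2], [0, 3, 2]]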
+
+    """
+    if s not in G:
+        raise nx.NetworkXError(f"node {s} not in graph")
+    if t not in G:
+        raise nx.NetworkXError(f"node {t} not in graph")
+
+    if flow_func is None:
+        flow_func = default_flow_func
+
+    if auxiliary is None:
+        H = build_auxiliary_edge_connectivity(G)
+    else:
+        H = auxiliary
+
+    # Maximum possible edge disjoint paths
+    possible = min(H.out_degree(s), H.in_degree(t))
+    if not possible:
+        raise NetworkXNoPath
+
+    if cutoff is None:
+        cutoff = possible
+    else:
+        cutoff = min(cutoff, possible)
+
+    # Compute maximum flow between source and target. Flow functions in
+    # NetworkX return a residual network.
+    kwargs = {
+        "capacity": "capacity",
+        "residual": residual,
+        "cutoff": cutoff,
+        "value_only": True,
+    }
+    if flow_func is preflow_push:
+        del kwargs["cutoff"]
+    if flow_func is shortest_augmenting_path:
+        kwargs["two_phase"] = True
+    R = flow_func(H, s, t, **kwargs)
+
+    if R.graph["flow_value"] == 0:
+        raise NetworkXNoPath
+
+    # Saturated edges in the residual network form the edge disjoint paths
+    # between source and target
+    cutset = [
+        (u, v)
+        for u, v, d in R.edges(data=True)
+        if d["capacity"] == d["flow"] and d["flow"] > 0
+    ]
+    # This is equivalent to what flow.utils.build_flow_dict returns, but
+    # only for the nodes with saturated edges and without reporting 0 flows.
+    flow_dict = {n: {} for edge in cutset for n in edge}
+    for u, v in cutset:
+        flow_dict[u][v] = 1
+
+    # Rebuild the edge disjoint paths from the flow dictionary.
+    paths_found = 0
+    for v in list(flow_dict[s]):
+        if paths_found >= cutoff:
+            # preflow_push does not support cutoff: we have to
+            # keep track of the paths found and stop at cutoff.
+            break
+        path = [s]
+        if v == t:
+            path.append(v)
+            yield path
+            continue
+        u = v
+        while u != t:
+            path.append(u)
+            try:
+                u, _ = flow_dict[u].popitem()
+            except KeyError:
+                break
+        else:
+            path.append(t)
+            yield path
+            paths_found += 1
+
+
+@nx._dispatch(
+    graphs={"G": 0, "auxiliary?": 5, "residual?": 6},
+    preserve_edge_attrs={"residual": {"capacity": float("inf")}},
+    preserve_node_attrs={"auxiliary": {"id": None}},
+    preserve_graph_attrs={"auxiliary", "residual"},
+)
+def node_disjoint_paths(
+    G, s, t, flow_func=None, cutoff=None, auxiliary=None, residual=None
+):
+    r"""Computes node disjoint paths between source and target.
+
+    Node disjoint paths are paths that only share their first and last
+    nodes. The number of node independent paths between two nodes is
+    equal to their local node connectivity.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    s : node
+        Source node.
+
+    t : node
+        Target node.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes.
+        The function has to accept at least three parameters: a Digraph,
+        a source node, and a target node. And return a residual network
+        that follows NetworkX conventions (see :meth:`maximum_flow` for
+        details). If flow_func is None, the default maximum flow function
+        (:meth:`edmonds_karp`) is used. See below for details. The choice
+        of the default function may change from version to version and
+        should not be relied on. Default value: None.
+
+    cutoff : integer or None (default: None)
+        Maximum number of paths to yield. If specified, the maximum flow
+        algorithm will terminate when the flow value reaches or exceeds the
+        cutoff. This only works for flows that support the cutoff parameter
+        (most do) and is ignored otherwise.
+
+    auxiliary : NetworkX DiGraph
+        Auxiliary digraph to compute flow based node connectivity.
It has + to have a graph attribute called mapping with a dictionary mapping + node names in G and in the auxiliary digraph. If provided + it will be reused instead of recreated. Default value: None. + + residual : NetworkX DiGraph + Residual network to compute maximum flow. If provided it will be + reused instead of recreated. Default value: None. + + Returns + ------- + paths : generator + Generator of node disjoint paths. + + Raises + ------ + NetworkXNoPath + If there is no path between source and target. + + NetworkXError + If source or target are not in the graph G. + + Examples + -------- + We use in this example the platonic icosahedral graph, which has node + connectivity 5, thus there are 5 node disjoint paths between any pair + of non neighbor nodes. + + >>> G = nx.icosahedral_graph() + >>> len(list(nx.node_disjoint_paths(G, 0, 6))) + 5 + + If you need to compute node disjoint paths between several pairs of + nodes in the same graph, it is recommended that you reuse the + data structures that NetworkX uses in the computation: the + auxiliary digraph for node connectivity and node cuts, and the + residual network for the underlying maximum flow computation. + + Example of how to compute node disjoint paths reusing the data + structures: + + >>> # You also have to explicitly import the function for + >>> # building the auxiliary digraph from the connectivity package + >>> from networkx.algorithms.connectivity import build_auxiliary_node_connectivity + >>> H = build_auxiliary_node_connectivity(G) + >>> # And the function for building the residual network from the + >>> # flow package + >>> from networkx.algorithms.flow import build_residual_network + >>> # Note that the auxiliary digraph has an edge attribute named capacity + >>> R = build_residual_network(H, "capacity") + >>> # Reuse the auxiliary digraph and the residual network by passing them + >>> # as arguments + >>> len(list(nx.node_disjoint_paths(G, 0, 6, auxiliary=H, residual=R))) + 5 + + You can also use alternative flow algorithms for computing node disjoint + paths. For instance, in dense networks the algorithm + :meth:`shortest_augmenting_path` will usually perform better than + the default :meth:`edmonds_karp` which is faster for sparse + networks with highly skewed degree distributions. Alternative flow + functions have to be explicitly imported from the flow package. + + >>> from networkx.algorithms.flow import shortest_augmenting_path + >>> len(list(nx.node_disjoint_paths(G, 0, 6, flow_func=shortest_augmenting_path))) + 5 + + Notes + ----- + This is a flow based implementation of node disjoint paths. We compute + the maximum flow between source and target on an auxiliary directed + network. The saturated edges in the residual network after running the + maximum flow algorithm correspond to node disjoint paths between source + and target in the original network. This function handles both directed + and undirected graphs, and can use all flow algorithms from NetworkX flow + package. 
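+
+    For intuition on a small graph, the two paths around a 4-cycle are node
+    disjoint apart from their shared endpoints:
+
+    >>> sorted(nx.node_disjoint_paths(nx.cycle_graph(4), 0, 2))
+    [[0, 1, 2], [0, 3, 2]]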
+ + See also + -------- + :meth:`edge_disjoint_paths` + :meth:`node_connectivity` + :meth:`maximum_flow` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + """ + if s not in G: + raise nx.NetworkXError(f"node {s} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {t} not in graph") + + if auxiliary is None: + H = build_auxiliary_node_connectivity(G) + else: + H = auxiliary + + mapping = H.graph.get("mapping", None) + if mapping is None: + raise nx.NetworkXError("Invalid auxiliary digraph.") + + # Maximum possible edge disjoint paths + possible = min(H.out_degree(f"{mapping[s]}B"), H.in_degree(f"{mapping[t]}A")) + if not possible: + raise NetworkXNoPath + + if cutoff is None: + cutoff = possible + else: + cutoff = min(cutoff, possible) + + kwargs = { + "flow_func": flow_func, + "residual": residual, + "auxiliary": H, + "cutoff": cutoff, + } + + # The edge disjoint paths in the auxiliary digraph correspond to the node + # disjoint paths in the original graph. + paths_edges = edge_disjoint_paths(H, f"{mapping[s]}B", f"{mapping[t]}A", **kwargs) + for path in paths_edges: + # Each node in the original graph maps to two nodes in auxiliary graph + yield list(_unique_everseen(H.nodes[node]["id"] for node in path)) + + +def _unique_everseen(iterable): + # Adapted from https://docs.python.org/3/library/itertools.html examples + "List unique elements, preserving order. Remember all elements ever seen." + # unique_everseen('AAAABBBCCDAABBB') --> A B C D + seen = set() + seen_add = seen.add + for element in _filterfalse(seen.__contains__, iterable): + seen_add(element) + yield element diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_augmentation.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..c1215509e8860fbc62b16f87c6d01f784a1bec3f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_augmentation.py @@ -0,0 +1,1269 @@ +""" +Algorithms for finding k-edge-augmentations + +A k-edge-augmentation is a set of edges, that once added to a graph, ensures +that the graph is k-edge-connected; i.e. the graph cannot be disconnected +unless k or more edges are removed. Typically, the goal is to find the +augmentation with minimum weight. In general, it is not guaranteed that a +k-edge-augmentation exists. + +See Also +-------- +:mod:`edge_kcomponents` : algorithms for finding k-edge-connected components +:mod:`connectivity` : algorithms for determining edge connectivity. +""" +import itertools as it +import math +from collections import defaultdict, namedtuple + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["k_edge_augmentation", "is_k_edge_connected", "is_locally_k_edge_connected"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def is_k_edge_connected(G, k): + """Tests to see if a graph is k-edge-connected. + + Is it impossible to disconnect the graph by removing fewer than k edges? + If so, then G is k-edge-connected. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + edge connectivity to test for + + Returns + ------- + boolean + True if G is k-edge-connected. 
+
+    See Also
+    --------
+    :func:`is_locally_k_edge_connected`
+
+    Examples
+    --------
+    >>> G = nx.barbell_graph(10, 0)
+    >>> nx.is_k_edge_connected(G, k=1)
+    True
+    >>> nx.is_k_edge_connected(G, k=2)
+    False
+    """
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+    # First try to quickly determine if G is not k-edge-connected
+    if G.number_of_nodes() < k + 1:
+        return False
+    elif any(d < k for n, d in G.degree()):
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.is_connected(G)
+        elif k == 2:
+            return nx.is_connected(G) and not nx.has_bridges(G)
+        else:
+            return nx.edge_connectivity(G, cutoff=k) >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def is_locally_k_edge_connected(G, s, t, k):
+    """Tests to see if a pair of nodes in a graph is locally k-edge-connected.
+
+    Is it impossible to disconnect s and t by removing fewer than k edges?
+    If so, then s and t are locally k-edge-connected in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    s : node
+        Source node
+
+    t : node
+        Target node
+
+    k : integer
+        local edge connectivity for nodes s and t
+
+    Returns
+    -------
+    boolean
+        True if s and t are locally k-edge-connected in G.
+
+    See Also
+    --------
+    :func:`is_k_edge_connected`
+
+    Examples
+    --------
+    >>> from networkx.algorithms.connectivity import is_locally_k_edge_connected
+    >>> G = nx.barbell_graph(10, 0)
+    >>> is_locally_k_edge_connected(G, 5, 15, k=1)
+    True
+    >>> is_locally_k_edge_connected(G, 5, 15, k=2)
+    False
+    >>> is_locally_k_edge_connected(G, 1, 5, k=2)
+    True
+    """
+    if k < 1:
+        raise ValueError(f"k must be positive, not {k}")
+
+    # First try to quickly determine if s and t are not locally k-edge-connected
+    if G.degree(s) < k or G.degree(t) < k:
+        return False
+    else:
+        # Otherwise perform the full check
+        if k == 1:
+            return nx.has_path(G, s, t)
+        else:
+            localk = nx.connectivity.local_edge_connectivity(G, s, t, cutoff=k)
+            return localk >= k
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
+    """Finds a set of edges to k-edge-connect G.
+
+    Adding edges from the augmentation to G makes it impossible to disconnect G
+    unless k or more edges are removed. This function uses the most efficient
+    function available (depending on the value of k and if the problem is
+    weighted or unweighted) to search for a minimum weight subset of available
+    edges that k-edge-connects G. In general, finding a k-edge-augmentation is
+    NP-hard, so solutions are not guaranteed to be minimal. Furthermore, a
+    k-edge-augmentation may not exist.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    k : integer
+        Desired edge connectivity
+
+    avail : dict or a set of 2 or 3 tuples
+        The available edges that can be used in the augmentation.
+
+        If unspecified, then all edges in the complement of G are available.
+        Otherwise, each item is an available edge (with an optional weight).
+
+        In the unweighted case, each item is an edge ``(u, v)``.
+
+        In the weighted case, each item is a 3-tuple ``(u, v, d)`` or a dict
+        with items ``(u, v): d``. The third item, ``d``, can be a dictionary
+        or a real number. If ``d`` is a dictionary ``d[weight]``
+        corresponds to the weight.
+
+    weight : string
+        key to use to find weights if ``avail`` is a set of 3-tuples where the
+        third item in each tuple is a dictionary.
+
+    partial : boolean
+        If partial is True and no feasible k-edge-augmentation exists, then a
+        partial k-edge-augmentation is generated. Adding the edges in a
+        partial augmentation to G minimizes the number of k-edge-connected
+        components and maximizes the edge connectivity between those
+        components. For details, see :func:`partial_k_edge_augmentation`.
+
+    Yields
+    ------
+    edge : tuple
+        Edges that, once added to G, would cause G to become k-edge-connected.
+        If partial is False, an error is raised if this is not possible.
+        Otherwise, generated edges form a partial augmentation, which
+        k-edge-connects any part of G where it is possible, and maximally
+        connects the remaining parts.
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If partial is False and no k-edge-augmentation exists.
+
+    NetworkXNotImplemented
+        If the input graph is directed or a multigraph.
+
+    ValueError:
+        If k is less than 1
+
+    Notes
+    -----
+    When k=1 this returns an optimal solution.
+
+    When k=2 and ``avail`` is None, this returns an optimal solution.
+    Otherwise when k=2, this returns a 2-approximation of the optimal solution.
+
+    For k >= 3, this problem is NP-hard and this function uses a randomized
+    algorithm that produces a feasible solution, but provides no guarantees
+    on the solution weight.
+
+    Examples
+    --------
+    >>> # Unweighted cases
+    >>> G = nx.path_graph((1, 2, 3, 4))
+    >>> G.add_node(5)
+    >>> sorted(nx.k_edge_augmentation(G, k=1))
+    [(1, 5)]
+    >>> sorted(nx.k_edge_augmentation(G, k=2))
+    [(1, 5), (5, 4)]
+    >>> sorted(nx.k_edge_augmentation(G, k=3))
+    [(1, 4), (1, 5), (2, 5), (3, 5), (4, 5)]
+    >>> complement = list(nx.k_edge_augmentation(G, k=5, partial=True))
+    >>> G.add_edges_from(complement)
+    >>> nx.edge_connectivity(G)
+    4
+
+    >>> # Weighted cases
+    >>> G = nx.path_graph((1, 2, 3, 4))
+    >>> G.add_node(5)
+    >>> # avail can be a tuple with a dict
+    >>> avail = [(1, 5, {"weight": 11}), (2, 5, {"weight": 10})]
+    >>> sorted(nx.k_edge_augmentation(G, k=1, avail=avail, weight="weight"))
+    [(2, 5)]
+    >>> # or avail can be a 3-tuple with a real number
+    >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
+    [(1, 5), (2, 5), (4, 5)]
+    >>> # or avail can be a dict
+    >>> avail = {(1, 5): 11, (2, 5): 10, (4, 3): 1, (4, 5): 51}
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))
+    [(1, 5), (2, 5), (4, 5)]
+    >>> # If augmentation is infeasible, then a partial solution can be found
+    >>> avail = {(1, 5): 11}
+    >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True))
+    [(1, 5)]
+    """
+    try:
+        if k <= 0:
+            raise ValueError(f"k must be a positive integer, not {k}")
+        elif G.number_of_nodes() < k + 1:
+            msg = f"impossible to {k} connect in graph with less than {k + 1} nodes"
+            raise nx.NetworkXUnfeasible(msg)
+        elif avail is not None and len(avail) == 0:
+            if not nx.is_k_edge_connected(G, k):
+                raise nx.NetworkXUnfeasible("no available edges")
+            aug_edges = []
+        elif k == 1:
+            aug_edges = one_edge_augmentation(
+                G, avail=avail, weight=weight, partial=partial
+            )
+        elif k == 2:
+            aug_edges = bridge_augmentation(G, avail=avail, weight=weight)
+        else:
+            # raise NotImplementedError(f'not implemented for k>2. k={k}')
+            aug_edges = greedy_k_edge_augmentation(
+                G, k=k, avail=avail, weight=weight, seed=0
+            )
+        # Do eager evaluation so we can catch any exceptions
+        # before executing partial code.
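+        # (Materializing the generator with list() forces helper errors such
+        # as NetworkXUnfeasible to surface here, inside the try block.)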
+ yield from list(aug_edges) + except nx.NetworkXUnfeasible: + if partial: + # Return all available edges + if avail is None: + aug_edges = complement_edges(G) + else: + # If we can't k-edge-connect the entire graph, try to + # k-edge-connect as much as possible + aug_edges = partial_k_edge_augmentation( + G, k=k, avail=avail, weight=weight + ) + yield from aug_edges + else: + raise + + +@nx._dispatch +def partial_k_edge_augmentation(G, k, avail, weight=None): + """Finds augmentation that k-edge-connects as much of the graph as possible. + + When a k-edge-augmentation is not possible, we can still try to find a + small set of edges that partially k-edge-connects as much of the graph as + possible. All possible edges are generated between remaining parts. + This minimizes the number of k-edge-connected subgraphs in the resulting + graph and maximizes the edge connectivity between those subgraphs. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + Yields + ------ + edge : tuple + Edges in the partial augmentation of G. These edges k-edge-connect any + part of G where it is possible, and maximally connects the remaining + parts. In other words, all edges from avail are generated except for + those within subgraphs that have already become k-edge-connected. + + Notes + ----- + Construct H that augments G with all edges in avail. + Find the k-edge-subgraphs of H. + For each k-edge-subgraph, if the number of nodes is more than k, then find + the k-edge-augmentation of that graph and add it to the solution. Then add + all edges in avail between k-edge subgraphs to the solution. 
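+
+    As a quick sanity check (using the same graph as the Examples below),
+    applying the partial augmentation reduces the number of 2-edge-connected
+    subgraphs from eight to four:
+
+    >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7))
+    >>> G.add_node(8)
+    >>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)]
+    >>> len(list(nx.k_edge_subgraphs(G, k=2)))
+    8
+    >>> G.add_edges_from(list(partial_k_edge_augmentation(G, k=2, avail=avail)))
+    >>> len(list(nx.k_edge_subgraphs(G, k=2)))
+    4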
+ + See Also + -------- + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> G.add_node(8) + >>> avail = [(1, 3), (1, 4), (1, 5), (2, 4), (2, 5), (3, 5), (1, 8)] + >>> sorted(partial_k_edge_augmentation(G, k=2, avail=avail)) + [(1, 5), (1, 8)] + """ + + def _edges_between_disjoint(H, only1, only2): + """finds edges between disjoint nodes""" + only1_adj = {u: set(H.adj[u]) for u in only1} + for u, neighbs in only1_adj.items(): + # Find the neighbors of u in only1 that are also in only2 + neighbs12 = neighbs.intersection(only2) + for v in neighbs12: + yield (u, v) + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + + # Find which parts of the graph can be k-edge-connected + H = G.copy() + H.add_edges_from( + ( + (u, v, {"weight": w, "generator": (u, v)}) + for (u, v), w in zip(avail, avail_w) + ) + ) + k_edge_subgraphs = list(nx.k_edge_subgraphs(H, k=k)) + + # Generate edges to k-edge-connect internal subgraphs + for nodes in k_edge_subgraphs: + if len(nodes) > 1: + # Get the k-edge-connected subgraph + C = H.subgraph(nodes).copy() + # Find the internal edges that were available + sub_avail = { + d["generator"]: d["weight"] + for (u, v, d) in C.edges(data=True) + if "generator" in d + } + # Remove potential augmenting edges + C.remove_edges_from(sub_avail.keys()) + # Find a subset of these edges that makes the component + # k-edge-connected and ignore the rest + yield from nx.k_edge_augmentation(C, k=k, avail=sub_avail) + + # Generate all edges between CCs that could not be k-edge-connected + for cc1, cc2 in it.combinations(k_edge_subgraphs, 2): + for u, v in _edges_between_disjoint(H, cc1, cc2): + d = H.get_edge_data(u, v) + edge = d.get("generator", None) + if edge is not None: + yield edge + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch +def one_edge_augmentation(G, avail=None, weight=None, partial=False): + """Finds minimum weight set of edges to connect G. + + Equivalent to :func:`k_edge_augmentation` when k=1. Adding the resulting + edges to G will make it 1-edge-connected. The solution is optimal for both + weighted and non-weighted variants. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the one-augmentation of G + + Raises + ------ + NetworkXUnfeasible + If partial is False and no one-edge-augmentation exists. + + Notes + ----- + Uses either :func:`unconstrained_one_edge_augmentation` or + :func:`weighted_one_edge_augmentation` depending on whether ``avail`` is + specified. Both algorithms are based on finding a minimum spanning tree. + As such both algorithms find optimal solutions and run in linear time. 
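+
+    Examples
+    --------
+    A minimal sketch (the function can be imported from
+    ``networkx.algorithms.connectivity.edge_augmentation``):
+
+    >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)])
+    >>> G.add_nodes_from([6, 7, 8])
+    >>> H = G.copy()
+    >>> H.add_edges_from(one_edge_augmentation(G))
+    >>> nx.is_connected(H)
+    True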
+
+    See also
+    --------
+    :func:`k_edge_augmentation`
+    """
+    if avail is None:
+        return unconstrained_one_edge_augmentation(G)
+    else:
+        return weighted_one_edge_augmentation(
+            G, avail=avail, weight=weight, partial=partial
+        )
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatch
+def bridge_augmentation(G, avail=None, weight=None):
+    """Finds a set of edges that bridge-connects G.
+
+    Equivalent to :func:`k_edge_augmentation` when k=2, and partial=False.
+    Adding the resulting edges to G will make it 2-edge-connected. If no
+    constraints are specified the returned set of edges is minimum and optimal,
+    otherwise the solution is approximated.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    avail : dict or a set of 2 or 3 tuples
+        For more details, see :func:`k_edge_augmentation`.
+
+    weight : string
+        key to use to find weights if ``avail`` is a set of 3-tuples.
+        For more details, see :func:`k_edge_augmentation`.
+
+    Yields
+    ------
+    edge : tuple
+        Edges in the bridge-augmentation of G
+
+    Raises
+    ------
+    NetworkXUnfeasible
+        If no bridge-augmentation exists.
+
+    Notes
+    -----
+    If there are no constraints the solution can be computed in linear time
+    using :func:`unconstrained_bridge_augmentation`. Otherwise, the problem
+    becomes NP-hard and the solution is approximated by
+    :func:`weighted_bridge_augmentation`.
+
+    See Also
+    --------
+    :func:`k_edge_augmentation`
+    """
+    if G.number_of_nodes() < 3:
+        raise nx.NetworkXUnfeasible("impossible to bridge connect less than 3 nodes")
+    if avail is None:
+        return unconstrained_bridge_augmentation(G)
+    else:
+        return weighted_bridge_augmentation(G, avail, weight=weight)
+
+
+# --- Algorithms and Helpers ---
+
+
+def _ordered(u, v):
+    """Returns the nodes in an undirected edge in lower-triangular order"""
+    return (u, v) if u < v else (v, u)
+
+
+def _unpack_available_edges(avail, weight=None, G=None):
+    """Helper to separate avail into edges and corresponding weights"""
+    if weight is None:
+        weight = "weight"
+    if isinstance(avail, dict):
+        avail_uv = list(avail.keys())
+        avail_w = list(avail.values())
+    else:
+
+        def _try_getitem(d):
+            try:
+                return d[weight]
+            except TypeError:
+                return d
+
+        avail_uv = [tup[0:2] for tup in avail]
+        avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1]) for tup in avail]
+
+    if G is not None:
+        # Edges already in the graph are filtered
+        flags = [not G.has_edge(u, v) for u, v in avail_uv]
+        avail_uv = list(it.compress(avail_uv, flags))
+        avail_w = list(it.compress(avail_w, flags))
+    return avail_uv, avail_w
+
+
+MetaEdge = namedtuple("MetaEdge", ("meta_uv", "uv", "w"))
+
+
+def _lightest_meta_edges(mapping, avail_uv, avail_w):
+    """Maps available edges in the original graph to edges in the metagraph.
+
+    Parameters
+    ----------
+    mapping : dict
+        mapping produced by :func:`collapse`, that maps each node in the
+        original graph to a node in the meta graph
+
+    avail_uv : list
+        list of edges
+
+    avail_w : list
+        list of edge weights
+
+    Notes
+    -----
+    Each node in the metagraph is a k-edge-connected component in the original
+    graph. We don't care about any edge within the same k-edge-connected
+    component, so we ignore self edges. We are also only interested in the
+    minimum weight edge bridging each k-edge-connected component, so we group
+    the edges by meta-edge and take the lightest in each group.
+ + Examples + -------- + >>> # Each group represents a meta-node + >>> groups = ([1, 2, 3], [4, 5], [6]) + >>> mapping = {n: meta_n for meta_n, ns in enumerate(groups) for n in ns} + >>> avail_uv = [(1, 2), (3, 6), (1, 4), (5, 2), (6, 1), (2, 6), (3, 1)] + >>> avail_w = [20, 99, 20, 15, 50, 99, 20] + >>> sorted(_lightest_meta_edges(mapping, avail_uv, avail_w)) + [MetaEdge(meta_uv=(0, 1), uv=(5, 2), w=15), MetaEdge(meta_uv=(0, 2), uv=(6, 1), w=50)] + """ + grouped_wuv = defaultdict(list) + for w, (u, v) in zip(avail_w, avail_uv): + # Order the meta-edge so it can be used as a dict key + meta_uv = _ordered(mapping[u], mapping[v]) + # Group each available edge using the meta-edge as a key + grouped_wuv[meta_uv].append((w, u, v)) + + # Now that all available edges are grouped, choose one per group + for (mu, mv), choices_wuv in grouped_wuv.items(): + # Ignore available edges within the same meta-node + if mu != mv: + # Choose the lightest available edge belonging to each meta-edge + w, u, v = min(choices_wuv) + yield MetaEdge((mu, mv), (u, v), w) + + +@nx._dispatch +def unconstrained_one_edge_augmentation(G): + """Finds the smallest set of edges to connect G. + + This is a variant of the unweighted MST problem. + If G is not empty, a feasible solution always exists. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Yields + ------ + edge : tuple + Edges in the one-edge-augmentation of G + + See Also + -------- + :func:`one_edge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> G.add_nodes_from([6, 7, 8]) + >>> sorted(unconstrained_one_edge_augmentation(G)) + [(1, 4), (4, 6), (6, 7), (7, 8)] + """ + ccs1 = list(nx.connected_components(G)) + C = collapse(G, ccs1) + # When we are not constrained, we can just make a meta graph tree. + meta_nodes = list(C.nodes()) + # build a path in the metagraph + meta_aug = list(zip(meta_nodes, meta_nodes[1:])) + # map that path to the original graph + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + for mu, mv in meta_aug: + yield (inverse[mu][0], inverse[mv][0]) + + +@nx._dispatch +def weighted_one_edge_augmentation(G, avail, weight=None, partial=False): + """Finds the minimum weight set of edges to connect G if one exists. + + This is a variant of the weighted MST problem. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + partial : boolean + If partial is True and no feasible k-edge-augmentation exists, then the + augmenting edges minimize the number of connected components. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to connect G. 
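+
+    Notes
+    -----
+    A sketch of the approach (mirroring the inline comments in the
+    implementation): the connected components of G are collapsed into the
+    nodes of a metagraph, each available edge is mapped onto the lightest
+    metagraph edge it could realize, and a minimum spanning tree of the
+    metagraph selects which available edges to yield.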
+ + See Also + -------- + :func:`one_edge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (4, 5)]) + >>> G.add_nodes_from([6, 7, 8]) + >>> # any edge not in avail has an implicit weight of infinity + >>> avail = [(1, 3), (1, 5), (4, 7), (4, 8), (6, 1), (8, 1), (8, 2)] + >>> sorted(weighted_one_edge_augmentation(G, avail)) + [(1, 5), (4, 7), (6, 1), (8, 1)] + >>> # find another solution by giving large weights to edges in the + >>> # previous solution (note some of the old edges must be used) + >>> avail = [(1, 3), (1, 5, 99), (4, 7, 9), (6, 1, 99), (8, 1, 99), (8, 2)] + >>> sorted(weighted_one_edge_augmentation(G, avail)) + [(1, 5), (4, 7), (6, 1), (8, 2)] + """ + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + # Collapse CCs in the original graph into nodes in a metagraph + # Then find an MST of the metagraph instead of the original graph + C = collapse(G, nx.connected_components(G)) + mapping = C.graph["mapping"] + # Assign each available edge to an edge in the metagraph + candidate_mapping = _lightest_meta_edges(mapping, avail_uv, avail_w) + # nx.set_edge_attributes(C, name='weight', values=0) + C.add_edges_from( + (mu, mv, {"weight": w, "generator": uv}) + for (mu, mv), uv, w in candidate_mapping + ) + # Find MST of the meta graph + meta_mst = nx.minimum_spanning_tree(C) + if not partial and not nx.is_connected(meta_mst): + raise nx.NetworkXUnfeasible("Not possible to connect G with available edges") + # Yield the edge that generated the meta-edge + for mu, mv, d in meta_mst.edges(data=True): + if "generator" in d: + edge = d["generator"] + yield edge + + +@nx._dispatch +def unconstrained_bridge_augmentation(G): + """Finds an optimal 2-edge-augmentation of G using the fewest edges. + + This is an implementation of the algorithm detailed in [1]_. + The basic idea is to construct a meta-graph of bridge-ccs, connect leaf + nodes of the trees to connect the entire graph, and finally connect the + leafs of the tree in dfs-preorder to bridge connect the entire graph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Yields + ------ + edge : tuple + Edges in the bridge augmentation of G + + Notes + ----- + Input: a graph G. + First find the bridge components of G and collapse each bridge-cc into a + node of a metagraph graph C, which is guaranteed to be a forest of trees. + + C contains p "leafs" --- nodes with exactly one incident edge. + C contains q "isolated nodes" --- nodes with no incident edges. + + Theorem: If p + q > 1, then at least :math:`ceil(p / 2) + q` edges are + needed to bridge connect C. This algorithm achieves this min number. + + The method first adds enough edges to make G into a tree and then pairs + leafs in a simple fashion. + + Let n be the number of trees in C. Let v(i) be an isolated vertex in the + i-th tree if one exists, otherwise it is a pair of distinct leafs nodes + in the i-th tree. Alternating edges from these sets (i.e. adding edges + A1 = [(v(i)[0], v(i + 1)[1]), v(i + 1)[0], v(i + 2)[1])...]) connects C + into a tree T. This tree has p' = p + 2q - 2(n -1) leafs and no isolated + vertices. A1 has n - 1 edges. The next step finds ceil(p' / 2) edges to + biconnect any tree with p' leafs. + + Convert T into an arborescence T' by picking an arbitrary root node with + degree >= 2 and directing all edges away from the root. Note the + implementation implicitly constructs T'. + + The leafs of T are the nodes with no existing edges in T'. 
+ Order the leafs of T' by DFS preorder. Then break this list in half + and add the zipped pairs to A2. + + The set A = A1 + A2 is the minimum augmentation in the metagraph. + + To convert this to edges in the original graph + + References + ---------- + .. [1] Eswaran, Kapali P., and R. Endre Tarjan. (1975) Augmentation problems. + http://epubs.siam.org/doi/abs/10.1137/0205044 + + See Also + -------- + :func:`bridge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 7)] + >>> G = nx.path_graph((1, 2, 3, 2, 4, 5, 6, 7)) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 3), (3, 7)] + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> G.add_node(4) + >>> sorted(unconstrained_bridge_augmentation(G)) + [(1, 4), (4, 0)] + """ + # ----- + # Mapping of terms from (Eswaran and Tarjan): + # G = G_0 - the input graph + # C = G_0' - the bridge condensation of G. (This is a forest of trees) + # A1 = A_1 - the edges to connect the forest into a tree + # leaf = pendant - a node with degree of 1 + + # alpha(v) = maps the node v in G to its meta-node in C + # beta(x) = maps the meta-node x in C to any node in the bridge + # component of G corresponding to x. + + # find the 2-edge-connected components of G + bridge_ccs = list(nx.connectivity.bridge_components(G)) + # condense G into an forest C + C = collapse(G, bridge_ccs) + + # Choose pairs of distinct leaf nodes in each tree. If this is not + # possible then make a pair using the single isolated node in the tree. + vset1 = [ + tuple(cc) * 2 # case1: an isolated node + if len(cc) == 1 + else sorted(cc, key=C.degree)[0:2] # case2: pair of leaf nodes + for cc in nx.connected_components(C) + ] + if len(vset1) > 1: + # Use this set to construct edges that connect C into a tree. + nodes1 = [vs[0] for vs in vset1] + nodes2 = [vs[1] for vs in vset1] + A1 = list(zip(nodes1[1:], nodes2)) + else: + A1 = [] + # Connect each tree in the forest to construct an arborescence + T = C.copy() + T.add_edges_from(A1) + + # If there are only two leaf nodes, we simply connect them. + leafs = [n for n, d in T.degree() if d == 1] + if len(leafs) == 1: + A2 = [] + if len(leafs) == 2: + A2 = [tuple(leafs)] + else: + # Choose an arbitrary non-leaf root + try: + root = next(n for n, d in T.degree() if d > 1) + except StopIteration: # no nodes found with degree > 1 + return + # order the leaves of C by (induced directed) preorder + v2 = [n for n in nx.dfs_preorder_nodes(T, root) if T.degree(n) == 1] + # connecting first half of the leafs in pre-order to the second + # half will bridge connect the tree with the fewest edges. 
+ half = math.ceil(len(v2) / 2) + A2 = list(zip(v2[:half], v2[-half:])) + + # collect the edges used to augment the original forest + aug_tree_edges = A1 + A2 + + # Construct the mapping (beta) from meta-nodes to regular nodes + inverse = defaultdict(list) + for k, v in C.graph["mapping"].items(): + inverse[v].append(k) + # sort so we choose minimum degree nodes first + inverse = { + mu: sorted(mapped, key=lambda u: (G.degree(u), u)) + for mu, mapped in inverse.items() + } + + # For each meta-edge, map back to an arbitrary pair in the original graph + G2 = G.copy() + for mu, mv in aug_tree_edges: + # Find the first available edge that doesn't exist and return it + for u, v in it.product(inverse[mu], inverse[mv]): + if not G2.has_edge(u, v): + G2.add_edge(u, v) + yield u, v + break + + +@nx._dispatch +def weighted_bridge_augmentation(G, avail, weight=None): + """Finds an approximate min-weight 2-edge-augmentation of G. + + This is an implementation of the approximation algorithm detailed in [1]_. + It chooses a set of edges from avail to add to G that renders it + 2-edge-connected if such a subset exists. This is done by finding a + minimum spanning arborescence of a specially constructed metagraph. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + avail : set of 2 or 3 tuples. + candidate edges (with optional weights) to choose from + + weight : string + key to use to find weights if avail is a set of 3-tuples where the + third item in each tuple is a dictionary. + + Yields + ------ + edge : tuple + Edges in the subset of avail chosen to bridge augment G. + + Notes + ----- + Finding a weighted 2-edge-augmentation is NP-hard. + Any edge not in ``avail`` is considered to have a weight of infinity. + The approximation factor is 2 if ``G`` is connected and 3 if it is not. + Runs in :math:`O(m + n log(n))` time + + References + ---------- + .. [1] Khuller, Samir, and Ramakrishna Thurimella. (1993) Approximation + algorithms for graph augmentation. + http://www.sciencedirect.com/science/article/pii/S0196677483710102 + + See Also + -------- + :func:`bridge_augmentation` + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> # When the weights are equal, (1, 4) is the best + >>> avail = [(1, 4, 1), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 4)] + >>> # Giving (1, 4) a high weight makes the two edge solution the best. + >>> avail = [(1, 4, 1000), (1, 3, 1), (2, 4, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail)) + [(1, 3), (2, 4)] + >>> # ------ + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> G.add_node(5) + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 1)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (4, 5)] + >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)] + >>> sorted(weighted_bridge_augmentation(G, avail=avail)) + [(1, 5), (2, 5), (4, 5)] + """ + + if weight is None: + weight = "weight" + + # If input G is not connected the approximation factor increases to 3 + if not nx.is_connected(G): + H = G.copy() + connectors = list(one_edge_augmentation(H, avail=avail, weight=weight)) + H.add_edges_from(connectors) + + yield from connectors + else: + connectors = [] + H = G + + if len(avail) == 0: + if nx.has_bridges(H): + raise nx.NetworkXUnfeasible("no augmentation possible") + + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=H) + + # Collapse input into a metagraph. 
Meta nodes are bridge-ccs + bridge_ccs = nx.connectivity.bridge_components(H) + C = collapse(H, bridge_ccs) + + # Use the meta graph to shrink avail to a small feasible subset + mapping = C.graph["mapping"] + # Choose the minimum weight feasible edge in each group + meta_to_wuv = { + (mu, mv): (w, uv) + for (mu, mv), uv, w in _lightest_meta_edges(mapping, avail_uv, avail_w) + } + + # Mapping of terms from (Khuller and Thurimella): + # C : G_0 = (V, E^0) + # This is the metagraph where each node is a 2-edge-cc in G. + # The edges in C represent bridges in the original graph. + # (mu, mv) : E - E^0 # they group both avail and given edges in E + # T : \Gamma + # D : G^D = (V, E_D) + + # The paper uses ancestor because children point to parents, which is + # contrary to networkx standards. So, we actually need to run + # nx.least_common_ancestor on the reversed Tree. + + # Pick an arbitrary leaf from C as the root + try: + root = next(n for n, d in C.degree() if d == 1) + except StopIteration: # no nodes found with degree == 1 + return + # Root C into a tree TR by directing all edges away from the root + # Note in their paper T directs edges towards the root + TR = nx.dfs_tree(C, root) + + # Add to D the directed edges of T and set their weight to zero + # This indicates that it costs nothing to use edges that were given. + D = nx.reverse(TR).copy() + + nx.set_edge_attributes(D, name="weight", values=0) + + # The LCA of mu and mv in T is the shared ancestor of mu and mv that is + # located farthest from the root. + lca_gen = nx.tree_all_pairs_lowest_common_ancestor( + TR, root=root, pairs=meta_to_wuv.keys() + ) + + for (mu, mv), lca in lca_gen: + w, uv = meta_to_wuv[(mu, mv)] + if lca == mu: + # If u is an ancestor of v in TR, then add edge u->v to D + D.add_edge(lca, mv, weight=w, generator=uv) + elif lca == mv: + # If v is an ancestor of u in TR, then add edge v->u to D + D.add_edge(lca, mu, weight=w, generator=uv) + else: + # If neither u nor v is a ancestor of the other in TR + # let t = lca(TR, u, v) and add edges t->u and t->v + # Track the original edge that GENERATED these edges. + D.add_edge(lca, mu, weight=w, generator=uv) + D.add_edge(lca, mv, weight=w, generator=uv) + + # Then compute a minimum rooted branching + try: + # Note the original edges must be directed towards to root for the + # branching to give us a bridge-augmentation. + A = _minimum_rooted_branching(D, root) + except nx.NetworkXException as err: + # If there is no branching then augmentation is not possible + raise nx.NetworkXUnfeasible("no 2-edge-augmentation possible") from err + + # For each edge e, in the branching that did not belong to the directed + # tree T, add the corresponding edge that **GENERATED** it (this is not + # necessarily e itself!) + + # ensure the third case does not generate edges twice + bridge_connectors = set() + for mu, mv in A.edges(): + data = D.get_edge_data(mu, mv) + if "generator" in data: + # Add the avail edge that generated the branching edge. + edge = data["generator"] + bridge_connectors.add(edge) + + yield from bridge_connectors + + +def _minimum_rooted_branching(D, root): + """Helper function to compute a minimum rooted branching (aka rooted + arborescence) + + Before the branching can be computed, the directed graph must be rooted by + removing the predecessors of root. + + A branching / arborescence of rooted graph G is a subgraph that contains a + directed path from the root to every other vertex. It is the directed + analog of the minimum spanning tree problem. 
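+
+    Examples
+    --------
+    A small doctest sketch (edge weights chosen by hand; removing the
+    predecessor edge ``(1, 0)`` roots the graph at 0):
+
+    >>> D = nx.DiGraph()
+    >>> D.add_weighted_edges_from([(0, 1, 1), (0, 2, 1), (2, 1, 5), (1, 0, 1)])
+    >>> A = _minimum_rooted_branching(D, 0)
+    >>> sorted(A.edges())
+    [(0, 1), (0, 2)]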
+ + References + ---------- + [1] Khuller, Samir (2002) Advanced Algorithms Lecture 24 Notes. + https://web.archive.org/web/20121030033722/https://www.cs.umd.edu/class/spring2011/cmsc651/lec07.pdf + """ + rooted = D.copy() + # root the graph by removing all predecessors to `root`. + rooted.remove_edges_from([(u, root) for u in D.predecessors(root)]) + # Then compute the branching / arborescence. + A = nx.minimum_spanning_arborescence(rooted) + return A + + +@nx._dispatch +def collapse(G, grouped_nodes): + """Collapses each group of nodes into a single node. + + This is similar to condensation, but works on undirected graphs. + + Parameters + ---------- + G : NetworkX Graph + + grouped_nodes: list or generator + Grouping of nodes to collapse. The grouping must be disjoint. + If grouped_nodes are strongly_connected_components then this is + equivalent to :func:`condensation`. + + Returns + ------- + C : NetworkX Graph + The collapsed graph C of G with respect to the node grouping. The node + labels are integers corresponding to the index of the component in the + list of grouped_nodes. C has a graph attribute named 'mapping' with a + dictionary mapping the original nodes to the nodes in C to which they + belong. Each node in C also has a node attribute 'members' with the set + of original nodes in G that form the group that the node in C + represents. + + Examples + -------- + >>> # Collapses a graph using disjoint groups, but not necessarily connected + >>> G = nx.Graph([(1, 0), (2, 3), (3, 1), (3, 4), (4, 5), (5, 6), (5, 7)]) + >>> G.add_node("A") + >>> grouped_nodes = [{0, 1, 2, 3}, {5, 6, 7}] + >>> C = collapse(G, grouped_nodes) + >>> members = nx.get_node_attributes(C, "members") + >>> sorted(members.keys()) + [0, 1, 2, 3] + >>> member_values = set(map(frozenset, members.values())) + >>> assert {0, 1, 2, 3} in member_values + >>> assert {4} in member_values + >>> assert {5, 6, 7} in member_values + >>> assert {"A"} in member_values + """ + mapping = {} + members = {} + C = G.__class__() + i = 0 # required if G is empty + remaining = set(G.nodes()) + for i, group in enumerate(grouped_nodes): + group = set(group) + assert remaining.issuperset( + group + ), "grouped nodes must exist in G and be disjoint" + remaining.difference_update(group) + members[i] = group + mapping.update((n, i) for n in group) + # remaining nodes are in their own group + for i, node in enumerate(remaining, start=i + 1): + group = {node} + members[i] = group + mapping.update((n, i) for n in group) + number_of_groups = i + 1 + C.add_nodes_from(range(number_of_groups)) + C.add_edges_from( + (mapping[u], mapping[v]) for u, v in G.edges() if mapping[u] != mapping[v] + ) + # Add a list of members (ie original nodes) to each node (ie scc) in C. 
+ nx.set_node_attributes(C, name="members", values=members) + # Add mapping dict as graph attribute + C.graph["mapping"] = mapping + return C + + +@nx._dispatch +def complement_edges(G): + """Returns only the edges in the complement of G + + Parameters + ---------- + G : NetworkX Graph + + Yields + ------ + edge : tuple + Edges in the complement of G + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4)) + >>> sorted(complement_edges(G)) + [(1, 3), (1, 4), (2, 4)] + >>> G = nx.path_graph((1, 2, 3, 4), nx.DiGraph()) + >>> sorted(complement_edges(G)) + [(1, 3), (1, 4), (2, 1), (2, 4), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)] + >>> G = nx.complete_graph(1000) + >>> sorted(complement_edges(G)) + [] + """ + G_adj = G._adj # Store as a variable to eliminate attribute lookup + if G.is_directed(): + for u, v in it.combinations(G.nodes(), 2): + if v not in G_adj[u]: + yield (u, v) + if u not in G_adj[v]: + yield (v, u) + else: + for u, v in it.combinations(G.nodes(), 2): + if v not in G_adj[u]: + yield (u, v) + + +def _compat_shuffle(rng, input): + """wrapper around rng.shuffle for python 2 compatibility reasons""" + rng.shuffle(input) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@py_random_state(4) +@nx._dispatch +def greedy_k_edge_augmentation(G, k, avail=None, weight=None, seed=None): + """Greedy algorithm for finding a k-edge-augmentation + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + k : integer + Desired edge connectivity + + avail : dict or a set of 2 or 3 tuples + For more details, see :func:`k_edge_augmentation`. + + weight : string + key to use to find weights if ``avail`` is a set of 3-tuples. + For more details, see :func:`k_edge_augmentation`. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Yields + ------ + edge : tuple + Edges in the greedy augmentation of G + + Notes + ----- + The algorithm is simple. Edges are incrementally added between parts of the + graph that are not yet locally k-edge-connected. Then edges are from the + augmenting set are pruned as long as local-edge-connectivity is not broken. + + This algorithm is greedy and does not provide optimality guarantees. It + exists only to provide :func:`k_edge_augmentation` with the ability to + generate a feasible solution for arbitrary k. + + See Also + -------- + :func:`k_edge_augmentation` + + Examples + -------- + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> sorted(greedy_k_edge_augmentation(G, k=2)) + [(1, 7)] + >>> sorted(greedy_k_edge_augmentation(G, k=1, avail=[])) + [] + >>> G = nx.path_graph((1, 2, 3, 4, 5, 6, 7)) + >>> avail = {(u, v): 1 for (u, v) in complement_edges(G)} + >>> # randomized pruning process can produce different solutions + >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=2)) + [(1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (2, 4), (2, 6), (3, 7), (5, 7)] + >>> sorted(greedy_k_edge_augmentation(G, k=4, avail=avail, seed=3)) + [(1, 3), (1, 5), (1, 6), (2, 4), (2, 6), (3, 7), (4, 7), (5, 7)] + """ + # Result set + aug_edges = [] + + done = is_k_edge_connected(G, k) + if done: + return + if avail is None: + # all edges are available + avail_uv = list(complement_edges(G)) + avail_w = [1] * len(avail_uv) + else: + # Get the unique set of unweighted edges + avail_uv, avail_w = _unpack_available_edges(avail, weight=weight, G=G) + + # Greedy: order lightest edges. 
Use degree sum to tie-break
+    tiebreaker = [sum(map(G.degree, uv)) for uv in avail_uv]
+    avail_wduv = sorted(zip(avail_w, tiebreaker, avail_uv))
+    avail_uv = [uv for w, d, uv in avail_wduv]
+
+    # Incrementally add edges until we are k-edge-connected
+    H = G.copy()
+    for u, v in avail_uv:
+        done = False
+        if not is_locally_k_edge_connected(H, u, v, k=k):
+            # Only add edges in parts that are not yet locally k-edge-connected
+            aug_edges.append((u, v))
+            H.add_edge(u, v)
+            # Did adding this edge help?
+            if H.degree(u) >= k and H.degree(v) >= k:
+                done = is_k_edge_connected(H, k)
+        if done:
+            break
+
+    # Check for feasibility
+    if not done:
+        raise nx.NetworkXUnfeasible("not able to k-edge-connect with available edges")
+
+    # Randomized attempt to reduce the size of the solution
+    _compat_shuffle(seed, aug_edges)
+    for u, v in list(aug_edges):
+        # Don't remove if we know it would break connectivity
+        if H.degree(u) <= k or H.degree(v) <= k:
+            continue
+        H.remove_edge(u, v)
+        aug_edges.remove((u, v))
+        if not is_k_edge_connected(H, k=k):
+            # If removing this edge breaks feasibility, undo
+            H.add_edge(u, v)
+            aug_edges.append((u, v))
+
+    # Generate results
+    yield from aug_edges
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..e602c33aaeed31266293feee93d12429ebe05f7b
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/edge_kcomponents.py
@@ -0,0 +1,584 @@
+"""
+Algorithms for finding k-edge-connected components and subgraphs.
+
+A k-edge-connected component (k-edge-cc) is a maximal set of nodes in G, such
+that all pairs of nodes have an edge-connectivity of at least k.
+
+A k-edge-connected subgraph (k-edge-subgraph) is a maximal set of nodes in G,
+such that the subgraph of G defined by the nodes has an edge-connectivity at
+least k.
+"""
+import itertools as it
+from functools import partial
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = [
+    "k_edge_components",
+    "k_edge_subgraphs",
+    "bridge_components",
+    "EdgeComponentAuxGraph",
+]
+
+
+@not_implemented_for("multigraph")
+@nx._dispatch
+def k_edge_components(G, k):
+    """Generates nodes in each maximal k-edge-connected component in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    k : Integer
+        Desired edge connectivity
+
+    Returns
+    -------
+    k_edge_components : a generator of k-edge-ccs. Each set of returned nodes
+        will have k-edge-connectivity in the graph G.
+
+    See Also
+    --------
+    :func:`local_edge_connectivity`
+    :func:`k_edge_subgraphs` : similar to this function, but the subgraph
+        defined by the nodes must also have k-edge-connectivity.
+    :func:`k_components` : similar to this function, but uses node-connectivity
+        instead of edge-connectivity
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the input graph is a multigraph.
+
+    ValueError:
+        If k is less than 1
+
+    Notes
+    -----
+    Attempts to use the most efficient implementation available based on k.
+    If k=1, this reduces to strongly connected components for directed graphs
+    and connected components for undirected graphs.
+    If k=2 and the graph is undirected, an efficient bridge connected
+    component algorithm from _[1] is run, based on the chain decomposition.
+    Otherwise, the algorithm from _[2] is used.
+
+    Examples
+    --------
+    >>> import itertools as it
+    >>> from networkx.utils import pairwise
+    >>> paths = [
+    ...     (1, 2, 4, 3, 1, 4),
+    ...     (5, 6, 7, 8, 5, 7, 8, 6),
+    ... ]
+    >>> G = nx.Graph()
+    >>> G.add_nodes_from(it.chain(*paths))
+    >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+    >>> # note this returns {1, 4} unlike k_edge_subgraphs
+    >>> sorted(map(sorted, nx.k_edge_components(G, k=3)))
+    [[1, 4], [2], [3], [5, 6, 7, 8]]
+
+    References
+    ----------
+    .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29
+    .. [2] Wang, Tianhao, et al. (2015) A simple algorithm for finding all
+        k-edge-connected components.
+        http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
+    """
+    # Compute k-edge-ccs using the most efficient algorithms available.
+    if k < 1:
+        raise ValueError("k cannot be less than 1")
+    if G.is_directed():
+        if k == 1:
+            return nx.strongly_connected_components(G)
+        else:
+            # TODO: investigate https://arxiv.org/abs/1412.6466 for k=2
+            aux_graph = EdgeComponentAuxGraph.construct(G)
+            return aux_graph.k_edge_components(k)
+    else:
+        if k == 1:
+            return nx.connected_components(G)
+        elif k == 2:
+            return bridge_components(G)
+        else:
+            aux_graph = EdgeComponentAuxGraph.construct(G)
+            return aux_graph.k_edge_components(k)
+
+
+@not_implemented_for("multigraph")
+@nx._dispatch
+def k_edge_subgraphs(G, k):
+    """Generates nodes in each maximal k-edge-connected subgraph in G.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    k : Integer
+        Desired edge connectivity
+
+    Returns
+    -------
+    k_edge_subgraphs : a generator of k-edge-subgraphs
+        Each k-edge-subgraph is a maximal set of nodes that defines a subgraph
+        of G that is k-edge-connected.
+
+    See Also
+    --------
+    :func:`edge_connectivity`
+    :func:`k_edge_components` : similar to this function, but nodes only
+        need to have k-edge-connectivity within the graph G and the subgraphs
+        might not be k-edge-connected.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        If the input graph is a multigraph.
+
+    ValueError:
+        If k is less than 1
+
+    Notes
+    -----
+    Attempts to use the most efficient implementation available based on k.
+    If k=1, or k=2 and the graph is undirected, then this simply calls
+    `k_edge_components`. Otherwise the algorithm from _[1] is used.
+
+    Examples
+    --------
+    >>> import itertools as it
+    >>> from networkx.utils import pairwise
+    >>> paths = [
+    ...     (1, 2, 4, 3, 1, 4),
+    ...     (5, 6, 7, 8, 5, 7, 8, 6),
+    ... ]
+    >>> G = nx.Graph()
+    >>> G.add_nodes_from(it.chain(*paths))
+    >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+    >>> # note this does not return {1, 4} unlike k_edge_components
+    >>> sorted(map(sorted, nx.k_edge_subgraphs(G, k=3)))
+    [[1], [2], [3], [4], [5, 6, 7, 8]]
+
+    References
+    ----------
+    .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs
+        from a large graph. ACM International Conference on Extending Database
+        Technology 2012 480--491.
+        https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf
+    """
+    if k < 1:
+        raise ValueError("k cannot be less than 1")
+    if G.is_directed():
+        if k <= 1:
+            # For directed graphs,
+            # when k == 1, k-edge-ccs and k-edge-subgraphs are the same
+            return k_edge_components(G, k)
+        else:
+            return _k_edge_subgraphs_nodes(G, k)
+    else:
+        if k <= 2:
+            # For undirected graphs,
+            # when k <= 2, k-edge-ccs and k-edge-subgraphs are the same
+            return k_edge_components(G, k)
+        else:
+            return _k_edge_subgraphs_nodes(G, k)
+
+
+def _k_edge_subgraphs_nodes(G, k):
+    """Helper to get the nodes from the subgraphs.
+
+    This allows k_edge_subgraphs to return a generator.
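+
+    A tiny illustrative doctest (``k_edge_subgraphs`` delegates here for
+    ``k > 2`` on undirected graphs):
+
+    >>> G = nx.complete_graph(4)
+    >>> sorted(map(sorted, _k_edge_subgraphs_nodes(G, k=3)))
+    [[0, 1, 2, 3]]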
+ """ + for C in general_k_edge_subgraphs(G, k): + yield set(C.nodes()) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def bridge_components(G): + """Finds all bridge-connected components G. + + Parameters + ---------- + G : NetworkX undirected graph + + Returns + ------- + bridge_components : a generator of 2-edge-connected components + + + See Also + -------- + :func:`k_edge_subgraphs` : this function is a special case for an + undirected graph where k=2. + :func:`biconnected_components` : similar to this function, but is defined + using 2-node-connectivity instead of 2-edge-connectivity. + + Raises + ------ + NetworkXNotImplemented + If the input graph is directed or a multigraph. + + Notes + ----- + Bridge-connected components are also known as 2-edge-connected components. + + Examples + -------- + >>> # The barbell graph with parameter zero has a single bridge + >>> G = nx.barbell_graph(5, 0) + >>> from networkx.algorithms.connectivity.edge_kcomponents import bridge_components + >>> sorted(map(sorted, bridge_components(G))) + [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + """ + H = G.copy() + H.remove_edges_from(nx.bridges(G)) + yield from nx.connected_components(H) + + +class EdgeComponentAuxGraph: + r"""A simple algorithm to find all k-edge-connected components in a graph. + + Constructing the auxiliary graph (which may take some time) allows for the + k-edge-ccs to be found in linear time for arbitrary k. + + Notes + ----- + This implementation is based on [1]_. The idea is to construct an auxiliary + graph from which the k-edge-ccs can be extracted in linear time. The + auxiliary graph is constructed in $O(|V|\cdot F)$ operations, where F is the + complexity of max flow. Querying the components takes an additional $O(|V|)$ + operations. This algorithm can be slow for large graphs, but it handles an + arbitrary k and works for both directed and undirected inputs. + + The undirected case for k=1 is exactly connected components. + The undirected case for k=2 is exactly bridge connected components. + The directed case for k=1 is exactly strongly connected components. + + References + ---------- + .. [1] Wang, Tianhao, et al. (2015) A simple algorithm for finding all + k-edge-connected components. + http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + + Examples + -------- + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph + >>> # Build an interesting graph with multiple levels of k-edge-ccs + >>> paths = [ + ... (1, 2, 3, 4, 1, 3, 4, 2), # a 3-edge-cc (a 4 clique) + ... (5, 6, 7, 5), # a 2-edge-cc (a 3 clique) + ... (1, 5), # combine first two ccs into a 1-edge-cc + ... (0,), # add an additional disconnected 1-edge-cc + ... 
] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> # Constructing the AuxGraph takes about O(n ** 4) + >>> aux_graph = EdgeComponentAuxGraph.construct(G) + >>> # Once constructed, querying takes O(n) + >>> sorted(map(sorted, aux_graph.k_edge_components(k=1))) + [[0], [1, 2, 3, 4, 5, 6, 7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=2))) + [[0], [1, 2, 3, 4], [5, 6, 7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=3))) + [[0], [1, 2, 3, 4], [5], [6], [7]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=4))) + [[0], [1], [2], [3], [4], [5], [6], [7]] + + The auxiliary graph is primarily used for k-edge-ccs but it + can also speed up the queries of k-edge-subgraphs by refining the + search space. + + >>> import itertools as it + >>> from networkx.utils import pairwise + >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph + >>> paths = [ + ... (1, 2, 4, 3, 1, 4), + ... ] + >>> G = nx.Graph() + >>> G.add_nodes_from(it.chain(*paths)) + >>> G.add_edges_from(it.chain(*[pairwise(path) for path in paths])) + >>> aux_graph = EdgeComponentAuxGraph.construct(G) + >>> sorted(map(sorted, aux_graph.k_edge_subgraphs(k=3))) + [[1], [2], [3], [4]] + >>> sorted(map(sorted, aux_graph.k_edge_components(k=3))) + [[1, 4], [2], [3]] + """ + + # @not_implemented_for('multigraph') # TODO: fix decor for classmethods + @classmethod + def construct(EdgeComponentAuxGraph, G): + """Builds an auxiliary graph encoding edge-connectivity between nodes. + + Notes + ----- + Given G=(V, E), initialize an empty auxiliary graph A. + Choose an arbitrary source node s. Initialize a set N of available + nodes (that can be used as the sink). The algorithm picks an + arbitrary node t from N - {s}, and then computes the minimum st-cut + (S, T) with value w. If G is directed the minimum of the st-cut or + the ts-cut is used instead. Then, the edge (s, t) is added to the + auxiliary graph with weight w. The algorithm is called recursively + first using S as the available nodes and s as the source, and then + using T and t. Recursion stops when the source is the only available + node. + + Parameters + ---------- + G : NetworkX graph + """ + # workaround for classmethod decorator + not_implemented_for("multigraph")(lambda G: G)(G) + + def _recursive_build(H, A, source, avail): + # Terminate once the flow has been compute to every node. 
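+            # ("avail" shrinks with each recursive call below; recursion
+            # stops when the source is the only node left to consider.)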
+            if {source} == avail:
+                return
+            # pick an arbitrary node as the sink
+            sink = arbitrary_element(avail - {source})
+            # find the minimum cut and its weight
+            value, (S, T) = nx.minimum_cut(H, source, sink)
+            if H.is_directed():
+                # check if the reverse direction has a smaller cut
+                value_, (T_, S_) = nx.minimum_cut(H, sink, source)
+                if value_ < value:
+                    value, S, T = value_, S_, T_
+            # add edge with weight of cut to the aux graph
+            A.add_edge(source, sink, weight=value)
+            # recursively call until all but one node is used
+            _recursive_build(H, A, source, avail.intersection(S))
+            _recursive_build(H, A, sink, avail.intersection(T))
+
+        # Copy input to ensure all edges have unit capacity
+        H = G.__class__()
+        H.add_nodes_from(G.nodes())
+        H.add_edges_from(G.edges(), capacity=1)
+
+        # A is the auxiliary graph to be constructed
+        # It is a weighted undirected tree
+        A = nx.Graph()
+
+        # Pick an arbitrary node as the source
+        if H.number_of_nodes() > 0:
+            source = arbitrary_element(H.nodes())
+            # Initialize a set of elements that can be chosen as the sink
+            avail = set(H.nodes())
+
+            # This constructs A
+            _recursive_build(H, A, source, avail)
+
+        # This class is a container that holds the auxiliary graph A and
+        # provides access to the k_edge_components function.
+        self = EdgeComponentAuxGraph()
+        self.A = A
+        self.H = H
+        return self
+
+    def k_edge_components(self, k):
+        """Queries the auxiliary graph for k-edge-connected components.
+
+        Parameters
+        ----------
+        k : Integer
+            Desired edge connectivity
+
+        Returns
+        -------
+        k_edge_components : a generator of k-edge-ccs
+
+        Notes
+        -----
+        Given the auxiliary graph, the k-edge-connected components can be
+        determined in linear time by removing all edges with weights less than
+        k from the auxiliary graph. The resulting connected components are the
+        k-edge-ccs in the original graph.
+        """
+        if k < 1:
+            raise ValueError("k cannot be less than 1")
+        A = self.A
+        # "traverse the auxiliary graph A and delete all edges with weights less
+        # than k"
+        aux_weights = nx.get_edge_attributes(A, "weight")
+        # Create a relevant graph with the auxiliary edges with weights >= k
+        R = nx.Graph()
+        R.add_nodes_from(A.nodes())
+        R.add_edges_from(e for e, w in aux_weights.items() if w >= k)
+
+        # Return the nodes that are k-edge-connected in the original graph
+        yield from nx.connected_components(R)
+
+    def k_edge_subgraphs(self, k):
+        """Queries the auxiliary graph for k-edge-connected subgraphs.
+
+        Parameters
+        ----------
+        k : Integer
+            Desired edge connectivity
+
+        Returns
+        -------
+        k_edge_subgraphs : a generator of k-edge-subgraphs
+
+        Notes
+        -----
+        Refines the k-edge-ccs into k-edge-subgraphs. The running time is more
+        than $O(|V|)$.
+
+        For single values of k it is faster to use `nx.k_edge_subgraphs`.
+        But for multiple values of k, it can be faster to build the AuxGraph
+        once and then use this method.
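+
+        Examples
+        --------
+        A small doctest sketch (a 4-cycle is 2-edge-connected, so only
+        ``k <= 2`` keeps it whole):
+
+        >>> from networkx.algorithms.connectivity import EdgeComponentAuxGraph
+        >>> G = nx.cycle_graph(4)
+        >>> aux = EdgeComponentAuxGraph.construct(G)
+        >>> sorted(map(sorted, aux.k_edge_subgraphs(k=2)))
+        [[0, 1, 2, 3]]
+        >>> sorted(map(sorted, aux.k_edge_subgraphs(k=3)))
+        [[0], [1], [2], [3]]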
+ """ + if k < 1: + raise ValueError("k cannot be less than 1") + H = self.H + A = self.A + # "traverse the auxiliary graph A and delete all edges with weights less + # than k" + aux_weights = nx.get_edge_attributes(A, "weight") + # Create a relevant graph with the auxiliary edges with weights >= k + R = nx.Graph() + R.add_nodes_from(A.nodes()) + R.add_edges_from(e for e, w in aux_weights.items() if w >= k) + + # Return the components whose subgraphs are k-edge-connected + for cc in nx.connected_components(R): + if len(cc) < k: + # Early return optimization + for node in cc: + yield {node} + else: + # Call subgraph solution to refine the results + C = H.subgraph(cc) + yield from k_edge_subgraphs(C, k) + + +def _low_degree_nodes(G, k, nbunch=None): + """Helper for finding nodes with degree less than k.""" + # Nodes with degree less than k cannot be k-edge-connected. + if G.is_directed(): + # Consider both in and out degree in the directed case + seen = set() + for node, degree in G.out_degree(nbunch): + if degree < k: + seen.add(node) + yield node + for node, degree in G.in_degree(nbunch): + if node not in seen and degree < k: + seen.add(node) + yield node + else: + # Only the degree matters in the undirected case + for node, degree in G.degree(nbunch): + if degree < k: + yield node + + +def _high_degree_components(G, k): + """Helper for filtering components that can't be k-edge-connected. + + Removes and generates each node with degree less than k. Then generates + remaining components where all nodes have degree at least k. + """ + # Iteratively remove parts of the graph that are not k-edge-connected + H = G.copy() + singletons = set(_low_degree_nodes(H, k)) + while singletons: + # Only search neighbors of removed nodes + nbunch = set(it.chain.from_iterable(map(H.neighbors, singletons))) + nbunch.difference_update(singletons) + H.remove_nodes_from(singletons) + for node in singletons: + yield {node} + singletons = set(_low_degree_nodes(H, k, nbunch)) + + # Note: remaining connected components may not be k-edge-connected + if G.is_directed(): + yield from nx.strongly_connected_components(H) + else: + yield from nx.connected_components(H) + + +@nx._dispatch +def general_k_edge_subgraphs(G, k): + """General algorithm to find all maximal k-edge-connected subgraphs in G. + + Returns + ------- + k_edge_subgraphs : a generator of nx.Graphs that are k-edge-subgraphs + Each k-edge-subgraph is a maximal set of nodes that defines a subgraph + of G that is k-edge-connected. + + Notes + ----- + Implementation of the basic algorithm from _[1]. The basic idea is to find + a global minimum cut of the graph. If the cut value is at least k, then the + graph is a k-edge-connected subgraph and can be added to the results. + Otherwise, the cut is used to split the graph in two and the procedure is + applied recursively. If the graph is just a single node, then it is also + added to the results. At the end, each result is either guaranteed to be + a single node or a subgraph of G that is k-edge-connected. + + This implementation contains optimizations for reducing the number of calls + to max-flow, but there are other optimizations in _[1] that could be + implemented. + + References + ---------- + .. [1] Zhou, Liu, et al. (2012) Finding maximal k-edge-connected subgraphs + from a large graph. ACM International Conference on Extending Database + Technology 2012 480-–491. 
+ https://openproceedings.org/2012/conf/edbt/ZhouLYLCL12.pdf + + Examples + -------- + >>> from networkx.utils import pairwise + >>> paths = [ + ... (11, 12, 13, 14, 11, 13, 14, 12), # a 4-clique + ... (21, 22, 23, 24, 21, 23, 24, 22), # another 4-clique + ... # connect the cliques with high degree but low connectivity + ... (50, 13), + ... (12, 50, 22), + ... (13, 102, 23), + ... (14, 101, 24), + ... ] + >>> G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + >>> sorted(map(len, k_edge_subgraphs(G, k=3))) + [1, 1, 1, 4, 4] + """ + if k < 1: + raise ValueError("k cannot be less than 1") + + # Node pruning optimization (incorporates early return) + # find_ccs is either connected_components/strongly_connected_components + find_ccs = partial(_high_degree_components, k=k) + + # Quick return optimization + if G.number_of_nodes() < k: + for node in G.nodes(): + yield G.subgraph([node]).copy() + return + + # Intermediate results + R0 = {G.subgraph(cc).copy() for cc in find_ccs(G)} + # Subdivide CCs in the intermediate results until they are k-conn + while R0: + G1 = R0.pop() + if G1.number_of_nodes() == 1: + yield G1 + else: + # Find a global minimum cut + cut_edges = nx.minimum_edge_cut(G1) + cut_value = len(cut_edges) + if cut_value < k: + # G1 is not k-edge-connected, so subdivide it + G1.remove_edges_from(cut_edges) + for cc in find_ccs(G1): + R0.add(G1.subgraph(cc).copy()) + else: + # Otherwise we found a k-edge-connected subgraph + yield G1 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcomponents.py new file mode 100644 index 0000000000000000000000000000000000000000..19a6e486b8477be111ab0ce6b10fd17039e6fb91 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcomponents.py @@ -0,0 +1,222 @@ +""" +Moody and White algorithm for k-components +""" +from collections import defaultdict +from itertools import combinations +from operator import itemgetter + +import networkx as nx + +# Define the default maximum flow function. +from networkx.algorithms.flow import edmonds_karp +from networkx.utils import not_implemented_for + +default_flow_func = edmonds_karp + +__all__ = ["k_components"] + + +@not_implemented_for("directed") +@nx._dispatch +def k_components(G, flow_func=None): + r"""Returns the k-component structure of a graph G. + + A `k`-component is a maximal subgraph of a graph G that has, at least, + node connectivity `k`: we need to remove at least `k` nodes to break it + into more components. `k`-components have an inherent hierarchical + structure because they are nested in terms of connectivity: a connected + graph can contain several 2-components, each of which can contain + one or more 3-components, and so forth. + + Parameters + ---------- + G : NetworkX graph + + flow_func : function + Function to perform the underlying flow computations. Default value + :meth:`edmonds_karp`. This function performs better in sparse graphs with + right tailed degree distributions. :meth:`shortest_augmenting_path` will + perform better in denser graphs. + + Returns + ------- + k_components : dict + Dictionary with all connectivity levels `k` in the input Graph as keys + and a list of sets of nodes that form a k-component of level `k` as + values. + + Raises + ------ + NetworkXNotImplemented + If the input graph is directed. 
+ + Examples + -------- + >>> # Petersen graph has 10 nodes and it is triconnected, thus all + >>> # nodes are in a single component on all three connectivity levels + >>> G = nx.petersen_graph() + >>> k_components = nx.k_components(G) + + Notes + ----- + Moody and White [1]_ (appendix A) provide an algorithm for identifying + k-components in a graph, which is based on Kanevsky's algorithm [2]_ + for finding all minimum-size node cut-sets of a graph (implemented in + :meth:`all_node_cuts` function): + + 1. Compute node connectivity, k, of the input graph G. + + 2. Identify all k-cutsets at the current level of connectivity using + Kanevsky's algorithm. + + 3. Generate new graph components based on the removal of + these cutsets. Nodes in a cutset belong to both sides + of the induced cut. + + 4. If the graph is neither complete nor trivial, return to 1; + else end. + + This implementation also uses some heuristics (see [3]_ for details) + to speed up the computation. + + See also + -------- + node_connectivity + all_node_cuts + biconnected_components : special case of this function when k=2 + k_edge_components : similar to this function, but uses edge-connectivity + instead of node-connectivity + + References + ---------- + .. [1] Moody, J. and D. White (2003). Social cohesion and embeddedness: + A hierarchical conception of social groups. + American Sociological Review 68(1), 103--28. + http://www2.asanet.org/journals/ASRFeb03MoodyWhite.pdf + + .. [2] Kanevsky, A. (1993). Finding all minimum-size separating vertex + sets in a graph. Networks 23(6), 533--541. + http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract + + .. [3] Torrents, J. and F. Ferraro (2015). Structural Cohesion: + Visualization and Heuristics for Fast Computation. + https://arxiv.org/pdf/1503.04476v1 + + """ + # Dictionary with connectivity level (k) as keys and a list of + # sets of nodes that form a k-component as values. Note that + # k-components can overlap (but only k - 1 nodes). + k_components = defaultdict(list) + # Define default flow function + if flow_func is None: + flow_func = default_flow_func + # Bicomponents as a base to check for higher order k-components + for component in nx.connected_components(G): + # isolated nodes have connectivity 0 + comp = set(component) + if len(comp) > 1: + k_components[1].append(comp) + bicomponents = [G.subgraph(c) for c in nx.biconnected_components(G)] + for bicomponent in bicomponents: + bicomp = set(bicomponent) + # avoid considering dyads as bicomponents + if len(bicomp) > 2: + k_components[2].append(bicomp) + for B in bicomponents: + if len(B) <= 2: + continue + k = nx.node_connectivity(B, flow_func=flow_func) + if k > 2: + k_components[k].append(set(B)) + # Perform cuts in a DFS like order. + cuts = list(nx.all_node_cuts(B, k=k, flow_func=flow_func)) + stack = [(k, _generate_partition(B, cuts, k))] + while stack: + (parent_k, partition) = stack[-1] + try: + nodes = next(partition) + C = B.subgraph(nodes) + this_k = nx.node_connectivity(C, flow_func=flow_func) + if this_k > parent_k and this_k > 2: + k_components[this_k].append(set(C)) + cuts = list(nx.all_node_cuts(C, k=this_k, flow_func=flow_func)) + if cuts: + stack.append((this_k, _generate_partition(C, cuts, this_k))) + except StopIteration: + stack.pop() + + # This is necessary because k-components may only be reported at their + # maximum k level. 
But we want to return a dictionary in which the keys are + # connectivity levels and the values are lists of sets of components, without + # skipping any connectivity level. Also, it's possible that subsets of + # an already detected k-component appear at a level k. Checking for this + # in the while loop above penalizes the common case. Thus we also have to + # _consolidate all connectivity levels in _reconstruct_k_components. + return _reconstruct_k_components(k_components) + + + def _consolidate(sets, k): + """Merge sets that share k or more elements. + + See: http://rosettacode.org/wiki/Set_consolidation + + The iterative Python implementation posted there is + faster than this one, which pays the overhead of building a + Graph and calling nx.connected_components, but it is not + clear whether we can use it in NetworkX because the posted + code carries no license. + + """ + G = nx.Graph() + nodes = dict(enumerate(sets)) + G.add_nodes_from(nodes) + G.add_edges_from( + (u, v) for u, v in combinations(nodes, 2) if len(nodes[u] & nodes[v]) >= k + ) + for component in nx.connected_components(G): + yield set.union(*[nodes[n] for n in component]) + + + def _generate_partition(G, cuts, k): + def has_nbrs_in_partition(G, node, partition): + return any(n in partition for n in G[node]) + + components = [] + nodes = {n for n, d in G.degree() if d > k} - {n for cut in cuts for n in cut} + H = G.subgraph(nodes) + for cc in nx.connected_components(H): + component = set(cc) + for cut in cuts: + for node in cut: + if has_nbrs_in_partition(G, node, cc): + component.add(node) + if len(component) < G.order(): + components.append(component) + yield from _consolidate(components, k + 1) + + + def _reconstruct_k_components(k_comps): + result = {} + max_k = max(k_comps) + for k in reversed(range(1, max_k + 1)): + if k == max_k: + result[k] = list(_consolidate(k_comps[k], k)) + elif k not in k_comps: + result[k] = list(_consolidate(result[k + 1], k)) + else: + nodes_at_k = set.union(*k_comps[k]) + to_add = [c for c in result[k + 1] if any(n not in nodes_at_k for n in c)] + if to_add: + result[k] = list(_consolidate(k_comps[k] + to_add, k)) + else: + result[k] = list(_consolidate(k_comps[k], k)) + return result + + + def build_k_number_dict(kcomps): + result = {} + for k, comps in sorted(kcomps.items(), key=itemgetter(0)): + for comp in comps: + for node in comp: + result[node] = k + return result diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcutsets.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcutsets.py new file mode 100644 index 0000000000000000000000000000000000000000..bc04ed185893fb119957052ae9574ab58eff47c1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/kcutsets.py @@ -0,0 +1,233 @@ +""" +Kanevsky's algorithm for finding all minimum-size node k-cutsets. +""" +import copy +from collections import defaultdict +from itertools import combinations +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.flow import ( + build_residual_network, + edmonds_karp, + shortest_augmenting_path, +) + +from .utils import build_auxiliary_node_connectivity + +default_flow_func = edmonds_karp + + +__all__ = ["all_node_cuts"] + + +@nx._dispatch +def all_node_cuts(G, k=None, flow_func=None): + r"""Returns all minimum k cutsets of an undirected graph G. + + This implementation is based on Kanevsky's algorithm [1]_ for finding all + minimum-size node cut-sets of an undirected graph G; i.e. the sets + of nodes of cardinality equal to the node connectivity of G
that, if + removed, would break G into two or more connected components. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + k : Integer + Node connectivity of the input graph. If k is None, then it is + computed. Default value: None. + + flow_func : function + Function to perform the underlying flow computations. Default value is + :func:`~networkx.algorithms.flow.edmonds_karp`. This function performs + better in sparse graphs with right tailed degree distributions. + :func:`~networkx.algorithms.flow.shortest_augmenting_path` will + perform better in denser graphs. + + + Returns + ------- + cuts : a generator of node cutsets + Each node cutset has cardinality equal to the node connectivity of + the input graph. + + Examples + -------- + >>> # A two-dimensional grid graph has 4 cutsets of cardinality 2 + >>> G = nx.grid_2d_graph(5, 5) + >>> cutsets = list(nx.all_node_cuts(G)) + >>> len(cutsets) + 4 + >>> all(2 == len(cutset) for cutset in cutsets) + True + >>> nx.node_connectivity(G) + 2 + + Notes + ----- + This implementation is based on the sequential algorithm for finding all + minimum-size separating vertex sets in a graph [1]_. The main idea is to + compute minimum cuts using local maximum flow computations among a set + of nodes of highest degree and all other non-adjacent nodes in the Graph. + Once we find a minimum cut, we add an edge between the high degree + node and the target node of the local maximum flow computation to make + sure that we will not find that minimum cut again. + + See also + -------- + node_connectivity + edmonds_karp + shortest_augmenting_path + + References + ---------- + .. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex + sets in a graph. Networks 23(6), 533--541. + http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract + + """ + if not nx.is_connected(G): + raise nx.NetworkXError("Input graph is disconnected.") + + # Address some corner cases first. + # For complete graphs + if nx.density(G) == 1: + for cut_set in combinations(G, len(G) - 1): + yield set(cut_set) + return + # Initialize data structures. + # Keep track of the cuts already computed so we do not repeat them. + seen = [] + # The auxiliary digraph for node connectivity is also known as the + # Even-Tarjan reduction. + H = build_auxiliary_node_connectivity(G) + H_nodes = H.nodes # for speed + mapping = H.graph["mapping"] + # Keep a copy of the original predecessors; H will be modified later. + # Shallow copy is enough.
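+ # (H gains extra edges at the end of the main loop below; the cutset + # computation in step 7 consults original_H_pred so that those added + # edges are not mistaken for edges of the original reduction.)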
+ original_H_pred = copy.copy(H._pred) + R = build_residual_network(H, "capacity") + kwargs = {"capacity": "capacity", "residual": R} + # Define default flow function + if flow_func is None: + flow_func = default_flow_func + if flow_func is shortest_augmenting_path: + kwargs["two_phase"] = True + # Begin the actual algorithm + # step 1: Find node connectivity k of G + if k is None: + k = nx.node_connectivity(G, flow_func=flow_func) + # step 2: + # Find the k nodes with top degree; call this set X: + X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]} + # Check if X is a k-node-cutset + if _is_separating_set(G, X): + seen.append(X) + yield X + + for x in X: + # step 3: Compute local connectivity flow of x with all other + # non-adjacent nodes in G + non_adjacent = set(G) - X - set(G[x]) + for v in non_adjacent: + # step 4: compute maximum flow in an Even-Tarjan reduction H of G + # and step 5: build the associated residual network R + R = flow_func(H, f"{mapping[x]}B", f"{mapping[v]}A", **kwargs) + flow_value = R.graph["flow_value"] + + if flow_value == k: + # Find the nodes incident to the flow. + E1 = flowed_edges = [ + (u, w) for (u, w, d) in R.edges(data=True) if d["flow"] != 0 + ] + VE1 = incident_nodes = {n for edge in E1 for n in edge} + # Remove saturated edges from the residual network. + # Note that reversed edges are introduced with capacity 0 + # in the residual graph and they need to be removed too. + saturated_edges = [ + (u, w, d) + for (u, w, d) in R.edges(data=True) + if d["capacity"] == d["flow"] or d["capacity"] == 0 + ] + R.remove_edges_from(saturated_edges) + R_closure = nx.transitive_closure(R) + # step 6: shrink the strongly connected components of + # residual flow network R and call it L. + L = nx.condensation(R) + cmap = L.graph["mapping"] + inv_cmap = defaultdict(list) + for n, scc in cmap.items(): + inv_cmap[scc].append(n) + # Find the incident nodes in the condensed graph. + VE1 = {cmap[n] for n in VE1} + # step 7: Compute all antichains of L; + # they map to closed sets in H. + # Any edge in H that links a closed set is part of a cutset. + for antichain in nx.antichains(L): + # Only antichains that are subsets of incident nodes count. + # Lemma 8 in the reference. + if not set(antichain).issubset(VE1): + continue + # Nodes in an antichain of the condensation graph of + # the residual network map to a closed set of nodes that + # define a node partition of the auxiliary digraph H + # through taking all of the antichain's predecessors in the + # transitive closure. + S = set() + for scc in antichain: + S.update(inv_cmap[scc]) + S_ancestors = set() + for n in S: + S_ancestors.update(R_closure._pred[n]) + S.update(S_ancestors) + if f"{mapping[x]}B" not in S or f"{mapping[v]}A" in S: + continue + # Find the cutset that links the node partition (S,~S) in H + cutset = set() + for u in S: + cutset.update((u, w) for w in original_H_pred[u] if w not in S) + # The edges in H that form the cutset are internal edges + # (i.e. edges that represent a node of the original graph G) + if any(H_nodes[u]["id"] != H_nodes[w]["id"] for u, w in cutset): + continue + node_cut = {H_nodes[u]["id"] for u, _ in cutset} + + if len(node_cut) == k: + # The cut is invalid if it includes internal edges of + # end nodes. This is the other half of Lemma 8 in the + # reference. + if x in node_cut or v in node_cut: + continue + if node_cut not in seen: + yield node_cut + seen.append(node_cut) + + # Add an edge (x, v) to make sure that we do not + # find this cutset again.
This is equivalent + # to adding the edge in the input graph with + # G.add_edge(x, v) and then regenerating H and R: + # Add edges to the auxiliary digraph. + # See build_residual_network for the convention used + # in residual graphs. + H.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + H.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + # Add edges to the residual network. + R.add_edge(f"{mapping[x]}B", f"{mapping[v]}A", capacity=1) + R.add_edge(f"{mapping[v]}A", f"{mapping[x]}B", capacity=0) + R.add_edge(f"{mapping[v]}B", f"{mapping[x]}A", capacity=1) + R.add_edge(f"{mapping[x]}A", f"{mapping[v]}B", capacity=0) + + # Add the saturated edges again so the residual network can be reused + R.add_edges_from(saturated_edges) + + + def _is_separating_set(G, cut): + """Assumes that the input graph is connected""" + if len(cut) == len(G) - 1: + return True + + H = nx.restricted_view(G, cut, []) + if nx.is_connected(H): + return False + return True diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/stoerwagner.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/stoerwagner.py new file mode 100644 index 0000000000000000000000000000000000000000..dc95877e221d7214bc476a672c74cb19330a03ec --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/stoerwagner.py @@ -0,0 +1,150 @@ +""" +Stoer-Wagner minimum cut algorithm. +""" +from itertools import islice + +import networkx as nx + +from ...utils import BinaryHeap, arbitrary_element, not_implemented_for + +__all__ = ["stoer_wagner"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def stoer_wagner(G, weight="weight", heap=BinaryHeap): + r"""Returns the weighted minimum edge cut using the Stoer-Wagner algorithm. + + Determine the minimum edge cut of a connected graph using the + Stoer-Wagner algorithm. In weighted cases, all weights must be + nonnegative. + + The running time of the algorithm depends on the type of heap used: + + ============== ============================================= + Type of heap Running time + ============== ============================================= + Binary heap $O(n (m + n) \log n)$ + Fibonacci heap $O(nm + n^2 \log n)$ + Pairing heap $O(2^{2 \sqrt{\log \log n}} nm + n^2 \log n)$ + ============== ============================================= + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute named by the + weight parameter below. If this attribute is not present, the edge is + considered to have unit weight. + + weight : string + Name of the weight attribute of the edges. If the attribute is not + present, unit weight is assumed. Default value: 'weight'. + + heap : class + Type of heap to be used in the algorithm. It should be a subclass of + :class:`MinHeap` or implement a compatible interface. + + If a stock heap implementation is to be used, :class:`BinaryHeap` is + recommended over :class:`PairingHeap` for Python implementations without + optimized attribute accesses (e.g., CPython) despite a slower + asymptotic running time. For Python implementations with optimized + attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better + performance. Default value: :class:`BinaryHeap`. + + Returns + ------- + cut_value : integer or float + The sum of weights of edges in a minimum cut. + + partition : pair of node lists + A partitioning of the nodes that defines a minimum cut.
+ + Raises + ------ + NetworkXNotImplemented + If the graph is directed or a multigraph. + + NetworkXError + If the graph has less than two nodes, is not connected, or has a + negative-weighted edge. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge("x", "a", weight=3) + >>> G.add_edge("x", "b", weight=1) + >>> G.add_edge("a", "c", weight=3) + >>> G.add_edge("b", "c", weight=5) + >>> G.add_edge("b", "d", weight=4) + >>> G.add_edge("d", "e", weight=2) + >>> G.add_edge("c", "y", weight=2) + >>> G.add_edge("e", "y", weight=3) + >>> cut_value, partition = nx.stoer_wagner(G) + >>> cut_value + 4 + """ + n = len(G) + if n < 2: + raise nx.NetworkXError("graph has less than two nodes.") + if not nx.is_connected(G): + raise nx.NetworkXError("graph is not connected.") + + # Make a copy of the graph for internal use. + G = nx.Graph( + (u, v, {"weight": e.get(weight, 1)}) for u, v, e in G.edges(data=True) if u != v + ) + + for u, v, e in G.edges(data=True): + if e["weight"] < 0: + raise nx.NetworkXError("graph has a negative-weighted edge.") + + cut_value = float("inf") + nodes = set(G) + contractions = [] # contracted node pairs + + # Repeatedly pick a pair of nodes to contract until only one node is left. + for i in range(n - 1): + # Pick an arbitrary node u and create a set A = {u}. + u = arbitrary_element(G) + A = {u} + # Repeatedly pick the node "most tightly connected" to A and add it to + # A. The tightness of connectivity of a node not in A is defined by the + # total weight of the edges connecting it to nodes in A. + h = heap() # min-heap emulating a max-heap + for v, e in G[u].items(): + h.insert(v, -e["weight"]) + # Repeat until all but one node has been added to A. + for j in range(n - i - 2): + u = h.pop()[0] + A.add(u) + for v, e in G[u].items(): + if v not in A: + h.insert(v, h.get(v, 0) - e["weight"]) + # A and the remaining node v define a "cut of the phase". There is a + # minimum cut of the original graph that is also a cut of the phase. + # Due to contractions in earlier phases, v may in fact represent + # multiple nodes in the original graph. + v, w = h.min() + w = -w + if w < cut_value: + cut_value = w + best_phase = i + # Contract v and the last node added to A. + contractions.append((u, v)) + for w, e in G[v].items(): + if w != u: + if w not in G[u]: + G.add_edge(u, w, weight=e["weight"]) + else: + G[u][w]["weight"] += e["weight"] + G.remove_node(v) + + # Recover the optimal partitioning from the contractions.
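+ # The contracted pairs from phases before the best one, viewed as edges + # of a graph, connect exactly the nodes that were merged together; the + # nodes reachable from the node removed in the best phase's contraction + # form one side of the minimum cut.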
+ G = nx.Graph(islice(contractions, best_phase)) + v = contractions[best_phase][1] + G.add_node(v) + reachable = set(nx.single_source_shortest_path_length(G, v)) + partition = (list(reachable), list(nodes - reachable)) + + return cut_value, partition diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c55a560d28d4c69d63622b8e0acb8bcf3695d22 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..916583e60f2e8d9da1b52f64d2eb90b333808762 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ccac98e645936361a48982a1abd5df219af9ee5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34dba473c530eb4760c02a8b6473def0c790da65 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6c03752bfccd068786268a97e767eb676866973 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72d2e0a390fd976b3957a36a1161b0ef625ff061 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96c35c2d9cd6a20dc88c6fb8c21a346870874503 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1900f667ea2a699285677366caff512f12a8b8df Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc3b6305f7ea6d41e6066192cda551528b84600d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py new file mode 100644 index 0000000000000000000000000000000000000000..7aef2477d1331bcefc7e5dfdacd415b27ffcd3c8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_connectivity.py @@ -0,0 +1,421 @@ +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import ( + local_edge_connectivity, + local_node_connectivity, +) + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +# helper functions for tests + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {max_attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_average_connectivity(): + # figure 1 from: + # Beineke, L., O. Oellermann, and R. Pippert (2002). The average + # connectivity of a graph. 
Discrete mathematics 252(1-3), 31-45 + # http://www.sciencedirect.com/science/article/pii/S0012365X01001807 + G1 = nx.path_graph(3) + G1.add_edges_from([(1, 3), (1, 4)]) + G2 = nx.path_graph(3) + G2.add_edges_from([(1, 3), (1, 4), (0, 3), (0, 4), (3, 4)]) + G3 = nx.Graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G1, **kwargs) == 1, errmsg + assert nx.average_node_connectivity(G2, **kwargs) == 2.2, errmsg + assert nx.average_node_connectivity(G3, **kwargs) == 0, errmsg + + +def test_average_connectivity_directed(): + G = nx.DiGraph([(1, 3), (1, 4), (1, 5)]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.average_node_connectivity(G) == 0.25, errmsg + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + for i in range(3): + G = next(Ggen) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G, flow_func=flow_func) == 1, errmsg + + +def test_brandes_erlebach(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == local_edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 3 == nx.edge_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == local_node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, 1, 11, **kwargs), errmsg + assert 2 == nx.edge_connectivity(G, **kwargs), errmsg + assert 2 == nx.node_connectivity(G, **kwargs), errmsg + if flow_func is flow.preflow_push: + assert 3 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + else: + assert 2 == nx.edge_connectivity(G, 1, 11, cutoff=2, **kwargs), errmsg + + +def test_white_harary_1(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (vertex connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_white_harary_2(): + # Figure 8 white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.add_edge(0, 4) + # kappa <= lambda <= delta + assert 3 == min(nx.core_number(G).values()) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_complete_graphs(): + for n in range(5, 20, 5): + for flow_func in flow_funcs: + G = nx.complete_graph(n) + errmsg = f"Assertion 
failed in function: {flow_func.__name__}" + assert n - 1 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.node_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + assert n - 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert n - 1 == nx.edge_connectivity( + G.to_directed(), flow_func=flow_func + ), errmsg + + +def test_empty_graphs(): + for k in range(5, 25, 5): + G = nx.empty_graph(k) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 0 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 0 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_petersen(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_tutte(): + G = nx.tutte_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_dodecahedral(): + G = nx.dodecahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 3 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 3 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_octahedral(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 4 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 4 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_icosahedral(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 5 == nx.node_connectivity(G, flow_func=flow_func), errmsg + assert 5 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + + +def test_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.node_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_edge_missing_source(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 10, 1, flow_func=flow_func + ) + + +def test_edge_missing_target(): + G = nx.path_graph(4) + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, nx.edge_connectivity, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert nx.node_connectivity(G) == 0, errmsg + assert nx.edge_connectivity(G) == 0, errmsg + + +def test_directed_edge_connectivity(): + G = nx.cycle_graph(10, create_using=nx.DiGraph()) # only one direction + D = 
nx.cycle_graph(10).to_directed() # 2 reciprocal edges + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert 1 == nx.edge_connectivity(G, flow_func=flow_func), errmsg + assert 1 == local_edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 1 == nx.edge_connectivity(G, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, flow_func=flow_func), errmsg + assert 2 == local_edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + assert 2 == nx.edge_connectivity(D, 1, 4, flow_func=flow_func), errmsg + + +def test_cutoff(): + G = nx.complete_graph(5) + for local_func in [local_edge_connectivity, local_node_connectivity]: + for flow_func in flow_funcs: + if flow_func is flow.preflow_push: + # cutoff is not supported by preflow_push + continue + for cutoff in [3, 2, 1]: + result = local_func(G, 0, 4, flow_func=flow_func, cutoff=cutoff) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, local_node_connectivity, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.node_connectivity, nx.edge_connectivity]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) + + +def test_edge_connectivity_flow_vs_stoer_wagner(): + graph_funcs = [nx.icosahedral_graph, nx.octahedral_graph, nx.dodecahedral_graph] + for graph_func in graph_funcs: + G = graph_func() + assert nx.stoer_wagner(G)[0] == nx.edge_connectivity(G) + + +class TestAllPairsNodeConnectivity: + @classmethod + def setup_class(cls): + cls.path = nx.path_graph(7) + cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph()) + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.gnp = nx.gnp_random_graph(30, 0.1, seed=42) + cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True, seed=42) + cls.K20 = nx.complete_graph(20) + cls.K10 = nx.complete_graph(10) + cls.K5 = nx.complete_graph(5) + cls.G_list = [ + cls.path, + cls.directed_path, + cls.cycle, + cls.directed_cycle, + cls.gnp, + cls.directed_gnp, + cls.K10, + cls.K5, + cls.K20, + ] + + def test_cycles(self): + K_undir = nx.all_pairs_node_connectivity(self.cycle) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 2 + K_dir = nx.all_pairs_node_connectivity(self.directed_cycle) + for source in K_dir: + for target, k in K_dir[source].items(): + assert k == 1 + + def test_complete(self): + for G in [self.K10, self.K5, self.K20]: + K = nx.all_pairs_node_connectivity(G) + for source in K: + for target, k in K[source].items(): + assert k == len(G) - 1 + + def test_paths(self): + K_undir = nx.all_pairs_node_connectivity(self.path) + for source in K_undir: + for target, k in K_undir[source].items(): + assert k == 1 + K_dir = nx.all_pairs_node_connectivity(self.directed_path) + for source in K_dir: + for target, k in K_dir[source].items(): + if source < target: + assert k == 1 + else: + assert k == 0 + + def test_all_pairs_connectivity_nbunch(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert len(C) == len(nbunch) + + def test_all_pairs_connectivity_icosahedral(self): + G = nx.icosahedral_graph() + C = 
nx.all_pairs_node_connectivity(G) + assert all(5 == C[u][v] for u, v in itertools.combinations(G, 2)) + + def test_all_pairs_connectivity(self): + G = nx.Graph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.combinations(nodes, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_directed(self): + G = nx.DiGraph() + nodes = [0, 1, 2, 3] + nx.add_path(G, nodes) + A = {n: {} for n in G} + for u, v in itertools.permutations(nodes, 2): + A[u][v] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_combinations(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=nbunch) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) + + def test_all_pairs_connectivity_nbunch_iter(self): + G = nx.complete_graph(5) + nbunch = [0, 2, 3] + A = {n: {} for n in nbunch} + for u, v in itertools.combinations(nbunch, 2): + A[u][v] = A[v][u] = nx.node_connectivity(G, u, v) + C = nx.all_pairs_node_connectivity(G, nbunch=iter(nbunch)) + assert sorted((k, sorted(v)) for k, v in A.items()) == sorted( + (k, sorted(v)) for k, v in C.items() + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py new file mode 100644 index 0000000000000000000000000000000000000000..7a485be399d87db147f7e4567f903fb5271ad63b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_cuts.py @@ -0,0 +1,309 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity import minimum_st_edge_cut, minimum_st_node_cut +from networkx.utils import arbitrary_element + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + +# Tests for node and edge cutsets + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(1): # change 1 to 3 or more for more realizations. 
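+ # A connected graph that is not biconnected has an articulation point, + # so its minimum node cut should consist of a single node that is one + # of the articulation points.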
+ G = next(Ggen) + cut = nx.minimum_node_cut(G, flow_func=flow_func) + assert len(cut) == 1, errmsg + assert cut.pop() in set(nx.articulation_points(G)), errmsg + + +def test_brandes_erlebach_book(): + # Figure 1 chapter 7: Connectivity + # http://www.informatik.uni-augsburg.de/thi/personen/kammer/Graph_Connectivity.pdf + G = nx.Graph() + G.add_edges_from( + [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (2, 3), + (2, 6), + (3, 4), + (3, 6), + (4, 6), + (4, 7), + (5, 7), + (6, 8), + (6, 9), + (7, 8), + (7, 10), + (8, 11), + (9, 10), + (9, 11), + (10, 11), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cutsets + assert 3 == len(nx.minimum_edge_cut(G, 1, 11, **kwargs)), errmsg + edge_cut = nx.minimum_edge_cut(G, **kwargs) + # Node 5 has only two edges + assert 2 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + assert {6, 7} == minimum_st_node_cut(G, 1, 11, **kwargs), errmsg + assert {6, 7} == nx.minimum_node_cut(G, 1, 11, **kwargs), errmsg + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 2 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_white_harary_paper(): + # Figure 1b white and harary (2001) + # https://doi.org/10.1111/0081-1750.00098 + # A graph with high adhesion (edge connectivity) and low cohesion + # (node connectivity) + G = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4)) + G.remove_node(7) + for i in range(4, 7): + G.add_edge(0, i) + G = nx.disjoint_union(G, nx.complete_graph(4)) + G.remove_node(G.order() - 1) + for i in range(7, 10): + G.add_edge(0, i) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert {0} == node_cut, errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_petersen_cutset(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 3 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 3 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_octahedral_cutset(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 4 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 4 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_icosahedral_cutset(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: 
{flow_func.__name__}" + # edge cuts + edge_cut = nx.minimum_edge_cut(G, **kwargs) + assert 5 == len(edge_cut), errmsg + H = G.copy() + H.remove_edges_from(edge_cut) + assert not nx.is_connected(H), errmsg + # node cuts + node_cut = nx.minimum_node_cut(G, **kwargs) + assert 5 == len(node_cut), errmsg + H = G.copy() + H.remove_nodes_from(node_cut) + assert not nx.is_connected(H), errmsg + + +def test_node_cutset_exception(): + G = nx.Graph() + G.add_edges_from([(1, 2), (3, 4)]) + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, nx.minimum_node_cut, G, flow_func=flow_func) + + +def test_node_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_node_cut(G, flow_func=flow_func) + assert nx.node_connectivity(G) == len(cutset), errmsg + G.remove_nodes_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_edge_cutset_random_graphs(): + for flow_func in flow_funcs: + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for i in range(3): + G = nx.fast_gnp_random_graph(50, 0.25, seed=42) + if not nx.is_connected(G): + ccs = iter(nx.connected_components(G)) + start = arbitrary_element(next(ccs)) + G.add_edges_from((start, arbitrary_element(c)) for c in ccs) + cutset = nx.minimum_edge_cut(G, flow_func=flow_func) + assert nx.edge_connectivity(G) == len(cutset), errmsg + G.remove_edges_from(cutset) + assert not nx.is_connected(G), errmsg + + +def test_empty_graphs(): + G = nx.Graph() + D = nx.DiGraph() + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, G, flow_func=flow_func + ) + pytest.raises( + nx.NetworkXPointlessConcept, interface_func, D, flow_func=flow_func + ) + + +def test_unbounded(): + G = nx.complete_graph(5) + for flow_func in flow_funcs: + assert 4 == len(minimum_st_edge_cut(G, 1, 4, flow_func=flow_func)) + + +def test_missing_source(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 10, 1, flow_func=flow_func + ) + + +def test_missing_target(): + G = nx.path_graph(4) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 1, 10, flow_func=flow_func + ) + + +def test_not_weakly_connected(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def test_not_connected(): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + pytest.raises(nx.NetworkXError, interface_func, G, flow_func=flow_func) + + +def tests_min_cut_complete(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_min_cut_complete_directed(): + G = nx.complete_graph(5) + G = 
G.to_directed() + for interface_func in [nx.minimum_edge_cut, nx.minimum_node_cut]: + for flow_func in flow_funcs: + assert 4 == len(interface_func(G, flow_func=flow_func)) + + +def tests_minimum_st_node_cut(): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3, 7, 8, 11, 12]) + G.add_edges_from([(7, 11), (1, 11), (1, 12), (12, 8), (0, 1)]) + nodelist = minimum_st_node_cut(G, 7, 11) + assert nodelist == {} + + +def test_invalid_auxiliary(): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, minimum_st_node_cut, G, 0, 3, auxiliary=G) + + +def test_interface_only_source(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, s=0) + + +def test_interface_only_target(): + G = nx.complete_graph(5) + for interface_func in [nx.minimum_node_cut, nx.minimum_edge_cut]: + pytest.raises(nx.NetworkXError, interface_func, G, t=3) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..0c0fad9f5ca474a6b547a399f8f284f7ff6e33a4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_disjoint_paths.py @@ -0,0 +1,249 @@ +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.utils import pairwise + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.edmonds_karp, + flow.dinitz, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +def is_path(G, path): + return all(v in G[u] for u, v in pairwise(path)) + + +def are_edge_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + paths_edges = [list(pairwise(p)) for p in paths] + num_of_edges = sum(len(e) for e in paths_edges) + num_unique_edges = len(set.union(*[set(es) for es in paths_edges])) + if num_of_edges == num_unique_edges: + return True + return False + + +def are_node_disjoint_paths(G, paths): + if not paths: + return False + for path in paths: + assert is_path(G, path) + # first and last nodes are source and target + st = {paths[0][0], paths[0][-1]} + num_of_nodes = len([n for path in paths for n in path if n not in st]) + num_unique_nodes = len({n for path in paths for n in path if n not in st}) + if num_of_nodes == num_unique_nodes: + return True + return False + + +def test_graph_from_pr_2053(): + G = nx.Graph() + G.add_edges_from( + [ + ("A", "B"), + ("A", "D"), + ("A", "F"), + ("A", "G"), + ("B", "C"), + ("B", "D"), + ("B", "G"), + ("C", "D"), + ("C", "E"), + ("C", "Z"), + ("D", "E"), + ("D", "F"), + ("E", "F"), + ("E", "Z"), + ("F", "Z"), + ("G", "Z"), + ] + ) + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_paths = list(nx.edge_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_edge_disjoint_paths(G, edge_paths), errmsg + assert nx.edge_connectivity(G, "A", "Z") == len(edge_paths), errmsg + # node disjoint paths + node_paths = list(nx.node_disjoint_paths(G, "A", "Z", **kwargs)) + assert are_node_disjoint_paths(G, node_paths), errmsg + assert nx.node_connectivity(G, "A", "Z") == len(node_paths), errmsg + + +def test_florentine_families(): + G = nx.florentine_families_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge 
disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, "Medici", "Strozzi") == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, "Medici", "Strozzi", **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, "Medici", "Strozzi") == len(node_dpaths), errmsg + + +def test_karate(): + G = nx.karate_club_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 33, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert nx.edge_connectivity(G, 0, 33) == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 33, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert nx.node_connectivity(G, 0, 33) == len(node_dpaths), errmsg + + +def test_petersen_disjoint_paths(): + G = nx.petersen_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 3 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 3 == len(node_dpaths), errmsg + + +def test_octahedral_disjoint_paths(): + G = nx.octahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 5, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 4 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 5, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 4 == len(node_dpaths), errmsg + + +def test_icosahedral_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert 5 == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert 5 == len(node_dpaths), errmsg + + +def test_cutoff_disjoint_paths(): + G = nx.icosahedral_graph() + for flow_func in flow_funcs: + kwargs = {"flow_func": flow_func} + errmsg = f"Assertion failed in function: {flow_func.__name__}" + for cutoff in [2, 4]: + kwargs["cutoff"] = cutoff + # edge disjoint paths + edge_dpaths = list(nx.edge_disjoint_paths(G, 0, 6, **kwargs)) + assert are_edge_disjoint_paths(G, edge_dpaths), errmsg + assert cutoff == len(edge_dpaths), errmsg + # node disjoint paths + node_dpaths = list(nx.node_disjoint_paths(G, 0, 6, **kwargs)) + assert are_node_disjoint_paths(G, node_dpaths), errmsg + assert cutoff == len(node_dpaths), errmsg + + +def test_missing_source_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + 
list(nx.edge_disjoint_paths(G, 10, 1)) + + +def test_missing_source_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 10, 1)) + + +def test_missing_target_edge_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.edge_disjoint_paths(G, 1, 10)) + + +def test_missing_target_node_paths(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + list(nx.node_disjoint_paths(G, 1, 10)) + + +def test_not_weakly_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_weakly_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_not_connected_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_not_connected_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_isolated_edges(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.edge_disjoint_paths(G, 1, 5)) + + +def test_isolated_nodes(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + G.add_node(1) + nx.add_path(G, [4, 5]) + list(nx.node_disjoint_paths(G, 1, 5)) + + +def test_invalid_auxiliary(): + with pytest.raises(nx.NetworkXError): + G = nx.complete_graph(5) + list(nx.node_disjoint_paths(G, 0, 3, auxiliary=G)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..e1d92d99616ac593d3d0ed358a804732d629f62e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_augmentation.py @@ -0,0 +1,502 @@ +import itertools as it +import random + +import pytest + +import networkx as nx +from networkx.algorithms.connectivity import k_edge_augmentation +from networkx.algorithms.connectivity.edge_augmentation import ( + _unpack_available_edges, + collapse, + complement_edges, + is_k_edge_connected, + is_locally_k_edge_connected, +) +from networkx.utils import pairwise + +# This should be set to the largest k for which an efficient algorithm is +# explicitly defined. +MAX_EFFICIENT_K = 2 + + +def tarjan_bridge_graph(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. 
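+ # The graph has five 2-connected components joined by three bridges; + # removing any bridge disconnects it.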
+ # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + return G + + +def test_weight_key(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 5, 6, 7, 8, 9]) + G.add_edges_from([(3, 8), (1, 2), (2, 3)]) + impossible = {(3, 6), (3, 9)} + rng = random.Random(0) + avail_uv = list(set(complement_edges(G)) - impossible) + avail = [(u, v, {"cost": rng.random()}) for u, v in avail_uv] + + _augment_and_check(G, k=1) + _augment_and_check(G, k=1, avail=avail_uv) + _augment_and_check(G, k=1, avail=avail, weight="cost") + + _check_augmentations(G, avail, weight="cost") + + +def test_is_locally_k_edge_connected_exceptions(): + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.DiGraph(), k=0) + pytest.raises(nx.NetworkXNotImplemented, is_k_edge_connected, nx.MultiGraph(), k=0) + pytest.raises(ValueError, is_k_edge_connected, nx.Graph(), k=0) + + +def test_is_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + + G = nx.complete_graph(5) + assert is_k_edge_connected(G, k=1) + assert is_k_edge_connected(G, k=2) + assert is_k_edge_connected(G, k=3) + assert is_k_edge_connected(G, k=4) + + G = nx.compose(nx.complete_graph([0, 1, 2]), nx.complete_graph([3, 4, 5])) + assert not is_k_edge_connected(G, k=1) + assert not is_k_edge_connected(G, k=2) + assert not is_k_edge_connected(G, k=3) + + +def test_is_k_edge_connected_exceptions(): + pytest.raises( + nx.NetworkXNotImplemented, is_locally_k_edge_connected, nx.DiGraph(), 1, 2, k=0 + ) + pytest.raises( + nx.NetworkXNotImplemented, + is_locally_k_edge_connected, + nx.MultiGraph(), + 1, + 2, + k=0, + ) + pytest.raises(ValueError, is_locally_k_edge_connected, nx.Graph(), 1, 2, k=0) + + +def test_is_locally_k_edge_connected(): + G = nx.barbell_graph(10, 0) + assert is_locally_k_edge_connected(G, 5, 15, k=1) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + G = nx.Graph() + G.add_nodes_from([5, 15]) + assert not is_locally_k_edge_connected(G, 5, 15, k=2) + + +def test_null_graph(): + G = nx.Graph() + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_cliques(): + for n in range(1, 10): + G = nx.complete_graph(n) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_clique_and_node(): + for n in range(1, 10): + G = nx.complete_graph(n) + G.add_node(n + 1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_point_graph(): + G = nx.Graph() + G.add_node(1) + _check_augmentations(G, max_k=MAX_EFFICIENT_K + 2) + + +def test_edgeless_graph(): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + _check_augmentations(G) + + +def test_invalid_k(): + G = nx.Graph() + pytest.raises(ValueError, list, k_edge_augmentation(G, k=-1)) + pytest.raises(ValueError, list, k_edge_augmentation(G, k=0)) + + +def test_unfeasible(): + G = tarjan_bridge_graph() + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=1, avail=[])) + + pytest.raises(nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[])) + + pytest.raises( + nx.NetworkXUnfeasible, list, k_edge_augmentation(G, k=2, avail=[(7, 9)]) + ) + + # partial solutions should not error if real solutions are infeasible + aug_edges = 
+    assert aug_edges == [(7, 9)]
+
+    _check_augmentations(G, avail=[], max_k=MAX_EFFICIENT_K + 2)
+
+    _check_augmentations(G, avail=[(7, 9)], max_k=MAX_EFFICIENT_K + 2)
+
+
+def test_tarjan():
+    G = tarjan_bridge_graph()
+
+    aug_edges = set(_augment_and_check(G, k=2)[0])
+    print(f"aug_edges = {aug_edges!r}")
+    # can't assert exact edge equality due to nondeterministic edge order,
+    # but we do know the size of the solution must be 3
+    assert len(aug_edges) == 3
+
+    avail = [
+        (9, 7),
+        (8, 5),
+        (2, 10),
+        (6, 13),
+        (11, 18),
+        (1, 17),
+        (2, 3),
+        (16, 17),
+        (18, 14),
+        (15, 14),
+    ]
+    aug_edges = set(_augment_and_check(G, avail=avail, k=2)[0])
+
+    # Can't assert the exact length since the approximation depends on the
+    # order of a dict traversal.
+    assert len(aug_edges) <= 3 * 2
+
+    _check_augmentations(G, avail)
+
+
+def test_configuration():
+    # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497]
+    seeds = [1001, 1002, 1003, 1004]
+    for seed in seeds:
+        deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000)
+        G = nx.Graph(nx.configuration_model(deg_seq, seed=seed))
+        G.remove_edges_from(nx.selfloop_edges(G))
+        _check_augmentations(G)
+
+
+def test_shell():
+    # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425]
+    seeds = [18]
+    for seed in seeds:
+        constructor = [(12, 70, 0.8), (15, 40, 0.6)]
+        G = nx.random_shell_graph(constructor, seed=seed)
+        _check_augmentations(G)
+
+
+def test_karate():
+    G = nx.karate_club_graph()
+    _check_augmentations(G)
+
+
+def test_star():
+    G = nx.star_graph(3)
+    _check_augmentations(G)
+
+    G = nx.star_graph(5)
+    _check_augmentations(G)
+
+    G = nx.star_graph(10)
+    _check_augmentations(G)
+
+
+def test_barbell():
+    G = nx.barbell_graph(5, 0)
+    _check_augmentations(G)
+
+    G = nx.barbell_graph(5, 2)
+    _check_augmentations(G)
+
+    G = nx.barbell_graph(5, 3)
+    _check_augmentations(G)
+
+    G = nx.barbell_graph(5, 4)
+    _check_augmentations(G)
+
+
+def test_bridge():
+    G = nx.Graph([(2393, 2257), (2393, 2685), (2685, 2257), (1758, 2257)])
+    _check_augmentations(G)
+
+
+def test_gnp_augmentation():
+    rng = random.Random(0)
+    G = nx.gnp_random_graph(30, 0.005, seed=0)
+    # Randomly make edges available
+    avail = {
+        (u, v): 1 + rng.random() for u, v in complement_edges(G) if rng.random() < 0.25
+    }
+    _check_augmentations(G, avail)
+
+
+def _assert_solution_properties(G, aug_edges, avail_dict=None):
+    """Checks that aug_edges are consistently formatted"""
+    if avail_dict is not None:
+        assert all(
+            e in avail_dict for e in aug_edges
+        ), "when avail is specified aug-edges should be in avail"
+
+    # normalize edge orientation before checking for duplicates
+    unique_aug = set(map(tuple, map(sorted, aug_edges)))
+    assert len(aug_edges) == len(unique_aug), "edges should be unique"
+
+    assert not any(u == v for u, v in unique_aug), "should be no self-edges"
+
+    assert not any(
+        G.has_edge(u, v) for u, v in unique_aug
+    ), "aug edges and G.edges should be disjoint"
+
+
+def _augment_and_check(
+    G, k, avail=None, weight=None, verbose=False, orig_k=None, max_aug_k=None
+):
+    """
+    Does one specific augmentation and checks for properties of the result
+    """
+    if orig_k is None:
+        try:
+            orig_k = nx.edge_connectivity(G)
+        except nx.NetworkXPointlessConcept:
+            orig_k = 0
+    info = {}
+    try:
+        if avail is not None:
+            # ensure avail is in dict form
+            avail_dict = dict(zip(*_unpack_available_edges(avail, weight=weight)))
+        else:
+            avail_dict = None
+        try:
+            # Find the augmentation if 
possible + generator = nx.k_edge_augmentation(G, k=k, weight=weight, avail=avail) + assert not isinstance(generator, list), "should always return an iter" + aug_edges = [] + for edge in generator: + aug_edges.append(edge) + except nx.NetworkXUnfeasible: + infeasible = True + info["infeasible"] = True + assert len(aug_edges) == 0, "should not generate anything if unfeasible" + + if avail is None: + n_nodes = G.number_of_nodes() + assert n_nodes <= k, ( + "unconstrained cases are only unfeasible if |V| <= k. " + f"Got |V|={n_nodes} and k={k}" + ) + else: + if max_aug_k is None: + G_aug_all = G.copy() + G_aug_all.add_edges_from(avail_dict.keys()) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + + assert max_aug_k < k, ( + "avail should only be unfeasible if using all edges " + "does not achieve k-edge-connectivity" + ) + + # Test for a partial solution + partial_edges = list( + nx.k_edge_augmentation(G, k=k, weight=weight, partial=True, avail=avail) + ) + + info["n_partial_edges"] = len(partial_edges) + + if avail_dict is None: + assert set(partial_edges) == set( + complement_edges(G) + ), "unweighted partial solutions should be the complement" + elif len(avail_dict) > 0: + H = G.copy() + + # Find the partial / full augmented connectivity + H.add_edges_from(partial_edges) + partial_conn = nx.edge_connectivity(H) + + H.add_edges_from(set(avail_dict.keys())) + full_conn = nx.edge_connectivity(H) + + # Full connectivity should be no better than our partial + # solution. + assert ( + partial_conn == full_conn + ), "adding more edges should not increase k-conn" + + # Find the new edge-connectivity after adding the augmenting edges + aug_edges = partial_edges + else: + infeasible = False + + # Find the weight of the augmentation + num_edges = len(aug_edges) + if avail is not None: + total_weight = sum(avail_dict[e] for e in aug_edges) + else: + total_weight = num_edges + + info["total_weight"] = total_weight + info["num_edges"] = num_edges + + # Find the new edge-connectivity after adding the augmenting edges + G_aug = G.copy() + G_aug.add_edges_from(aug_edges) + try: + aug_k = nx.edge_connectivity(G_aug) + except nx.NetworkXPointlessConcept: + aug_k = 0 + info["aug_k"] = aug_k + + # Do checks + if not infeasible and orig_k < k: + assert info["aug_k"] >= k, f"connectivity should increase to k={k} or more" + + assert info["aug_k"] >= orig_k, "augmenting should never reduce connectivity" + + _assert_solution_properties(G, aug_edges, avail_dict) + + except Exception: + info["failed"] = True + print(f"edges = {list(G.edges())}") + print(f"nodes = {list(G.nodes())}") + print(f"aug_edges = {list(aug_edges)}") + print(f"info = {info}") + raise + else: + if verbose: + print(f"info = {info}") + + if infeasible: + aug_edges = None + return aug_edges, info + + +def _check_augmentations(G, avail=None, max_k=None, weight=None, verbose=False): + """Helper to check weighted/unweighted cases with multiple values of k""" + # Using all available edges, find the maximum edge-connectivity + try: + orig_k = nx.edge_connectivity(G) + except nx.NetworkXPointlessConcept: + orig_k = 0 + + if avail is not None: + all_aug_edges = _unpack_available_edges(avail, weight=weight)[0] + G_aug_all = G.copy() + G_aug_all.add_edges_from(all_aug_edges) + try: + max_aug_k = nx.edge_connectivity(G_aug_all) + except nx.NetworkXPointlessConcept: + max_aug_k = 0 + else: + max_aug_k = G.number_of_nodes() - 1 + + if max_k is None: + max_k = min(4, max_aug_k) + + avail_uniform = {e: 1 
for e in complement_edges(G)}
+
+    if verbose:
+        print("\n=== CHECK_AUGMENTATION ===")
+        print(f"G.number_of_nodes = {G.number_of_nodes()!r}")
+        print(f"G.number_of_edges = {G.number_of_edges()!r}")
+        print(f"max_k = {max_k!r}")
+        print(f"max_aug_k = {max_aug_k!r}")
+        print(f"orig_k = {orig_k!r}")
+
+    # check augmentation for multiple values of k
+    for k in range(1, max_k + 1):
+        if verbose:
+            print("---------------")
+            print(f"Checking k = {k}")
+
+        # Check the unweighted version
+        if verbose:
+            print("unweighted case")
+        aug_edges1, info1 = _augment_and_check(G, k=k, verbose=verbose, orig_k=orig_k)
+
+        # Check that the weighted version with all available edges and uniform
+        # weights gives a similar solution to the unweighted case.
+        if verbose:
+            print("weighted uniform case")
+        aug_edges2, info2 = _augment_and_check(
+            G,
+            k=k,
+            avail=avail_uniform,
+            verbose=verbose,
+            orig_k=orig_k,
+            max_aug_k=G.number_of_nodes() - 1,
+        )
+
+        # Check the weighted version
+        if avail is not None:
+            if verbose:
+                print("weighted case")
+            aug_edges3, info3 = _augment_and_check(
+                G,
+                k=k,
+                avail=avail,
+                weight=weight,
+                verbose=verbose,
+                max_aug_k=max_aug_k,
+                orig_k=orig_k,
+            )
+
+        if aug_edges1 is not None:
+            # Check approximation ratios
+            if k == 1:
+                # when k=1, both solutions should be optimal
+                assert info2["total_weight"] == info1["total_weight"]
+            if k == 2:
+                # when k=2, the weighted version is an approximation
+                if orig_k == 0:
+                    # the approximation ratio is 3 if G is not connected
+                    assert info2["total_weight"] <= info1["total_weight"] * 3
+                else:
+                    # the approximation ratio is 2 if G was originally connected
+                    assert info2["total_weight"] <= info1["total_weight"] * 2
+                _check_unconstrained_bridge_property(G, info1)
+
+
+def _check_unconstrained_bridge_property(G, info1):
+    # Check Theorem 5 from Eswaran and Tarjan (1975), Augmentation problems
+    import math
+
+    bridge_ccs = list(nx.connectivity.bridge_components(G))
+    # condense G into a forest C
+    C = collapse(G, bridge_ccs)
+
+    p = len([n for n, d in C.degree() if d == 1])  # leaves
+    q = len([n for n, d in C.degree() if d == 0])  # isolated nodes
+    if p + q > 1:
+        size_target = math.ceil(p / 2) + q
+        size_aug = info1["num_edges"]
+        assert (
+            size_aug == size_target
+        ), "augmentation size is different from what theory predicts"
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a1f681ab3da3f1f965ecbbf8dcf84eb49a512b9
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_edge_kcomponents.py
@@ -0,0 +1,488 @@
+import itertools as it
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms.connectivity import EdgeComponentAuxGraph, bridge_components
+from networkx.algorithms.connectivity.edge_kcomponents import general_k_edge_subgraphs
+from networkx.utils import pairwise
+
+# ----------------
+# Helper functions
+# ----------------
+
+
+def fset(list_of_sets):
+    """allows == to be used for lists of sets"""
+    return set(map(frozenset, list_of_sets))
+
+
+def _assert_subgraph_edge_connectivity(G, ccs_subgraph, k):
+    """
+    tests properties of k-edge-connected subgraphs
+
+    the actual edge connectivity should be no less than k unless the cc is a
+    single node.
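+    (single-node components are skipped below because there is no pair of
+    nodes whose connectivity could be measured)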
+ """ + for cc in ccs_subgraph: + C = G.subgraph(cc) + if len(cc) > 1: + connectivity = nx.edge_connectivity(C) + assert connectivity >= k + + +def _memo_connectivity(G, u, v, memo): + edge = (u, v) + if edge in memo: + return memo[edge] + if not G.is_directed(): + redge = (v, u) + if redge in memo: + return memo[redge] + memo[edge] = nx.edge_connectivity(G, *edge) + return memo[edge] + + +def _all_pairs_connectivity(G, cc, k, memo): + # Brute force check + for u, v in it.combinations(cc, 2): + # Use a memoization dict to save on computation + connectivity = _memo_connectivity(G, u, v, memo) + if G.is_directed(): + connectivity = min(connectivity, _memo_connectivity(G, v, u, memo)) + assert connectivity >= k + + +def _assert_local_cc_edge_connectivity(G, ccs_local, k, memo): + """ + tests properties of k-edge-connected components + + the local edge connectivity between each pair of nodes in the original + graph should be no less than k unless the cc is a single node. + """ + for cc in ccs_local: + if len(cc) > 1: + # Strategy for testing a bit faster: If the subgraph has high edge + # connectivity then it must have local connectivity + C = G.subgraph(cc) + connectivity = nx.edge_connectivity(C) + if connectivity < k: + # Otherwise do the brute force (with memoization) check + _all_pairs_connectivity(G, cc, k, memo) + + +# Helper function +def _check_edge_connectivity(G): + """ + Helper - generates all k-edge-components using the aux graph. Checks the + both local and subgraph edge connectivity of each cc. Also checks that + alternate methods of computing the k-edge-ccs generate the same result. + """ + # Construct the auxiliary graph that can be used to make each k-cc or k-sub + aux_graph = EdgeComponentAuxGraph.construct(G) + + # memoize the local connectivity in this graph + memo = {} + + for k in it.count(1): + # Test "local" k-edge-components and k-edge-subgraphs + ccs_local = fset(aux_graph.k_edge_components(k)) + ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k)) + + # Check connectivity properties that should be guaranteed by the + # algorithms. + _assert_local_cc_edge_connectivity(G, ccs_local, k, memo) + _assert_subgraph_edge_connectivity(G, ccs_subgraph, k) + + if k == 1 or k == 2 and not G.is_directed(): + assert ( + ccs_local == ccs_subgraph + ), "Subgraphs and components should be the same when k == 1 or (k == 2 and not G.directed())" + + if G.is_directed(): + # Test special case methods are the same as the aux graph + if k == 1: + alt_sccs = fset(nx.strongly_connected_components(G)) + assert alt_sccs == ccs_local, "k=1 failed alt" + assert alt_sccs == ccs_subgraph, "k=1 failed alt" + else: + # Test special case methods are the same as the aux graph + if k == 1: + alt_ccs = fset(nx.connected_components(G)) + assert alt_ccs == ccs_local, "k=1 failed alt" + assert alt_ccs == ccs_subgraph, "k=1 failed alt" + elif k == 2: + alt_bridge_ccs = fset(bridge_components(G)) + assert alt_bridge_ccs == ccs_local, "k=2 failed alt" + assert alt_bridge_ccs == ccs_subgraph, "k=2 failed alt" + # if new methods for k == 3 or k == 4 are implemented add them here + + # Check the general subgraph method works by itself + alt_subgraph_ccs = fset( + [set(C.nodes()) for C in general_k_edge_subgraphs(G, k=k)] + ) + assert alt_subgraph_ccs == ccs_subgraph, "alt subgraph method failed" + + # Stop once k is larger than all special case methods + # and we cannot break down ccs any further. 
+ if k > 2 and all(len(cc) == 1 for cc in ccs_local): + break + + +# ---------------- +# Misc tests +# ---------------- + + +def test_zero_k_exception(): + G = nx.Graph() + # functions that return generators error immediately + pytest.raises(ValueError, nx.k_edge_components, G, k=0) + pytest.raises(ValueError, nx.k_edge_subgraphs, G, k=0) + + # actual generators only error when you get the first item + aux_graph = EdgeComponentAuxGraph.construct(G) + pytest.raises(ValueError, list, aux_graph.k_edge_components(k=0)) + pytest.raises(ValueError, list, aux_graph.k_edge_subgraphs(k=0)) + + pytest.raises(ValueError, list, general_k_edge_subgraphs(G, k=0)) + + +def test_empty_input(): + G = nx.Graph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + G = nx.DiGraph() + assert [] == list(nx.k_edge_components(G, k=5)) + assert [] == list(nx.k_edge_subgraphs(G, k=5)) + + +def test_not_implemented(): + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, EdgeComponentAuxGraph.construct, G) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_components, G, k=2) + pytest.raises(nx.NetworkXNotImplemented, nx.k_edge_subgraphs, G, k=2) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(G)) + with pytest.raises(nx.NetworkXNotImplemented): + next(bridge_components(nx.DiGraph())) + + +def test_general_k_edge_subgraph_quick_return(): + # tests quick return optimization + G = nx.Graph() + G.add_node(0) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 1 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + G.add_node(1) + subgraphs = list(general_k_edge_subgraphs(G, k=1)) + assert len(subgraphs) == 2 + for subgraph in subgraphs: + assert subgraph.number_of_nodes() == 1 + + +# ---------------- +# Undirected tests +# ---------------- + + +def test_random_gnp(): + # seeds = [1550709854, 1309423156, 4208992358, 2785630813, 1915069929] + seeds = [12, 13] + + for seed in seeds: + G = nx.gnp_random_graph(20, 0.2, seed=seed) + _check_edge_connectivity(G) + + +def test_configuration(): + # seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497] + seeds = [14, 15] + for seed in seeds: + deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000) + G = nx.Graph(nx.configuration_model(deg_seq, seed=seed)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_edge_connectivity(G) + + +def test_shell(): + # seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425] + seeds = [20] + for seed in seeds: + constructor = [(12, 70, 0.8), (15, 40, 0.6)] + G = nx.random_shell_graph(constructor, seed=seed) + _check_edge_connectivity(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_edge_connectivity(G) + + +def test_tarjan_bridge(): + # graph from tarjan paper + # RE Tarjan - "A note on finding the bridges of a graph" + # Information Processing Letters, 1974 - Elsevier + # doi:10.1016/0020-0190(74)90003-9. 
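+    # The construction mirrors tarjan_bridge_graph() in
+    # test_edge_augmentation.py: consecutive pairs in each tuple below form
+    # the edges of one 2-edge-connected component.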
+ # define 2-connected components and bridges + ccs = [ + (1, 2, 4, 3, 1, 4), + (5, 6, 7, 5), + (8, 9, 10, 8), + (17, 18, 16, 15, 17), + (11, 12, 14, 13, 11, 14), + ] + bridges = [(4, 8), (3, 5), (3, 17)] + G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges))) + _check_edge_connectivity(G) + + +def test_bridge_cc(): + # define 2-connected components and bridges + cc2 = [(1, 2, 4, 3, 1, 4), (8, 9, 10, 8), (11, 12, 13, 11)] + bridges = [(4, 8), (3, 5), (20, 21), (22, 23, 24)] + G = nx.Graph(it.chain(*(pairwise(path) for path in cc2 + bridges))) + bridge_ccs = fset(bridge_components(G)) + target_ccs = fset( + [{1, 2, 3, 4}, {5}, {8, 9, 10}, {11, 12, 13}, {20}, {21}, {22}, {23}, {24}] + ) + assert bridge_ccs == target_ccs + _check_edge_connectivity(G) + + +def test_undirected_aux_graph(): + # Graph similar to the one in + # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264 + a, b, c, d, e, f, g, h, i = "abcdefghi" + paths = [ + (a, d, b, f, c), + (a, e, b), + (a, e, b, c, g, b, a), + (c, b), + (f, g, f), + (h, i), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + components_1 = fset(aux_graph.k_edge_subgraphs(k=1)) + target_1 = fset([{a, b, c, d, e, f, g}, {h, i}]) + assert target_1 == components_1 + + # Check that the undirected case for k=1 agrees with CCs + alt_1 = fset(nx.k_edge_subgraphs(G, k=1)) + assert alt_1 == components_1 + + components_2 = fset(aux_graph.k_edge_subgraphs(k=2)) + target_2 = fset([{a, b, c, d, e, f, g}, {h}, {i}]) + assert target_2 == components_2 + + # Check that the undirected case for k=2 agrees with bridge components + alt_2 = fset(nx.k_edge_subgraphs(G, k=2)) + assert alt_2 == components_2 + + components_3 = fset(aux_graph.k_edge_subgraphs(k=3)) + target_3 = fset([{a}, {b, c, f, g}, {d}, {e}, {h}, {i}]) + assert target_3 == components_3 + + components_4 = fset(aux_graph.k_edge_subgraphs(k=4)) + target_4 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}]) + assert target_4 == components_4 + + _check_edge_connectivity(G) + + +def test_local_subgraph_difference(): + paths = [ + (11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique + (21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique + # paths connecting each node of the 4 cliques + (11, 101, 21), + (12, 102, 22), + (13, 103, 23), + (14, 104, 24), + ] + G = nx.Graph(it.chain(*[pairwise(path) for path in paths])) + aux_graph = EdgeComponentAuxGraph.construct(G) + + # Each clique is returned separately in k-edge-subgraphs + subgraph_ccs = fset(aux_graph.k_edge_subgraphs(3)) + subgraph_target = fset( + [{101}, {102}, {103}, {104}, {21, 22, 23, 24}, {11, 12, 13, 14}] + ) + assert subgraph_ccs == subgraph_target + + # But in k-edge-ccs they are returned together + # because they are locally 3-edge-connected + local_ccs = fset(aux_graph.k_edge_components(3)) + local_target = fset([{101}, {102}, {103}, {104}, {11, 12, 13, 14, 21, 22, 23, 24}]) + assert local_ccs == local_target + + +def test_local_subgraph_difference_directed(): + dipaths = [(1, 2, 3, 4, 1), (1, 3, 1)] + G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths])) + + assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1)) + + # Unlike undirected graphs, when k=2, for directed graphs there is a case + # where the k-edge-ccs are not the same as the k-edge-subgraphs. 
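+    # For example, nodes 1 and 3 are locally 2-edge-connected in G (via the
+    # direct arcs plus detours through 2 and 4), but the subgraph induced on
+    # {1, 3} keeps only the arcs 1->3 and 3->1, which is not 2-edge-connected.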
+    # (for undirected graphs, the k-edge-ccs and k-edge-subgraphs coincide
+    # when k=2)
+    assert fset(nx.k_edge_components(G, k=2)) != fset(nx.k_edge_subgraphs(G, k=2))
+
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    _check_edge_connectivity(G)
+
+
+def test_triangles():
+    paths = [
+        (11, 12, 13, 11),  # first 3-clique
+        (21, 22, 23, 21),  # second 3-clique
+        (11, 21),  # connected by an edge
+    ]
+    G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
+
+    # subgraphs and ccs are the same in all cases here
+    assert fset(nx.k_edge_components(G, k=1)) == fset(nx.k_edge_subgraphs(G, k=1))
+
+    assert fset(nx.k_edge_components(G, k=2)) == fset(nx.k_edge_subgraphs(G, k=2))
+
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    _check_edge_connectivity(G)
+
+
+def test_four_clique():
+    paths = [
+        (11, 12, 13, 14, 11, 13, 14, 12),  # first 4-clique
+        (21, 22, 23, 24, 21, 23, 24, 22),  # second 4-clique
+        # paths connecting the two 4-cliques such that they are
+        # 3-connected in G, but not in the subgraph.
+        # Case where the nodes bridging them do not have degree less than 3.
+        (100, 13),
+        (12, 100, 22),
+        (13, 200, 23),
+        (14, 300, 24),
+    ]
+    G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
+
+    # The subgraphs and ccs are different for k=3
+    local_ccs = fset(nx.k_edge_components(G, k=3))
+    subgraphs = fset(nx.k_edge_subgraphs(G, k=3))
+    assert local_ccs != subgraphs
+
+    # The cliques are in the same cc
+    clique1 = frozenset(paths[0])
+    clique2 = frozenset(paths[1])
+    assert clique1.union(clique2).union({100}) in local_ccs
+
+    # but different subgraphs
+    assert clique1 in subgraphs
+    assert clique2 in subgraphs
+
+    assert G.degree(100) == 3
+
+    _check_edge_connectivity(G)
+
+
+def test_five_clique():
+    # Make a graph that can be disconnected by removing fewer than 4 edges,
+    # but in which no node has degree less than 4.
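+    # (removing the three attachment edges (100, 6), (100, 7), and (200, 8)
+    # separates the second clique, so edge connectivity is strictly below
+    # the minimum degree)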
+    G = nx.disjoint_union(nx.complete_graph(5), nx.complete_graph(5))
+    paths = [
+        # add aux-connections
+        (1, 100, 6),
+        (2, 100, 7),
+        (3, 200, 8),
+        (4, 200, 100),
+    ]
+    G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
+    assert min(dict(nx.degree(G)).values()) == 4
+
+    # For k=3 they are the same
+    assert fset(nx.k_edge_components(G, k=3)) == fset(nx.k_edge_subgraphs(G, k=3))
+
+    # For k=4 they are different:
+    # the aux nodes are in the same cc as clique 1, but not in the same subgraph
+    assert fset(nx.k_edge_components(G, k=4)) != fset(nx.k_edge_subgraphs(G, k=4))
+
+    # For k=5 they are also not the same
+    assert fset(nx.k_edge_components(G, k=5)) != fset(nx.k_edge_subgraphs(G, k=5))
+
+    # For k=6 they are the same
+    assert fset(nx.k_edge_components(G, k=6)) == fset(nx.k_edge_subgraphs(G, k=6))
+    _check_edge_connectivity(G)
+
+
+# ----------------
+# Directed tests
+# ----------------
+
+
+def test_directed_aux_graph():
+    # Graph similar to the one in
+    # http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
+    a, b, c, d, e, f, g, h, i = "abcdefghi"
+    dipaths = [
+        (a, d, b, f, c),
+        (a, e, b),
+        (a, e, b, c, g, b, a),
+        (c, b),
+        (f, g, f),
+        (h, i),
+    ]
+    G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths]))
+    aux_graph = EdgeComponentAuxGraph.construct(G)
+
+    components_1 = fset(aux_graph.k_edge_subgraphs(k=1))
+    target_1 = fset([{a, b, c, d, e, f, g}, {h}, {i}])
+    assert target_1 == components_1
+
+    # Check that the directed case for k=1 agrees with SCCs
+    alt_1 = fset(nx.strongly_connected_components(G))
+    assert alt_1 == components_1
+
+    components_2 = fset(aux_graph.k_edge_subgraphs(k=2))
+    target_2 = fset([{i}, {e}, {d}, {b, c, f, g}, {h}, {a}])
+    assert target_2 == components_2
+
+    components_3 = fset(aux_graph.k_edge_subgraphs(k=3))
+    target_3 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}])
+    assert target_3 == components_3
+
+
+def test_random_gnp_directed():
+    # seeds = [3894723670, 500186844, 267231174, 2181982262, 1116750056]
+    seeds = [21]
+    for seed in seeds:
+        G = nx.gnp_random_graph(20, 0.2, directed=True, seed=seed)
+        _check_edge_connectivity(G)
+
+
+def test_configuration_directed():
+    # seeds = [671221681, 2403749451, 124433910, 672335939, 1193127215]
+    seeds = [67]
+    for seed in seeds:
+        deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000)
+        G = nx.DiGraph(nx.configuration_model(deg_seq, seed=seed))
+        G.remove_edges_from(nx.selfloop_edges(G))
+        _check_edge_connectivity(G)
+
+
+def test_shell_directed():
+    # seeds = [3134027055, 4079264063, 1350769518, 1405643020, 530038094]
+    seeds = [31]
+    for seed in seeds:
+        constructor = [(12, 70, 0.8), (15, 40, 0.6)]
+        G = nx.random_shell_graph(constructor, seed=seed).to_directed()
+        _check_edge_connectivity(G)
+
+
+def test_karate_directed():
+    G = nx.karate_club_graph().to_directed()
+    _check_edge_connectivity(G)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4436acd07fe57cb510fee138b36f10923a9688a
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py
@@ -0,0 +1,296 @@
+# Test for Moody and White k-components algorithm
+import pytest
+
+import networkx as nx
+from networkx.algorithms.connectivity.kcomponents import (
+    _consolidate,
+    build_k_number_dict,
+)
+
+##
+# A nice synthetic graph
+##
+
+
+def 
torrents_and_ferraro_graph(): + # Graph from https://arxiv.org/pdf/1503.04476v1 p.26 + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # This edge makes the graph biconnected; it's + # needed because K5s share only one node. + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +def test_directed(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.gnp_random_graph(10, 0.2, directed=True, seed=42) + nx.k_components(G) + + +# Helper function +def _check_connectivity(G, k_components): + for k, components in k_components.items(): + if k < 3: + continue + # check that k-components have node connectivity >= k. + for component in components: + C = G.subgraph(component) + K = nx.node_connectivity(C) + assert K >= k + + +@pytest.mark.slow +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + # In this example graph there are 8 3-components, 4 with 15 nodes + # and 4 with 5 nodes. + assert len(result[3]) == 8 + assert len([c for c in result[3] if len(c) == 15]) == 4 + assert len([c for c in result[3] if len(c) == 5]) == 4 + # There are also 8 4-components all with 5 nodes. 
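+    # (the 4-components are the eight copies of K5 attached during
+    # construction, two near each corner of the grid)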
+ assert len(result[4]) == 8 + assert all(len(c) == 5 for c in result[4]) + + +@pytest.mark.slow +def test_random_gnp(): + G = nx.gnp_random_graph(50, 0.2, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +@pytest.mark.slow +def test_shell(): + constructor = [(20, 80, 0.8), (80, 180, 0.6)] + G = nx.random_shell_graph(constructor, seed=42) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_configuration(): + deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(nx.selfloop_edges(G)) + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate(): + G = nx.karate_club_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_karate_component_number(): + karate_k_num = { + 0: 4, + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 3, + 7: 4, + 8: 4, + 9: 2, + 10: 3, + 11: 1, + 12: 2, + 13: 4, + 14: 2, + 15: 2, + 16: 2, + 17: 2, + 18: 2, + 19: 3, + 20: 2, + 21: 2, + 22: 2, + 23: 3, + 24: 3, + 25: 3, + 26: 2, + 27: 3, + 28: 3, + 29: 3, + 30: 4, + 31: 3, + 32: 4, + 33: 4, + } + G = nx.karate_club_graph() + k_components = nx.k_components(G) + k_num = build_k_number_dict(k_components) + assert karate_k_num == k_num + + +def test_davis_southern_women(): + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + _check_connectivity(G, result) + + +def test_davis_southern_women_detail_3_and_4(): + solution = { + 3: [ + { + "Nora Fayette", + "E10", + "Myra Liddel", + "E12", + "E14", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "E1", + "Verne Sanderson", + "E3", + "E2", + "Theresa Anderson", + "Pearl Oglethorpe", + "Katherina Rogers", + "Brenda Rogers", + "E13", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + 4: [ + { + "Nora Fayette", + "E10", + "Verne Sanderson", + "E12", + "Frances Anderson", + "Evelyn Jefferson", + "Ruth DeSand", + "Helen Lloyd", + "Eleanor Nye", + "E9", + "E8", + "E5", + "E4", + "E7", + "E6", + "Myra Liddel", + "E3", + "Theresa Anderson", + "Katherina Rogers", + "Brenda Rogers", + "Charlotte McDowd", + "Sylvia Avondale", + "Laura Mandeville", + } + ], + } + G = nx.davis_southern_women_graph() + result = nx.k_components(G) + for k, components in result.items(): + if k < 3: + continue + assert len(components) == len(solution[k]) + for component in components: + assert component in solution[k] + + +def test_set_consolidation_rosettacode(): + # Tests from http://rosettacode.org/wiki/Set_consolidation + def list_of_sets_equal(result, solution): + assert {frozenset(s) for s in result} == {frozenset(s) for s in solution} + + question = [{"A", "B"}, {"C", "D"}] + solution = [{"A", "B"}, {"C", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"B", "C"}] + solution = [{"A", "B", "C"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"A", "B"}, {"C", "D"}, {"D", "B"}] + solution = [{"A", "C", "B", "D"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [{"H", "I", "K"}, {"A", "B"}, {"C", "D"}, {"D", "B"}, {"F", "G", "H"}] + solution = [{"A", "C", "B", "D"}, {"G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"A", "H"}, + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", 
"I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) + question = [ + {"H", "I", "K"}, + {"A", "B"}, + {"C", "D"}, + {"D", "B"}, + {"F", "G", "H"}, + {"A", "H"}, + ] + solution = [{"A", "C", "B", "D", "G", "F", "I", "H", "K"}] + list_of_sets_equal(_consolidate(question, 1), solution) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py new file mode 100644 index 0000000000000000000000000000000000000000..d5b3b089e23bc41201fb4350a51549daa79495e1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_kcutsets.py @@ -0,0 +1,266 @@ +# Jordi Torrents +# Test for k-cutsets +import itertools + +import pytest + +import networkx as nx +from networkx.algorithms import flow +from networkx.algorithms.connectivity.kcutsets import _is_separating_set + +MAX_CUTSETS_TO_TEST = 4 # originally 100. cut to decrease testing time + +flow_funcs = [ + flow.boykov_kolmogorov, + flow.dinitz, + flow.edmonds_karp, + flow.preflow_push, + flow.shortest_augmenting_path, +] + + +## +# Some nice synthetic graphs +## +def graph_example_1(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [ + (labels[(0, 0)], labels[(1, 0)]), + (labels[(0, 4)], labels[(1, 4)]), + (labels[(3, 0)], labels[(4, 0)]), + (labels[(3, 4)], labels[(4, 4)]), + ]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + G.add_edge(new_node + 16, new_node + 5) + return G + + +def torrents_and_ferraro_graph(): + G = nx.convert_node_labels_to_integers( + nx.grid_graph([5, 5]), label_attribute="labels" + ) + rlabels = nx.get_node_attributes(G, "labels") + labels = {v: k for k, v in rlabels.items()} + + for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing a node + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + # Commenting this makes the graph not biconnected !! 
+ # This stupid mistake make one reviewer very angry :P + G.add_edge(new_node + 16, new_node + 8) + + for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]: + new_node = G.order() + 1 + # Petersen graph is triconnected + P = nx.petersen_graph() + G = nx.disjoint_union(G, P) + # Add two edges between the grid and P + G.add_edge(new_node + 1, nodes[0]) + G.add_edge(new_node, nodes[1]) + # K5 is 4-connected + K = nx.complete_graph(5) + G = nx.disjoint_union(G, K) + # Add three edges between P and K5 + G.add_edge(new_node + 2, new_node + 11) + G.add_edge(new_node + 3, new_node + 12) + G.add_edge(new_node + 4, new_node + 13) + # Add another K5 sharing two nodes + G = nx.disjoint_union(G, K) + nbrs = G[new_node + 10] + G.remove_node(new_node + 10) + for nbr in nbrs: + G.add_edge(new_node + 17, nbr) + nbrs2 = G[new_node + 9] + G.remove_node(new_node + 9) + for nbr in nbrs2: + G.add_edge(new_node + 18, nbr) + return G + + +# Helper function +def _check_separating_sets(G): + for cc in nx.connected_components(G): + if len(cc) < 3: + continue + Gc = G.subgraph(cc) + node_conn = nx.node_connectivity(Gc) + all_cuts = nx.all_node_cuts(Gc) + # Only test a limited number of cut sets to reduce test time. + for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST): + assert node_conn == len(cut) + assert not nx.is_connected(nx.restricted_view(G, cut, [])) + + +@pytest.mark.slow +def test_torrents_and_ferraro_graph(): + G = torrents_and_ferraro_graph() + _check_separating_sets(G) + + +def test_example_1(): + G = graph_example_1() + _check_separating_sets(G) + + +def test_random_gnp(): + G = nx.gnp_random_graph(100, 0.1, seed=42) + _check_separating_sets(G) + + +def test_shell(): + constructor = [(20, 80, 0.8), (80, 180, 0.6)] + G = nx.random_shell_graph(constructor, seed=42) + _check_separating_sets(G) + + +def test_configuration(): + deg_seq = nx.random_powerlaw_tree_sequence(100, tries=5, seed=72) + G = nx.Graph(nx.configuration_model(deg_seq)) + G.remove_edges_from(nx.selfloop_edges(G)) + _check_separating_sets(G) + + +def test_karate(): + G = nx.karate_club_graph() + _check_separating_sets(G) + + +def _generate_no_biconnected(max_attempts=50): + attempts = 0 + while True: + G = nx.fast_gnp_random_graph(100, 0.0575, seed=42) + if nx.is_connected(G) and not nx.is_biconnected(G): + attempts = 0 + yield G + else: + if attempts >= max_attempts: + msg = f"Tried {attempts} times: no suitable Graph." + raise Exception(msg) + else: + attempts += 1 + + +def test_articulation_points(): + Ggen = _generate_no_biconnected() + for i in range(1): # change 1 to 3 or more for more realizations. + G = next(Ggen) + articulation_points = [{a} for a in nx.articulation_points(G)] + for cut in nx.all_node_cuts(G): + assert cut in articulation_points + + +def test_grid_2d_graph(): + # All minimum node cuts of a 2d grid + # are the four pairs of nodes that are + # neighbors of the four corner nodes. 
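+    # e.g. removing {(0, 1), (1, 0)} disconnects the corner node (0, 0)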
+ G = nx.grid_2d_graph(5, 5) + solution = [{(0, 1), (1, 0)}, {(3, 0), (4, 1)}, {(3, 4), (4, 3)}, {(0, 3), (1, 4)}] + for cut in nx.all_node_cuts(G): + assert cut in solution + + +def test_disconnected_graph(): + G = nx.fast_gnp_random_graph(100, 0.01, seed=42) + cuts = nx.all_node_cuts(G) + pytest.raises(nx.NetworkXError, next, cuts) + + +@pytest.mark.slow +def test_alternative_flow_functions(): + graphs = [nx.grid_2d_graph(4, 4), nx.cycle_graph(5)] + for G in graphs: + node_conn = nx.node_connectivity(G) + for flow_func in flow_funcs: + all_cuts = nx.all_node_cuts(G, flow_func=flow_func) + # Only test a limited number of cut sets to reduce test time. + for cut in itertools.islice(all_cuts, MAX_CUTSETS_TO_TEST): + assert node_conn == len(cut) + assert not nx.is_connected(nx.restricted_view(G, cut, [])) + + +def test_is_separating_set_complete_graph(): + G = nx.complete_graph(5) + assert _is_separating_set(G, {0, 1, 2, 3}) + + +def test_is_separating_set(): + for i in [5, 10, 15]: + G = nx.star_graph(i) + max_degree_node = max(G, key=G.degree) + assert _is_separating_set(G, {max_degree_node}) + + +def test_non_repeated_cuts(): + # The algorithm was repeating the cut {0, 1} for the giant biconnected + # component of the Karate club graph. + K = nx.karate_club_graph() + bcc = max(list(nx.biconnected_components(K)), key=len) + G = K.subgraph(bcc) + solution = [{32, 33}, {2, 33}, {0, 3}, {0, 1}, {29, 33}] + cuts = list(nx.all_node_cuts(G)) + if len(solution) != len(cuts): + print(f"Solution: {solution}") + print(f"Result: {cuts}") + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_cycle_graph(): + G = nx.cycle_graph(5) + solution = [{0, 2}, {0, 3}, {1, 3}, {1, 4}, {2, 4}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution + + +def test_complete_graph(): + G = nx.complete_graph(5) + solution = [{0, 1, 2, 3}, {0, 1, 2, 4}, {0, 1, 3, 4}, {0, 2, 3, 4}, {1, 2, 3, 4}] + cuts = list(nx.all_node_cuts(G)) + assert len(solution) == len(cuts) + for cut in cuts: + assert cut in solution diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py new file mode 100644 index 0000000000000000000000000000000000000000..2b9e2bab41eb29067166b6faa331e022d4074ce3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/tests/test_stoer_wagner.py @@ -0,0 +1,102 @@ +from itertools import chain + +import pytest + +import networkx as nx + + +def _check_partition(G, cut_value, partition, weight): + assert isinstance(partition, tuple) + assert len(partition) == 2 + assert isinstance(partition[0], list) + assert isinstance(partition[1], list) + assert len(partition[0]) > 0 + assert len(partition[1]) > 0 + assert sum(map(len, partition)) == len(G) + assert set(chain.from_iterable(partition)) == set(G) + partition = tuple(map(set, partition)) + w = 0 + for u, v, e in G.edges(data=True): + if (u in partition[0]) == (v in partition[1]): + w += e.get(weight, 1) + assert w == cut_value + + +def _test_stoer_wagner(G, answer, weight="weight"): + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.PairingHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + cut_value, partition = nx.stoer_wagner(G, weight, heap=nx.utils.BinaryHeap) + assert cut_value == answer + _check_partition(G, cut_value, partition, weight) + + +def test_graph1(): + G = 
nx.Graph() + G.add_edge("x", "a", weight=3) + G.add_edge("x", "b", weight=1) + G.add_edge("a", "c", weight=3) + G.add_edge("b", "c", weight=5) + G.add_edge("b", "d", weight=4) + G.add_edge("d", "e", weight=2) + G.add_edge("c", "y", weight=2) + G.add_edge("e", "y", weight=3) + _test_stoer_wagner(G, 4) + + +def test_graph2(): + G = nx.Graph() + G.add_edge("x", "a") + G.add_edge("x", "b") + G.add_edge("a", "c") + G.add_edge("b", "c") + G.add_edge("b", "d") + G.add_edge("d", "e") + G.add_edge("c", "y") + G.add_edge("e", "y") + _test_stoer_wagner(G, 2) + + +def test_graph3(): + # Source: + # Stoer, M. and Wagner, F. (1997). "A simple min-cut algorithm". Journal of + # the ACM 44 (4), 585-591. + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 5, weight=3) + G.add_edge(2, 3, weight=3) + G.add_edge(2, 5, weight=2) + G.add_edge(2, 6, weight=2) + G.add_edge(3, 4, weight=4) + G.add_edge(3, 7, weight=2) + G.add_edge(4, 7, weight=2) + G.add_edge(4, 8, weight=2) + G.add_edge(5, 6, weight=3) + G.add_edge(6, 7, weight=1) + G.add_edge(7, 8, weight=3) + _test_stoer_wagner(G, 4) + + +def test_weight_name(): + G = nx.Graph() + G.add_edge(1, 2, weight=1, cost=8) + G.add_edge(1, 3, cost=2) + G.add_edge(2, 3, cost=4) + _test_stoer_wagner(G, 6, weight="cost") + + +def test_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(1) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_node(2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G.add_edge(1, 2, weight=-2) + pytest.raises(nx.NetworkXError, nx.stoer_wagner, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) + G = nx.MultiDiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.stoer_wagner, G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/connectivity/utils.py b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6860a208f0f11b50d2950e8462f1c5649b4243 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/connectivity/utils.py @@ -0,0 +1,87 @@ +""" +Utilities for connectivity package +""" +import networkx as nx + +__all__ = ["build_auxiliary_node_connectivity", "build_auxiliary_edge_connectivity"] + + +@nx._dispatch +def build_auxiliary_node_connectivity(G): + r"""Creates a directed graph D from an undirected graph G to compute flow + based node connectivity. + + For an undirected graph G having `n` nodes and `m` edges we derive a + directed graph D with `2n` nodes and `2m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc in D. Then for each edge (`u`, `v`) in G we add two arcs (`uB`, `vA`) + and (`vB`, `uA`) in D. Finally we set the attribute capacity = 1 for each + arc in D [1]_. + + For a directed graph having `n` nodes and `m` arcs we derive a + directed graph D with `2n` nodes and `m+n` arcs by replacing each + original node `v` with two nodes `vA`, `vB` linked by an (internal) + arc (`vA`, `vB`) in D. Then for each arc (`u`, `v`) in G we add one + arc (`uB`, `vA`) in D. Finally we set the attribute capacity = 1 for + each arc in D. + + A dictionary with a mapping between nodes in the original graph and the + auxiliary digraph is stored as a graph attribute: D.graph['mapping']. + + References + ---------- + .. [1] Kammer, Frank and Hanjo Taubig. Graph Connectivity. 
in Brandes and + Erlebach, 'Network Analysis: Methodological Foundations', Lecture + Notes in Computer Science, Volume 3418, Springer-Verlag, 2005. + https://doi.org/10.1007/978-3-540-31955-9_7 + + """ + directed = G.is_directed() + + mapping = {} + H = nx.DiGraph() + + for i, node in enumerate(G): + mapping[node] = i + H.add_node(f"{i}A", id=node) + H.add_node(f"{i}B", id=node) + H.add_edge(f"{i}A", f"{i}B", capacity=1) + + edges = [] + for source, target in G.edges(): + edges.append((f"{mapping[source]}B", f"{mapping[target]}A")) + if not directed: + edges.append((f"{mapping[target]}B", f"{mapping[source]}A")) + H.add_edges_from(edges, capacity=1) + + # Store mapping as graph attribute + H.graph["mapping"] = mapping + return H + + +@nx._dispatch +def build_auxiliary_edge_connectivity(G): + """Auxiliary digraph for computing flow based edge connectivity + + If the input graph is undirected, we replace each edge (`u`,`v`) with + two reciprocal arcs (`u`, `v`) and (`v`, `u`) and then we set the attribute + 'capacity' for each arc to 1. If the input graph is directed we simply + add the 'capacity' attribute. Part of algorithm 1 in [1]_ . + + References + ---------- + .. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms. (this is a + chapter, look for the reference of the book). + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + """ + if G.is_directed(): + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + H.add_edges_from(G.edges(), capacity=1) + return H + else: + H = nx.DiGraph() + H.add_nodes_from(G.nodes()) + for source, target in G.edges(): + H.add_edges_from([(source, target), (target, source)], capacity=1) + return H diff --git a/MLPY/Lib/site-packages/networkx/algorithms/core.py b/MLPY/Lib/site-packages/networkx/algorithms/core.py new file mode 100644 index 0000000000000000000000000000000000000000..09a1275c794b96529439499e872dbcf5dbd04d44 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/core.py @@ -0,0 +1,545 @@ +""" +Find the k-cores of a graph. + +The k-core is found by recursively pruning nodes with degrees less than k. + +See the following references for details: + +An O(m) Algorithm for Cores Decomposition of Networks +Vladimir Batagelj and Matjaz Zaversnik, 2003. +https://arxiv.org/abs/cs.DS/0310049 + +Generalized Cores +Vladimir Batagelj and Matjaz Zaversnik, 2002. +https://arxiv.org/pdf/cs/0202039 + +For directed graphs a more general notion is that of D-cores which +looks at (k, l) restrictions on (in, out) degree. The (k, k) D-core +is the k-core. + +D-cores: Measuring Collaboration of Directed Graphs Based on Degeneracy +Christos Giatsidis, Dimitrios M. Thilikos, Michalis Vazirgiannis, ICDM 2011. +http://www.graphdegeneracy.org/dcores_ICDM_2011.pdf + +Multi-scale structure and topological anomaly detection via a new network \ +statistic: The onion decomposition +L. Hébert-Dufresne, J. A. Grochow, and A. Allard +Scientific Reports 6, 31708 (2016) +http://doi.org/10.1038/srep31708 + +""" +import networkx as nx +from networkx.exception import NetworkXError +from networkx.utils import not_implemented_for + +__all__ = [ + "core_number", + "k_core", + "k_shell", + "k_crust", + "k_corona", + "k_truss", + "onion_layers", +] + + +@not_implemented_for("multigraph") +@nx._dispatch +def core_number(G): + """Returns the core number for each vertex. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + The core number of a node is the largest value k of a k-core containing + that node. 
+ + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + + Returns + ------- + core_number : dictionary + A dictionary keyed by node to the core number. + + Raises + ------ + NetworkXError + The k-core is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + References + ---------- + .. [1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + degrees = dict(G.degree()) + # Sort nodes by degree. + nodes = sorted(degrees, key=degrees.get) + bin_boundaries = [0] + curr_degree = 0 + for i, v in enumerate(nodes): + if degrees[v] > curr_degree: + bin_boundaries.extend([i] * (degrees[v] - curr_degree)) + curr_degree = degrees[v] + node_pos = {v: pos for pos, v in enumerate(nodes)} + # The initial guess for the core number of a node is its degree. + core = degrees + nbrs = {v: list(nx.all_neighbors(G, v)) for v in G} + for v in nodes: + for u in nbrs[v]: + if core[u] > core[v]: + nbrs[u].remove(v) + pos = node_pos[u] + bin_start = bin_boundaries[core[u]] + node_pos[u] = bin_start + node_pos[nodes[bin_start]] = pos + nodes[bin_start], nodes[pos] = nodes[pos], nodes[bin_start] + bin_boundaries[core[u]] += 1 + core[u] -= 1 + return core + + +def _core_subgraph(G, k_filter, k=None, core=None): + """Returns the subgraph induced by nodes passing filter `k_filter`. + + Parameters + ---------- + G : NetworkX graph + The graph or directed graph to process + k_filter : filter function + This function filters the nodes chosen. It takes three inputs: + A node of G, the filter's cutoff, and the core dict of the graph. + The function should return a Boolean value. + k : int, optional + The order of the core. If not specified use the max core number. + This value is used as the cutoff for the filter. + core : dict, optional + Precomputed core numbers keyed by node for the graph `G`. + If not specified, the core numbers will be computed from `G`. + + """ + if core is None: + core = core_number(G) + if k is None: + k = max(core.values()) + nodes = (v for v in core if k_filter(v, k, core)) + return G.subgraph(nodes).copy() + + +@nx._dispatch(preserve_all_attrs=True) +def k_core(G, k=None, core_number=None): + """Returns the k-core of G. + + A k-core is a maximal subgraph that contains nodes of degree k or more. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int, optional + The order of the core. If not specified return the main core. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-core subgraph + + Raises + ------ + NetworkXError + The k-core is not defined for graphs with self loops or parallel edges. + + Notes + ----- + The main core is the core with the largest degree. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. 
[1] An O(m) Algorithm for Cores Decomposition of Networks + Vladimir Batagelj and Matjaz Zaversnik, 2003. + https://arxiv.org/abs/cs.DS/0310049 + """ + + def k_filter(v, k, c): + return c[v] >= k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx._dispatch(preserve_all_attrs=True) +def k_shell(G, k=None, core_number=None): + """Returns the k-shell of G. + + The k-shell is the subgraph induced by nodes with core number k. + That is, nodes in the k-core that are not in the (k+1)-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the outer shell. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + + Returns + ------- + G : NetworkX graph + The k-shell subgraph + + Raises + ------ + NetworkXError + The k-shell is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This is similar to k_corona but in that case only neighbors in the + k-core are considered. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + k_corona + + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + + def k_filter(v, k, c): + return c[v] == k + + return _core_subgraph(G, k_filter, k, core_number) + + +@nx._dispatch(preserve_all_attrs=True) +def k_crust(G, k=None, core_number=None): + """Returns the k-crust of G. + + The k-crust is the graph G with the edges of the k-core removed + and isolated nodes found after the removal of edges are also removed. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph. + k : int, optional + The order of the shell. If not specified return the main crust. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-crust subgraph + + Raises + ------ + NetworkXError + The k-crust is not implemented for graphs with self loops + or parallel edges. + + Notes + ----- + This definition of k-crust is different than the definition in [1]_. + The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm. + + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] A model of Internet topology using k-shell decomposition + Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt, + and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154 + http://www.pnas.org/content/104/27/11150.full + """ + # Default for k is one less than in _core_subgraph, so just inline. + # Filter is c[v] <= k + if core_number is None: + core_number = nx.core_number(G) + if k is None: + k = max(core_number.values()) - 1 + nodes = (v for v in core_number if core_number[v] <= k) + return G.subgraph(nodes).copy() + + +@nx._dispatch(preserve_all_attrs=True) +def k_corona(G, k, core_number=None): + """Returns the k-corona of G. 
+ + The k-corona is the subgraph of nodes in the k-core which have + exactly k neighbours in the k-core. + + Parameters + ---------- + G : NetworkX graph + A graph or directed graph + k : int + The order of the corona. + core_number : dictionary, optional + Precomputed core numbers for the graph G. + + Returns + ------- + G : NetworkX graph + The k-corona subgraph + + Raises + ------ + NetworkXError + The k-corona is not defined for graphs with self loops or + parallel edges. + + Notes + ----- + Not implemented for graphs with parallel edges or self loops. + + For directed graphs the node degree is defined to be the + in-degree + out-degree. + + Graph, node, and edge attributes are copied to the subgraph. + + See Also + -------- + core_number + + References + ---------- + .. [1] k -core (bootstrap) percolation on complex networks: + Critical phenomena and nonlocal effects, + A. V. Goltsev, S. N. Dorogovtsev, and J. F. F. Mendes, + Phys. Rev. E 73, 056101 (2006) + http://link.aps.org/doi/10.1103/PhysRevE.73.056101 + """ + + def func(v, k, c): + return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k) + + return _core_subgraph(G, func, k, core_number) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(preserve_all_attrs=True) +def k_truss(G, k): + """Returns the k-truss of `G`. + + The k-truss is the maximal induced subgraph of `G` which contains at least + three vertices where every edge is incident to at least `k-2` triangles. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + k : int + The order of the truss + + Returns + ------- + H : NetworkX graph + The k-truss subgraph + + Raises + ------ + NetworkXError + + The k-truss is not defined for graphs with self loops, directed graphs + and multigraphs. + + Notes + ----- + A k-clique is a (k-2)-truss and a k-truss is a (k+1)-core. + + Not implemented for digraphs or graphs with parallel edges or self loops. + + Graph, node, and edge attributes are copied to the subgraph. + + K-trusses were originally defined in [2] which states that the k-truss + is the maximal induced subgraph where each edge belongs to at least + `k-2` triangles. A more recent paper, [1], uses a slightly different + definition requiring that each edge belong to at least `k` triangles. + This implementation uses the original definition of `k-2` triangles. + + References + ---------- + .. [1] Bounds and Algorithms for k-truss. Paul Burkhardt, Vance Faber, + David G. Harris, 2018. https://arxiv.org/abs/1806.05523v2 + .. [2] Trusses: Cohesive Subgraphs for Social Network Analysis. Jonathan + Cohen, 2005. + """ + if nx.number_of_selfloops(G) > 0: + msg = ( + "Input graph has self loops which is not permitted; " + "Consider using G.remove_edges_from(nx.selfloop_edges(G))." + ) + raise NetworkXError(msg) + + H = G.copy() + + n_dropped = 1 + while n_dropped > 0: + n_dropped = 0 + to_drop = [] + seen = set() + for u in H: + nbrs_u = set(H[u]) + seen.add(u) + new_nbrs = [v for v in nbrs_u if v not in seen] + for v in new_nbrs: + if len(nbrs_u & set(H[v])) < (k - 2): + to_drop.append((u, v)) + H.remove_edges_from(to_drop) + n_dropped = len(to_drop) + H.remove_nodes_from(list(nx.isolates(H))) + + return H + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch +def onion_layers(G): + """Returns the layer of each vertex in an onion decomposition of the graph. + + The onion decomposition refines the k-core decomposition by providing + information on the internal organization of each k-shell. 
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatch
+def onion_layers(G):
+    """Returns the layer of each vertex in an onion decomposition of the graph.
+
+    The onion decomposition refines the k-core decomposition by providing
+    information on the internal organization of each k-shell. It is usually
+    used alongside the core numbers.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A simple graph without self loops or parallel edges
+
+    Returns
+    -------
+    od_layers : dictionary
+        A dictionary keyed by vertex to the onion layer. The layers are
+        contiguous integers starting at 1.
+
+    Raises
+    ------
+    NetworkXError
+        The onion decomposition is not implemented for graphs with self loops
+        or parallel edges or for directed graphs.
+
+    Notes
+    -----
+    Not implemented for graphs with parallel edges or self loops.
+
+    Not implemented for directed graphs.
+
+    See Also
+    --------
+    core_number
+
+    References
+    ----------
+    .. [1] Multi-scale structure and topological anomaly detection via a new
+       network statistic: The onion decomposition
+       L. Hébert-Dufresne, J. A. Grochow, and A. Allard
+       Scientific Reports 6, 31708 (2016)
+       http://doi.org/10.1038/srep31708
+    .. [2] Percolation and the effective structure of complex networks
+       A. Allard and L. Hébert-Dufresne
+       Physical Review X 9, 011023 (2019)
+       http://doi.org/10.1103/PhysRevX.9.011023
+    """
+    if nx.number_of_selfloops(G) > 0:
+        msg = (
+            "Input graph contains self loops which is not permitted; "
+            "Consider using G.remove_edges_from(nx.selfloop_edges(G))."
+        )
+        raise NetworkXError(msg)
+    # Dictionary to register the onion decomposition.
+    od_layers = {}
+    # Adjacency list.
+    neighbors = {v: list(nx.all_neighbors(G, v)) for v in G}
+    # Effective degree of nodes.
+    degrees = dict(G.degree())
+    # Performs the onion decomposition.
+    current_core = 1
+    current_layer = 1
+    # Sets vertices of degree 0 to layer 1, if any.
+    isolated_nodes = list(nx.isolates(G))
+    if len(isolated_nodes) > 0:
+        for v in isolated_nodes:
+            od_layers[v] = current_layer
+            degrees.pop(v)
+        current_layer = 2
+    # Finds the layer for the remaining nodes.
+    while len(degrees) > 0:
+        # Sets the order for looking at nodes.
+        nodes = sorted(degrees, key=degrees.get)
+        # Updates the current core if needed.
+        min_degree = degrees[nodes[0]]
+        if min_degree > current_core:
+            current_core = min_degree
+        # Identifies vertices in the current layer.
+        this_layer = []
+        for n in nodes:
+            if degrees[n] > current_core:
+                break
+            this_layer.append(n)
+        # Identifies the core/layer of the vertices in the current layer.
+        for v in this_layer:
+            od_layers[v] = current_layer
+            for n in neighbors[v]:
+                neighbors[n].remove(v)
+                degrees[n] = degrees[n] - 1
+            degrees.pop(v)
+        # Updates the layer count.
+        current_layer = current_layer + 1
+    # Returns the dictionary containing the onion layer of each vertex.
+    return od_layers
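
A small sketch of the decomposition on a clique with two pendant leaves (my own toy example): the leaves peel off in layer 1, while the 4-clique, which is the 3-core, lands together in layer 2.

```python
import networkx as nx

# A 4-clique on {0, 1, 2, 3} with two extra leaves hanging off node 3.
G = nx.Graph(
    [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]
)
# Leaves 4 and 5 get layer 1; the clique nodes all get layer 2.
print(nx.onion_layers(G))
```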
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/covering.py b/MLPY/Lib/site-packages/networkx/algorithms/covering.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4b44985aa42661f843eb7dfa1f055f53a39fc83
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/covering.py
@@ -0,0 +1,142 @@
+"""Functions related to graph covers."""
+
+from functools import partial
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = ["min_edge_cover", "is_edge_cover"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def min_edge_cover(G, matching_algorithm=None):
+    """Returns the min cardinality edge cover of the graph as a set of edges.
+
+    A smallest edge cover can be found in polynomial time by finding
+    a maximum matching and extending it greedily so that all nodes
+    are covered. This function follows that process. A maximum matching
+    algorithm can be specified for the first step of the algorithm.
+    The resulting set may contain one 2-tuple for each edge (the usual
+    case) or both 2-tuples `(u, v)` and `(v, u)` for each edge. The
+    latter occurs only when a bipartite matching algorithm is specified
+    as `matching_algorithm`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    matching_algorithm : function
+        A function that returns a maximum cardinality matching for `G`.
+        The function must take one input, the graph `G`, and return
+        either a set of edges (with only one direction for the pair of nodes)
+        or a dictionary mapping each node to its mate. If not specified,
+        :func:`~networkx.algorithms.matching.max_weight_matching` is used.
+        Common bipartite matching functions include
+        :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
+        or
+        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.
+
+    Returns
+    -------
+    min_cover : set
+        A set of the edges in a minimum edge cover in the form of tuples.
+        It contains only one of the equivalent 2-tuples `(u, v)` and `(v, u)`
+        for each edge. If a bipartite method is used to compute the matching,
+        the returned set contains both the 2-tuples `(u, v)` and `(v, u)`
+        for each edge of a minimum edge cover.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> sorted(nx.min_edge_cover(G))
+    [(2, 1), (3, 0)]
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    The minimum edge cover is an edge covering of smallest cardinality.
+
+    Due to its implementation, the worst-case running time of this algorithm
+    is bounded by the worst-case running time of the function
+    ``matching_algorithm``.
+
+    Minimum edge cover for `G` can also be found using the `min_edge_covering`
+    function in :mod:`networkx.algorithms.bipartite.covering` which is
+    simply this function with a default matching algorithm of
+    :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
+    """
+    if len(G) == 0:
+        return set()
+    if nx.number_of_isolates(G) > 0:
+        # ``min_cover`` does not exist as there is an isolated node
+        raise nx.NetworkXException(
+            "Graph has a node with no edge incident on it, so no edge cover exists."
+        )
+    if matching_algorithm is None:
+        matching_algorithm = partial(nx.max_weight_matching, maxcardinality=True)
+    maximum_matching = matching_algorithm(G)
+    # ``min_cover`` is a superset of ``maximum_matching``
+    try:
+        # bipartite matching algorithms return a dict, so convert if needed
+        min_cover = set(maximum_matching.items())
+        bipartite_cover = True
+    except AttributeError:
+        min_cover = maximum_matching
+        bipartite_cover = False
+    # iterate over uncovered nodes
+    uncovered_nodes = set(G) - {v for u, v in min_cover} - {u for u, v in min_cover}
+    for v in uncovered_nodes:
+        # Since `v` is uncovered, each edge incident to `v` will join it
+        # with a covered node (otherwise, if there were an edge joining
+        # uncovered nodes `u` and `v`, the maximum matching algorithm
+        # would have found it), so we can choose an arbitrary edge
+        # incident to `v`. (This applies only in a simple graph, not a
+        # multigraph.)
+        u = arbitrary_element(G[v])
+        min_cover.add((u, v))
+        if bipartite_cover:
+            min_cover.add((v, u))
+    return min_cover
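
A round trip through the two functions in this module (a sketch; `is_edge_cover` is defined just below): on a path, a maximum matching already covers every node, so no greedy extension is needed.

```python
import networkx as nx

# Path 0-1-2-3: the maximum matching {(0, 1), (2, 3)} covers all four
# nodes, so it is itself a minimum edge cover of size two.
G = nx.path_graph(4)
cover = nx.min_edge_cover(G)
print(len(cover))                  # 2
print(nx.is_edge_cover(G, cover))  # True
```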
+
+
+@not_implemented_for("directed")
+@nx._dispatch
+def is_edge_cover(G, cover):
+    """Decides whether a set of edges is a valid edge cover of the graph.
+
+    Given a set of edges, we can decide whether it is an edge cover by
+    simply checking whether every node of the graph is incident to at
+    least one edge of the set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        An undirected graph.
+
+    cover : set
+        Set of edges to be checked.
+
+    Returns
+    -------
+    bool
+        Whether the set of edges is a valid edge cover of the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> cover = {(2, 1), (3, 0)}
+    >>> nx.is_edge_cover(G, cover)
+    True
+
+    Notes
+    -----
+    An edge cover of a graph is a set of edges such that every node of
+    the graph is incident to at least one edge of the set.
+    """
+    return set(G) <= set(chain.from_iterable(cover))
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/cuts.py b/MLPY/Lib/site-packages/networkx/algorithms/cuts.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce455eb47c86ad369abc2ce7e426cc21b5c7c4b1
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/cuts.py
@@ -0,0 +1,400 @@
+"""Functions for finding and evaluating cuts in a graph."""
+
+from itertools import chain
+
+import networkx as nx
+
+__all__ = [
+    "boundary_expansion",
+    "conductance",
+    "cut_size",
+    "edge_expansion",
+    "mixing_expansion",
+    "node_expansion",
+    "normalized_cut_size",
+    "volume",
+]
+
+
+# TODO STILL NEED TO UPDATE ALL THE DOCUMENTATION!
+
+
+@nx._dispatch(edge_attrs="weight")
+def cut_size(G, S, T=None, weight=None):
+    """Returns the size of the cut between two sets of nodes.
+
+    A *cut* is a partition of the nodes of a graph into two sets. The
+    *cut size* is the sum of the weights of the edges "between" the two
+    sets of nodes.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    S : collection
+        A collection of nodes in `G`.
+
+    T : collection
+        A collection of nodes in `G`. If not specified, this is taken to
+        be the set complement of `S`.
+
+    weight : object
+        Edge attribute key to use as weight. If not specified, edges
+        have weight one.
+
+    Returns
+    -------
+    number
+        Total weight of all edges from nodes in set `S` to nodes in
+        set `T` (and, in the case of directed graphs, all edges from
+        nodes in `T` to nodes in `S`).
+
+    Examples
+    --------
+    In the graph with two cliques joined by a single edge, the natural
+    bipartition of the graph into two blocks, one for each clique,
+    yields a cut of weight one::
+
+        >>> G = nx.barbell_graph(3, 0)
+        >>> S = {0, 1, 2}
+        >>> T = {3, 4, 5}
+        >>> nx.cut_size(G, S, T)
+        1
+
+    Each parallel edge in a multigraph is counted when determining the
+    cut size::
+
+        >>> G = nx.MultiGraph(["ab", "ab"])
+        >>> S = {"a"}
+        >>> T = {"b"}
+        >>> nx.cut_size(G, S, T)
+        2
+
+    Notes
+    -----
+    In a multigraph, the cut size is the total weight of edges including
+    multiplicity.
+
+    """
+    edges = nx.edge_boundary(G, S, T, data=weight, default=1)
+    if G.is_directed():
+        edges = chain(edges, nx.edge_boundary(G, T, S, data=weight, default=1))
+    return sum(weight for u, v, weight in edges)
+
+
+@nx._dispatch(edge_attrs="weight")
+def volume(G, S, weight=None):
+    """Returns the volume of a set of nodes.
+
+    The *volume* of a set *S* is the sum of the (out-)degrees of nodes
+    in *S* (taking into account parallel edges in multigraphs).
[1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The volume of the set of nodes represented by `S` in the graph + `G`. + + See also + -------- + conductance + cut_size + edge_expansion + edge_boundary + normalized_cut_size + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + degree = G.out_degree if G.is_directed() else G.degree + return sum(d for v, d in degree(S, weight=weight)) + + +@nx._dispatch(edge_attrs="weight") +def normalized_cut_size(G, S, T=None, weight=None): + """Returns the normalized size of the cut between two sets of nodes. + + The *normalized cut size* is the cut size times the sum of the + reciprocal sizes of the volumes of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The normalized cut size between the two sets `S` and `T`. + + Notes + ----- + In a multigraph, the cut size is the total weight of edges including + multiplicity. + + See also + -------- + conductance + cut_size + edge_expansion + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges * ((1 / volume_S) + (1 / volume_T)) + + +@nx._dispatch(edge_attrs="weight") +def conductance(G, S, T=None, weight=None): + """Returns the conductance of two sets of nodes. + + The *conductance* is the quotient of the cut size and the smaller of + the volumes of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The conductance between the two sets `S` and `T`. + + See also + -------- + cut_size + edge_expansion + normalized_cut_size + volume + + References + ---------- + .. [1] David Gleich. + *Hierarchical Directed Spectral Graph Partitioning*. + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T, weight=weight) + volume_S = volume(G, S, weight=weight) + volume_T = volume(G, T, weight=weight) + return num_cut_edges / min(volume_S, volume_T) + + +@nx._dispatch(edge_attrs="weight") +def edge_expansion(G, S, T=None, weight=None): + """Returns the edge expansion between two node sets. + + The *edge expansion* is the quotient of the cut size and the smaller + of the cardinalities of the two sets. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The edge expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Fan Chung. + *Spectral Graph Theory*. 
+ (CBMS Regional Conference Series in Mathematics, No. 92), + American Mathematical Society, 1997, ISBN 0-8218-0315-8 + + + """ + if T is None: + T = set(G) - set(S) + num_cut_edges = cut_size(G, S, T=T, weight=weight) + return num_cut_edges / min(len(S), len(T)) + + +@nx._dispatch(edge_attrs="weight") +def mixing_expansion(G, S, T=None, weight=None): + """Returns the mixing expansion between two node sets. + + The *mixing expansion* is the quotient of the cut size and twice the + number of edges in the graph. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + T : collection + A collection of nodes in `G`. + + weight : object + Edge attribute key to use as weight. If not specified, edges + have weight one. + + Returns + ------- + number + The mixing expansion between the two sets `S` and `T`. + + See also + -------- + boundary_expansion + edge_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + num_cut_edges = cut_size(G, S, T=T, weight=weight) + num_total_edges = G.number_of_edges() + return num_cut_edges / (2 * num_total_edges) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatch +def node_expansion(G, S): + """Returns the node expansion of the set `S`. + + The *node expansion* is the quotient of the size of the node + boundary of *S* and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The node expansion of the set `S`. + + See also + -------- + boundary_expansion + edge_expansion + mixing_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends + in Theoretical Computer Science* 7.1–3 (2011): 1–336. + + + """ + neighborhood = set(chain.from_iterable(G.neighbors(v) for v in S)) + return len(neighborhood) / len(S) + + +# TODO What is the generalization to two arguments, S and T? Does the +# denominator become `min(len(S), len(T))`? +@nx._dispatch +def boundary_expansion(G, S): + """Returns the boundary expansion of the set `S`. + + The *boundary expansion* is the quotient of the size + of the node boundary and the cardinality of *S*. [1] + + Parameters + ---------- + G : NetworkX graph + + S : collection + A collection of nodes in `G`. + + Returns + ------- + number + The boundary expansion of the set `S`. + + See also + -------- + edge_expansion + mixing_expansion + node_expansion + + References + ---------- + .. [1] Vadhan, Salil P. + "Pseudorandomness." + *Foundations and Trends in Theoretical Computer Science* + 7.1–3 (2011): 1–336. 
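+
+    Examples
+    --------
+    >>> # One triangle of the barbell graph: its node boundary is just
+    >>> # the single attachment node, so the expansion is 1/3.
+    >>> G = nx.barbell_graph(3, 0)
+    >>> nx.boundary_expansion(G, {0, 1, 2})
+    0.3333333333333333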
+ + + """ + return len(nx.node_boundary(G, S)) / len(S) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/cycles.py b/MLPY/Lib/site-packages/networkx/algorithms/cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..9149e9eb10dcf4dfd99cfeab80f90ede47a71d95 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/cycles.py @@ -0,0 +1,1230 @@ +""" +======================== +Cycle finding algorithms +======================== +""" + +from collections import Counter, defaultdict +from itertools import combinations, product +from math import inf + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "cycle_basis", + "simple_cycles", + "recursive_simple_cycles", + "find_cycle", + "minimum_cycle_basis", + "chordless_cycles", + "girth", +] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def cycle_basis(G, root=None): + """Returns a list of cycles which form a basis for cycles of G. + + A basis for cycles of a network is a minimal collection of + cycles such that any cycle in the network can be written + as a sum of cycles in the basis. Here summation of cycles + is defined as "exclusive or" of the edges. Cycle bases are + useful, e.g. when deriving equations for electric circuits + using Kirchhoff's Laws. + + Parameters + ---------- + G : NetworkX Graph + root : node, optional + Specify starting node for basis. + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> nx.cycle_basis(G, 0) + [[3, 4, 5, 0], [1, 2, 3, 0]] + + Notes + ----- + This is adapted from algorithm CACM 491 [1]_. + + References + ---------- + .. [1] Paton, K. An algorithm for finding a fundamental set of + cycles of a graph. Comm. ACM 12, 9 (Sept 1969), 514-518. + + See Also + -------- + simple_cycles + """ + gnodes = dict.fromkeys(G) # set-like object that maintains node order + cycles = [] + while gnodes: # loop over connected components + if root is None: + root = gnodes.popitem()[0] + stack = [root] + pred = {root: root} + used = {root: set()} + while stack: # walk the spanning tree finding cycles + z = stack.pop() # use last-in so cycles easier to find + zused = used[z] + for nbr in G[z]: + if nbr not in used: # new node + pred[nbr] = z + stack.append(nbr) + used[nbr] = {z} + elif nbr == z: # self loops + cycles.append([z]) + elif nbr not in zused: # found a cycle + pn = used[nbr] + cycle = [nbr, z] + p = pred[z] + while p not in pn: + cycle.append(p) + p = pred[p] + cycle.append(p) + cycles.append(cycle) + used[nbr].add(z) + for node in pred: + gnodes.pop(node, None) + root = None + return cycles + + +@nx._dispatch +def simple_cycles(G, length_bound=None): + """Find simple cycles (elementary circuits) of a graph. + + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. In a directed graph, two simple cycles are distinct + if they are not cyclic permutations of each other. In an undirected graph, + two simple cycles are distinct if they are not cyclic permutations of each + other nor of the other's reversal. + + Optionally, the cycles are bounded in length. In the unbounded case, we use + a nonrecursive, iterator/generator version of Johnson's algorithm [1]_. In + the bounded case, we use a version of the algorithm of Gupta and + Suzumura[2]_. 
There may be better algorithms for some cases [3]_ [4]_ [5]_. + + The algorithms of Johnson, and Gupta and Suzumura, are enhanced by some + well-known preprocessing techniques. When G is directed, we restrict our + attention to strongly connected components of G, generate all simple cycles + containing a certain node, remove that node, and further decompose the + remainder into strongly connected components. When G is undirected, we + restrict our attention to biconnected components, generate all simple cycles + containing a particular edge, remove that edge, and further decompose the + remainder into biconnected components. + + Note that multigraphs are supported by this function -- and in undirected + multigraphs, a pair of parallel edges is considered a cycle of length 2. + Likewise, self-loops are considered to be cycles of length 1. We define + cycles as sequences of nodes; so the presence of loops and parallel edges + does not change the number of simple cycles in a graph. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None, optional (default=None) + If length_bound is an int, generate all simple cycles of G with length at + most length_bound. Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + Examples + -------- + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> sorted(nx.simple_cycles(G)) + [[0], [0, 1, 2], [0, 2], [1, 2], [2]] + + To filter the cycles so that they don't include certain nodes or edges, + copy your graph and eliminate those nodes or edges before calling. + For example, to exclude self-loops from the above example: + + >>> H = G.copy() + >>> H.remove_edges_from(nx.selfloop_edges(G)) + >>> sorted(nx.simple_cycles(H)) + [[0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + When length_bound is None, the time complexity is $O((n+e)(c+1))$ for $n$ + nodes, $e$ edges and $c$ simple circuits. Otherwise, when length_bound > 1, + the time complexity is $O((c+n)(k-1)d^k)$ where $d$ is the average degree of + the nodes of G and $k$ = length_bound. + + Raises + ------ + ValueError + when length_bound < 0. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + .. [2] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. Suzumura https://arxiv.org/abs/2105.10094 + .. [3] Enumerating the cycles of a digraph: a new preprocessing strategy. + G. Loizou and P. Thanish, Information Sciences, v. 27, 163-182, 1982. + .. [4] A search strategy for the elementary cycles of a directed graph. + J.L. Szwarcfiter and P.E. Lauer, BIT NUMERICAL MATHEMATICS, + v. 16, no. 2, 192-204, 1976. + .. [5] Optimal Listing of Cycles and st-Paths in Undirected Graphs + R. Ferreira and R. Grossi and A. Marino and N. Pisanti and R. Rizzi and + G. 
Sacomoto https://arxiv.org/abs/1205.2766
+
+    See Also
+    --------
+    cycle_basis
+    chordless_cycles
+    """
+
+    if length_bound is not None:
+        if length_bound == 0:
+            return
+        elif length_bound < 0:
+            raise ValueError("length bound must be non-negative")
+
+    directed = G.is_directed()
+    yield from ([v] for v, Gv in G.adj.items() if v in Gv)
+
+    if length_bound is not None and length_bound == 1:
+        return
+
+    if G.is_multigraph() and not directed:
+        visited = set()
+        for u, Gu in G.adj.items():
+            multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited)
+            yield from ([u, v] for v, m in multiplicity if m > 1)
+            visited.add(u)
+
+    # explicitly filter out loops; implicitly filter out parallel edges
+    if directed:
+        G = nx.DiGraph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
+    else:
+        G = nx.Graph((u, v) for u, Gu in G.adj.items() for v in Gu if v != u)
+
+    # this case is not strictly necessary but improves performance
+    if length_bound is not None and length_bound == 2:
+        if directed:
+            visited = set()
+            for u, Gu in G.adj.items():
+                yield from (
+                    [v, u] for v in visited.intersection(Gu) if G.has_edge(v, u)
+                )
+                visited.add(u)
+        return
+
+    if directed:
+        yield from _directed_cycle_search(G, length_bound)
+    else:
+        yield from _undirected_cycle_search(G, length_bound)
+
+
+def _directed_cycle_search(G, length_bound):
+    """A dispatch function for `simple_cycles` for directed graphs.
+
+    We generate all cycles of G through binary partition.
+
+    1. Pick a node v in G which belongs to at least one cycle
+        a. Generate all cycles of G which contain the node v.
+        b. Recursively generate all cycles of G \\ v.
+
+    This is accomplished through the following:
+
+    1. Compute the strongly connected components SCC of G.
+    2. Select and remove a strongly connected component C from SCC.
+       Select a node v of C.
+    3. For each simple cycle P containing v in G[C], yield P.
+    4. Add the strongly connected components of G[C \\ v] to SCC.
+
+    If the parameter length_bound is not None, then step 3 will be limited to
+    simple cycles of length at most length_bound.
+
+    Parameters
+    ----------
+    G : NetworkX DiGraph
+        A directed graph
+
+    length_bound : int or None
+        If length_bound is an int, generate all simple cycles of G with length at most length_bound.
+        Otherwise, generate all simple cycles of G.
+
+    Yields
+    ------
+    list of nodes
+        Each cycle is represented by a list of nodes along the cycle.
+    """
+
+    scc = nx.strongly_connected_components
+    components = [c for c in scc(G) if len(c) >= 2]
+    while components:
+        c = components.pop()
+        Gc = G.subgraph(c)
+        v = next(iter(c))
+        if length_bound is None:
+            yield from _johnson_cycle_search(Gc, [v])
+        else:
+            yield from _bounded_cycle_search(Gc, [v], length_bound)
+        # delete v after searching G, to make sure we can find v
+        G.remove_node(v)
+        components.extend(c for c in scc(Gc) if len(c) >= 2)
+
+
+def _undirected_cycle_search(G, length_bound):
+    """A dispatch function for `simple_cycles` for undirected graphs.
+
+    We generate all cycles of G through binary partition.
+
+    1. Pick an edge (u, v) in G which belongs to at least one cycle
+        a. Generate all cycles of G which contain the edge (u, v)
+        b. Recursively generate all cycles of G \\ (u, v)
+
+    This is accomplished through the following:
+
+    1. Compute the biconnected components BCC of G.
+    2. Select and remove a biconnected component C from BCC. Select a
+       non-tree edge (u, v) of a depth-first search of G[C].
+    3.
For each (v -> u) path P remaining in G[C] \\ (u, v), yield P. + 4. Add the biconnected components of G[C] \\ (u, v) to BCC. + + If the parameter length_bound is not None, then step 3 will be limited to simple paths + of length at most length_bound. + + Parameters + ---------- + G : NetworkX Graph + An undirected graph + + length_bound : int or None + If length_bound is an int, generate all simple cycles of G with length at most length_bound. + Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + """ + + bcc = nx.biconnected_components + components = [c for c in bcc(G) if len(c) >= 3] + while components: + c = components.pop() + Gc = G.subgraph(c) + uv = list(next(iter(Gc.edges))) + G.remove_edge(*uv) + # delete (u, v) before searching G, to avoid fake 3-cycles [u, v, u] + if length_bound is None: + yield from _johnson_cycle_search(Gc, uv) + else: + yield from _bounded_cycle_search(Gc, uv, length_bound) + components.extend(c for c in bcc(Gc) if len(c) >= 3) + + +class _NeighborhoodCache(dict): + """Very lightweight graph wrapper which caches neighborhoods as list. + + This dict subclass uses the __missing__ functionality to query graphs for + their neighborhoods, and store the result as a list. This is used to avoid + the performance penalty incurred by subgraph views. + """ + + def __init__(self, G): + self.G = G + + def __missing__(self, v): + Gv = self[v] = list(self.G[v]) + return Gv + + +def _johnson_cycle_search(G, path): + """The main loop of the cycle-enumeration algorithm of Johnson. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + """ + + G = _NeighborhoodCache(G) + blocked = set(path) + B = defaultdict(set) # graph portions that yield no elementary circuit + start = path[0] + stack = [iter(G[path[-1]])] + closed = [False] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + closed[-1] = True + elif w not in blocked: + path.append(w) + closed.append(False) + stack.append(iter(G[w])) + blocked.add(w) + break + else: # no more nbrs + stack.pop() + v = path.pop() + if closed.pop(): + if closed: + closed[-1] = True + unblock_stack = {v} + while unblock_stack: + u = unblock_stack.pop() + if u in blocked: + blocked.remove(u) + unblock_stack.update(B[u]) + B[u].clear() + else: + for w in G[v]: + B[w].add(v) + + +def _bounded_cycle_search(G, path, length_bound): + """The main loop of the cycle-enumeration algorithm of Gupta and Suzumura. + + Parameters + ---------- + G : NetworkX Graph or DiGraph + A graph + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound: int + A length bound. All cycles generated will have length at most length_bound. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Finding All Bounded-Length Simple Cycles in a Directed Graph + A. Gupta and T. 
Suzumura https://arxiv.org/abs/2105.10094 + + """ + G = _NeighborhoodCache(G) + lock = {v: 0 for v in path} + B = defaultdict(set) + start = path[0] + stack = [iter(G[path[-1]])] + blen = [length_bound] + while stack: + nbrs = stack[-1] + for w in nbrs: + if w == start: + yield path[:] + blen[-1] = 1 + elif len(path) < lock.get(w, length_bound): + path.append(w) + blen.append(length_bound) + lock[w] = len(path) + stack.append(iter(G[w])) + break + else: + stack.pop() + v = path.pop() + bl = blen.pop() + if blen: + blen[-1] = min(blen[-1], bl) + if bl < length_bound: + relax_stack = [(bl, v)] + while relax_stack: + bl, u = relax_stack.pop() + if lock.get(u, length_bound) < length_bound - bl + 1: + lock[u] = length_bound - bl + 1 + relax_stack.extend((bl + 1, w) for w in B[u].difference(path)) + else: + for w in G[v]: + B[w].add(v) + + +@nx._dispatch +def chordless_cycles(G, length_bound=None): + """Find simple chordless cycles of a graph. + + A `simple cycle` is a closed path where no node appears twice. In a simple + cycle, a `chord` is an additional edge between two nodes in the cycle. A + `chordless cycle` is a simple cycle without chords. Said differently, a + chordless cycle is a cycle C in a graph G where the number of edges in the + induced graph G[C] is equal to the length of `C`. + + Note that some care must be taken in the case that G is not a simple graph + nor a simple digraph. Some authors limit the definition of chordless cycles + to have a prescribed minimum length; we do not. + + 1. We interpret self-loops to be chordless cycles, except in multigraphs + with multiple loops in parallel. Likewise, in a chordless cycle of + length greater than 1, there can be no nodes with self-loops. + + 2. We interpret directed two-cycles to be chordless cycles, except in + multi-digraphs when any edge in a two-cycle has a parallel copy. + + 3. We interpret parallel pairs of undirected edges as two-cycles, except + when a third (or more) parallel edge exists between the two nodes. + + 4. Generalizing the above, edges with parallel clones may not occur in + chordless cycles. + + In a directed graph, two chordless cycles are distinct if they are not + cyclic permutations of each other. In an undirected graph, two chordless + cycles are distinct if they are not cyclic permutations of each other nor of + the other's reversal. + + Optionally, the cycles are bounded in length. + + We use an algorithm strongly inspired by that of Dias et al [1]_. It has + been modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + length_bound : int or None, optional (default=None) + If length_bound is an int, generate all simple cycles of G with length at + most length_bound. Otherwise, generate all simple cycles of G. + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. 
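+
+    For example, bounding the length keeps a chordless triangle while
+    dropping a chordless square::
+
+        >>> G = nx.Graph()
+        >>> nx.add_cycle(G, [0, 1, 2, 3])
+        >>> nx.add_cycle(G, [4, 5, 6])
+        >>> sorted(len(c) for c in nx.chordless_cycles(G))
+        [3, 4]
+        >>> sorted(len(c) for c in nx.chordless_cycles(G, length_bound=3))
+        [3]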
+ + Examples + -------- + >>> sorted(list(nx.chordless_cycles(nx.complete_graph(4)))) + [[1, 0, 2], [1, 0, 3], [2, 0, 3], [2, 1, 3]] + + Notes + ----- + When length_bound is None, and the graph is simple, the time complexity is + $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ chordless cycles. + + Raises + ------ + ValueError + when length_bound < 0. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + See Also + -------- + simple_cycles + """ + + if length_bound is not None: + if length_bound == 0: + return + elif length_bound < 0: + raise ValueError("length bound must be non-negative") + + directed = G.is_directed() + multigraph = G.is_multigraph() + + if multigraph: + yield from ([v] for v, Gv in G.adj.items() if len(Gv.get(v, ())) == 1) + else: + yield from ([v] for v, Gv in G.adj.items() if v in Gv) + + if length_bound is not None and length_bound == 1: + return + + # Nodes with loops cannot belong to longer cycles. Let's delete them here. + # also, we implicitly reduce the multiplicity of edges down to 1 in the case + # of multiedges. + if directed: + F = nx.DiGraph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu) + B = F.to_undirected(as_view=False) + else: + F = nx.Graph((u, v) for u, Gu in G.adj.items() if u not in Gu for v in Gu) + B = None + + # If we're given a multigraph, we have a few cases to consider with parallel + # edges. + # + # 1. If we have 2 or more edges in parallel between the nodes (u, v), we + # must not construct longer cycles along (u, v). + # 2. If G is not directed, then a pair of parallel edges between (u, v) is a + # chordless cycle unless there exists a third (or more) parallel edge. + # 3. If G is directed, then parallel edges do not form cycles, but do + # preclude back-edges from forming cycles (handled in the next section), + # Thus, if an edge (u, v) is duplicated and the reverse (v, u) is also + # present, then we remove both from F. + # + # In directed graphs, we need to consider both directions that edges can + # take, so iterate over all edges (u, v) and possibly (v, u). In undirected + # graphs, we need to be a little careful to only consider every edge once, + # so we use a "visited" set to emulate node-order comparisons. + + if multigraph: + if not directed: + B = F.copy() + visited = set() + for u, Gu in G.adj.items(): + if directed: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items()) + for v, m in multiplicity: + if m > 1: + F.remove_edges_from(((u, v), (v, u))) + else: + multiplicity = ((v, len(Guv)) for v, Guv in Gu.items() if v in visited) + for v, m in multiplicity: + if m == 2: + yield [u, v] + if m > 1: + F.remove_edge(u, v) + visited.add(u) + + # If we're given a directed graphs, we need to think about digons. If we + # have two edges (u, v) and (v, u), then that's a two-cycle. If either edge + # was duplicated above, then we removed both from F. So, any digons we find + # here are chordless. After finding digons, we remove their edges from F + # to avoid traversing them in the search for chordless cycles. + if directed: + for u, Fu in F.adj.items(): + digons = [[u, v] for v in Fu if F.has_edge(v, u)] + yield from digons + F.remove_edges_from(digons) + F.remove_edges_from(e[::-1] for e in digons) + + if length_bound is not None and length_bound == 2: + return + + # Now, we prepare to search for cycles. We have removed all cycles of + # lengths 1 and 2, so F is a simple graph or simple digraph. 
We repeatedly + # separate digraphs into their strongly connected components, and undirected + # graphs into their biconnected components. For each component, we pick a + # node v, search for chordless cycles based at each "stem" (u, v, w), and + # then remove v from that component before separating the graph again. + if directed: + separate = nx.strongly_connected_components + + # Directed stems look like (u -> v -> w), so we use the product of + # predecessors of v with successors of v. + def stems(C, v): + for u, w in product(C.pred[v], C.succ[v]): + if not G.has_edge(u, w): # omit stems with acyclic chords + yield [u, v, w], F.has_edge(w, u) + + else: + separate = nx.biconnected_components + + # Undirected stems look like (u ~ v ~ w), but we must not also search + # (w ~ v ~ u), so we use combinations of v's neighbors of length 2. + def stems(C, v): + yield from (([u, v, w], F.has_edge(w, u)) for u, w in combinations(C[v], 2)) + + components = [c for c in separate(F) if len(c) > 2] + while components: + c = components.pop() + v = next(iter(c)) + Fc = F.subgraph(c) + Fcc = Bcc = None + for S, is_triangle in stems(Fc, v): + if is_triangle: + yield S + else: + if Fcc is None: + Fcc = _NeighborhoodCache(Fc) + Bcc = Fcc if B is None else _NeighborhoodCache(B.subgraph(c)) + yield from _chordless_cycle_search(Fcc, Bcc, S, length_bound) + + components.extend(c for c in separate(F.subgraph(c - {v})) if len(c) > 2) + + +def _chordless_cycle_search(F, B, path, length_bound): + """The main loop for chordless cycle enumeration. + + This algorithm is strongly inspired by that of Dias et al [1]_. It has been + modified in the following ways: + + 1. Recursion is avoided, per Python's limitations + + 2. The labeling function is not necessary, because the starting paths + are chosen (and deleted from the host graph) to prevent multiple + occurrences of the same path + + 3. The search is optionally bounded at a specified length + + 4. Support for directed graphs is provided by extending cycles along + forward edges, and blocking nodes along forward and reverse edges + + 5. Support for multigraphs is provided by omitting digons from the set + of forward edges + + Parameters + ---------- + F : _NeighborhoodCache + A graph of forward edges to follow in constructing cycles + + B : _NeighborhoodCache + A graph of blocking edges to prevent the production of chordless cycles + + path : list + A cycle prefix. All cycles generated will begin with this prefix. + + length_bound : int + A length bound. All cycles generated will have length at most length_bound. + + + Yields + ------ + list of nodes + Each cycle is represented by a list of nodes along the cycle. + + References + ---------- + .. [1] Efficient enumeration of chordless cycles + E. Dias and D. Castonguay and H. Longo and W.A.R. Jradi + https://arxiv.org/abs/1309.1051 + + """ + blocked = defaultdict(int) + target = path[0] + blocked[path[1]] = 1 + for w in path[1:]: + for v in B[w]: + blocked[v] += 1 + + stack = [iter(F[path[2]])] + while stack: + nbrs = stack[-1] + for w in nbrs: + if blocked[w] == 1 and (length_bound is None or len(path) < length_bound): + Fw = F[w] + if target in Fw: + yield path + [w] + else: + Bw = B[w] + if target in Bw: + continue + for v in Bw: + blocked[v] += 1 + path.append(w) + stack.append(iter(Fw)) + break + else: + stack.pop() + for v in B[path.pop()]: + blocked[v] -= 1 + + +@not_implemented_for("undirected") +@nx._dispatch +def recursive_simple_cycles(G): + """Find simple cycles (elementary circuits) of a directed graph. 
+ + A `simple cycle`, or `elementary circuit`, is a closed path where + no node appears twice. Two elementary circuits are distinct if they + are not cyclic permutations of each other. + + This version uses a recursive algorithm to build a list of cycles. + You should probably use the iterator version called simple_cycles(). + Warning: This recursive version uses lots of RAM! + It appears in NetworkX for pedagogical value. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + A list of cycles, where each cycle is represented by a list of nodes + along the cycle. + + Example: + + >>> edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + >>> G = nx.DiGraph(edges) + >>> nx.recursive_simple_cycles(G) + [[0], [2], [0, 1, 2], [0, 2], [1, 2]] + + Notes + ----- + The implementation follows pp. 79-80 in [1]_. + + The time complexity is $O((n+e)(c+1))$ for $n$ nodes, $e$ edges and $c$ + elementary circuits. + + References + ---------- + .. [1] Finding all the elementary circuits of a directed graph. + D. B. Johnson, SIAM Journal on Computing 4, no. 1, 77-84, 1975. + https://doi.org/10.1137/0204007 + + See Also + -------- + simple_cycles, cycle_basis + """ + + # Jon Olav Vik, 2010-08-09 + def _unblock(thisnode): + """Recursively unblock and remove nodes from B[thisnode].""" + if blocked[thisnode]: + blocked[thisnode] = False + while B[thisnode]: + _unblock(B[thisnode].pop()) + + def circuit(thisnode, startnode, component): + closed = False # set to True if elementary path is closed + path.append(thisnode) + blocked[thisnode] = True + for nextnode in component[thisnode]: # direct successors of thisnode + if nextnode == startnode: + result.append(path[:]) + closed = True + elif not blocked[nextnode]: + if circuit(nextnode, startnode, component): + closed = True + if closed: + _unblock(thisnode) + else: + for nextnode in component[thisnode]: + if thisnode not in B[nextnode]: # TODO: use set for speedup? + B[nextnode].append(thisnode) + path.pop() # remove thisnode from path + return closed + + path = [] # stack of nodes in current path + blocked = defaultdict(bool) # vertex: blocked from search? + B = defaultdict(list) # graph portions that yield no elementary circuit + result = [] # list to accumulate the circuits found + + # Johnson's algorithm exclude self cycle edges like (v, v) + # To be backward compatible, we record those cycles in advance + # and then remove from subG + for v in G: + if G.has_edge(v, v): + result.append([v]) + G.remove_edge(v, v) + + # Johnson's algorithm requires some ordering of the nodes. + # They might not be sortable so we assign an arbitrary ordering. 
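+    # Restricting each search to the subgraph of nodes at or after s in this
+    # ordering ensures every cycle is reported exactly once, from the cycle's
+    # minimal node.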
+ ordering = dict(zip(G, range(len(G)))) + for s in ordering: + # Build the subgraph induced by s and following nodes in the ordering + subgraph = G.subgraph(node for node in G if ordering[node] >= ordering[s]) + # Find the strongly connected component in the subgraph + # that contains the least node according to the ordering + strongcomp = nx.strongly_connected_components(subgraph) + mincomp = min(strongcomp, key=lambda ns: min(ordering[n] for n in ns)) + component = G.subgraph(mincomp) + if len(component) > 1: + # smallest node in the component according to the ordering + startnode = min(component, key=ordering.__getitem__) + for node in component: + blocked[node] = False + B[node][:] = [] + dummy = circuit(startnode, startnode, component) + return result + + +@nx._dispatch +def find_cycle(G, source=None, orientation=None): + """Returns a cycle found via depth-first traversal. + + The cycle is a list of edges indicating the cyclic path. + Orientation of directed edges is controlled by `orientation`. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Returns + ------- + edges : directed edges + A list of directed edges indicating the path taken for the loop. + If no cycle is found, then an exception is raised. + For graphs, an edge is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, an edge is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Raises + ------ + NetworkXNoCycle + If no cycle was found. + + Examples + -------- + In this example, we construct a DAG and find, in the first call, that there + are no directed cycles, and so an exception is raised. In the second call, + we ignore edge orientations and find that there is an undirected cycle. + Note that the second call finds a directed cycle while effectively + traversing an undirected graph, and so, we found an "undirected cycle". + This means that this DAG structure does not form a directed tree (which + is also known as a polytree). + + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2)]) + >>> nx.find_cycle(G, orientation="original") + Traceback (most recent call last): + ... + networkx.exception.NetworkXNoCycle: No cycle found. 
+ >>> list(nx.find_cycle(G, orientation="ignore")) + [(0, 1, 'forward'), (1, 2, 'forward'), (0, 2, 'reverse')] + + See Also + -------- + simple_cycles + """ + if not G.is_directed() or orientation in (None, "original"): + + def tailhead(edge): + return edge[:2] + + elif orientation == "reverse": + + def tailhead(edge): + return edge[1], edge[0] + + elif orientation == "ignore": + + def tailhead(edge): + if edge[-1] == "reverse": + return edge[1], edge[0] + return edge[:2] + + explored = set() + cycle = [] + final_node = None + for start_node in G.nbunch_iter(source): + if start_node in explored: + # No loop is possible. + continue + + edges = [] + # All nodes seen in this iteration of edge_dfs + seen = {start_node} + # Nodes in active path. + active_nodes = {start_node} + previous_head = None + + for edge in nx.edge_dfs(G, start_node, orientation): + # Determine if this edge is a continuation of the active path. + tail, head = tailhead(edge) + if head in explored: + # Then we've already explored it. No loop is possible. + continue + if previous_head is not None and tail != previous_head: + # This edge results from backtracking. + # Pop until we get a node whose head equals the current tail. + # So for example, we might have: + # (0, 1), (1, 2), (2, 3), (1, 4) + # which must become: + # (0, 1), (1, 4) + while True: + try: + popped_edge = edges.pop() + except IndexError: + edges = [] + active_nodes = {tail} + break + else: + popped_head = tailhead(popped_edge)[1] + active_nodes.remove(popped_head) + + if edges: + last_head = tailhead(edges[-1])[1] + if tail == last_head: + break + edges.append(edge) + + if head in active_nodes: + # We have a loop! + cycle.extend(edges) + final_node = head + break + else: + seen.add(head) + active_nodes.add(head) + previous_head = head + + if cycle: + break + else: + explored.update(seen) + + else: + assert len(cycle) == 0 + raise nx.exception.NetworkXNoCycle("No cycle found.") + + # We now have a list of edges which ends on a cycle. + # So we need to remove from the beginning edges that are not relevant. + + for i, edge in enumerate(cycle): + tail, head = tailhead(edge) + if tail == final_node: + break + + return cycle[i:] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def minimum_cycle_basis(G, weight=None): + """Returns a minimum weight cycle basis for G + + Minimum weight means a cycle basis for which the total weight + (length for unweighted graphs) of all the cycles is minimum. + + Parameters + ---------- + G : NetworkX Graph + weight: string + name of the edge attribute to use for edge weights + + Returns + ------- + A list of cycle lists. Each cycle list is a list of nodes + which forms a cycle (loop) in G. Note that the nodes are not + necessarily returned in a order by which they appear in the cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [0, 3, 4, 5]) + >>> nx.minimum_cycle_basis(G) + [[5, 4, 3, 0], [3, 2, 1, 0]] + + References: + [1] Kavitha, Telikepalli, et al. "An O(m^2n) Algorithm for + Minimum Cycle Basis of Graphs." + http://link.springer.com/article/10.1007/s00453-007-9064-z + [2] de Pina, J. 1995. Applications of shortest path methods. + Ph.D. 
thesis, University of Amsterdam, Netherlands + + See Also + -------- + simple_cycles, cycle_basis + """ + # We first split the graph in connected subgraphs + return sum( + (_min_cycle_basis(G.subgraph(c), weight) for c in nx.connected_components(G)), + [], + ) + + +def _min_cycle_basis(G, weight): + cb = [] + # We extract the edges not in a spanning tree. We do not really need a + # *minimum* spanning tree. That is why we call the next function with + # weight=None. Depending on implementation, it may be faster as well + tree_edges = list(nx.minimum_spanning_edges(G, weight=None, data=False)) + chords = G.edges - tree_edges - {(v, u) for u, v in tree_edges} + + # We maintain a set of vectors orthogonal to sofar found cycles + set_orth = [{edge} for edge in chords] + while set_orth: + base = set_orth.pop() + # kth cycle is "parallel" to kth vector in set_orth + cycle_edges = _min_cycle(G, base, weight) + cb.append([v for u, v in cycle_edges]) + + # now update set_orth so that k+1,k+2... th elements are + # orthogonal to the newly found cycle, as per [p. 336, 1] + set_orth = [ + ( + {e for e in orth if e not in base if e[::-1] not in base} + | {e for e in base if e not in orth if e[::-1] not in orth} + ) + if sum((e in orth or e[::-1] in orth) for e in cycle_edges) % 2 + else orth + for orth in set_orth + ] + return cb + + +def _min_cycle(G, orth, weight): + """ + Computes the minimum weight cycle in G, + orthogonal to the vector orth as per [p. 338, 1] + Use (u, 1) to indicate the lifted copy of u (denoted u' in paper). + """ + Gi = nx.Graph() + + # Add 2 copies of each edge in G to Gi. + # If edge is in orth, add cross edge; otherwise in-plane edge + for u, v, wt in G.edges(data=weight, default=1): + if (u, v) in orth or (v, u) in orth: + Gi.add_edges_from([(u, (v, 1)), ((u, 1), v)], Gi_weight=wt) + else: + Gi.add_edges_from([(u, v), ((u, 1), (v, 1))], Gi_weight=wt) + + # find the shortest length in Gi between n and (n, 1) for each n + # Note: Use "Gi_weight" for name of weight attribute + spl = nx.shortest_path_length + lift = {n: spl(Gi, source=n, target=(n, 1), weight="Gi_weight") for n in G} + + # Now compute that short path in Gi, which translates to a cycle in G + start = min(lift, key=lift.get) + end = (start, 1) + min_path_i = nx.shortest_path(Gi, source=start, target=end, weight="Gi_weight") + + # Now we obtain the actual path, re-map nodes in Gi to those in G + min_path = [n if n in G else n[0] for n in min_path_i] + + # Now remove the edges that occur two times + # two passes: flag which edges get kept, then build it + edgelist = list(pairwise(min_path)) + edgeset = set() + for e in edgelist: + if e in edgeset: + edgeset.remove(e) + elif e[::-1] in edgeset: + edgeset.remove(e[::-1]) + else: + edgeset.add(e) + + min_edgelist = [] + for e in edgelist: + if e in edgeset: + min_edgelist.append(e) + edgeset.remove(e) + elif e[::-1] in edgeset: + min_edgelist.append(e[::-1]) + edgeset.remove(e[::-1]) + + return min_edgelist + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def girth(G): + """Returns the girth of the graph. + + The girth of a graph is the length of its shortest cycle, or infinity if + the graph is acyclic. The algorithm follows the description given on the + Wikipedia page [1]_, and runs in time O(mn) on a graph with m edges and n + nodes. 
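
Stepping back to `minimum_cycle_basis` above: with edge weights, the cheapest basis need not consist of the shortest cycles. A small sketch, with weights of my own choosing:

```python
import networkx as nx

# A square 0-1-2-3 of unit-weight edges plus a heavy chord (0, 2).
G = nx.Graph()
G.add_weighted_edges_from(
    [(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 0, 1), (0, 2, 10)]
)
# The cheapest basis is the light 4-cycle plus one triangle through the
# heavy chord (total weight 16), not the two heavy triangles (24).
for cycle in nx.minimum_cycle_basis(G, weight="weight"):
    print(sorted(cycle))
```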
+ + Parameters + ---------- + G : NetworkX Graph + + Returns + ------- + int or math.inf + + Examples + -------- + All examples below (except P_5) can easily be checked using Wikipedia, + which has a page for each of these famous graphs. + + >>> nx.girth(nx.chvatal_graph()) + 4 + >>> nx.girth(nx.tutte_graph()) + 4 + >>> nx.girth(nx.petersen_graph()) + 5 + >>> nx.girth(nx.heawood_graph()) + 6 + >>> nx.girth(nx.pappus_graph()) + 6 + >>> nx.girth(nx.path_graph(5)) + inf + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Girth_(graph_theory) + + """ + girth = depth_limit = inf + tree_edge = nx.algorithms.traversal.breadth_first_search.TREE_EDGE + level_edge = nx.algorithms.traversal.breadth_first_search.LEVEL_EDGE + for n in G: + # run a BFS from source n, keeping track of distances; since we want + # the shortest cycle, no need to explore beyond the current minimum length + depth = {n: 0} + for u, v, label in nx.bfs_labeled_edges(G, n): + du = depth[u] + if du > depth_limit: + break + if label is tree_edge: + depth[v] = du + 1 + else: + # if (u, v) is a level edge, the length is du + du + 1 (odd) + # otherwise, it's a forward edge; length is du + (du + 1) + 1 (even) + delta = label is level_edge + length = du + du + 2 - delta + if length < girth: + girth = length + depth_limit = du - delta + + return girth diff --git a/MLPY/Lib/site-packages/networkx/algorithms/d_separation.py b/MLPY/Lib/site-packages/networkx/algorithms/d_separation.py new file mode 100644 index 0000000000000000000000000000000000000000..4322b095822ad750ff016fabeabfeb71540143b1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/d_separation.py @@ -0,0 +1,457 @@ +""" +Algorithm for testing d-separation in DAGs. + +*d-separation* is a test for conditional independence in probability +distributions that can be factorized using DAGs. It is a purely +graphical test that uses the underlying graph and makes no reference +to the actual distribution parameters. See [1]_ for a formal +definition. + +The implementation is based on the conceptually simple linear time +algorithm presented in [2]_. Refer to [3]_, [4]_ for a couple of +alternative algorithms. + +Here, we provide a brief overview of d-separation and related concepts that +are relevant for understanding it: + +Blocking paths +-------------- + +Before we overview, we introduce the following terminology to describe paths: + +- "open" path: A path between two nodes that can be traversed +- "blocked" path: A path between two nodes that cannot be traversed + +A **collider** is a triplet of nodes along a path that is like the following: +``... u -> c <- v ...``), where 'c' is a common successor of ``u`` and ``v``. A path +through a collider is considered "blocked". When +a node that is a collider, or a descendant of a collider is included in +the d-separating set, then the path through that collider node is "open". If the +path through the collider node is open, then we will call this node an open collider. + +The d-separation set blocks the paths between ``u`` and ``v``. If you include colliders, +or their descendant nodes in the d-separation set, then those colliders will open up, +enabling a path to be traversed if it is not blocked some other way. + +Illustration of D-separation with examples +------------------------------------------ + +For a pair of two nodes, ``u`` and ``v``, all paths are considered open if +there is a path between ``u`` and ``v`` that is not blocked. 
That means, there is an open +path between ``u`` and ``v`` that does not encounter a collider, or a variable in the +d-separating set. + +For example, if the d-separating set is the empty set, then the following paths are +unblocked between ``u`` and ``v``: + +- u <- z -> v +- u -> w -> ... -> z -> v + +If for example, 'z' is in the d-separating set, then 'z' blocks those paths +between ``u`` and ``v``. + +Colliders block a path by default if they and their descendants are not included +in the d-separating set. An example of a path that is blocked when the d-separating +set is empty is: + +- u -> w -> ... -> z <- v + +because 'z' is a collider in this path and 'z' is not in the d-separating set. However, +if 'z' or a descendant of 'z' is included in the d-separating set, then the path through +the collider at 'z' (... -> z <- ...) is now "open". + +D-separation is concerned with blocking all paths between u and v. Therefore, a +d-separating set between ``u`` and ``v`` is one where all paths are blocked. + +D-separation and its applications in probability +------------------------------------------------ + +D-separation is commonly used in probabilistic graphical models. D-separation +connects the idea of probabilistic "dependence" with separation in a graph. If +one assumes the causal Markov condition [5]_, then d-separation implies conditional +independence in probability distributions. + +Examples +-------- + +>>> +>>> # HMM graph with five states and observation nodes +... g = nx.DiGraph() +>>> g.add_edges_from( +... [ +... ("S1", "S2"), +... ("S2", "S3"), +... ("S3", "S4"), +... ("S4", "S5"), +... ("S1", "O1"), +... ("S2", "O2"), +... ("S3", "O3"), +... ("S4", "O4"), +... ("S5", "O5"), +... ] +... ) +>>> +>>> # states/obs before 'S3' are d-separated from states/obs after 'S3' +... nx.d_separated(g, {"S1", "S2", "O1", "O2"}, {"S4", "S5", "O4", "O5"}, {"S3"}) +True + + +References +---------- + +.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press. + +.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks. + Cambridge: Cambridge University Press. + +.. [3] Shachter, R. D. (1998). + Bayes-ball: rational pastime (for determining irrelevance and requisite + information in belief networks and influence diagrams). + In , Proceedings of the Fourteenth Conference on Uncertainty in Artificial + Intelligence (pp. 480–487). + San Francisco, CA, USA: Morgan Kaufmann Publishers Inc. + +.. [4] Koller, D., & Friedman, N. (2009). + Probabilistic graphical models: principles and techniques. The MIT Press. + +.. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition + +""" + +from collections import deque + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for + +__all__ = ["d_separated", "minimal_d_separator", "is_minimal_d_separator"] + + +@not_implemented_for("undirected") +@nx._dispatch +def d_separated(G, x, y, z): + """ + Return whether node sets ``x`` and ``y`` are d-separated by ``z``. + + Parameters + ---------- + G : graph + A NetworkX DAG. + + x : set + First set of nodes in ``G``. + + y : set + Second set of nodes in ``G``. + + z : set + Set of conditioning nodes in ``G``. Can be empty set. + + Returns + ------- + b : bool + A boolean that is true if ``x`` is d-separated from ``y`` given ``z`` in ``G``. + + Raises + ------ + NetworkXError + The *d-separation* test is commonly used with directed + graphical models which are acyclic. Accordingly, the algorithm + raises a :exc:`NetworkXError` if the input graph is not a DAG. 
+
+
+References
+----------
+
+.. [1] Pearl, J. (2009). Causality. Cambridge: Cambridge University Press.
+
+.. [2] Darwiche, A. (2009). Modeling and reasoning with Bayesian networks.
+   Cambridge: Cambridge University Press.
+
+.. [3] Shachter, R. D. (1998).
+   Bayes-ball: rational pastime (for determining irrelevance and requisite
+   information in belief networks and influence diagrams).
+   In Proceedings of the Fourteenth Conference on Uncertainty in Artificial
+   Intelligence (pp. 480–487).
+   San Francisco, CA, USA: Morgan Kaufmann Publishers Inc.
+
+.. [4] Koller, D., & Friedman, N. (2009).
+   Probabilistic graphical models: principles and techniques. The MIT Press.
+
+.. [5] https://en.wikipedia.org/wiki/Causal_Markov_condition
+
+"""
+
+from collections import deque
+
+import networkx as nx
+from networkx.utils import UnionFind, not_implemented_for
+
+__all__ = ["d_separated", "minimal_d_separator", "is_minimal_d_separator"]
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def d_separated(G, x, y, z):
+    """
+    Return whether node sets ``x`` and ``y`` are d-separated by ``z``.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX DAG.
+
+    x : set
+        First set of nodes in ``G``.
+
+    y : set
+        Second set of nodes in ``G``.
+
+    z : set
+        Set of conditioning nodes in ``G``. May be an empty set.
+
+    Returns
+    -------
+    b : bool
+        True if ``x`` is d-separated from ``y`` given ``z`` in ``G``.
+
+    Raises
+    ------
+    NetworkXError
+        The *d-separation* test is commonly used with directed
+        graphical models which are acyclic. Accordingly, the algorithm
+        raises a :exc:`NetworkXError` if the input graph is not a DAG.
+
+    NodeNotFound
+        If any of the input nodes are not found in the graph,
+        a :exc:`NodeNotFound` exception is raised.
+
+    Notes
+    -----
+    A d-separating set in a DAG is a set of nodes that blocks all paths
+    between the two sets. A node in `z` blocks a path if it lies on the
+    path and is not a collider; a collider on the path blocks it unless
+    the collider or one of its descendants is in `z`. A collider structure
+    along a path is ``... -> c <- ...`` where ``c`` is the collider node.
+
+    https://en.wikipedia.org/wiki/Bayesian_network#d-separation
+    """
+
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("graph should be directed acyclic")
+
+    union_xyz = x.union(y).union(z)
+
+    if any(n not in G.nodes for n in union_xyz):
+        raise nx.NodeNotFound("one or more specified nodes not found in the graph")
+
+    G_copy = G.copy()
+
+    # transform the graph by removing leaves that are not in x | y | z
+    # until no more leaves can be removed.
+    leaves = deque([n for n in G_copy.nodes if G_copy.out_degree[n] == 0])
+    while len(leaves) > 0:
+        leaf = leaves.popleft()
+        if leaf not in union_xyz:
+            for p in G_copy.predecessors(leaf):
+                if G_copy.out_degree[p] == 1:
+                    leaves.append(p)
+            G_copy.remove_node(leaf)
+
+    # transform the graph by removing outgoing edges from the
+    # conditioning set.
+    edges_to_remove = list(G_copy.out_edges(z))
+    G_copy.remove_edges_from(edges_to_remove)
+
+    # use disjoint-set data structure to check if any node in `x`
+    # occurs in the same weakly connected component as a node in `y`.
+    disjoint_set = UnionFind(G_copy.nodes())
+    for component in nx.weakly_connected_components(G_copy):
+        disjoint_set.union(*component)
+    disjoint_set.union(*x)
+    disjoint_set.union(*y)
+
+    if x and y and disjoint_set[next(iter(x))] == disjoint_set[next(iter(y))]:
+        return False
+    else:
+        return True
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def minimal_d_separator(G, u, v):
+    """Compute a minimal d-separating set between ``u`` and ``v``.
+
+    A d-separating set in a DAG is a set of nodes that blocks all paths
+    between the two nodes, ``u`` and ``v``. This function constructs a
+    d-separating set that is "minimal", meaning that no proper subset of
+    it is itself a d-separating set for ``u`` and ``v``. Such a set is
+    not necessarily unique. For more details, see Notes.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx DAG.
+    u : node
+        A node in the graph, G.
+    v : node
+        A node in the graph, G.
+
+    Returns
+    -------
+    Z : set
+        A minimal d-separating set between ``u`` and ``v``.
+
+    Raises
+    ------
+    NetworkXError
+        Raises a :exc:`NetworkXError` if the input graph is not a DAG.
+
+    NodeNotFound
+        If any of the input nodes are not found in the graph,
+        a :exc:`NodeNotFound` exception is raised.
+
+    References
+    ----------
+    .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.
+
+    Notes
+    -----
+    This function finds only *a* minimal d-separator. It does not guarantee
+    uniqueness, since in a DAG there may be more than one minimal d-separator
+    between two nodes. Moreover, this only checks for minimal separators
+    between two nodes, not two sets. Finding minimal d-separators between
+    two sets of nodes is not supported.
+
+    Uses the algorithm presented in [1]_. The complexity of the algorithm
+    is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the
+    number of edges in the moralized graph of the sub-graph consisting
+    of only the ancestors of ``u`` and ``v``. For full details, see [1]_.
+
+    The algorithm works by constructing the moral graph consisting of just
+    the ancestors of `u` and `v`. Then it constructs a candidate for
+    a separating set ``Z'`` from the predecessors of `u` and `v`.
+    A BFS is then run from `u`, and the nodes of ``Z'`` that it reaches
+    are marked; the marked nodes form ``Z''``. A second BFS is run from
+    `v`, marking the nodes of ``Z''`` that it reaches. Those marked nodes
+    are the returned minimal d-separating set.
+
+    https://en.wikipedia.org/wiki/Bayesian_network#d-separation
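+
+    Examples
+    --------
+    A small illustrative sketch (the returned separator need not be unique
+    in general):
+
+    >>> G = nx.path_graph([0, 1, 2], create_using=nx.DiGraph)
+    >>> nx.minimal_d_separator(G, 0, 2)
+    {1}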
+    """
+    if not nx.is_directed_acyclic_graph(G):
+        raise nx.NetworkXError("graph should be directed acyclic")
+
+    union_uv = {u, v}
+
+    if any(n not in G.nodes for n in union_uv):
+        raise nx.NodeNotFound("one or more specified nodes not found in the graph")
+
+    # first construct the set of ancestors of u and v
+    x_anc = nx.ancestors(G, u)
+    y_anc = nx.ancestors(G, v)
+    D_anc_xy = x_anc.union(y_anc)
+    D_anc_xy.update((u, v))
+
+    # second, construct the moralization of the subgraph of Anc(u, v)
+    moral_G = nx.moral_graph(G.subgraph(D_anc_xy))
+
+    # find a candidate separating set Z' in moral_G
+    Z_prime = set(G.predecessors(u)).union(set(G.predecessors(v)))
+
+    # run a marking BFS from u over Z', then a second marking BFS from v
+    # over the marked set to obtain the minimal separator
+    Z_dprime = _bfs_with_marks(moral_G, u, Z_prime)
+    Z = _bfs_with_marks(moral_G, v, Z_dprime)
+    return Z
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def is_minimal_d_separator(G, u, v, z):
+    """Determine if a d-separating set is minimal.
+
+    A d-separating set, `z`, in a DAG is a set of nodes that blocks
+    all paths between the two nodes, `u` and `v`. This function
+    verifies that a set is "minimal", meaning that no proper subset of
+    `z` is itself a d-separating set between the two nodes.
+
+    Note that this function checks whether `z` is a d-separator AND is
+    minimal. To check only whether `z` is a d-separator, use the function
+    :func:`d_separated`. See examples below.
+
+    Parameters
+    ----------
+    G : nx.DiGraph
+        The graph.
+    u : node
+        A node in the graph.
+    v : node
+        A node in the graph.
+    z : Set of nodes
+        The set of nodes to check if it is a minimal d-separating set.
+        The function :func:`d_separated` is called inside this function
+        to verify that `z` is in fact a d-separator.
+
+    Returns
+    -------
+    bool
+        Whether or not the set `z` is a d-separator and is also minimal.
+
+    Examples
+    --------
+    >>> G = nx.path_graph([0, 1, 2, 3], create_using=nx.DiGraph)
+    >>> G.add_node(4)
+    >>> nx.is_minimal_d_separator(G, 0, 2, {1})
+    True
+    >>> # since {1} is the minimal d-separator, {1, 3, 4} is not minimal
+    >>> nx.is_minimal_d_separator(G, 0, 2, {1, 3, 4})
+    False
+    >>> # alternatively, if we only want to check that {1, 3, 4} is a d-separator
+    >>> nx.d_separated(G, {0}, {2}, {1, 3, 4})
+    True
+
+    Raises
+    ------
+    NetworkXError
+        Raises a :exc:`NetworkXError` if the input graph is not a DAG.
+
+    NodeNotFound
+        If any of the input nodes are not found in the graph,
+        a :exc:`NodeNotFound` exception is raised.
+
+    References
+    ----------
+    .. [1] Tian, J., & Paz, A. (1998). Finding Minimal D-separators.
+
+    Notes
+    -----
+    This function only verifies that a d-separating set is minimal between
+    two nodes; verifying minimality between two sets of nodes is not
+    supported.
+
+    Uses algorithm 2 presented in [1]_. The complexity of the algorithm
+    is :math:`O(|E_{An}^m|)`, where :math:`|E_{An}^m|` stands for the
+    number of edges in the moralized graph of the sub-graph consisting
+    of only the ancestors of ``u`` and ``v``.
+
+    The algorithm works by constructing the moral graph consisting of just
+    the ancestors of `u` and `v`. First, it performs BFS on the moral graph
+    starting from `u` and marking any nodes it encounters that are part of
+    the separating set, `z`. If a node is marked, then it does not continue
+    along that path. In the second stage, BFS with markings is repeated on
+    the moral graph starting from `v`. If, after either stage, any node in
+    `z` is left unmarked, then `z` is considered not minimal. If the end of
+    the algorithm is reached with every node of `z` marked, then `z` is
+    minimal.
+
+    For full details, see [1]_.
+
+    https://en.wikipedia.org/wiki/Bayesian_network#d-separation
+    """
+    if not nx.d_separated(G, {u}, {v}, z):
+        return False
+
+    x_anc = nx.ancestors(G, u)
+    y_anc = nx.ancestors(G, v)
+    xy_anc = x_anc.union(y_anc)
+
+    # if z contains any node which is not an ancestor of u or v,
+    # then it is definitely not minimal
+    if any(node not in xy_anc for node in z):
+        return False
+
+    D_anc_xy = x_anc.union(y_anc)
+    D_anc_xy.update((u, v))
+
+    # second, construct the moralization of the subgraph
+    moral_G = nx.moral_graph(G.subgraph(D_anc_xy))
+
+    # start the marking BFS from u
+    marks = _bfs_with_marks(moral_G, u, z)
+
+    # if not all of z is marked, then the set is not minimal
+    if any(node not in marks for node in z):
+        return False
+
+    # similarly, start the marking BFS from v and check the marks
+    marks = _bfs_with_marks(moral_G, v, z)
+    # if not all of z is marked, then the set is not minimal
+    if any(node not in marks for node in z):
+        return False
+
+    return True
+
+
+@not_implemented_for("directed")
+def _bfs_with_marks(G, start_node, check_set):
+    """Breadth-first-search with markings.
+
+    Performs BFS starting from ``start_node`` and whenever a node
+    inside ``check_set`` is met, it is "marked". Once a node is marked,
+    BFS does not continue along that path. The resulting marked nodes
+    are returned.
+
+    Parameters
+    ----------
+    G : nx.Graph
+        An undirected graph.
+    start_node : node
+        The start of the BFS.
+    check_set : set
+        The set of nodes to check against.
+
+    Returns
+    -------
+    marked : set
+        A set of nodes that were marked.
+    """
+    visited = {start_node}
+    marked = set()
+    queue = deque([start_node])
+
+    while queue:
+        m = queue.popleft()
+
+        for nbr in G.neighbors(m):
+            if nbr not in visited:
+                # record that this neighbor has been seen
+                visited.add(nbr)
+
+                # mark the node in Z' and do not continue along that path
+                if nbr in check_set:
+                    marked.add(nbr)
+                else:
+                    queue.append(nbr)
+    return marked
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/dag.py b/MLPY/Lib/site-packages/networkx/algorithms/dag.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb74df81c6dbaa5137569e70b087e313abfaa974
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/dag.py
@@ -0,0 +1,1258 @@
+"""Algorithms for directed acyclic graphs (DAGs).
+
+Note that most of these functions are only guaranteed to work for DAGs.
+In general, these functions do not check for acyclicity, so it is up
+to the user to check for that.
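+
+For example, callers can guard against cyclic input themselves before using
+these helpers (an illustrative sketch):
+
+>>> import networkx as nx
+>>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+>>> nx.is_directed_acyclic_graph(G)
+False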
+"""
+
+import heapq
+from collections import deque
+from functools import partial
+from itertools import chain, combinations, product, starmap
+from math import gcd
+
+import networkx as nx
+from networkx.utils import arbitrary_element, not_implemented_for, pairwise
+
+__all__ = [
+    "descendants",
+    "ancestors",
+    "topological_sort",
+    "lexicographical_topological_sort",
+    "all_topological_sorts",
+    "topological_generations",
+    "is_directed_acyclic_graph",
+    "is_aperiodic",
+    "transitive_closure",
+    "transitive_closure_dag",
+    "transitive_reduction",
+    "antichains",
+    "dag_longest_path",
+    "dag_longest_path_length",
+    "dag_to_branching",
+    "compute_v_structures",
+]
+
+chaini = chain.from_iterable
+
+
+@nx._dispatch
+def descendants(G, source):
+    """Returns all nodes reachable from `source` in `G`.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+    source : node in `G`
+
+    Returns
+    -------
+    set
+        The descendants of `source` in `G`
+
+    Raises
+    ------
+    NetworkXError
+        If node `source` is not in `G`.
+
+    Examples
+    --------
+    >>> DG = nx.path_graph(5, create_using=nx.DiGraph)
+    >>> sorted(nx.descendants(DG, 2))
+    [3, 4]
+
+    The `source` node is not a descendant of itself, but can be included manually:
+
+    >>> sorted(nx.descendants(DG, 2) | {2})
+    [2, 3, 4]
+
+    See also
+    --------
+    ancestors
+    """
+    return {child for parent, child in nx.bfs_edges(G, source)}
+
+
+@nx._dispatch
+def ancestors(G, source):
+    """Returns all nodes having a path to `source` in `G`.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+    source : node in `G`
+
+    Returns
+    -------
+    set
+        The ancestors of `source` in `G`
+
+    Raises
+    ------
+    NetworkXError
+        If node `source` is not in `G`.
+
+    Examples
+    --------
+    >>> DG = nx.path_graph(5, create_using=nx.DiGraph)
+    >>> sorted(nx.ancestors(DG, 2))
+    [0, 1]
+
+    The `source` node is not an ancestor of itself, but can be included manually:
+
+    >>> sorted(nx.ancestors(DG, 2) | {2})
+    [0, 1, 2]
+
+    See also
+    --------
+    descendants
+    """
+    return {child for parent, child in nx.bfs_edges(G, source, reverse=True)}
+
+
+@nx._dispatch
+def has_cycle(G):
+    """Decides whether the directed graph has a cycle."""
+    try:
+        # Feed the entire iterator into a zero-length deque.
+        deque(topological_sort(G), maxlen=0)
+    except nx.NetworkXUnfeasible:
+        return True
+    else:
+        return False
+
+
+@nx._dispatch
+def is_directed_acyclic_graph(G):
+    """Returns True if the graph `G` is a directed acyclic graph (DAG) or
+    False if not.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    bool
+        True if `G` is a DAG, False otherwise
+
+    Examples
+    --------
+    Undirected graph::
+
+        >>> G = nx.Graph([(1, 2), (2, 3)])
+        >>> nx.is_directed_acyclic_graph(G)
+        False
+
+    Directed graph with cycle::
+
+        >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+        >>> nx.is_directed_acyclic_graph(G)
+        False
+
+    Directed acyclic graph::
+
+        >>> G = nx.DiGraph([(1, 2), (2, 3)])
+        >>> nx.is_directed_acyclic_graph(G)
+        True
+
+    See also
+    --------
+    topological_sort
+    """
+    return G.is_directed() and not has_cycle(G)
+
+
+@nx._dispatch
+def topological_generations(G):
+    """Stratifies a DAG into generations.
+
+    A topological generation is a collection of nodes in which the ancestors
+    of a node in each generation are guaranteed to be in a previous
+    generation, and any descendants of a node are guaranteed to be in a
+    following generation. Nodes are guaranteed to be in the earliest possible
+    generation that they can belong to.
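+
+    For example, in a diamond-shaped DAG the two middle nodes fall into the
+    same generation (an illustrative sketch):
+
+    >>> DG = nx.DiGraph([(1, 2), (1, 3), (2, 4), (3, 4)])
+    >>> [sorted(gen) for gen in nx.topological_generations(DG)]
+    [[1], [2, 3], [4]]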
+ + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + sets of nodes + Yields sets of nodes representing each generation. + + Raises + ------ + NetworkXError + Generations are defined for directed graphs only. If the graph + `G` is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological generations + exist and a :exc:`NetworkXUnfeasible` exception is raised. This can also + be raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (3, 1)]) + >>> [sorted(generation) for generation in nx.topological_generations(DG)] + [[2, 3], [1]] + + Notes + ----- + The generation in which a node resides can also be determined by taking the + max-path-distance from the node to the farthest leaf node. That value can + be obtained with this function using `enumerate(topological_generations(G))`. + + See also + -------- + topological_sort + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + multigraph = G.is_multigraph() + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + zero_indegree = [v for v, d in G.in_degree() if d == 0] + + while zero_indegree: + this_generation = zero_indegree + zero_indegree = [] + for node in this_generation: + if node not in G: + raise RuntimeError("Graph changed during iteration") + for child in G.neighbors(node): + try: + indegree_map[child] -= len(G[node][child]) if multigraph else 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + zero_indegree.append(child) + del indegree_map[child] + yield this_generation + + if indegree_map: + raise nx.NetworkXUnfeasible( + "Graph contains a cycle or graph changed during iteration" + ) + + +@nx._dispatch +def topological_sort(G): + """Returns a generator of nodes in topologically sorted order. + + A topological sort is a nonunique permutation of the nodes of a + directed graph such that an edge from u to v implies that u + appears before v in the topological sort order. This ordering is + valid only if the graph has no directed cycles. + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + Yields + ------ + nodes + Yields the nodes in topological sorted order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + Examples + -------- + To get the reverse order of the topological sort: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> list(reversed(list(nx.topological_sort(DG)))) + [3, 2, 1] + + If your DiGraph naturally has the edges representing tasks/inputs + and nodes representing people/processes that initiate tasks, then + topological_sort is not quite what you need. You will have to change + the tasks to nodes with dependence reflected by edges. The result is + a kind of topological sort of the edges. 
This can be done + with :func:`networkx.line_graph` as follows: + + >>> list(nx.topological_sort(nx.line_graph(DG))) + [(1, 2), (2, 3)] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + is_directed_acyclic_graph, lexicographical_topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + for generation in nx.topological_generations(G): + yield from generation + + +@nx._dispatch +def lexicographical_topological_sort(G, key=None): + """Generate the nodes in the unique lexicographical topological sort order. + + Generates a unique ordering of nodes by first sorting topologically (for which there are often + multiple valid orderings) and then additionally by sorting lexicographically. + + A topological sort arranges the nodes of a directed graph so that the + upstream node of each directed edge precedes the downstream node. + It is always possible to find a solution for directed graphs that have no cycles. + There may be more than one valid solution. + + Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the + topological sort and to determine a single, unique ordering. This can be useful in comparing + sort results. + + The lexicographical order can be customized by providing a function to the `key=` parameter. + The definition of the key function is the same as used in python's built-in `sort()`. + The function takes a single argument and returns a key to use for sorting purposes. + + Lexicographical sorting can fail if the node names are un-sortable. See the example below. + The solution is to provide a function to the `key=` argument that returns sortable keys. + + + Parameters + ---------- + G : NetworkX digraph + A directed acyclic graph (DAG) + + key : function, optional + A function of one argument that converts a node name to a comparison key. + It defines and resolves ambiguities in the sort order. Defaults to the identity function. + + Yields + ------ + nodes + Yields the nodes of G in lexicographical topological sort order. + + Raises + ------ + NetworkXError + Topological sort is defined for directed graphs only. If the graph `G` + is undirected, a :exc:`NetworkXError` is raised. + + NetworkXUnfeasible + If `G` is not a directed acyclic graph (DAG) no topological sort exists + and a :exc:`NetworkXUnfeasible` exception is raised. This can also be + raised if `G` is changed while the returned iterator is being processed + + RuntimeError + If `G` is changed while the returned iterator is being processed. + + TypeError + Results from un-sortable node names. + Consider using `key=` parameter to resolve ambiguities in the sort order. + + Examples + -------- + >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)]) + >>> list(nx.lexicographical_topological_sort(DG)) + [2, 1, 3, 5, 4] + >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x)) + [2, 5, 1, 4, 3] + + The sort will fail for any graph with integer and string nodes. Comparison of integer to strings + is not defined in python. Is 3 greater or less than 'red'? + + >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')]) + >>> list(nx.lexicographical_topological_sort(DG)) + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + ... + + Incomparable nodes can be resolved using a `key` function. 
This example function + allows comparison of integers and strings by returning a tuple where the first + element is True for `str`, False otherwise. The second element is the node name. + This groups the strings and integers separately so they can be compared only among themselves. + + >>> key = lambda node: (isinstance(node, str), node) + >>> list(nx.lexicographical_topological_sort(DG, key=key)) + [1, 2, 3, 'blue', 'green', 'red'] + + Notes + ----- + This algorithm is based on a description and proof in + "Introduction to Algorithms: A Creative Approach" [1]_ . + + See also + -------- + topological_sort + + References + ---------- + .. [1] Manber, U. (1989). + *Introduction to Algorithms - A Creative Approach.* Addison-Wesley. + """ + if not G.is_directed(): + msg = "Topological sort not defined on undirected graphs." + raise nx.NetworkXError(msg) + + if key is None: + + def key(node): + return node + + nodeid_map = {n: i for i, n in enumerate(G)} + + def create_tuple(node): + return key(node), nodeid_map[node], node + + indegree_map = {v: d for v, d in G.in_degree() if d > 0} + # These nodes have zero indegree and ready to be returned. + zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0] + heapq.heapify(zero_indegree) + + while zero_indegree: + _, _, node = heapq.heappop(zero_indegree) + + if node not in G: + raise RuntimeError("Graph changed during iteration") + for _, child in G.edges(node): + try: + indegree_map[child] -= 1 + except KeyError as err: + raise RuntimeError("Graph changed during iteration") from err + if indegree_map[child] == 0: + try: + heapq.heappush(zero_indegree, create_tuple(child)) + except TypeError as err: + raise TypeError( + f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order." + ) + del indegree_map[child] + + yield node + + if indegree_map: + msg = "Graph contains a cycle or graph changed during iteration" + raise nx.NetworkXUnfeasible(msg) + + +@not_implemented_for("undirected") +@nx._dispatch +def all_topological_sorts(G): + """Returns a generator of _all_ topological sorts of the directed graph G. + + A topological sort is a nonunique permutation of the nodes such that an + edge from u to v implies that u appears before v in the topological sort + order. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Yields + ------ + topological_sort_order : list + a list of nodes in `G`, representing one of the topological sort orders + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` is not acyclic + + Examples + -------- + To enumerate all topological sorts of directed graph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + >>> list(nx.all_topological_sorts(DG)) + [[1, 2, 4, 3], [1, 2, 3, 4]] + + Notes + ----- + Implements an iterative version of the algorithm given in [1]. + + References + ---------- + .. [1] Knuth, Donald E., Szwarcfiter, Jayme L. (1974). + "A Structured Program to Generate All Topological Sorting Arrangements" + Information Processing Letters, Volume 2, Issue 6, 1974, Pages 153-157, + ISSN 0020-0190, + https://doi.org/10.1016/0020-0190(74)90001-5. 
+ Elsevier (North-Holland), Amsterdam + """ + if not G.is_directed(): + raise nx.NetworkXError("Topological sort not defined on undirected graphs.") + + # the names of count and D are chosen to match the global variables in [1] + # number of edges originating in a vertex v + count = dict(G.in_degree()) + # vertices with indegree 0 + D = deque([v for v, d in G.in_degree() if d == 0]) + # stack of first value chosen at a position k in the topological sort + bases = [] + current_sort = [] + + # do-while construct + while True: + assert all(count[v] == 0 for v in D) + + if len(current_sort) == len(G): + yield list(current_sort) + + # clean-up stack + while len(current_sort) > 0: + assert len(bases) == len(current_sort) + q = current_sort.pop() + + # "restores" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] += 1 + assert count[j] >= 0 + # remove entries from D + while len(D) > 0 and count[D[-1]] > 0: + D.pop() + + # corresponds to a circular shift of the values in D + # if the first value chosen (the base) is in the first + # position of D again, we are done and need to consider the + # previous condition + D.appendleft(q) + if D[-1] == bases[-1]: + # all possible values have been chosen at current position + # remove corresponding marker + bases.pop() + else: + # there are still elements that have not been fixed + # at the current position in the topological sort + # stop removing elements, escape inner loop + break + + else: + if len(D) == 0: + raise nx.NetworkXUnfeasible("Graph contains a cycle.") + + # choose next node + q = D.pop() + # "erase" all edges (q, x) + # NOTE: it is important to iterate over edges instead + # of successors, so count is updated correctly in multigraphs + for _, j in G.out_edges(q): + count[j] -= 1 + assert count[j] >= 0 + if count[j] == 0: + D.append(j) + current_sort.append(q) + + # base for current position might _not_ be fixed yet + if len(bases) < len(current_sort): + bases.append(q) + + if len(bases) == 0: + break + + +@nx._dispatch +def is_aperiodic(G): + """Returns True if `G` is aperiodic. + + A directed graph is aperiodic if there is no integer k > 1 that + divides the length of every cycle in the graph. + + Parameters + ---------- + G : NetworkX DiGraph + A directed graph + + Returns + ------- + bool + True if the graph is aperiodic False otherwise + + Raises + ------ + NetworkXError + If `G` is not directed + + Examples + -------- + A graph consisting of one cycle, the length of which is 2. Therefore ``k = 2`` + divides the length of every cycle in the graph and thus the graph + is *not aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 1)]) + >>> nx.is_aperiodic(DG) + False + + A graph consisting of two cycles: one of length 2 and the other of length 3. + The cycle lengths are coprime, so there is no single value of k where ``k > 1`` + that divides each cycle length and therefore the graph is *aperiodic*:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1), (1, 4), (4, 1)]) + >>> nx.is_aperiodic(DG) + True + + A graph consisting of two cycles: one of length 2 and the other of length 4. 
+    The lengths of the cycles share a common factor ``k = 2``, and therefore
+    the graph is *not aperiodic*::
+
+        >>> DG = nx.DiGraph([(1, 2), (2, 1), (3, 4), (4, 5), (5, 6), (6, 3)])
+        >>> nx.is_aperiodic(DG)
+        False
+
+    An acyclic graph has no cycles at all, and therefore is *not aperiodic*::
+
+        >>> DG = nx.DiGraph([(1, 2), (2, 3)])
+        >>> nx.is_aperiodic(DG)
+        False
+
+    Notes
+    -----
+    This uses the method outlined in [1]_, which runs in $O(m)$ time
+    given $m$ edges in `G`. Note that an acyclic graph is not aperiodic,
+    as the divisibility condition is vacuously satisfied by every integer
+    when the graph has no cycles.
+
+    References
+    ----------
+    .. [1] Jarvis, J. P.; Shier, D. R. (1996),
+       "Graph-theoretic analysis of finite Markov chains,"
+       in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
+       A Multidisciplinary Approach, CRC Press.
+    """
+    if not G.is_directed():
+        raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
+
+    s = arbitrary_element(G)
+    levels = {s: 0}
+    this_level = [s]
+    g = 0
+    lev = 1
+    while this_level:
+        next_level = []
+        for u in this_level:
+            for v in G[u]:
+                if v in levels:  # Non-Tree Edge
+                    g = gcd(g, levels[u] - levels[v] + 1)
+                else:  # Tree Edge
+                    next_level.append(v)
+                    levels[v] = lev
+        this_level = next_level
+        lev += 1
+    if len(levels) == len(G):  # All nodes in tree
+        return g == 1
+    else:
+        return g == 1 and nx.is_aperiodic(G.subgraph(set(G) - set(levels)))
+
+
+@nx._dispatch(preserve_all_attrs=True)
+def transitive_closure(G, reflexive=False):
+    """Returns the transitive closure of a graph.
+
+    The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that
+    for all v, w in V there is an edge (v, w) in E+ if and only if there
+    is a path from v to w in G.
+
+    Handling of paths from v to v has some flexibility within this definition.
+    A reflexive transitive closure creates a self-loop for the path
+    from v to v of length 0. The usual transitive closure creates a
+    self-loop only if a cycle exists (a path from v to v with length > 0).
+    We also allow an option for no self-loops.
+
+    Parameters
+    ----------
+    G : NetworkX Graph
+        A directed/undirected graph/multigraph.
+    reflexive : Bool or None, optional (default: False)
+        Determines when cycles create self-loops in the Transitive Closure.
+        If True, trivial cycles (length 0) create self-loops. The result
+        is a reflexive transitive closure of G.
+        If False (the default) non-trivial cycles create self-loops.
+        If None, self-loops are not created.
+
+    Returns
+    -------
+    NetworkX graph
+        The transitive closure of `G`
+
+    Raises
+    ------
+    NetworkXError
+        If `reflexive` not in `{None, True, False}`
+
+    Examples
+    --------
+    The treatment of trivial (i.e. length 0) cycles is controlled by the
+    `reflexive` parameter.
+
+    Trivial (i.e. length 0) cycles do not create self-loops when
+    ``reflexive=False`` (the default)::
+
+        >>> DG = nx.DiGraph([(1, 2), (2, 3)])
+        >>> TC = nx.transitive_closure(DG, reflexive=False)
+        >>> TC.edges()
+        OutEdgeView([(1, 2), (1, 3), (2, 3)])
+
+    However, nontrivial (i.e.
length greater than 0) cycles create self-loops + when ``reflexive=False`` (the default):: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=False) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (1, 1), (2, 3), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]) + + Trivial cycles (length 0) create self-loops when ``reflexive=True``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure(DG, reflexive=True) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 1), (1, 3), (2, 3), (2, 2), (3, 3)]) + + And the third option is not to create self-loops at all when ``reflexive=None``:: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> TC = nx.transitive_closure(DG, reflexive=None) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3), (2, 1), (3, 1), (3, 2)]) + + References + ---------- + .. [1] https://www.ics.uci.edu/~eppstein/PADS/PartialOrder.py + """ + TC = G.copy() + + if reflexive not in {None, True, False}: + raise nx.NetworkXError("Incorrect value for the parameter `reflexive`") + + for v in G: + if reflexive is None: + TC.add_edges_from((v, u) for u in nx.descendants(G, v) if u not in TC[v]) + elif reflexive is True: + TC.add_edges_from( + (v, u) for u in nx.descendants(G, v) | {v} if u not in TC[v] + ) + elif reflexive is False: + TC.add_edges_from((v, e[1]) for e in nx.edge_bfs(G, v) if e[1] not in TC[v]) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatch(preserve_all_attrs=True) +def transitive_closure_dag(G, topo_order=None): + """Returns the transitive closure of a directed acyclic graph. + + This function is faster than the function `transitive_closure`, but fails + if the graph has a cycle. + + The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that + for all v, w in V there is an edge (v, w) in E+ if and only if there + is a non-null path from v to w in G. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Returns + ------- + NetworkX DiGraph + The transitive closure of `G` + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + NetworkXUnfeasible + If `G` has a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (2, 3)]) + >>> TC = nx.transitive_closure_dag(DG) + >>> TC.edges() + OutEdgeView([(1, 2), (1, 3), (2, 3)]) + + Notes + ----- + This algorithm is probably simple enough to be well-known but I didn't find + a mention in the literature. + """ + if topo_order is None: + topo_order = list(topological_sort(G)) + + TC = G.copy() + + # idea: traverse vertices following a reverse topological order, connecting + # each vertex to its descendants at distance 2 as we go + for v in reversed(topo_order): + TC.add_edges_from((v, u) for u in nx.descendants_at_distance(TC, v, 2)) + + return TC + + +@not_implemented_for("undirected") +@nx._dispatch +def transitive_reduction(G): + """Returns transitive reduction of a directed graph + + The transitive reduction of G = (V,E) is a graph G- = (V,E-) such that + for all v,w in V there is an edge (v,w) in E- if and only if (v,w) is + in E and there is no path from v to w in G with length greater than 1. 
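+
+    Equivalently, the transitive reduction preserves reachability: its
+    transitive closure matches that of the original DAG (an illustrative
+    sketch):
+
+    >>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3), (1, 4), (3, 4)])
+    >>> TR = nx.transitive_reduction(DG)
+    >>> sorted(nx.transitive_closure(TR).edges) == sorted(nx.transitive_closure(DG).edges)
+    True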
+ + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + Returns + ------- + NetworkX DiGraph + The transitive reduction of `G` + + Raises + ------ + NetworkXError + If `G` is not a directed acyclic graph (DAG) transitive reduction is + not uniquely defined and a :exc:`NetworkXError` exception is raised. + + Examples + -------- + To perform transitive reduction on a DiGraph: + + >>> DG = nx.DiGraph([(1, 2), (2, 3), (1, 3)]) + >>> TR = nx.transitive_reduction(DG) + >>> list(TR.edges) + [(1, 2), (2, 3)] + + To avoid unnecessary data copies, this implementation does not return a + DiGraph with node/edge data. + To perform transitive reduction on a DiGraph and transfer node/edge data: + + >>> DG = nx.DiGraph() + >>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color='red') + >>> TR = nx.transitive_reduction(DG) + >>> TR.add_nodes_from(DG.nodes(data=True)) + >>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges) + >>> list(TR.edges(data=True)) + [(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})] + + References + ---------- + https://en.wikipedia.org/wiki/Transitive_reduction + + """ + if not is_directed_acyclic_graph(G): + msg = "Directed Acyclic Graph required for transitive_reduction" + raise nx.NetworkXError(msg) + TR = nx.DiGraph() + TR.add_nodes_from(G.nodes()) + descendants = {} + # count before removing set stored in descendants + check_count = dict(G.in_degree) + for u in G: + u_nbrs = set(G[u]) + for v in G[u]: + if v in u_nbrs: + if v not in descendants: + descendants[v] = {y for x, y in nx.dfs_edges(G, v)} + u_nbrs -= descendants[v] + check_count[v] -= 1 + if check_count[v] == 0: + del descendants[v] + TR.add_edges_from((u, v) for v in u_nbrs) + return TR + + +@not_implemented_for("undirected") +@nx._dispatch +def antichains(G, topo_order=None): + """Generates antichains from a directed acyclic graph (DAG). + + An antichain is a subset of a partially ordered set such that any + two elements in the subset are incomparable. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + topo_order: list or tuple, optional + A topological order for G (if None, the function will compute one) + + Yields + ------ + antichain : list + a list of nodes in `G` representing an antichain + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + NetworkXUnfeasible + If `G` contains a cycle + + Examples + -------- + >>> DG = nx.DiGraph([(1, 2), (1, 3)]) + >>> list(nx.antichains(DG)) + [[], [3], [2], [2, 3], [1]] + + Notes + ----- + This function was originally developed by Peter Jipsen and Franco Saliola + for the SAGE project. It's included in NetworkX with permission from the + authors. Original SAGE code at: + + https://github.com/sagemath/sage/blob/master/src/sage/combinat/posets/hasse_diagram.py + + References + ---------- + .. [1] Free Lattices, by R. Freese, J. Jezek and J. B. Nation, + AMS, Vol 42, 1995, p. 226. 
+ """ + if topo_order is None: + topo_order = list(nx.topological_sort(G)) + + TC = nx.transitive_closure_dag(G, topo_order) + antichains_stacks = [([], list(reversed(topo_order)))] + + while antichains_stacks: + (antichain, stack) = antichains_stacks.pop() + # Invariant: + # - the elements of antichain are independent + # - the elements of stack are independent from those of antichain + yield antichain + while stack: + x = stack.pop() + new_antichain = antichain + [x] + new_stack = [t for t in stack if not ((t in TC[x]) or (x in TC[t]))] + antichains_stacks.append((new_antichain, new_stack)) + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs={"weight": "default_weight"}) +def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None): + """Returns the longest path in a directed acyclic graph (DAG). + + If `G` has edges with `weight` attribute the edge data are used as + weight values. + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : str, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + topo_order: list or tuple, optional + A topological order for `G` (if None, the function will compute one) + + Returns + ------- + list + Longest path + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path(DG) + [0, 1, 2] + >>> nx.dag_longest_path(DG, weight="cost") + [0, 2] + + In the case where multiple valid topological orderings exist, `topo_order` + can be used to specify a specific ordering: + + >>> DG = nx.DiGraph([(0, 1), (0, 2)]) + >>> sorted(nx.all_topological_sorts(DG)) # Valid topological orderings + [[0, 1, 2], [0, 2, 1]] + >>> nx.dag_longest_path(DG, topo_order=[0, 1, 2]) + [0, 1] + >>> nx.dag_longest_path(DG, topo_order=[0, 2, 1]) + [0, 2] + + See also + -------- + dag_longest_path_length + + """ + if not G: + return [] + + if topo_order is None: + topo_order = nx.topological_sort(G) + + dist = {} # stores {v : (length, u)} + for v in topo_order: + us = [ + ( + dist[u][0] + + ( + max(data.values(), key=lambda x: x.get(weight, default_weight)) + if G.is_multigraph() + else data + ).get(weight, default_weight), + u, + ) + for u, data in G.pred[v].items() + ] + + # Use the best predecessor if there is one and its distance is + # non-negative, otherwise terminate. 
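+        # (dist[v] holds a pair (length, predecessor); the (0, v) fallback
+        # lets ``v`` start a fresh path, so a negative-weight extension is
+        # never forced onto a longest path.)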
+ maxu = max(us, key=lambda x: x[0]) if us else (0, v) + dist[v] = maxu if maxu[0] >= 0 else (0, v) + + u = None + v = max(dist, key=lambda x: dist[x][0]) + path = [] + while u != v: + path.append(v) + u = v + v = dist[v][1] + + path.reverse() + return path + + +@not_implemented_for("undirected") +@nx._dispatch(edge_attrs={"weight": "default_weight"}) +def dag_longest_path_length(G, weight="weight", default_weight=1): + """Returns the longest path length in a DAG + + Parameters + ---------- + G : NetworkX DiGraph + A directed acyclic graph (DAG) + + weight : string, optional + Edge data key to use for weight + + default_weight : int, optional + The weight of edges that do not have a weight attribute + + Returns + ------- + int + Longest path length + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed + + Examples + -------- + >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})]) + >>> list(nx.all_simple_paths(DG, 0, 2)) + [[0, 1, 2], [0, 2]] + >>> nx.dag_longest_path_length(DG) + 2 + >>> nx.dag_longest_path_length(DG, weight="cost") + 42 + + See also + -------- + dag_longest_path + """ + path = nx.dag_longest_path(G, weight, default_weight) + path_length = 0 + if G.is_multigraph(): + for u, v in pairwise(path): + i = max(G[u][v], key=lambda x: G[u][v][x].get(weight, default_weight)) + path_length += G[u][v][i].get(weight, default_weight) + else: + for u, v in pairwise(path): + path_length += G[u][v].get(weight, default_weight) + + return path_length + + +@nx._dispatch +def root_to_leaf_paths(G): + """Yields root-to-leaf paths in a directed acyclic graph. + + `G` must be a directed acyclic graph. If not, the behavior of this + function is undefined. A "root" in this graph is a node of in-degree + zero and a "leaf" a node of out-degree zero. + + When invoked, this function iterates over each path from any root to + any leaf. A path is a list of nodes. + + """ + roots = (v for v, d in G.in_degree() if d == 0) + leaves = (v for v, d in G.out_degree() if d == 0) + all_paths = partial(nx.all_simple_paths, G) + # TODO In Python 3, this would be better as `yield from ...`. + return chaini(starmap(all_paths, product(roots, leaves))) + + +@not_implemented_for("multigraph") +@not_implemented_for("undirected") +@nx._dispatch +def dag_to_branching(G): + """Returns a branching representing all (overlapping) paths from + root nodes to leaf nodes in the given directed acyclic graph. + + As described in :mod:`networkx.algorithms.tree.recognition`, a + *branching* is a directed forest in which each node has at most one + parent. In other words, a branching is a disjoint union of + *arborescences*. For this function, each node of in-degree zero in + `G` becomes a root of one of the arborescences, and there will be + one leaf node for each distinct path from that root to a leaf node + in `G`. + + Each node `v` in `G` with *k* parents becomes *k* distinct nodes in + the returned branching, one for each parent, and the sub-DAG rooted + at `v` is duplicated for each copy. The algorithm then recurses on + the children of each copy of `v`. + + Parameters + ---------- + G : NetworkX graph + A directed acyclic graph. + + Returns + ------- + DiGraph + The branching in which there is a bijection between root-to-leaf + paths in `G` (in which multiple paths may share the same leaf) + and root-to-leaf paths in the branching (in which there is a + unique path from a root to a leaf). 
+ + Each node has an attribute 'source' whose value is the original + node to which this node corresponds. No other graph, node, or + edge attributes are copied into this new graph. + + Raises + ------ + NetworkXNotImplemented + If `G` is not directed, or if `G` is a multigraph. + + HasACycle + If `G` is not acyclic. + + Examples + -------- + To examine which nodes in the returned branching were produced by + which original node in the directed acyclic graph, we can collect + the mapping from source node to new nodes into a dictionary. For + example, consider the directed diamond graph:: + + >>> from collections import defaultdict + >>> from operator import itemgetter + >>> + >>> G = nx.DiGraph(nx.utils.pairwise("abd")) + >>> G.add_edges_from(nx.utils.pairwise("acd")) + >>> B = nx.dag_to_branching(G) + >>> + >>> sources = defaultdict(set) + >>> for v, source in B.nodes(data="source"): + ... sources[source].add(v) + >>> len(sources["a"]) + 1 + >>> len(sources["d"]) + 2 + + To copy node attributes from the original graph to the new graph, + you can use a dictionary like the one constructed in the above + example:: + + >>> for source, nodes in sources.items(): + ... for v in nodes: + ... B.nodes[v].update(G.nodes[source]) + + Notes + ----- + This function is not idempotent in the sense that the node labels in + the returned branching may be uniquely generated each time the + function is invoked. In fact, the node labels may not be integers; + in order to relabel the nodes to be more readable, you can use the + :func:`networkx.convert_node_labels_to_integers` function. + + The current implementation of this function uses + :func:`networkx.prefix_tree`, so it is subject to the limitations of + that function. + + """ + if has_cycle(G): + msg = "dag_to_branching is only defined for acyclic graphs" + raise nx.HasACycle(msg) + paths = root_to_leaf_paths(G) + B = nx.prefix_tree(paths) + # Remove the synthetic `root`(0) and `NIL`(-1) nodes from the tree + B.remove_node(0) + B.remove_node(-1) + return B + + +@not_implemented_for("undirected") +@nx._dispatch +def compute_v_structures(G): + """Iterate through the graph to compute all v-structures. + + V-structures are triples in the directed graph where + two parent nodes point to the same child and the two parent nodes + are not adjacent. + + Parameters + ---------- + G : graph + A networkx DiGraph. + + Returns + ------- + vstructs : iterator of tuples + The v structures within the graph. Each v structure is a 3-tuple with the + parent, collider, and other parent. 
+ + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from([(1, 2), (0, 5), (3, 1), (2, 4), (3, 1), (4, 5), (1, 5)]) + >>> sorted(nx.compute_v_structures(G)) + [(0, 5, 1), (0, 5, 4), (1, 5, 4)] + + Notes + ----- + https://en.wikipedia.org/wiki/Collider_(statistics) + """ + for collider, preds in G.pred.items(): + for common_parents in combinations(preds, r=2): + # ensure that the colliders are the same + common_parents = sorted(common_parents) + yield (common_parents[0], collider, common_parents[1]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/distance_measures.py b/MLPY/Lib/site-packages/networkx/algorithms/distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..17e264821ba896d0fd30657453cec341259eb0ae --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/distance_measures.py @@ -0,0 +1,869 @@ +"""Graph diameter, radius, eccentricity and other properties.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "eccentricity", + "diameter", + "radius", + "periphery", + "center", + "barycenter", + "resistance_distance", + "kemeny_constant", +] + + +def _extrema_bounding(G, compute="diameter", weight=None): + """Compute requested extreme distance metric of undirected graph G + + Computation is based on smart lower and upper bounds, and in practice + linear in the number of nodes, rather than quadratic (except for some + border cases such as complete graphs or circle shaped graphs). + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + compute : string denoting the requesting metric + "diameter" for the maximal eccentricity value, + "radius" for the minimal eccentricity value, + "periphery" for the set of nodes with eccentricity equal to the diameter, + "center" for the set of nodes with eccentricity equal to the radius, + "eccentricities" for the maximum distance from each node to all other nodes in G + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + value : value of the requested metric + int for "diameter" and "radius" or + list of nodes for "center" and "periphery" or + dictionary of eccentricity values keyed by node for "eccentricities" + + Raises + ------ + NetworkXError + If the graph consists of multiple components + ValueError + If `compute` is not one of "diameter", "radius", "periphery", "center", or "eccentricities". + + Notes + ----- + This algorithm was proposed in [1]_ and discussed further in [2]_ and [3]_. + + References + ---------- + .. [1] F. W. Takes, W. A. Kosters, + "Determining the diameter of small world networks." 
+ Proceedings of the 20th ACM international conference on Information and knowledge management, 2011 + https://dl.acm.org/doi/abs/10.1145/2063576.2063748 + .. [2] F. W. Takes, W. A. Kosters, + "Computing the Eccentricity Distribution of Large Graphs." + Algorithms, 2013 + https://www.mdpi.com/1999-4893/6/1/100 + .. [3] M. Borassi, P. Crescenzi, M. Habib, W. A. Kosters, A. Marino, F. W. Takes, + "Fast diameter and radius BFS-based computation in (weakly connected) real-world graphs: With an application to the six degrees of separation games. " + Theoretical Computer Science, 2015 + https://www.sciencedirect.com/science/article/pii/S0304397515001644 + """ + # init variables + degrees = dict(G.degree()) # start with the highest degree node + minlowernode = max(degrees, key=degrees.get) + N = len(degrees) # number of nodes + # alternate between smallest lower and largest upper bound + high = False + # status variables + ecc_lower = dict.fromkeys(G, 0) + ecc_upper = dict.fromkeys(G, N) + candidates = set(G) + + # (re)set bound extremes + minlower = N + maxlower = 0 + minupper = N + maxupper = 0 + + # repeat the following until there are no more candidates + while candidates: + if high: + current = maxuppernode # select node with largest upper bound + else: + current = minlowernode # select node with smallest lower bound + high = not high + + # get distances from/to current node and derive eccentricity + dist = nx.shortest_path_length(G, source=current, weight=weight) + + if len(dist) != N: + msg = "Cannot compute metric because graph is not connected." + raise nx.NetworkXError(msg) + current_ecc = max(dist.values()) + + # print status update + # print ("ecc of " + str(current) + " (" + str(ecc_lower[current]) + "/" + # + str(ecc_upper[current]) + ", deg: " + str(dist[current]) + ") is " + # + str(current_ecc)) + # print(ecc_upper) + + # (re)set bound extremes + maxuppernode = None + minlowernode = None + + # update node bounds + for i in candidates: + # update eccentricity bounds + d = dist[i] + ecc_lower[i] = low = max(ecc_lower[i], max(d, (current_ecc - d))) + ecc_upper[i] = upp = min(ecc_upper[i], current_ecc + d) + + # update min/max values of lower and upper bounds + minlower = min(ecc_lower[i], minlower) + maxlower = max(ecc_lower[i], maxlower) + minupper = min(ecc_upper[i], minupper) + maxupper = max(ecc_upper[i], maxupper) + + # update candidate set + if compute == "diameter": + ruled_out = { + i + for i in candidates + if ecc_upper[i] <= maxlower and 2 * ecc_lower[i] >= maxupper + } + elif compute == "radius": + ruled_out = { + i + for i in candidates + if ecc_lower[i] >= minupper and ecc_upper[i] + 1 <= 2 * minlower + } + elif compute == "periphery": + ruled_out = { + i + for i in candidates + if ecc_upper[i] < maxlower + and (maxlower == maxupper or ecc_lower[i] > maxupper) + } + elif compute == "center": + ruled_out = { + i + for i in candidates + if ecc_lower[i] > minupper + and (minlower == minupper or ecc_upper[i] + 1 < 2 * minlower) + } + elif compute == "eccentricities": + ruled_out = set() + else: + msg = "compute must be one of 'diameter', 'radius', 'periphery', 'center', 'eccentricities'" + raise ValueError(msg) + + ruled_out.update(i for i in candidates if ecc_lower[i] == ecc_upper[i]) + candidates -= ruled_out + + # for i in ruled_out: + # print("removing %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (i,ecc_upper[i],maxlower,ecc_lower[i],maxupper)) + # print("node %g: ecc_u: %g maxl: %g ecc_l: %g maxu: %g"% + # (4,ecc_upper[4],maxlower,ecc_lower[4],maxupper)) + # 
print("NODE 4: %g"%(ecc_upper[4] <= maxlower)) + # print("NODE 4: %g"%(2 * ecc_lower[4] >= maxupper)) + # print("NODE 4: %g"%(ecc_upper[4] <= maxlower + # and 2 * ecc_lower[4] >= maxupper)) + + # updating maxuppernode and minlowernode for selection in next round + for i in candidates: + if ( + minlowernode is None + or ( + ecc_lower[i] == ecc_lower[minlowernode] + and degrees[i] > degrees[minlowernode] + ) + or (ecc_lower[i] < ecc_lower[minlowernode]) + ): + minlowernode = i + + if ( + maxuppernode is None + or ( + ecc_upper[i] == ecc_upper[maxuppernode] + and degrees[i] > degrees[maxuppernode] + ) + or (ecc_upper[i] > ecc_upper[maxuppernode]) + ): + maxuppernode = i + + # print status update + # print (" min=" + str(minlower) + "/" + str(minupper) + + # " max=" + str(maxlower) + "/" + str(maxupper) + + # " candidates: " + str(len(candidates))) + # print("cand:",candidates) + # print("ecc_l",ecc_lower) + # print("ecc_u",ecc_upper) + # wait = input("press Enter to continue") + + # return the correct value of the requested metric + if compute == "diameter": + return maxlower + if compute == "radius": + return minupper + if compute == "periphery": + p = [v for v in G if ecc_lower[v] == maxlower] + return p + if compute == "center": + c = [v for v in G if ecc_upper[v] == minupper] + return c + if compute == "eccentricities": + return ecc_lower + return None + + +@nx._dispatch(edge_attrs="weight") +def eccentricity(G, v=None, sp=None, weight=None): + """Returns the eccentricity of nodes in G. + + The eccentricity of a node v is the maximum distance from v to + all other nodes in G. + + Parameters + ---------- + G : NetworkX graph + A graph + + v : node, optional + Return value of specified node + + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + weight : string, function, or None (default=None) + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + ecc : dictionary + A dictionary of eccentricity values keyed by node. 
+ + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> dict(nx.eccentricity(G)) + {1: 2, 2: 3, 3: 2, 4: 2, 5: 3} + + >>> dict(nx.eccentricity(G, v=[1, 5])) # This returns the eccentricity of node 1 & 5 + {1: 2, 5: 3} + + """ + # if v is None: # none, use entire graph + # nodes=G.nodes() + # elif v in G: # is v a single node + # nodes=[v] + # else: # assume v is a container of nodes + # nodes=v + order = G.order() + e = {} + for n in G.nbunch_iter(v): + if sp is None: + length = nx.shortest_path_length(G, source=n, weight=weight) + + L = len(length) + else: + try: + length = sp[n] + L = len(length) + except TypeError as err: + raise nx.NetworkXError('Format of "sp" is invalid.') from err + if L != order: + if G.is_directed(): + msg = ( + "Found infinite path length because the digraph is not" + " strongly connected" + ) + else: + msg = "Found infinite path length because the graph is not" " connected" + raise nx.NetworkXError(msg) + + e[n] = max(length.values()) + + if v in G: + return e[v] # return single value + return e + + +@nx._dispatch(edge_attrs="weight") +def diameter(G, e=None, usebounds=False, weight=None): + """Returns the diameter of the graph G. + + The diameter is the maximum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + d : integer + Diameter of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.diameter(G) + 3 + + See Also + -------- + eccentricity + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="diameter", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + return max(e.values()) + + +@nx._dispatch(edge_attrs="weight") +def periphery(G, e=None, usebounds=False, weight=None): + """Returns the periphery of the graph G. + + The periphery is the set of nodes with eccentricity equal to the diameter. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. 
The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + p : list + List of nodes in periphery + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.periphery(G) + [2, 5] + + See Also + -------- + barycenter + center + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="periphery", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + diameter = max(e.values()) + p = [v for v in e if e[v] == diameter] + return p + + +@nx._dispatch(edge_attrs="weight") +def radius(G, e=None, usebounds=False, weight=None): + """Returns the radius of the graph G. + + The radius is the minimum eccentricity. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. + + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + r : integer + Radius of graph + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.radius(G) + 2 + + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="radius", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + return min(e.values()) + + +@nx._dispatch(edge_attrs="weight") +def center(G, e=None, usebounds=False, weight=None): + """Returns the center of the graph G. + + The center is the set of nodes with eccentricity equal to radius. + + Parameters + ---------- + G : NetworkX graph + A graph + + e : eccentricity dictionary, optional + A precomputed dictionary of eccentricities. + + weight : string, function, or None + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + If this is None, every edge has weight/distance/cost 1. 
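+ + For example, a weight function that reads a ``"cost"`` attribute and + falls back to 1 (an illustrative sketch; any callable with this + signature works): + + >>> H = nx.path_graph(3) + >>> nx.center(H, weight=lambda u, v, d: d.get("cost", 1)) + [1]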
+ + Weights stored as floating point values can lead to small round-off + errors in distances. Use integer weights to avoid this. + + Weights should be positive, since they are distances. + + Returns + ------- + c : list + List of nodes in center + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> list(nx.center(G)) + [1, 3, 4] + + See Also + -------- + barycenter + periphery + """ + if usebounds is True and e is None and not G.is_directed(): + return _extrema_bounding(G, compute="center", weight=weight) + if e is None: + e = eccentricity(G, weight=weight) + radius = min(e.values()) + p = [v for v in e if e[v] == radius] + return p + + +@nx._dispatch(edge_attrs="weight") +def barycenter(G, weight=None, attr=None, sp=None): + r"""Calculate barycenter of a connected graph, optionally with edge weights. + + The :dfn:`barycenter` of a + :func:`connected <networkx.algorithms.components.is_connected>` graph + :math:`G` is the subgraph induced by the set of its nodes :math:`v` + minimizing the objective function + + .. math:: + + \sum_{u \in V(G)} d_G(u, v), + + where :math:`d_G` is the (possibly weighted) :func:`path length + <networkx.algorithms.shortest_paths.generic.shortest_path_length>`. + The barycenter is also called the :dfn:`median`. See [West01]_, p. 78. + + Parameters + ---------- + G : :class:`networkx.Graph` + The connected graph :math:`G`. + weight : :class:`str`, optional + Passed through to + :func:`~networkx.algorithms.shortest_paths.generic.shortest_path_length`. + attr : :class:`str`, optional + If given, write the value of the objective function to each node's + `attr` attribute. Otherwise do not store the value. + sp : dict of dicts, optional + All pairs shortest path lengths as a dictionary of dictionaries + + Returns + ------- + list + Nodes of `G` that induce the barycenter of `G`. + + Raises + ------ + NetworkXNoPath + If `G` is disconnected. `G` may appear disconnected to + :func:`barycenter` if `sp` is given but is missing shortest path + lengths for any pairs. + ValueError + If `sp` and `weight` are both given. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> nx.barycenter(G) + [1, 3, 4] + + See Also + -------- + center + periphery + """ + if sp is None: + sp = nx.shortest_path_length(G, weight=weight) + else: + sp = sp.items() + if weight is not None: + raise ValueError("Cannot use both sp, weight arguments together") + smallest, barycenter_vertices, n = float("inf"), [], len(G) + for v, dists in sp: + if len(dists) < n: + raise nx.NetworkXNoPath( + f"Input graph {G} is disconnected, so every induced subgraph " + "has infinite barycentricity." + ) + barycentricity = sum(dists.values()) + if attr is not None: + G.nodes[v][attr] = barycentricity + if barycentricity < smallest: + smallest = barycentricity + barycenter_vertices = [v] + elif barycentricity == smallest: + barycenter_vertices.append(v) + return barycenter_vertices + + +def _count_lu_permutations(perm_array): + """Counts the number of permutations in SuperLU perm_c or perm_r""" + perm_cnt = 0 + arr = perm_array.tolist() + for i in range(len(arr)): + if i != arr[i]: + perm_cnt += 1 + n = arr.index(i) + arr[n] = arr[i] + arr[i] = i + + return perm_cnt + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def resistance_distance(G, nodeA=None, nodeB=None, weight=None, invert_weight=True): + """Returns the resistance distance between every pair of nodes on graph G.
+ + The resistance distance between two nodes of a graph is akin to treating + the graph as a grid of resistors with a resistance equal to the provided + weight [1]_, [2]_. + + If weight is not provided, then a weight of 1 is used for all edges. + + If two nodes are the same, the resistance distance is zero. + + Parameters + ---------- + G : NetworkX graph + A graph + + nodeA : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as source nodes. + + nodeB : node or None, optional (default=None) + A node within graph G. + If None, compute resistance distance using all nodes as target nodes. + + weight : string or None, optional (default=None) + The edge data key used to compute the resistance distance. + If None, then each edge has weight 1. + + invert_weight : boolean (default=True) + Proper calculation of resistance distance requires building the + Laplacian matrix with the reciprocal of the weight. Not required + if the weight is already inverted. Weight cannot be zero. + + Returns + ------- + rd : dict or float + If `nodeA` and `nodeB` are given, resistance distance between `nodeA` + and `nodeB`. If `nodeA` or `nodeB` is unspecified (the default), a + dictionary of nodes with resistance distances as the value. + + Raises + ------ + NetworkXNotImplemented + If `G` is a directed graph. + + NetworkXError + If `G` is not connected, or contains no nodes, + or `nodeA` is not in `G` or `nodeB` is not in `G`. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)]) + >>> round(nx.resistance_distance(G, 1, 3), 10) + 0.625 + + Notes + ----- + The implementation is based on Theorem A in [2]_. Self-loops are ignored. + Multi-edges are contracted in one edge with weight equal to the harmonic sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Resistance distance." + https://en.wikipedia.org/wiki/Resistance_distance + .. [2] D. J. Klein and M. Randic. + Resistance distance. + J. of Math. Chem. 12:81-95, 1993. 
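+ + The pairwise value is read off the pseudo-inverse of the graph + Laplacian as ``Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i]``. + As a quick sanity check, opposite corners of a 4-cycle are joined by + two parallel two-edge paths, so their resistance distance is + ``(2 * 2) / (2 + 2) = 1``: + + >>> C = nx.cycle_graph(4) + >>> round(nx.resistance_distance(C, 0, 2), 10) + 1.0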
+ """ + import numpy as np + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be strongly connected.") + if nodeA is not None and nodeA not in G: + raise nx.NetworkXError("Node A is not in graph G.") + if nodeB is not None and nodeB not in G: + raise nx.NetworkXError("Node B is not in graph G.") + + G = G.copy() + node_list = list(G) + + # Invert weights + if invert_weight and weight is not None: + if G.is_multigraph(): + for u, v, k, d in G.edges(keys=True, data=True): + d[weight] = 1 / d[weight] + else: + for u, v, d in G.edges(data=True): + d[weight] = 1 / d[weight] + + # Compute resistance distance using the Pseudo-inverse of the Laplacian + # Self-loops are ignored + L = nx.laplacian_matrix(G, weight=weight).todense() + Linv = np.linalg.pinv(L, hermitian=True) + + # Return relevant distances + if nodeA is not None and nodeB is not None: + i = node_list.index(nodeA) + j = node_list.index(nodeB) + return Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + + elif nodeA is not None: + i = node_list.index(nodeA) + d = {} + for n in G: + j = node_list.index(n) + d[n] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + elif nodeB is not None: + j = node_list.index(nodeB) + d = {} + for n in G: + i = node_list.index(n) + d[n] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + else: + d = {} + for n in G: + i = node_list.index(n) + d[n] = {} + for n2 in G: + j = node_list.index(n2) + d[n][n2] = Linv[i, i] + Linv[j, j] - Linv[i, j] - Linv[j, i] + return d + + +@nx.utils.not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def kemeny_constant(G, *, weight=None): + """Returns the Kemeny constant of the given graph. + + The *Kemeny constant* (or Kemeny's constant) of a graph `G` + can be computed by regarding the graph as a Markov chain. + The Kemeny constant is then the expected number of time steps + to transition from a starting state i to a random destination state + sampled from the Markov chain's stationary distribution. + The Kemeny constant is independent of the chosen initial state [1]_. + + The Kemeny constant measures the time needed for spreading + across a graph. Low values indicate a closely connected graph + whereas high values indicate a spread-out graph. + + If weight is not provided, then a weight of 1 is used for all edges. + + Since `G` represents a Markov chain, the weights must be positive. + + Parameters + ---------- + G : NetworkX graph + + weight : string or None, optional (default=None) + The edge data key used to compute the Kemeny constant. + If None, then each edge has weight 1. + + Returns + ------- + K : float + The Kemeny constant of the graph `G`. + + Raises + ------ + NetworkXNotImplemented + If the graph `G` is directed. + + NetworkXError + If the graph `G` is not connected, or contains no nodes, + or has edges with negative weights. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> round(nx.kemeny_constant(G), 10) + 3.2 + + Notes + ----- + The implementation is based on equation (3.3) in [2]_. + Self-loops are allowed and indicate a Markov chain where + the state can remain the same. Multi-edges are contracted + in one edge with weight equal to the sum of the weights. + + References + ---------- + .. [1] Wikipedia + "Kemeny's constant." + https://en.wikipedia.org/wiki/Kemeny%27s_constant + .. [2] Lovász L. + Random walks on graphs: A survey. + Paul Erdös is Eighty, vol. 
2, Bolyai Society, + Mathematical Studies, Keszthely, Hungary (1993), pp. 1-46 + """ + import numpy as np + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph G must contain at least one node.") + if not nx.is_connected(G): + raise nx.NetworkXError("Graph G must be connected.") + if nx.is_negatively_weighted(G, weight=weight): + raise nx.NetworkXError("The weights of graph G must be nonnegative.") + + # Compute matrix H = D^-1/2 A D^-1/2 + A = nx.adjacency_matrix(G, weight=weight) + n, m = A.shape + diags = A.sum(axis=1) + with np.errstate(divide="ignore"): + diags_sqrt = 1.0 / np.sqrt(diags) + diags_sqrt[np.isinf(diags_sqrt)] = 0 + DH = sp.sparse.csr_array(sp.sparse.spdiags(diags_sqrt, 0, m, n, format="csr")) + H = DH @ (A @ DH) + + # Compute eigenvalues of H + eig = np.sort(sp.linalg.eigvalsh(H.todense())) + + # Compute the Kemeny constant + return np.sum(1 / (1 - eig[:-1])) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/distance_regular.py b/MLPY/Lib/site-packages/networkx/algorithms/distance_regular.py new file mode 100644 index 0000000000000000000000000000000000000000..18c19ee00e07394f6127095766ff6219ae47598a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/distance_regular.py @@ -0,0 +1,234 @@ +""" +======================= +Distance-regular graphs +======================= +""" + +import networkx as nx +from networkx.utils import not_implemented_for + +from .distance_measures import diameter + +__all__ = [ + "is_distance_regular", + "is_strongly_regular", + "intersection_array", + "global_parameters", +] + + +@nx._dispatch +def is_distance_regular(G): + """Returns True if the graph is distance regular, False otherwise. + + A connected graph G is distance-regular if for any nodes x,y + and any integers i,j=0,1,...,d (where d is the graph + diameter), the number of vertices at distance i from x and + distance j from y depends only on i,j and the graph distance + between x and y, independently of the choice of x and y. + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + bool + True if the graph is Distance Regular, False otherwise + + Examples + -------- + >>> G = nx.hypercube_graph(6) + >>> nx.is_distance_regular(G) + True + + See Also + -------- + intersection_array, global_parameters + + Notes + ----- + For undirected and simple graphs only + + References + ---------- + .. [1] Brouwer, A. E.; Cohen, A. M.; and Neumaier, A. + Distance-Regular Graphs. New York: Springer-Verlag, 1989. + .. [2] Weisstein, Eric W. "Distance-Regular Graph." + http://mathworld.wolfram.com/Distance-RegularGraph.html + + """ + try: + intersection_array(G) + return True + except nx.NetworkXError: + return False + + +def global_parameters(b, c): + """Returns global parameters for a given intersection array. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + Thus, a distance regular graph has the global parameters, + [[c_0,a_0,b_0],[c_1,a_1,b_1],......,[c_d,a_d,b_d]] for the + intersection array [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + where a_i+b_i+c_i=k , k= degree of every vertex. + + Parameters + ---------- + b : list + + c : list + + Returns + ------- + iterable + An iterable over three tuples. 
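+ + Each tuple has the form ``(c_i, a_i, b_i)``, where + ``a_i = k - b_i - c_i`` counts the neighbors that stay at distance + ``i``. For instance, for the 5-cycle (2-regular, with diameter 2): + + >>> b, c = nx.intersection_array(nx.cycle_graph(5)) + >>> list(nx.global_parameters(b, c)) + [(0, 0, 2), (1, 0, 1), (1, 1, 0)]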
+ + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> b, c = nx.intersection_array(G) + >>> list(nx.global_parameters(b, c)) + [(0, 0, 3), (1, 0, 2), (1, 1, 1), (1, 1, 1), (2, 0, 1), (3, 0, 0)] + + References + ---------- + .. [1] Weisstein, Eric W. "Global Parameters." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/GlobalParameters.html + + See Also + -------- + intersection_array + """ + return ((y, b[0] - x - y, x) for x, y in zip(b + [0], [0] + c)) + + +@not_implemented_for("directed", "multigraph") +@nx._dispatch +def intersection_array(G): + """Returns the intersection array of a distance-regular graph. + + Given a distance-regular graph G with integers b_i, c_i,i = 0,....,d + such that for any 2 vertices x,y in G at a distance i=d(x,y), there + are exactly c_i neighbors of y at a distance of i-1 from x and b_i + neighbors of y at a distance of i+1 from x. + + A distance regular graph's intersection array is given by, + [b_0,b_1,.....b_{d-1};c_1,c_2,.....c_d] + + Parameters + ---------- + G: Networkx graph (undirected) + + Returns + ------- + b,c: tuple of lists + + Examples + -------- + >>> G = nx.icosahedral_graph() + >>> nx.intersection_array(G) + ([5, 2, 1], [1, 2, 5]) + + References + ---------- + .. [1] Weisstein, Eric W. "Intersection Array." + From MathWorld--A Wolfram Web Resource. + http://mathworld.wolfram.com/IntersectionArray.html + + See Also + -------- + global_parameters + """ + # test for regular graph (all degrees must be equal) + degree = iter(G.degree()) + (_, k) = next(degree) + for _, knext in degree: + if knext != k: + raise nx.NetworkXError("Graph is not distance regular.") + k = knext + path_length = dict(nx.all_pairs_shortest_path_length(G)) + diameter = max(max(path_length[n].values()) for n in path_length) + bint = {} # 'b' intersection array + cint = {} # 'c' intersection array + for u in G: + for v in G: + try: + i = path_length[u][v] + except KeyError as err: # graph must be connected + raise nx.NetworkXError("Graph is not distance regular.") from err + # number of neighbors of v at a distance of i-1 from u + c = len([n for n in G[v] if path_length[n][u] == i - 1]) + # number of neighbors of v at a distance of i+1 from u + b = len([n for n in G[v] if path_length[n][u] == i + 1]) + # b,c are independent of u and v + if cint.get(i, c) != c or bint.get(i, b) != b: + raise nx.NetworkXError("Graph is not distance regular") + bint[i] = b + cint[i] = c + return ( + [bint.get(j, 0) for j in range(diameter)], + [cint.get(j + 1, 0) for j in range(diameter)], + ) + + +# TODO There is a definition for directed strongly regular graphs. +@not_implemented_for("directed", "multigraph") +@nx._dispatch +def is_strongly_regular(G): + """Returns True if and only if the given graph is strongly + regular. + + An undirected graph is *strongly regular* if + + * it is regular, + * each pair of adjacent vertices has the same number of neighbors in + common, + * each pair of nonadjacent vertices has the same number of neighbors + in common. + + Each strongly regular graph is a distance-regular graph. + Conversely, if a distance-regular graph has diameter two, then it is + a strongly regular graph. For more information on distance-regular + graphs, see :func:`is_distance_regular`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + Returns + ------- + bool + Whether `G` is strongly regular. + + Examples + -------- + + The cycle graph on five vertices is strongly regular. 
It is + two-regular, each pair of adjacent vertices has no shared neighbors, + and each pair of nonadjacent vertices has one shared neighbor:: + + >>> G = nx.cycle_graph(5) + >>> nx.is_strongly_regular(G) + True + + """ + # Here is an alternate implementation based directly on the + # definition of strongly regular graphs: + # + # return (all_equal(G.degree().values()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in G.edges()) + # and all_equal(len(common_neighbors(G, u, v)) + # for u, v in non_edges(G))) + # + # We instead use the fact that a distance-regular graph of diameter + # two is strongly regular. + return is_distance_regular(G) and diameter(G) == 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/dominance.py b/MLPY/Lib/site-packages/networkx/algorithms/dominance.py new file mode 100644 index 0000000000000000000000000000000000000000..ffdbe7d21391872c20cab7a9e6c06a522af97ac3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/dominance.py @@ -0,0 +1,135 @@ +""" +Dominance algorithms. +""" + +from functools import reduce + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["immediate_dominators", "dominance_frontiers"] + + +@not_implemented_for("undirected") +@nx._dispatch +def immediate_dominators(G, start): + """Returns the immediate dominators of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + idom : dict keyed by nodes + A dict containing the immediate dominators of each node reachable from + `start`. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`. + + Notes + ----- + Except for `start`, the immediate dominators are the parents of their + corresponding nodes in the dominator tree. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted(nx.immediate_dominators(G, 1).items()) + [(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. + """ + if start not in G: + raise nx.NetworkXError("start is not in G") + + idom = {start: start} + + order = list(nx.dfs_postorder_nodes(G, start)) + dfn = {u: i for i, u in enumerate(order)} + order.pop() + order.reverse() + + def intersect(u, v): + while u != v: + while dfn[u] < dfn[v]: + u = idom[u] + while dfn[u] > dfn[v]: + v = idom[v] + return u + + changed = True + while changed: + changed = False + for u in order: + new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom)) + if u not in idom or idom[u] != new_idom: + idom[u] = new_idom + changed = True + + return idom + + +@nx._dispatch +def dominance_frontiers(G, start): + """Returns the dominance frontiers of all nodes of a directed graph. + + Parameters + ---------- + G : a DiGraph or MultiDiGraph + The graph where dominance is to be computed. + + start : node + The start node of dominance computation. + + Returns + ------- + df : dict keyed by nodes + A dict containing the dominance frontiers of each node reachable from + `start` as sets. + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. + + NetworkXError + If `start` is not in `G`.
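+ + Notes + ----- + A node ``w`` lies in the dominance frontier of ``u`` when ``u`` + dominates some predecessor of ``w`` but does not strictly dominate + ``w`` itself. In compiler construction these sets mark the join + points at which phi functions are inserted when converting a program + to SSA form.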
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)]) + >>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items()) + [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])] + + References + ---------- + .. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy. + A simple, fast dominance algorithm. + Software Practice & Experience, 4:110, 2001. + """ + idom = nx.immediate_dominators(G, start) + + df = {u: set() for u in idom} + for u in idom: + if len(G.pred[u]) >= 2: + for v in G.pred[u]: + if v in idom: + while v != idom[u]: + df[v].add(u) + v = idom[v] + return df diff --git a/MLPY/Lib/site-packages/networkx/algorithms/dominating.py b/MLPY/Lib/site-packages/networkx/algorithms/dominating.py new file mode 100644 index 0000000000000000000000000000000000000000..97408ab4380515244437742624571a852098a74e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/dominating.py @@ -0,0 +1,94 @@ +"""Functions for computing dominating sets in a graph.""" +from itertools import chain + +import networkx as nx +from networkx.utils import arbitrary_element + +__all__ = ["dominating_set", "is_dominating_set"] + + +@nx._dispatch +def dominating_set(G, start_with=None): + r"""Finds a dominating set for the graph G. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + start_with : node (default=None) + Node to use as a starting point for the algorithm. + + Returns + ------- + D : set + A dominating set for G. + + Notes + ----- + This function is an implementation of algorithm 7 in [2]_ which + finds some dominating set, not necessarily the smallest one. + + See also + -------- + is_dominating_set + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Dominating_set + + .. [2] Abdol-Hossein Esfahanian. Connectivity Algorithms. + http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf + + """ + all_nodes = set(G) + if start_with is None: + start_with = arbitrary_element(all_nodes) + if start_with not in G: + raise nx.NetworkXError(f"node {start_with} is not in G") + dominating_set = {start_with} + dominated_nodes = set(G[start_with]) + remaining_nodes = all_nodes - dominated_nodes - dominating_set + while remaining_nodes: + # Choose an arbitrary node and determine its undominated neighbors. + v = remaining_nodes.pop() + undominated_neighbors = set(G[v]) - dominating_set + # Add the node to the dominating set and the neighbors to the + # dominated set. Finally, remove all of those nodes from the set + # of remaining nodes. + dominating_set.add(v) + dominated_nodes |= undominated_neighbors + remaining_nodes -= undominated_neighbors + return dominating_set + + +@nx._dispatch +def is_dominating_set(G, nbunch): + """Checks if `nbunch` is a dominating set for `G`. + + A *dominating set* for a graph with node set *V* is a subset *D* of + *V* such that every node not in *D* is adjacent to at least one + member of *D* [1]_. + + Parameters + ---------- + G : NetworkX graph + + nbunch : iterable + An iterable of nodes in the graph `G`. + + See also + -------- + dominating_set + + References + ---------- + .. 
[1] https://en.wikipedia.org/wiki/Dominating_set + + """ + testset = {n for n in nbunch if n in G} + nbrs = set(chain.from_iterable(G[n] for n in testset)) + return len(set(G) - testset - nbrs) == 0 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/efficiency_measures.py b/MLPY/Lib/site-packages/networkx/algorithms/efficiency_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..3beea38b013ac5f1bed237ae6a06d739be3c9d1e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/efficiency_measures.py @@ -0,0 +1,168 @@ +"""Provides functions for computing the efficiency of nodes and graphs.""" + +import networkx as nx +from networkx.exception import NetworkXNoPath + +from ..utils import not_implemented_for + +__all__ = ["efficiency", "local_efficiency", "global_efficiency"] + + +@not_implemented_for("directed") +@nx._dispatch +def efficiency(G, u, v): + """Returns the efficiency of a pair of nodes in a graph. + + The *efficiency* of a pair of nodes is the multiplicative inverse of the + shortest path distance between the nodes [1]_. Returns 0 if no path + between nodes. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + u, v : node + Nodes in the graph ``G``. + + Returns + ------- + float + Multiplicative inverse of the shortest path distance between the nodes. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.efficiency(G, 2, 3) # this gives efficiency for node 2 and 3 + 0.5 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + try: + eff = 1 / nx.shortest_path_length(G, u, v) + except NetworkXNoPath: + eff = 0 + return eff + + +@not_implemented_for("directed") +@nx._dispatch +def global_efficiency(G): + """Returns the average global efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *average + global efficiency* of a graph is the average efficiency of all pairs of + nodes [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average global efficiency. + + Returns + ------- + float + The average global efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> round(nx.global_efficiency(G), 12) + 0.916666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + local_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + n = len(G) + denom = n * (n - 1) + if denom != 0: + lengths = nx.all_pairs_shortest_path_length(G) + g_eff = 0 + for source, targets in lengths: + for target, distance in targets.items(): + if distance > 0: + g_eff += 1 / distance + g_eff /= denom + # g_eff = sum(1 / d for s, tgts in lengths + # for t, d in tgts.items() if d > 0) / denom + else: + g_eff = 0 + # TODO This can be made more efficient by computing all pairs shortest + # path lengths in parallel. 
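+ # Node pairs with no connecting path never appear in ``targets``, + # so they contribute zero efficiency, matching the convention used + # by the pairwise ``efficiency`` function above.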
+ return g_eff + + +@not_implemented_for("directed") +@nx._dispatch +def local_efficiency(G): + """Returns the average local efficiency of the graph. + + The *efficiency* of a pair of nodes in a graph is the multiplicative + inverse of the shortest path distance between the nodes. The *local + efficiency* of a node in the graph is the average global efficiency of the + subgraph induced by the neighbors of the node. The *average local + efficiency* is the average of the local efficiencies of each node [1]_. + + Parameters + ---------- + G : :class:`networkx.Graph` + An undirected graph for which to compute the average local efficiency. + + Returns + ------- + float + The average local efficiency of the graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)]) + >>> nx.local_efficiency(G) + 0.9166666666666667 + + Notes + ----- + Edge weights are ignored when computing the shortest path distances. + + See also + -------- + global_efficiency + + References + ---------- + .. [1] Latora, Vito, and Massimo Marchiori. + "Efficient behavior of small-world networks." + *Physical Review Letters* 87.19 (2001): 198701. + + + """ + # TODO This summation can be trivially parallelized. + efficiency_list = (global_efficiency(G.subgraph(G[v])) for v in G) + return sum(efficiency_list) / len(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/euler.py b/MLPY/Lib/site-packages/networkx/algorithms/euler.py new file mode 100644 index 0000000000000000000000000000000000000000..9d61b5e4130b819588fb9b55d626eeb5e225523d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/euler.py @@ -0,0 +1,469 @@ +""" +Eulerian circuits and graphs. +""" +from itertools import combinations + +import networkx as nx + +from ..utils import arbitrary_element, not_implemented_for + +__all__ = [ + "is_eulerian", + "eulerian_circuit", + "eulerize", + "is_semieulerian", + "has_eulerian_path", + "eulerian_path", +] + + +@nx._dispatch +def is_eulerian(G): + """Returns True if and only if `G` is Eulerian. + + A graph is *Eulerian* if it has an Eulerian circuit. An *Eulerian + circuit* is a closed walk that includes each edge of a graph exactly + once. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not + considered to have Eulerian circuits. Therefore, if the graph is not + connected (or not strongly connected, for directed graphs), this function + returns False. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + Examples + -------- + >>> nx.is_eulerian(nx.DiGraph({0: [3], 1: [2], 2: [3], 3: [0, 1]})) + True + >>> nx.is_eulerian(nx.complete_graph(5)) + True + >>> nx.is_eulerian(nx.petersen_graph()) + False + + If you prefer to allow graphs with isolated vertices to have Eulerian circuits, + you can first remove such vertices and then call `is_eulerian` as below example shows. + + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.is_eulerian(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.is_eulerian(G) + True + + + """ + if G.is_directed(): + # Every node must have equal in degree and out degree and the + # graph must be strongly connected + return all( + G.in_degree(n) == G.out_degree(n) for n in G + ) and nx.is_strongly_connected(G) + # An undirected Eulerian graph has no vertices of odd degree and + # must be connected. + return all(d % 2 == 0 for v, d in G.degree()) and nx.is_connected(G) + + +@nx._dispatch +def is_semieulerian(G): + """Return True iff `G` is semi-Eulerian. 
+ + G is semi-Eulerian if it has an Eulerian path but no Eulerian circuit. + + See Also + -------- + has_eulerian_path + is_eulerian + """ + return has_eulerian_path(G) and not is_eulerian(G) + + +def _find_path_start(G): + """Return a suitable starting vertex for an Eulerian path. + + If no path exists, return None. + """ + if not has_eulerian_path(G): + return None + + if is_eulerian(G): + return arbitrary_element(G) + + if G.is_directed(): + v1, v2 = (v for v in G if G.in_degree(v) != G.out_degree(v)) + # Determines which is the 'start' node (as opposed to the 'end') + if G.out_degree(v1) > G.in_degree(v1): + return v1 + else: + return v2 + + else: + # In an undirected graph randomly choose one of the possibilities + start = [v for v in G if G.degree(v) % 2 != 0][0] + return start + + +def _simplegraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [source] + last_vertex = None + while vertex_stack: + current_vertex = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex) + last_vertex = current_vertex + vertex_stack.pop() + else: + _, next_vertex = arbitrary_element(edges(current_vertex)) + vertex_stack.append(next_vertex) + G.remove_edge(current_vertex, next_vertex) + + +def _multigraph_eulerian_circuit(G, source): + if G.is_directed(): + degree = G.out_degree + edges = G.out_edges + else: + degree = G.degree + edges = G.edges + vertex_stack = [(source, None)] + last_vertex = None + last_key = None + while vertex_stack: + current_vertex, current_key = vertex_stack[-1] + if degree(current_vertex) == 0: + if last_vertex is not None: + yield (last_vertex, current_vertex, last_key) + last_vertex, last_key = current_vertex, current_key + vertex_stack.pop() + else: + triple = arbitrary_element(edges(current_vertex, keys=True)) + _, next_vertex, next_key = triple + vertex_stack.append((next_vertex, next_key)) + G.remove_edge(current_vertex, next_vertex, next_key) + + +@nx._dispatch +def eulerian_circuit(G, source=None, keys=False): + """Returns an iterator over the edges of an Eulerian circuit in `G`. + + An *Eulerian circuit* is a closed walk that includes each edge of a + graph exactly once. + + Parameters + ---------- + G : NetworkX graph + A graph, either directed or undirected. + + source : node, optional + Starting node for circuit. + + keys : bool + If False, edges generated by this function will be of the form + ``(u, v)``. Otherwise, edges will be of the form ``(u, v, k)``. + This option is ignored unless `G` is a multigraph. + + Returns + ------- + edges : iterator + An iterator over edges in the Eulerian circuit. + + Raises + ------ + NetworkXError + If the graph is not Eulerian. + + See Also + -------- + is_eulerian + + Notes + ----- + This is a linear time implementation of an algorithm adapted from [1]_. + + For general information about Euler tours, see [2]_. + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. 
[2] https://en.wikipedia.org/wiki/Eulerian_path + + Examples + -------- + To get an Eulerian circuit in an undirected graph:: + + >>> G = nx.complete_graph(3) + >>> list(nx.eulerian_circuit(G)) + [(0, 2), (2, 1), (1, 0)] + >>> list(nx.eulerian_circuit(G, source=1)) + [(1, 2), (2, 0), (0, 1)] + + To get the sequence of vertices in an Eulerian circuit:: + + >>> [u for u, v in nx.eulerian_circuit(G)] + [0, 2, 1] + + """ + if not is_eulerian(G): + raise nx.NetworkXError("G is not Eulerian.") + if G.is_directed(): + G = G.reverse() + else: + G = G.copy() + if source is None: + source = arbitrary_element(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + + +@nx._dispatch +def has_eulerian_path(G, source=None): + """Return True iff `G` has an Eulerian path. + + An Eulerian path is a path in a graph which uses each edge of a graph + exactly once. If `source` is specified, then this function checks + whether an Eulerian path that starts at node `source` exists. + + A directed graph has an Eulerian path iff: + - at most one vertex has out_degree - in_degree = 1, + - at most one vertex has in_degree - out_degree = 1, + - every other vertex has equal in_degree and out_degree, + - and all of its vertices belong to a single connected + component of the underlying undirected graph. + + If `source` is not None, an Eulerian path starting at `source` exists if no + other node has out_degree - in_degree = 1. This is equivalent to either + there exists an Eulerian circuit or `source` has out_degree - in_degree = 1 + and the conditions above hold. + + An undirected graph has an Eulerian path iff: + - exactly zero or two vertices have odd degree, + - and all of its vertices belong to a single connected component. + + If `source` is not None, an Eulerian path starting at `source` exists if + either there exists an Eulerian circuit or `source` has an odd degree and the + conditions above hold. + + Graphs with isolated vertices (i.e. vertices with zero degree) are not considered + to have an Eulerian path. Therefore, if the graph is not connected (or not strongly + connected, for directed graphs), this function returns False. + + Parameters + ---------- + G : NetworkX Graph + The graph to find an euler path in. + + source : node, optional + Starting node for path. + + Returns + ------- + Bool : True if G has an Eulerian path. + + Examples + -------- + If you prefer to allow graphs with isolated vertices to have Eulerian path, + you can first remove such vertices and then call `has_eulerian_path` as below example shows. 
+ + >>> G = nx.Graph([(0, 1), (1, 2), (0, 2)]) + >>> G.add_node(3) + >>> nx.has_eulerian_path(G) + False + + >>> G.remove_nodes_from(list(nx.isolates(G))) + >>> nx.has_eulerian_path(G) + True + + See Also + -------- + is_eulerian + eulerian_path + """ + if nx.is_eulerian(G): + return True + + if G.is_directed(): + ins = G.in_degree + outs = G.out_degree + # Since we know it is not eulerian, outs - ins must be 1 for source + if source is not None and outs[source] - ins[source] != 1: + return False + + unbalanced_ins = 0 + unbalanced_outs = 0 + for v in G: + if ins[v] - outs[v] == 1: + unbalanced_ins += 1 + elif outs[v] - ins[v] == 1: + unbalanced_outs += 1 + elif ins[v] != outs[v]: + return False + + return ( + unbalanced_ins <= 1 and unbalanced_outs <= 1 and nx.is_weakly_connected(G) + ) + else: + # We know it is not eulerian, so degree of source must be odd. + if source is not None and G.degree[source] % 2 != 1: + return False + + # Sum is 2 since we know it is not eulerian (which implies sum is 0) + return sum(d % 2 == 1 for v, d in G.degree()) == 2 and nx.is_connected(G) + + +@nx._dispatch +def eulerian_path(G, source=None, keys=False): + """Return an iterator over the edges of an Eulerian path in `G`. + + Parameters + ---------- + G : NetworkX Graph + The graph in which to look for an eulerian path. + source : node or None (default: None) + The node at which to start the search. None means search over all + starting nodes. + keys : Bool (default: False) + Indicates whether to yield edge 3-tuples (u, v, edge_key). + The default yields edge 2-tuples + + Yields + ------ + Edge tuples along the eulerian path. + + Warning: If `source` provided is not the start node of an Euler path + will raise error even if an Euler Path exists. + """ + if not has_eulerian_path(G, source): + raise nx.NetworkXError("Graph has no Eulerian paths.") + if G.is_directed(): + G = G.reverse() + if source is None or nx.is_eulerian(G) is False: + source = _find_path_start(G) + if G.is_multigraph(): + for u, v, k in _multigraph_eulerian_circuit(G, source): + if keys: + yield u, v, k + else: + yield u, v + else: + yield from _simplegraph_eulerian_circuit(G, source) + else: + G = G.copy() + if source is None: + source = _find_path_start(G) + if G.is_multigraph(): + if keys: + yield from reversed( + [(v, u, k) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v, k in _multigraph_eulerian_circuit(G, source)] + ) + else: + yield from reversed( + [(v, u) for u, v in _simplegraph_eulerian_circuit(G, source)] + ) + + +@not_implemented_for("directed") +@nx._dispatch +def eulerize(G): + """Transforms a graph into an Eulerian graph. + + If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest + (in terms of the number of edges) multigraph whose underlying simple graph is `G`. + + Parameters + ---------- + G : NetworkX graph + An undirected graph + + Returns + ------- + G : NetworkX multigraph + + Raises + ------ + NetworkXError + If the graph is not connected. + + See Also + -------- + is_eulerian + eulerian_circuit + + References + ---------- + .. [1] J. Edmonds, E. L. Johnson. + Matching, Euler tours and the Chinese postman. + Mathematical programming, Volume 5, Issue 1 (1973), 111-114. + .. [2] https://en.wikipedia.org/wiki/Eulerian_path + .. 
[3] http://web.math.princeton.edu/math_alive/5/Notes1.pdf + + Examples + -------- + >>> G = nx.complete_graph(10) + >>> H = nx.eulerize(G) + >>> nx.is_eulerian(H) + True + + """ + if G.order() == 0: + raise nx.NetworkXPointlessConcept("Cannot Eulerize null graph") + if not nx.is_connected(G): + raise nx.NetworkXError("G is not connected") + odd_degree_nodes = [n for n, d in G.degree() if d % 2 == 1] + G = nx.MultiGraph(G) + if len(odd_degree_nodes) == 0: + return G + + # get all shortest paths between vertices of odd degree + odd_deg_pairs_paths = [ + (m, {n: nx.shortest_path(G, source=m, target=n)}) + for m, n in combinations(odd_degree_nodes, 2) + ] + + # use the number of vertices in a graph + 1 as an upper bound on + # the maximum length of a path in G + upper_bound_on_max_path_length = len(G) + 1 + + # use "len(G) + 1 - len(P)", + # where P is a shortest path between vertices n and m, + # as edge-weights in a new graph + # store the paths in the graph for easy indexing later + Gp = nx.Graph() + for n, Ps in odd_deg_pairs_paths: + for m, P in Ps.items(): + if n != m: + Gp.add_edge( + m, n, weight=upper_bound_on_max_path_length - len(P), path=P + ) + + # find the minimum weight matching of edges in the weighted graph + best_matching = nx.Graph(list(nx.max_weight_matching(Gp))) + + # duplicate each edge along each path in the set of paths in Gp + for m, n in best_matching.edges(): + path = Gp[m][n]["path"] + G.add_edges_from(nx.utils.pairwise(path)) + return G diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d19abed99501086359c87670edc31a680fe36c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/__init__.py @@ -0,0 +1,11 @@ +from .maxflow import * +from .mincost import * +from .boykovkolmogorov import * +from .dinitz_alg import * +from .edmondskarp import * +from .gomory_hu import * +from .preflowpush import * +from .shortestaugmentingpath import * +from .capacityscaling import * +from .networksimplex import * +from .utils import build_flow_dict, build_residual_network diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e8a7ff07d2b120652f04a51200536ed2479b2cd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e620d25bb7f6dc5d7d15a98ca536f25db9cb3cad Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e70a93b83b5e5eb052ac92e74aaad0f57523dda Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/capacityscaling.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfce9ba1496a16295ca45935564451fc65bd2646 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3c460cafd48e6e17de01aad25f70d9b52dfeef5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/edmondskarp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d727c9220208a33960661f8c18bdc207c0027fc6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/gomory_hu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf2dd5b0879d931fe2c060a20ddb96e3a3202a6a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/maxflow.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1f3c1011d193ec1e5c1004711ae09649d224be4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/mincost.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b313911c2a67e6103a3d37985b13231b3453ce1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/networksimplex.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6d08f2eebdd094fadb24e046b25720fa246e95e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/preflowpush.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80b2ba776692d420ced6acf11e4014ec555b3e2f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-39.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..f35d3abdac8b358998b442d1baf10044671982a7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/__pycache__/utils.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/boykovkolmogorov.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/boykovkolmogorov.py new file mode 100644 index 0000000000000000000000000000000000000000..f571c98aebcaf2dd4cb882fd4d466fd2551d59f5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/boykovkolmogorov.py @@ -0,0 +1,373 @@ +""" +Boykov-Kolmogorov algorithm for maximum flow problems. +""" +from collections import deque +from operator import itemgetter + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network + +__all__ = ["boykov_kolmogorov"] + + +@nx._dispatch( + graphs={"G": 0, "residual?": 4}, + edge_attrs={"capacity": float("inf")}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"residual"}, +) +def boykov_kolmogorov( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + r"""Find a maximum single-commodity flow using the Boykov-Kolmogorov algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has worst-case complexity $O(n^2 m |C|)$ for $n$ nodes, $m$ + edges, and $|C|$ the cost of the minimum cut [1]_. This implementation + uses the marking heuristic defined in [2]_ which improves its running + time in many practical problems. + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True, compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`.
:samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import boykov_kolmogorov + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = boykov_kolmogorov(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + A nice feature of the Boykov-Kolmogorov algorithm is that a partition + of the nodes that defines a minimum cut can be easily computed based + on the search trees used during the algorithm. These trees are stored + in the graph attribute `trees` of the residual network. + + >>> source_tree, target_tree = R.graph["trees"] + >>> partition = (set(source_tree), set(G) - set(source_tree)) + + Or equivalently: + + >>> partition = (set(G) - set(target_tree), set(target_tree)) + + References + ---------- + .. [1] Boykov, Y., & Kolmogorov, V. (2004). An experimental comparison + of min-cut/max-flow algorithms for energy minimization in vision. + Pattern Analysis and Machine Intelligence, IEEE Transactions on, + 26(9), 1124-1137. + https://doi.org/10.1109/TPAMI.2004.60 + + .. [2] Vladimir Kolmogorov. Graph-based Algorithms for Multi-camera + Reconstruction Problem. PhD thesis, Cornell University, CS Department, + 2003. pp. 109-114. + https://web.archive.org/web/20170809091249/https://pub.ist.ac.at/~vnk/papers/thesis.pdf + + """ + R = boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "boykov_kolmogorov" + return R + + +def boykov_kolmogorov_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. 
+ # This is way too slow + # nx.set_edge_attributes(R, 0, 'flow') + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrarily high value as infinity. It is computed + # when building the residual network. + INF = R.graph["inf"] + + if cutoff is None: + cutoff = INF + + R_succ = R.succ + R_pred = R.pred + + def grow(): + """Bidirectional breadth-first search for the growth stage. + + Returns a connecting edge, that is, an edge that connects + a node from the source search tree with a node from the + target search tree. + The first node in the connecting edge is always from the + source tree and the last node from the target tree. + """ + while active: + u = active[0] + if u in source_tree: + this_tree = source_tree + other_tree = target_tree + neighbors = R_succ + else: + this_tree = target_tree + other_tree = source_tree + neighbors = R_pred + for v, attr in neighbors[u].items(): + if attr["capacity"] - attr["flow"] > 0: + if v not in this_tree: + if v in other_tree: + return (u, v) if this_tree is source_tree else (v, u) + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + active.append(v) + elif v in this_tree and _is_closer(u, v): + this_tree[v] = u + dist[v] = dist[u] + 1 + timestamp[v] = timestamp[u] + _ = active.popleft() + return None, None + + def augment(u, v): + """Augmentation stage. + + Reconstruct the path and determine its residual capacity. + We start from a connecting edge, which links a node + from the source tree to a node from the target tree. + The connecting edge is the output of the grow function + and the input of this function. + """ + attr = R_succ[u][v] + flow = min(INF, attr["capacity"] - attr["flow"]) + path = [u] + # Trace a path from u to s in source_tree. + w = u + while w != s: + n = w + w = source_tree[n] + attr = R_pred[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + path.reverse() + # Trace a path from v to t in target_tree. + path.append(v) + w = v + while w != t: + n = w + w = target_tree[n] + attr = R_succ[n][w] + flow = min(flow, attr["capacity"] - attr["flow"]) + path.append(w) + # Augment flow along the path and check for saturated edges. + it = iter(path) + u = next(it) + these_orphans = [] + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + if R_succ[u][v]["flow"] == R_succ[u][v]["capacity"]: + if v in source_tree: + source_tree[v] = None + these_orphans.append(v) + if u in target_tree: + target_tree[u] = None + these_orphans.append(u) + u = v + orphans.extend(sorted(these_orphans, key=dist.get)) + return flow + + def adopt(): + """Adoption stage. + + Reconstruct search trees by adopting or discarding orphans. + During the augmentation stage some edges became saturated, and thus + the source and target search trees broke down into forests, with + orphans as roots of some of their trees. We have to reconstruct + the search trees rooted at the source and target before we can grow + them again.
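+ Orphans are processed in order of increasing distance label, and + candidate new parents are scanned in the same order, which tends + to keep the rebuilt trees shallow.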
+        """
+        while orphans:
+            u = orphans.popleft()
+            if u in source_tree:
+                tree = source_tree
+                neighbors = R_pred
+            else:
+                tree = target_tree
+                neighbors = R_succ
+            nbrs = ((n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree)
+            for v, attr, d in sorted(nbrs, key=itemgetter(2)):
+                if attr["capacity"] - attr["flow"] > 0:
+                    if _has_valid_root(v, tree):
+                        tree[u] = v
+                        dist[u] = dist[v] + 1
+                        timestamp[u] = time
+                        break
+            else:
+                nbrs = (
+                    (n, attr, dist[n]) for n, attr in neighbors[u].items() if n in tree
+                )
+                for v, attr, d in sorted(nbrs, key=itemgetter(2)):
+                    if attr["capacity"] - attr["flow"] > 0:
+                        if v not in active:
+                            active.append(v)
+                    if tree[v] == u:
+                        tree[v] = None
+                        orphans.appendleft(v)
+                if u in active:
+                    active.remove(u)
+                del tree[u]
+
+    def _has_valid_root(n, tree):
+        path = []
+        v = n
+        while v is not None:
+            path.append(v)
+            if v in (s, t):
+                base_dist = 0
+                break
+            elif timestamp[v] == time:
+                base_dist = dist[v]
+                break
+            v = tree[v]
+        else:
+            return False
+        length = len(path)
+        for i, u in enumerate(path, 1):
+            dist[u] = base_dist + length - i
+            timestamp[u] = time
+        return True
+
+    def _is_closer(u, v):
+        return timestamp[v] <= timestamp[u] and dist[v] > dist[u] + 1
+
+    source_tree = {s: None}
+    target_tree = {t: None}
+    active = deque([s, t])
+    orphans = deque()
+    flow_value = 0
+    # data structures for the marking heuristic
+    time = 1
+    timestamp = {s: time, t: time}
+    dist = {s: 0, t: 0}
+    while flow_value < cutoff:
+        # Growth stage
+        u, v = grow()
+        if u is None:
+            break
+        time += 1
+        # Augmentation stage
+        flow_value += augment(u, v)
+        # Adoption stage
+        adopt()
+
+    if flow_value * 2 > INF:
+        raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+
+    # Add the source and target trees in a graph attribute.
+    # A partition that defines a minimum cut can be directly
+    # computed from the search trees as explained in the docstrings.
+    R.graph["trees"] = (source_tree, target_tree)
+    # Add the standard flow_value graph attribute.
+    R.graph["flow_value"] = flow_value
+    return R
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/capacityscaling.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/capacityscaling.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c0002d86c11a83ca8d4b0209b817e7e9c1d4c88
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/capacityscaling.py
@@ -0,0 +1,405 @@
+"""
+Capacity scaling minimum cost flow algorithm.
+"""
+
+__all__ = ["capacity_scaling"]
+
+from itertools import chain
+from math import log
+
+import networkx as nx
+
+from ...utils import BinaryHeap, arbitrary_element, not_implemented_for
+
+
+def _detect_unboundedness(R):
+    """Detect infinite-capacity negative cycles."""
+    G = nx.DiGraph()
+    G.add_nodes_from(R)
+
+    # Value simulating infinity.
+    inf = R.graph["inf"]
+    # True infinity.
+    f_inf = float("inf")
+    for u in R:
+        for v, es in R[u].items():
+            # Compute the minimum weight of infinite-capacity (u, v) edges.
+            w = f_inf
+            for k, e in es.items():
+                if e["capacity"] == inf:
+                    w = min(w, e["weight"])
+            if w != f_inf:
+                G.add_edge(u, v, weight=w)
+
+    if nx.negative_edge_cycle(G):
+        raise nx.NetworkXUnbounded(
+            "Negative cost cycle of infinite capacity found. "
+            "Min cost flow may be unbounded below."
+        )
+
+
+@not_implemented_for("undirected")
+def _build_residual_network(G, demand, capacity, weight):
+    """Build a residual network and initialize a zero flow."""
+    if sum(G.nodes[u].get(demand, 0) for u in G) != 0:
+        raise nx.NetworkXUnfeasible("Sum of the demands should be 0.")
+
+    R = nx.MultiDiGraph()
+    R.add_nodes_from(
+        (u, {"excess": -G.nodes[u].get(demand, 0), "potential": 0}) for u in G
+    )
+
+    inf = float("inf")
+    # Detect self-loops with infinite capacities and negative weights.
+    for u, v, e in nx.selfloop_edges(G, data=True):
+        if e.get(weight, 0) < 0 and e.get(capacity, inf) == inf:
+            raise nx.NetworkXUnbounded(
+                "Negative cost cycle of infinite capacity found. "
+                "Min cost flow may be unbounded below."
+            )
+
+    # Extract edges with positive capacities. Self-loops excluded.
+    if G.is_multigraph():
+        edge_list = [
+            (u, v, k, e)
+            for u, v, k, e in G.edges(data=True, keys=True)
+            if u != v and e.get(capacity, inf) > 0
+        ]
+    else:
+        edge_list = [
+            (u, v, 0, e)
+            for u, v, e in G.edges(data=True)
+            if u != v and e.get(capacity, inf) > 0
+        ]
+    # Simulate infinity with the larger of the sum of absolute node imbalances
+    # and twice the sum of finite edge capacities, or with any positive value
+    # if both sums are zero. This allows the infinite-capacity edges to be
+    # distinguished for unboundedness detection and to directly participate
+    # in residual capacity calculation.
+    inf = (
+        max(
+            sum(abs(R.nodes[u]["excess"]) for u in R),
+            2
+            * sum(
+                e[capacity]
+                for u, v, k, e in edge_list
+                if capacity in e and e[capacity] != inf
+            ),
+        )
+        or 1
+    )
+    for u, v, k, e in edge_list:
+        r = min(e.get(capacity, inf), inf)
+        w = e.get(weight, 0)
+        # Add both (u, v) and (v, u) into the residual network marked with the
+        # original key. (key[1] == True) indicates the (u, v) is in the
+        # original network.
+        R.add_edge(u, v, key=(k, True), capacity=r, weight=w, flow=0)
+        R.add_edge(v, u, key=(k, False), capacity=0, weight=-w, flow=0)
+
+    # Record the value simulating infinity.
+    R.graph["inf"] = inf
+
+    _detect_unboundedness(R)
+
+    return R
+
+
+def _build_flow_dict(G, R, capacity, weight):
+    """Build a flow dictionary from a residual network."""
+    inf = float("inf")
+    flow_dict = {}
+    if G.is_multigraph():
+        for u in G:
+            flow_dict[u] = {}
+            for v, es in G[u].items():
+                flow_dict[u][v] = {
+                    # Always saturate negative selfloops.
+                    k: (
+                        0
+                        if (
+                            u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
+                        )
+                        else e[capacity]
+                    )
+                    for k, e in es.items()
+                }
+            for v, es in R[u].items():
+                if v in flow_dict[u]:
+                    flow_dict[u][v].update(
+                        (k[0], e["flow"]) for k, e in es.items() if e["flow"] > 0
+                    )
+    else:
+        for u in G:
+            flow_dict[u] = {
+                # Always saturate negative selfloops.
+                v: (
+                    0
+                    if (u != v or e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0)
+                    else e[capacity]
+                )
+                for v, e in G[u].items()
+            }
+            flow_dict[u].update(
+                (v, e["flow"])
+                for v, es in R[u].items()
+                for e in es.values()
+                if e["flow"] > 0
+            )
+    return flow_dict
+
+
+@nx._dispatch(node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0})
+def capacity_scaling(
+    G, demand="demand", capacity="capacity", weight="weight", heap=BinaryHeap
+):
+    r"""Find a minimum cost flow satisfying all demands in digraph G.
+
+    This is a capacity scaling successive shortest augmenting path algorithm.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demands if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph or MultiDiGraph on which a minimum cost flow satisfying all
+        demands is to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0; otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    heap : class
+        Type of heap to be used in the algorithm. It should be a subclass of
+        :class:`MinHeap` or implement a compatible interface.
+
+        If a stock heap implementation is to be used, :class:`BinaryHeap` is
+        recommended over :class:`PairingHeap` for Python implementations without
+        optimized attribute accesses (e.g., CPython) despite a slower
+        asymptotic running time. For Python implementations with optimized
+        attribute accesses (e.g., PyPy), :class:`PairingHeap` provides better
+        performance. Default value: :class:`BinaryHeap`.
+
+    Returns
+    -------
+    flowCost : integer
+        Cost of a minimum cost flow satisfying all demands.
+
+    flowDict : dictionary
+        If G is a digraph, a dict-of-dicts keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+        If G is a MultiDiGraph, a dict-of-dicts-of-dicts keyed by nodes
+        so that flowDict[u][v][key] is the flow on edge (u, v, key).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+        * The sum of the demands is not zero. Then, there is no
+          flow satisfying all demands.
+        * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    Notes
+    -----
+    This algorithm does not work if edge weights are floating-point numbers.
+
+    See also
+    --------
+    :meth:`network_simplex`
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost, flowDict = nx.capacity_scaling(G)
+    >>> flowCost
+    24
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+
+    It is possible to change the name of the attributes used for the
+    algorithm.
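+    The example below stores demands in the node attribute 'spam',
+    capacities in the edge attribute 'vacancies' and costs in the edge
+    attribute 'cost'.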
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("p", spam=-4)
+    >>> G.add_node("q", spam=2)
+    >>> G.add_node("a", spam=-2)
+    >>> G.add_node("d", spam=-1)
+    >>> G.add_node("t", spam=2)
+    >>> G.add_node("w", spam=3)
+    >>> G.add_edge("p", "q", cost=7, vacancies=5)
+    >>> G.add_edge("p", "a", cost=1, vacancies=4)
+    >>> G.add_edge("q", "d", cost=2, vacancies=3)
+    >>> G.add_edge("t", "q", cost=1, vacancies=2)
+    >>> G.add_edge("a", "t", cost=2, vacancies=4)
+    >>> G.add_edge("d", "w", cost=3, vacancies=4)
+    >>> G.add_edge("t", "w", cost=4, vacancies=1)
+    >>> flowCost, flowDict = nx.capacity_scaling(
+    ...     G, demand="spam", capacity="vacancies", weight="cost"
+    ... )
+    >>> flowCost
+    37
+    >>> flowDict
+    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
+    """
+    R = _build_residual_network(G, demand, capacity, weight)
+
+    inf = float("inf")
+    # Account cost of negative selfloops.
+    flow_cost = sum(
+        0
+        if e.get(capacity, inf) <= 0 or e.get(weight, 0) >= 0
+        else e[capacity] * e[weight]
+        for u, v, e in nx.selfloop_edges(G, data=True)
+    )
+
+    # Determine the maximum edge capacity.
+    wmax = max(chain([-inf], (e["capacity"] for u, v, e in R.edges(data=True))))
+    if wmax == -inf:
+        # Residual network has no edges.
+        return flow_cost, _build_flow_dict(G, R, capacity, weight)
+
+    R_nodes = R.nodes
+    R_succ = R.succ
+
+    delta = 2 ** int(log(wmax, 2))
+    while delta >= 1:
+        # Saturate Δ-residual edges with negative reduced costs to achieve
+        # Δ-optimality.
+        for u in R:
+            p_u = R_nodes[u]["potential"]
+            for v, es in R_succ[u].items():
+                for k, e in es.items():
+                    flow = e["capacity"] - e["flow"]
+                    if e["weight"] - p_u + R_nodes[v]["potential"] < 0:
+                        if flow >= delta:
+                            e["flow"] += flow
+                            R_succ[v][u][(k[0], not k[1])]["flow"] -= flow
+                            R_nodes[u]["excess"] -= flow
+                            R_nodes[v]["excess"] += flow
+        # Determine the Δ-active nodes.
+        S = set()
+        T = set()
+        S_add = S.add
+        S_remove = S.remove
+        T_add = T.add
+        T_remove = T.remove
+        for u in R:
+            excess = R_nodes[u]["excess"]
+            if excess >= delta:
+                S_add(u)
+            elif excess <= -delta:
+                T_add(u)
+        # Repeatedly augment flow from S to T along shortest paths until
+        # Δ-feasibility is achieved.
+        while S and T:
+            s = arbitrary_element(S)
+            t = None
+            # Search for a shortest path in terms of reduced costs from s to
+            # any t in T in the Δ-residual network.
+            d = {}
+            pred = {s: None}
+            h = heap()
+            h_insert = h.insert
+            h_get = h.get
+            h_insert(s, 0)
+            while h:
+                u, d_u = h.pop()
+                d[u] = d_u
+                if u in T:
+                    # Path found.
+                    t = u
+                    break
+                p_u = R_nodes[u]["potential"]
+                for v, es in R_succ[u].items():
+                    if v in d:
+                        continue
+                    wmin = inf
+                    # Find the minimum-weighted (u, v) Δ-residual edge.
+                    for k, e in es.items():
+                        if e["capacity"] - e["flow"] >= delta:
+                            w = e["weight"]
+                            if w < wmin:
+                                wmin = w
+                                kmin = k
+                                emin = e
+                    if wmin == inf:
+                        continue
+                    # Update the distance label of v.
+                    d_v = d_u + wmin - p_u + R_nodes[v]["potential"]
+                    if h_insert(v, d_v):
+                        pred[v] = (u, kmin, emin)
+            if t is not None:
+                # Augment Δ units of flow from s to t.
+                while u != s:
+                    v = u
+                    u, k, e = pred[v]
+                    e["flow"] += delta
+                    R_succ[v][u][(k[0], not k[1])]["flow"] -= delta
+                # Account node excess and deficit.
+                R_nodes[s]["excess"] -= delta
+                R_nodes[t]["excess"] += delta
+                if R_nodes[s]["excess"] < delta:
+                    S_remove(s)
+                if R_nodes[t]["excess"] > -delta:
+                    T_remove(t)
+                # Update node potentials.
+                d_t = d[t]
+                for u, d_u in d.items():
+                    R_nodes[u]["potential"] -= d_u - d_t
+            else:
+                # Path not found.
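+                # Dijkstra exhausted all Δ-residual edges reachable from s
+                # without meeting a deficit node, so s is retired from S for
+                # the rest of this scaling phase.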
+ S_remove(s) + delta //= 2 + + if any(R.nodes[u]["excess"] != 0 for u in R): + raise nx.NetworkXUnfeasible("No flow satisfying all demands.") + + # Calculate the flow cost. + for u in R: + for v, es in R_succ[u].items(): + for e in es.values(): + flow = e["flow"] + if flow > 0: + flow_cost += flow * e["weight"] + + return flow_cost, _build_flow_dict(G, R, capacity, weight) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/dinitz_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/dinitz_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..b33e0ea28234d609f540eeaf7ecad3224d85875f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/dinitz_alg.py @@ -0,0 +1,217 @@ +""" +Dinitz' algorithm for maximum flow problems. +""" +from collections import deque + +import networkx as nx +from networkx.algorithms.flow.utils import build_residual_network +from networkx.utils import pairwise + +__all__ = ["dinitz"] + + +@nx._dispatch( + graphs={"G": 0, "residual?": 4}, + edge_attrs={"capacity": float("inf")}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"residual"}, +) +def dinitz(G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None): + """Find a maximum single-commodity flow using Dinitz' algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges [1]_. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. 
:samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import dinitz + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = dinitz(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + References + ---------- + .. [1] Dinitz' Algorithm: The Original Version and Even's Version. + 2006. Yefim Dinitz. In Theoretical Computer Science. Lecture + Notes in Computer Science. Volume 3895. pp 218-240. + https://doi.org/10.1007/11685654_10 + + """ + R = dinitz_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "dinitz" + return R + + +def dinitz_impl(G, s, t, capacity, residual, cutoff): + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + # Use an arbitrary high value as infinite. It is computed + # when building the residual network. 
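+    # INF doubles as the default cutoff; a single augmentation moving more
+    # than INF / 2 units of flow is taken as proof of an infinite-capacity
+    # path (see the check in the main loop below).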
+    INF = R.graph["inf"]
+
+    if cutoff is None:
+        cutoff = INF
+
+    R_succ = R.succ
+    R_pred = R.pred
+
+    def breadth_first_search():
+        parents = {}
+        queue = deque([s])
+        while queue:
+            if t in parents:
+                break
+            u = queue.popleft()
+            for v in R_succ[u]:
+                attr = R_succ[u][v]
+                if v not in parents and attr["capacity"] - attr["flow"] > 0:
+                    parents[v] = u
+                    queue.append(v)
+        return parents
+
+    def depth_first_search(parents):
+        """Build an augmenting path by following the parents map from the sink."""
+        path = []
+        u = t
+        flow = INF
+        while u != s:
+            path.append(u)
+            v = parents[u]
+            flow = min(flow, R_pred[u][v]["capacity"] - R_pred[u][v]["flow"])
+            u = v
+        path.append(s)
+        # Augment the flow along the path found.
+        if flow > 0:
+            for u, v in pairwise(path):
+                R_pred[u][v]["flow"] += flow
+                R_pred[v][u]["flow"] -= flow
+        return flow
+
+    flow_value = 0
+    while flow_value < cutoff:
+        parents = breadth_first_search()
+        if t not in parents:
+            break
+        this_flow = depth_first_search(parents)
+        if this_flow * 2 > INF:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        flow_value += this_flow
+
+    R.graph["flow_value"] = flow_value
+    return R
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/edmondskarp.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/edmondskarp.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c8440a451965fd7432679c458a17447a18e39ab
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/edmondskarp.py
@@ -0,0 +1,250 @@
+"""
+Edmonds-Karp algorithm for maximum flow problems.
+"""
+
+import networkx as nx
+from networkx.algorithms.flow.utils import build_residual_network
+
+__all__ = ["edmonds_karp"]
+
+
+@nx._dispatch(
+    graphs="R",
+    preserve_edge_attrs={"R": {"capacity": float("inf"), "flow": 0}},
+    preserve_graph_attrs=True,
+)
+def edmonds_karp_core(R, s, t, cutoff):
+    """Implementation of the Edmonds-Karp algorithm."""
+    R_nodes = R.nodes
+    R_pred = R.pred
+    R_succ = R.succ
+
+    inf = R.graph["inf"]
+
+    def augment(path):
+        """Augment flow along a path from s to t."""
+        # Determine the path residual capacity.
+        flow = inf
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            attr = R_succ[u][v]
+            flow = min(flow, attr["capacity"] - attr["flow"])
+            u = v
+        if flow * 2 > inf:
+            raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.")
+        # Augment flow along the path.
+        it = iter(path)
+        u = next(it)
+        for v in it:
+            R_succ[u][v]["flow"] += flow
+            R_succ[v][u]["flow"] -= flow
+            u = v
+        return flow
+
+    def bidirectional_bfs():
+        """Bidirectional breadth-first search for an augmenting path."""
+        pred = {s: None}
+        q_s = [s]
+        succ = {t: None}
+        q_t = [t]
+        while True:
+            q = []
+            if len(q_s) <= len(q_t):
+                for u in q_s:
+                    for v, attr in R_succ[u].items():
+                        if v not in pred and attr["flow"] < attr["capacity"]:
+                            pred[v] = u
+                            if v in succ:
+                                return v, pred, succ
+                            q.append(v)
+                if not q:
+                    return None, None, None
+                q_s = q
+            else:
+                for u in q_t:
+                    for v, attr in R_pred[u].items():
+                        if v not in succ and attr["flow"] < attr["capacity"]:
+                            succ[v] = u
+                            if v in pred:
+                                return v, pred, succ
+                            q.append(v)
+                if not q:
+                    return None, None, None
+                q_t = q
+
+    # Look for shortest augmenting paths using breadth-first search.
+    flow_value = 0
+    while flow_value < cutoff:
+        v, pred, succ = bidirectional_bfs()
+        if pred is None:
+            break
+        path = [v]
+        # Trace a path from s to v.
+        u = v
+        while u != s:
+            u = pred[u]
+            path.append(u)
+        path.reverse()
+        # Trace a path from v to t.
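+        # (follow the successor map from the meeting node of the
+        # bidirectional search down to the sink)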
+ u = v + while u != t: + u = succ[u] + path.append(u) + flow_value += augment(path) + + return flow_value + + +def edmonds_karp_impl(G, s, t, capacity, residual, cutoff): + """Implementation of the Edmonds-Karp algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + # Initialize/reset the residual network. + for u in R: + for e in R[u].values(): + e["flow"] = 0 + + if cutoff is None: + cutoff = float("inf") + R.graph["flow_value"] = edmonds_karp_core(R, s, t, cutoff) + + return R + + +@nx._dispatch( + graphs={"G": 0, "residual?": 4}, + edge_attrs={"capacity": float("inf")}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"residual"}, +) +def edmonds_karp( + G, s, t, capacity="capacity", residual=None, value_only=False, cutoff=None +): + """Find a maximum single-commodity flow using the Edmonds-Karp algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n m^2)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. 
+ + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import edmonds_karp + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = edmonds_karp(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = edmonds_karp_impl(G, s, t, capacity, residual, cutoff) + R.graph["algorithm"] = "edmonds_karp" + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/gomory_hu.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/gomory_hu.py new file mode 100644 index 0000000000000000000000000000000000000000..0be27d5649415b289e3ff4c5fa7f0cc012920bf4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/gomory_hu.py @@ -0,0 +1,177 @@ +""" +Gomory-Hu tree of undirected Graphs. +""" +import networkx as nx +from networkx.utils import not_implemented_for + +from .edmondskarp import edmonds_karp +from .utils import build_residual_network + +default_flow_func = edmonds_karp + +__all__ = ["gomory_hu_tree"] + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs={"capacity": float("inf")}) +def gomory_hu_tree(G, capacity="capacity", flow_func=None): + r"""Returns the Gomory-Hu tree of an undirected graph G. + + A Gomory-Hu tree of an undirected graph with capacities is a + weighted tree that represents the minimum s-t cuts for all s-t + pairs in the graph. + + It only requires `n-1` minimum cut computations instead of the + obvious `n(n-1)/2`. The tree represents all s-t cuts as the + minimum cut value among any pair of nodes is the minimum edge + weight in the shortest path between the two nodes in the + Gomory-Hu tree. + + The Gomory-Hu tree also has the property that removing the + edge with the minimum weight in the shortest path between + any two nodes leaves two connected components that form + a partition of the nodes in G that defines the minimum s-t + cut. + + See Examples section below for details. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. 
If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    flow_func : function
+        Function to perform the underlying flow computations. Default value:
+        :func:`edmonds_karp`. This function performs better in sparse graphs
+        with right-tailed degree distributions.
+        :func:`shortest_augmenting_path` will perform better in denser
+        graphs.
+
+    Returns
+    -------
+    Tree : NetworkX graph
+        A NetworkX graph representing the Gomory-Hu tree of the input graph.
+
+    Raises
+    ------
+    NetworkXNotImplemented
+        Raised if the input graph is directed.
+
+    NetworkXError
+        Raised if the input graph is an empty Graph.
+
+    Examples
+    --------
+    >>> G = nx.karate_club_graph()
+    >>> nx.set_edge_attributes(G, 1, "capacity")
+    >>> T = nx.gomory_hu_tree(G)
+    >>> # The value of the minimum cut between any pair
+    ... # of nodes in G is the minimum edge weight in the
+    ... # shortest path between the two nodes in the
+    ... # Gomory-Hu tree.
+    ... def minimum_edge_weight_in_shortest_path(T, u, v):
+    ...     path = nx.shortest_path(T, u, v, weight="weight")
+    ...     return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:]))
+    >>> u, v = 0, 33
+    >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
+    >>> cut_value
+    10
+    >>> nx.minimum_cut_value(G, u, v)
+    10
+    >>> # The Gomory-Hu tree also has the property that removing the
+    ... # edge with the minimum weight in the shortest path between
+    ... # any two nodes leaves two connected components that form
+    ... # a partition of the nodes in G that defines the minimum s-t
+    ... # cut.
+    ... cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
+    >>> T.remove_edge(*edge)
+    >>> U, V = list(nx.connected_components(T))
+    >>> # Thus U and V form a partition that defines a minimum cut
+    ... # between u and v in G. You can compute the edge cut set,
+    ... # that is, the set of edges that if removed from G will
+    ... # disconnect u from v in G, with this information:
+    ... cutset = set()
+    >>> for x, nbrs in ((n, G[n]) for n in U):
+    ...     cutset.update((x, y) for y in nbrs if y in V)
+    >>> # Because we have set the capacities of all edges to 1
+    ... # the cutset contains ten edges
+    ... len(cutset)
+    10
+    >>> # You can use any maximum flow algorithm for the underlying
+    ... # flow computations using the argument flow_func
+    ... from networkx.algorithms import flow
+    >>> T = nx.gomory_hu_tree(G, flow_func=flow.boykov_kolmogorov)
+    >>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
+    >>> cut_value
+    10
+    >>> nx.minimum_cut_value(G, u, v, flow_func=flow.boykov_kolmogorov)
+    10
+
+    Notes
+    -----
+    This implementation is based on Gusfield's approach [1]_ to compute
+    Gomory-Hu trees, which does not require node contractions and has
+    the same computational complexity as the original method.
+
+    See also
+    --------
+    :func:`minimum_cut`
+    :func:`maximum_flow`
+
+    References
+    ----------
+    .. [1] Gusfield D: Very simple methods for all pairs network flow analysis.
+           SIAM J Comput 19(1):143-155, 1990.
+ + """ + if flow_func is None: + flow_func = default_flow_func + + if len(G) == 0: # empty graph + msg = "Empty Graph does not have a Gomory-Hu tree representation" + raise nx.NetworkXError(msg) + + # Start the tree as a star graph with an arbitrary node at the center + tree = {} + labels = {} + iter_nodes = iter(G) + root = next(iter_nodes) + for n in iter_nodes: + tree[n] = root + + # Reuse residual network + R = build_residual_network(G, capacity) + + # For all the leaves in the star graph tree (that is n-1 nodes). + for source in tree: + # Find neighbor in the tree + target = tree[source] + # compute minimum cut + cut_value, partition = nx.minimum_cut( + G, source, target, capacity=capacity, flow_func=flow_func, residual=R + ) + labels[(source, target)] = cut_value + # Update the tree + # Source will always be in partition[0] and target in partition[1] + for node in partition[0]: + if node != source and node in tree and tree[node] == target: + tree[node] = source + labels[node, source] = labels.get((node, target), cut_value) + # + if target != root and tree[target] in partition[0]: + labels[source, tree[target]] = labels[target, tree[target]] + labels[target, source] = cut_value + tree[source] = tree[target] + tree[target] = source + + # Build the tree + T = nx.Graph() + T.add_nodes_from(G) + T.add_weighted_edges_from(((u, v, labels[u, v]) for u, v in tree.items())) + return T diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/maxflow.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/maxflow.py new file mode 100644 index 0000000000000000000000000000000000000000..35f359ee3b04ca2d427de2c20666fe53d750f561 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/maxflow.py @@ -0,0 +1,607 @@ +""" +Maximum flow (and minimum cut) algorithms on capacitated graphs. +""" +import networkx as nx + +from .boykovkolmogorov import boykov_kolmogorov +from .dinitz_alg import dinitz +from .edmondskarp import edmonds_karp +from .preflowpush import preflow_push +from .shortestaugmentingpath import shortest_augmenting_path +from .utils import build_flow_dict + +# Define the default flow function for computing maximum flow. +default_flow_func = preflow_push + +__all__ = ["maximum_flow", "maximum_flow_value", "minimum_cut", "minimum_cut_value"] + + +@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def maximum_flow(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find a maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. 
The choice of the default function may change
+        from version to version and should not be relied on. Default value:
+        None.
+
+    kwargs : Any other keyword parameter is passed to the function that
+        computes the maximum flow.
+
+    Returns
+    -------
+    flow_value : integer, float
+        Value of the maximum flow, i.e., net outflow from the source.
+
+    flow_dict : dict
+        A dictionary containing the value of the flow that went through
+        each edge.
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support MultiGraph and MultiDiGraph. If
+        the input graph is an instance of one of these two classes, a
+        NetworkXError is raised.
+
+    NetworkXUnbounded
+        If the graph has a path of infinite capacity, the value of a
+        feasible flow on the graph is unbounded above and the function
+        raises a NetworkXUnbounded.
+
+    See also
+    --------
+    :meth:`maximum_flow_value`
+    :meth:`minimum_cut`
+    :meth:`minimum_cut_value`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it can optionally terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    maximum_flow returns both the value of the maximum flow and a
+    dictionary with all flows.
+
+    >>> flow_value, flow_dict = nx.maximum_flow(G, "x", "y")
+    >>> flow_value
+    3.0
+    >>> print(flow_dict["x"]["b"])
+    1.0
+
+    You can also use alternative algorithms for computing the
+    maximum flow by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> flow_value == nx.maximum_flow(G, "x", "y", flow_func=shortest_augmenting_path)[
+    ...     0
] + True + + """ + if flow_func is None: + if kwargs: + raise nx.NetworkXError( + "You have to explicitly set a flow_func if" + " you need to pass parameters via kwargs." + ) + flow_func = default_flow_func + + if not callable(flow_func): + raise nx.NetworkXError("flow_func has to be callable.") + + R = flow_func(flowG, _s, _t, capacity=capacity, value_only=False, **kwargs) + flow_dict = build_flow_dict(flowG, R) + + return (R.graph["flow_value"], flow_dict) + + +@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")}) +def maximum_flow_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs): + """Find the value of maximum single-commodity flow. + + Parameters + ---------- + flowG : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + _s : node + Source node for the flow. + + _t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + flow_func : function + A function for computing the maximum flow among a pair of nodes + in a capacitated graph. The function has to accept at least three + parameters: a Graph or Digraph, a source node, and a target node. + And return a residual network that follows NetworkX conventions + (see Notes). If flow_func is None, the default maximum + flow function (:meth:`preflow_push`) is used. See below for + alternative algorithms. The choice of the default function may change + from version to version and should not be relied on. Default value: + None. + + kwargs : Any other keyword parameter is passed to the function that + computes the maximum flow. + + Returns + ------- + flow_value : integer, float + Value of the maximum flow, i.e., net outflow from the source. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`minimum_cut_value` + :meth:`edmonds_karp` + :meth:`preflow_push` + :meth:`shortest_augmenting_path` + + Notes + ----- + The function used in the flow_func parameter has to return a residual + network that follows NetworkX conventions: + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. 
For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it can optionally terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    maximum_flow_value computes only the value of the
+    maximum flow:
+
+    >>> flow_value = nx.maximum_flow_value(G, "x", "y")
+    >>> flow_value
+    3.0
+
+    You can also use alternative algorithms for computing the
+    maximum flow by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> flow_value == nx.maximum_flow_value(
+    ...     G, "x", "y", flow_func=shortest_augmenting_path
+    ... )
+    True
+
+    """
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+
+    return R.graph["flow_value"]
+
+
+@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def minimum_cut(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
+    """Compute the value and the node partition of a minimum (s, t)-cut.
+
+    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
+    capacity cut is equal to the flow value of a maximum flow.
+
+    Parameters
+    ----------
+    flowG : NetworkX graph
+        Edges of the graph are expected to have an attribute called
+        'capacity'. If this attribute is not present, the edge is
+        considered to have infinite capacity.
+
+    _s : node
+        Source node for the flow.
+
+    _t : node
+        Sink node for the flow.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes
+        in a capacitated graph. The function has to accept at least three
+        parameters: a Graph or Digraph, a source node, and a target node.
+        And return a residual network that follows NetworkX conventions
+        (see Notes). If flow_func is None, the default maximum
+        flow function (:meth:`preflow_push`) is used. See below for
+        alternative algorithms. The choice of the default function may change
+        from version to version and should not be relied on. Default value:
+        None.
+
+    kwargs : Any other keyword parameter is passed to the function that
+        computes the maximum flow.
+
+    Returns
+    -------
+    cut_value : integer, float
+        Value of the minimum cut.
+
+    partition : pair of node sets
+        A partitioning of the nodes that defines a minimum cut.
+
+    Raises
+    ------
+    NetworkXUnbounded
+        If the graph has a path of infinite capacity, all cuts have
+        infinite capacity and the function raises a NetworkXUnbounded.
+
+    See also
+    --------
+    :meth:`maximum_flow`
+    :meth:`maximum_flow_value`
+    :meth:`minimum_cut_value`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it can optionally terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    minimum_cut computes both the value of the
+    minimum cut and the node partition:
+
+    >>> cut_value, partition = nx.minimum_cut(G, "x", "y")
+    >>> reachable, non_reachable = partition
+
+    'partition' here is a tuple with the two sets of nodes that define
+    the minimum cut. You can compute the cut set of edges that induce
+    the minimum cut as follows:
+
+    >>> cutset = set()
+    >>> for u, nbrs in ((n, G[n]) for n in reachable):
+    ...     cutset.update((u, v) for v in nbrs if v in non_reachable)
+    >>> print(sorted(cutset))
+    [('c', 'y'), ('x', 'b')]
+    >>> cut_value == sum(G.edges[u, v]["capacity"] for (u, v) in cutset)
+    True
+
+    You can also use alternative algorithms for computing the
+    minimum cut by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> cut_value == nx.minimum_cut(G, "x", "y", flow_func=shortest_augmenting_path)[0]
+    True
+
+    """
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
+        raise nx.NetworkXError("cutoff should not be specified.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+    # Remove saturated edges from the residual network
+    cutset = [(u, v, d) for u, v, d in R.edges(data=True) if d["flow"] == d["capacity"]]
+    R.remove_edges_from(cutset)
+
+    # Then, reachable and non-reachable nodes from source in the
+    # residual network form the node partition that defines
+    # the minimum cut.
+    non_reachable = set(dict(nx.shortest_path_length(R, target=_t)))
+    partition = (set(flowG) - non_reachable, non_reachable)
+    # Finally, add the cutset edges back to the residual network to make
+    # sure that it is reusable.
+    R.add_edges_from(cutset)
+    return (R.graph["flow_value"], partition)
+
+
+@nx._dispatch(graphs="flowG", edge_attrs={"capacity": float("inf")})
+def minimum_cut_value(flowG, _s, _t, capacity="capacity", flow_func=None, **kwargs):
+    """Compute the value of a minimum (s, t)-cut.
+
+    Use the max-flow min-cut theorem, i.e., the capacity of a minimum
+    capacity cut is equal to the flow value of a maximum flow.
+
+    Parameters
+    ----------
+    flowG : NetworkX graph
+        Edges of the graph are expected to have an attribute called
+        'capacity'. If this attribute is not present, the edge is
+        considered to have infinite capacity.
+
+    _s : node
+        Source node for the flow.
+
+    _t : node
+        Sink node for the flow.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    flow_func : function
+        A function for computing the maximum flow among a pair of nodes
+        in a capacitated graph. The function has to accept at least three
+        parameters: a Graph or Digraph, a source node, and a target node.
+        And return a residual network that follows NetworkX conventions
+        (see Notes). If flow_func is None, the default maximum
+        flow function (:meth:`preflow_push`) is used. See below for
+        alternative algorithms. The choice of the default function may change
+        from version to version and should not be relied on. Default value:
+        None.
+
+    kwargs : Any other keyword parameter is passed to the function that
+        computes the maximum flow.
+
+    Returns
+    -------
+    cut_value : integer, float
+        Value of the minimum cut.
+
+    Raises
+    ------
+    NetworkXUnbounded
+        If the graph has a path of infinite capacity, all cuts have
+        infinite capacity and the function raises a NetworkXUnbounded.
+
+    See also
+    --------
+    :meth:`maximum_flow`
+    :meth:`maximum_flow_value`
+    :meth:`minimum_cut`
+    :meth:`edmonds_karp`
+    :meth:`preflow_push`
+    :meth:`shortest_augmenting_path`
+
+    Notes
+    -----
+    The function used in the flow_func parameter has to return a residual
+    network that follows NetworkX conventions:
+
+    The residual network :samp:`R` from an input graph :samp:`G` has the
+    same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair
+    of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a
+    self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists
+    in :samp:`G`.
+
+    For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']`
+    is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists
+    in :samp:`G` or zero otherwise. If the capacity is infinite,
+    :samp:`R[u][v]['capacity']` will have a high arbitrary finite value
+    that does not affect the solution of the problem. This value is stored in
+    :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`,
+    :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and
+    satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`.
+
+    The flow value, defined as the total flow into :samp:`t`, the sink, is
+    stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using
+    only edges :samp:`(u, v)` such that
+    :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum
+    :samp:`s`-:samp:`t` cut.
+
+    Specific algorithms may store extra data in :samp:`R`.
+
+    The function should support an optional boolean parameter value_only. When
+    True, it can optionally terminate the algorithm as soon as the maximum flow
+    value and the minimum cut can be determined.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_edge("x", "a", capacity=3.0)
+    >>> G.add_edge("x", "b", capacity=1.0)
+    >>> G.add_edge("a", "c", capacity=3.0)
+    >>> G.add_edge("b", "c", capacity=5.0)
+    >>> G.add_edge("b", "d", capacity=4.0)
+    >>> G.add_edge("d", "e", capacity=2.0)
+    >>> G.add_edge("c", "y", capacity=2.0)
+    >>> G.add_edge("e", "y", capacity=3.0)
+
+    minimum_cut_value computes only the value of the
+    minimum cut:
+
+    >>> cut_value = nx.minimum_cut_value(G, "x", "y")
+    >>> cut_value
+    3.0
+
+    You can also use alternative algorithms for computing the
+    minimum cut by using the flow_func parameter.
+
+    >>> from networkx.algorithms.flow import shortest_augmenting_path
+    >>> cut_value == nx.minimum_cut_value(
+    ...     G, "x", "y", flow_func=shortest_augmenting_path
+    ... )
+    True
+
+    """
+    if flow_func is None:
+        if kwargs:
+            raise nx.NetworkXError(
+                "You have to explicitly set a flow_func if"
+                " you need to pass parameters via kwargs."
+            )
+        flow_func = default_flow_func
+
+    if not callable(flow_func):
+        raise nx.NetworkXError("flow_func has to be callable.")
+
+    if kwargs.get("cutoff") is not None and flow_func is preflow_push:
+        raise nx.NetworkXError("cutoff should not be specified.")
+
+    R = flow_func(flowG, _s, _t, capacity=capacity, value_only=True, **kwargs)
+
+    return R.graph["flow_value"]
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/mincost.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/mincost.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc8626c7c3c3cdbc6e992c6acf3051594d325c6a
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/mincost.py
@@ -0,0 +1,335 @@
+"""
+Minimum cost flow algorithms on directed connected graphs.
+"""
+
+__all__ = ["min_cost_flow_cost", "min_cost_flow", "cost_of_flow", "max_flow_min_cost"]
+
+import networkx as nx
+
+
+@nx._dispatch(node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0})
+def min_cost_flow_cost(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demands if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a minimum cost flow satisfying all demands is
+        to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0 otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowCost : integer, float
+        Cost of a minimum cost flow satisfying all demands.
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+            * The sum of the demands is not zero. Then, there is no
+              flow satisfying all demands.
+            * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    See also
+    --------
+    cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g. 100).
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost = nx.min_cost_flow_cost(G)
+    >>> flowCost
+    24
+    """
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[0]
+
+
+@nx._dispatch(node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0})
+def min_cost_flow(G, demand="demand", capacity="capacity", weight="weight"):
+    r"""Returns a minimum cost flow satisfying all demands in digraph G.
+
+    G is a digraph with edge costs and capacities and in which nodes
+    have demand, i.e., they want to send or receive some amount of
+    flow. A negative demand means that the node wants to send flow, a
+    positive demand means that the node wants to receive flow. A flow on
+    the digraph G satisfies all demands if the net flow into each node
+    is equal to the demand of that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which a minimum cost flow satisfying all demands is
+        to be found.
+
+    demand : string
+        Nodes of the graph G are expected to have an attribute demand
+        that indicates how much flow a node wants to send (negative
+        demand) or receive (positive demand). Note that the sum of the
+        demands should be 0 otherwise the problem is not feasible. If
+        this attribute is not present, a node is considered to have 0
+        demand. Default value: 'demand'.
+
+    capacity : string
+        Edges of the graph G are expected to have an attribute capacity
+        that indicates how much flow the edge can support. If this
+        attribute is not present, the edge is considered to have
+        infinite capacity. Default value: 'capacity'.
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    flowDict : dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    Raises
+    ------
+    NetworkXError
+        This exception is raised if the input graph is not directed or
+        not connected.
+
+    NetworkXUnfeasible
+        This exception is raised in the following situations:
+
+            * The sum of the demands is not zero. Then, there is no
+              flow satisfying all demands.
+            * There is no flow satisfying all demands.
+
+    NetworkXUnbounded
+        This exception is raised if the digraph G has a cycle of
+        negative cost and infinite capacity. Then, the cost of a flow
+        satisfying all demands is unbounded below.
+
+    See also
+    --------
+    cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
+
+    Notes
+    -----
+    This algorithm is not guaranteed to work if edge weights or demands
+    are floating point numbers (overflows and roundoff errors can
+    cause problems). As a workaround you can use integer numbers by
+    multiplying the relevant edge attributes by a convenient
+    constant factor (e.g. 100).
+
+    Examples
+    --------
+    A simple example of a min cost flow problem.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowDict = nx.min_cost_flow(G)
+    """
+    return nx.network_simplex(G, demand=demand, capacity=capacity, weight=weight)[1]
+
+
+@nx._dispatch(edge_attrs={"weight": 0})
+def cost_of_flow(G, flowDict, weight="weight"):
+    """Compute the cost of the flow given by flowDict on graph G.
+
+    Note that this function does not check for the validity of the
+    flow flowDict. This function will fail if the graph G and the
+    flow don't have the same edge set.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        DiGraph on which the flow given by flowDict is defined.
+
+    flowDict : dictionary
+        Dictionary of dictionaries keyed by nodes such that
+        flowDict[u][v] is the flow on edge (u, v).
+
+    weight : string
+        Edges of the graph G are expected to have an attribute weight
+        that indicates the cost incurred by sending one unit of flow on
+        that edge. If not present, the weight is considered to be 0.
+        Default value: 'weight'.
+
+    Returns
+    -------
+    cost : Integer, float
+        The total cost of the flow. This is given by the sum over all
+        edges of the product of the edge's flow and the edge's weight.
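+
+    Examples
+    --------
+    A small sketch, reusing the min cost flow example from
+    :meth:`min_cost_flow` above; the flow returned there has cost 24:
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> fd = nx.min_cost_flow(G)
+    >>> nx.cost_of_flow(G, fd)
+    24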
+ + See also + -------- + max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + """ + return sum((flowDict[u][v] * d.get(weight, 0) for u, v, d in G.edges(data=True))) + + +@nx._dispatch(edge_attrs={"capacity": float("inf"), "weight": 0}) +def max_flow_min_cost(G, s, t, capacity="capacity", weight="weight"): + """Returns a maximum (s, t)-flow of minimum cost. + + G is a digraph with edge costs and capacities. There is a source + node s and a sink node t. This function finds a maximum flow from + s to t whose total cost is minimized. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + s: node label + Source of the flow. + + t: node label + Destination of the flow. + + capacity: string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight: string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowDict: dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnbounded + This exception is raised if there is an infinite capacity path + from s to t in G. In this case there is no maximum flow. This + exception is also raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + is unbounded below. + + See also + -------- + cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edges_from( + ... [ + ... (1, 2, {"capacity": 12, "weight": 4}), + ... (1, 3, {"capacity": 20, "weight": 6}), + ... (2, 3, {"capacity": 6, "weight": -3}), + ... (2, 6, {"capacity": 14, "weight": 1}), + ... (3, 4, {"weight": 9}), + ... (3, 5, {"capacity": 10, "weight": 5}), + ... (4, 2, {"capacity": 19, "weight": 13}), + ... (4, 5, {"capacity": 4, "weight": 0}), + ... (5, 7, {"capacity": 28, "weight": 2}), + ... (6, 5, {"capacity": 11, "weight": 1}), + ... (6, 7, {"weight": 8}), + ... (7, 4, {"capacity": 6, "weight": 6}), + ... ] + ... ) + >>> mincostFlow = nx.max_flow_min_cost(G, 1, 7) + >>> mincost = nx.cost_of_flow(G, mincostFlow) + >>> mincost + 373 + >>> from networkx.algorithms.flow import maximum_flow + >>> maxFlow = maximum_flow(G, 1, 7)[1] + >>> nx.cost_of_flow(G, maxFlow) >= mincost + True + >>> mincostFlowValue = sum((mincostFlow[u][7] for u in G.predecessors(7))) - sum( + ... (mincostFlow[7][v] for v in G.successors(7)) + ... 
) + >>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7) + True + + """ + maxFlow = nx.maximum_flow_value(G, s, t, capacity=capacity) + H = nx.DiGraph(G) + H.add_node(s, demand=-maxFlow) + H.add_node(t, demand=maxFlow) + return min_cost_flow(H, capacity=capacity, weight=weight) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/networksimplex.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/networksimplex.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa3589a0c90c65ae73681c9cf48713dde675ca1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/networksimplex.py @@ -0,0 +1,664 @@ +""" +Minimum cost flow algorithms on directed connected graphs. +""" + +__all__ = ["network_simplex"] + +from itertools import chain, islice, repeat +from math import ceil, sqrt + +import networkx as nx +from networkx.utils import not_implemented_for + + +class _DataEssentialsAndFunctions: + def __init__( + self, G, multigraph, demand="demand", capacity="capacity", weight="weight" + ): + # Number all nodes and edges and hereafter reference them using ONLY their numbers + self.node_list = list(G) # nodes + self.node_indices = {u: i for i, u in enumerate(self.node_list)} # node indices + self.node_demands = [ + G.nodes[u].get(demand, 0) for u in self.node_list + ] # node demands + + self.edge_sources = [] # edge sources + self.edge_targets = [] # edge targets + if multigraph: + self.edge_keys = [] # edge keys + self.edge_indices = {} # edge indices + self.edge_capacities = [] # edge capacities + self.edge_weights = [] # edge weights + + if not multigraph: + edges = G.edges(data=True) + else: + edges = G.edges(data=True, keys=True) + + inf = float("inf") + edges = (e for e in edges if e[0] != e[1] and e[-1].get(capacity, inf) != 0) + for i, e in enumerate(edges): + self.edge_sources.append(self.node_indices[e[0]]) + self.edge_targets.append(self.node_indices[e[1]]) + if multigraph: + self.edge_keys.append(e[2]) + self.edge_indices[e[:-1]] = i + self.edge_capacities.append(e[-1].get(capacity, inf)) + self.edge_weights.append(e[-1].get(weight, 0)) + + # spanning tree specific data to be initialized + + self.edge_count = None # number of edges + self.edge_flow = None # edge flows + self.node_potentials = None # node potentials + self.parent = None # parent nodes + self.parent_edge = None # edges to parents + self.subtree_size = None # subtree sizes + self.next_node_dft = None # next nodes in depth-first thread + self.prev_node_dft = None # previous nodes in depth-first thread + self.last_descendent_dft = None # last descendants in depth-first thread + self._spanning_tree_initialized = ( + False # False until initialize_spanning_tree() is called + ) + + def initialize_spanning_tree(self, n, faux_inf): + self.edge_count = len(self.edge_indices) # number of edges + self.edge_flow = list( + chain(repeat(0, self.edge_count), (abs(d) for d in self.node_demands)) + ) # edge flows + self.node_potentials = [ + faux_inf if d <= 0 else -faux_inf for d in self.node_demands + ] # node potentials + self.parent = list(chain(repeat(-1, n), [None])) # parent nodes + self.parent_edge = list( + range(self.edge_count, self.edge_count + n) + ) # edges to parents + self.subtree_size = list(chain(repeat(1, n), [n + 1])) # subtree sizes + self.next_node_dft = list( + chain(range(1, n), [-1, 0]) + ) # next nodes in depth-first thread + self.prev_node_dft = list(range(-1, n)) # previous nodes in depth-first thread + self.last_descendent_dft = list( + chain(range(n), [n - 1]) + ) # last 
descendants in depth-first thread + self._spanning_tree_initialized = True # True only if all the assignments pass + + def find_apex(self, p, q): + """ + Find the lowest common ancestor of nodes p and q in the spanning tree. + """ + size_p = self.subtree_size[p] + size_q = self.subtree_size[q] + while True: + while size_p < size_q: + p = self.parent[p] + size_p = self.subtree_size[p] + while size_p > size_q: + q = self.parent[q] + size_q = self.subtree_size[q] + if size_p == size_q: + if p != q: + p = self.parent[p] + size_p = self.subtree_size[p] + q = self.parent[q] + size_q = self.subtree_size[q] + else: + return p + + def trace_path(self, p, w): + """ + Returns the nodes and edges on the path from node p to its ancestor w. + """ + Wn = [p] + We = [] + while p != w: + We.append(self.parent_edge[p]) + p = self.parent[p] + Wn.append(p) + return Wn, We + + def find_cycle(self, i, p, q): + """ + Returns the nodes and edges on the cycle containing edge i == (p, q) + when the latter is added to the spanning tree. + + The cycle is oriented in the direction from p to q. + """ + w = self.find_apex(p, q) + Wn, We = self.trace_path(p, w) + Wn.reverse() + We.reverse() + if We != [i]: + We.append(i) + WnR, WeR = self.trace_path(q, w) + del WnR[-1] + Wn += WnR + We += WeR + return Wn, We + + def augment_flow(self, Wn, We, f): + """ + Augment f units of flow along a cycle represented by Wn and We. + """ + for i, p in zip(We, Wn): + if self.edge_sources[i] == p: + self.edge_flow[i] += f + else: + self.edge_flow[i] -= f + + def trace_subtree(self, p): + """ + Yield the nodes in the subtree rooted at a node p. + """ + yield p + l = self.last_descendent_dft[p] + while p != l: + p = self.next_node_dft[p] + yield p + + def remove_edge(self, s, t): + """ + Remove an edge (s, t) where parent[t] == s from the spanning tree. + """ + size_t = self.subtree_size[t] + prev_t = self.prev_node_dft[t] + last_t = self.last_descendent_dft[t] + next_last_t = self.next_node_dft[last_t] + # Remove (s, t). + self.parent[t] = None + self.parent_edge[t] = None + # Remove the subtree rooted at t from the depth-first thread. + self.next_node_dft[prev_t] = next_last_t + self.prev_node_dft[next_last_t] = prev_t + self.next_node_dft[last_t] = t + self.prev_node_dft[t] = last_t + # Update the subtree sizes and last descendants of the (old) ancestors + # of t. + while s is not None: + self.subtree_size[s] -= size_t + if self.last_descendent_dft[s] == last_t: + self.last_descendent_dft[s] = prev_t + s = self.parent[s] + + def make_root(self, q): + """ + Make a node q the root of its containing subtree. + """ + ancestors = [] + while q is not None: + ancestors.append(q) + q = self.parent[q] + ancestors.reverse() + for p, q in zip(ancestors, islice(ancestors, 1, None)): + size_p = self.subtree_size[p] + last_p = self.last_descendent_dft[p] + prev_q = self.prev_node_dft[q] + last_q = self.last_descendent_dft[q] + next_last_q = self.next_node_dft[last_q] + # Make p a child of q. + self.parent[p] = q + self.parent[q] = None + self.parent_edge[p] = self.parent_edge[q] + self.parent_edge[q] = None + self.subtree_size[p] = size_p - self.subtree_size[q] + self.subtree_size[q] = size_p + # Remove the subtree rooted at q from the depth-first thread. 
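+            # (The thread next_node_dft/prev_node_dft is a doubly linked
+            # list of the nodes in depth-first order, so the subtree rooted
+            # at q occupies the contiguous segment from q to last_q and can
+            # be spliced out in constant time.)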
+ self.next_node_dft[prev_q] = next_last_q + self.prev_node_dft[next_last_q] = prev_q + self.next_node_dft[last_q] = q + self.prev_node_dft[q] = last_q + if last_p == last_q: + self.last_descendent_dft[p] = prev_q + last_p = prev_q + # Add the remaining parts of the subtree rooted at p as a subtree + # of q in the depth-first thread. + self.prev_node_dft[p] = last_q + self.next_node_dft[last_q] = p + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.last_descendent_dft[q] = last_p + + def add_edge(self, i, p, q): + """ + Add an edge (p, q) to the spanning tree where q is the root of a subtree. + """ + last_p = self.last_descendent_dft[p] + next_last_p = self.next_node_dft[last_p] + size_q = self.subtree_size[q] + last_q = self.last_descendent_dft[q] + # Make q a child of p. + self.parent[q] = p + self.parent_edge[q] = i + # Insert the subtree rooted at q into the depth-first thread. + self.next_node_dft[last_p] = q + self.prev_node_dft[q] = last_p + self.prev_node_dft[next_last_p] = last_q + self.next_node_dft[last_q] = next_last_p + # Update the subtree sizes and last descendants of the (new) ancestors + # of q. + while p is not None: + self.subtree_size[p] += size_q + if self.last_descendent_dft[p] == last_p: + self.last_descendent_dft[p] = last_q + p = self.parent[p] + + def update_potentials(self, i, p, q): + """ + Update the potentials of the nodes in the subtree rooted at a node + q connected to its parent p by an edge i. + """ + if q == self.edge_targets[i]: + d = self.node_potentials[p] - self.edge_weights[i] - self.node_potentials[q] + else: + d = self.node_potentials[p] + self.edge_weights[i] - self.node_potentials[q] + for q in self.trace_subtree(q): + self.node_potentials[q] += d + + def reduced_cost(self, i): + """Returns the reduced cost of an edge i.""" + c = ( + self.edge_weights[i] + - self.node_potentials[self.edge_sources[i]] + + self.node_potentials[self.edge_targets[i]] + ) + return c if self.edge_flow[i] == 0 else -c + + def find_entering_edges(self): + """Yield entering edges until none can be found.""" + if self.edge_count == 0: + return + + # Entering edges are found by combining Dantzig's rule and Bland's + # rule. The edges are cyclically grouped into blocks of size B. Within + # each block, Dantzig's rule is applied to find an entering edge. The + # blocks to search is determined following Bland's rule. + B = int(ceil(sqrt(self.edge_count))) # pivot block size + M = (self.edge_count + B - 1) // B # number of blocks needed to cover all edges + m = 0 # number of consecutive blocks without eligible + # entering edges + f = 0 # first edge in block + while m < M: + # Determine the next block of edges. + l = f + B + if l <= self.edge_count: + edges = range(f, l) + else: + l -= self.edge_count + edges = chain(range(f, self.edge_count), range(l)) + f = l + # Find the first edge with the lowest reduced cost. + i = min(edges, key=self.reduced_cost) + c = self.reduced_cost(i) + if c >= 0: + # No entering edge found in the current block. + m += 1 + else: + # Entering edge found. + if self.edge_flow[i] == 0: + p = self.edge_sources[i] + q = self.edge_targets[i] + else: + p = self.edge_targets[i] + q = self.edge_sources[i] + yield i, p, q + m = 0 + # All edges have nonnegative reduced costs. The current flow is + # optimal. + + def residual_capacity(self, i, p): + """Returns the residual capacity of an edge i in the direction away + from its endpoint p. 
+ """ + return ( + self.edge_capacities[i] - self.edge_flow[i] + if self.edge_sources[i] == p + else self.edge_flow[i] + ) + + def find_leaving_edge(self, Wn, We): + """Returns the leaving edge in a cycle represented by Wn and We.""" + j, s = min( + zip(reversed(We), reversed(Wn)), + key=lambda i_p: self.residual_capacity(*i_p), + ) + t = self.edge_targets[j] if self.edge_sources[j] == s else self.edge_sources[j] + return j, s, t + + +@not_implemented_for("undirected") +@nx._dispatch(node_attrs="demand", edge_attrs={"capacity": float("inf"), "weight": 0}) +def network_simplex(G, demand="demand", capacity="capacity", weight="weight"): + r"""Find a minimum cost flow satisfying all demands in digraph G. + + This is a primal network simplex algorithm that uses the leaving + arc rule to prevent cycling. + + G is a digraph with edge costs and capacities and in which nodes + have demand, i.e., they want to send or receive some amount of + flow. A negative demand means that the node wants to send flow, a + positive demand means that the node want to receive flow. A flow on + the digraph G satisfies all demand if the net flow into each node + is equal to the demand of that node. + + Parameters + ---------- + G : NetworkX graph + DiGraph on which a minimum cost flow satisfying all demands is + to be found. + + demand : string + Nodes of the graph G are expected to have an attribute demand + that indicates how much flow a node wants to send (negative + demand) or receive (positive demand). Note that the sum of the + demands should be 0 otherwise the problem in not feasible. If + this attribute is not present, a node is considered to have 0 + demand. Default value: 'demand'. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + weight : string + Edges of the graph G are expected to have an attribute weight + that indicates the cost incurred by sending one unit of flow on + that edge. If not present, the weight is considered to be 0. + Default value: 'weight'. + + Returns + ------- + flowCost : integer, float + Cost of a minimum cost flow satisfying all demands. + + flowDict : dictionary + Dictionary of dictionaries keyed by nodes such that + flowDict[u][v] is the flow edge (u, v). + + Raises + ------ + NetworkXError + This exception is raised if the input graph is not directed or + not connected. + + NetworkXUnfeasible + This exception is raised in the following situations: + + * The sum of the demands is not zero. Then, there is no + flow satisfying all demands. + * There is no flow satisfying all demand. + + NetworkXUnbounded + This exception is raised if the digraph G has a cycle of + negative cost and infinite capacity. Then, the cost of a flow + satisfying all demands is unbounded below. + + Notes + ----- + This algorithm is not guaranteed to work if edge weights or demands + are floating point numbers (overflows and roundoff errors can + cause problems). As a workaround you can use integer numbers by + multiplying the relevant edge attributes by a convenient + constant factor (eg 100). + + See also + -------- + cost_of_flow, max_flow_min_cost, min_cost_flow, min_cost_flow_cost + + Examples + -------- + A simple example of a min cost flow problem. 
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("a", demand=-5)
+    >>> G.add_node("d", demand=5)
+    >>> G.add_edge("a", "b", weight=3, capacity=4)
+    >>> G.add_edge("a", "c", weight=6, capacity=10)
+    >>> G.add_edge("b", "d", weight=1, capacity=9)
+    >>> G.add_edge("c", "d", weight=2, capacity=5)
+    >>> flowCost, flowDict = nx.network_simplex(G)
+    >>> flowCost
+    24
+    >>> flowDict
+    {'a': {'b': 4, 'c': 1}, 'd': {}, 'b': {'d': 4}, 'c': {'d': 1}}
+
+    The mincost flow algorithm can also be used to solve shortest path
+    problems. To find the shortest path between two nodes u and v,
+    give all edges an infinite capacity, give node u a demand of -1 and
+    node v a demand of 1. Then run the network simplex. The value of a
+    min cost flow will be the distance between u and v and edges
+    carrying positive flow will indicate the path.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from(
+    ...     [
+    ...         ("s", "u", 10),
+    ...         ("s", "x", 5),
+    ...         ("u", "v", 1),
+    ...         ("u", "x", 2),
+    ...         ("v", "y", 1),
+    ...         ("x", "u", 3),
+    ...         ("x", "v", 5),
+    ...         ("x", "y", 2),
+    ...         ("y", "s", 7),
+    ...         ("y", "v", 6),
+    ...     ]
+    ... )
+    >>> G.add_node("s", demand=-1)
+    >>> G.add_node("v", demand=1)
+    >>> flowCost, flowDict = nx.network_simplex(G)
+    >>> flowCost == nx.shortest_path_length(G, "s", "v", weight="weight")
+    True
+    >>> sorted([(u, v) for u in flowDict for v in flowDict[u] if flowDict[u][v] > 0])
+    [('s', 'x'), ('u', 'v'), ('x', 'u')]
+    >>> nx.shortest_path(G, "s", "v", weight="weight")
+    ['s', 'x', 'u', 'v']
+
+    It is possible to change the name of the attributes used for the
+    algorithm.
+
+    >>> G = nx.DiGraph()
+    >>> G.add_node("p", spam=-4)
+    >>> G.add_node("q", spam=2)
+    >>> G.add_node("a", spam=-2)
+    >>> G.add_node("d", spam=-1)
+    >>> G.add_node("t", spam=2)
+    >>> G.add_node("w", spam=3)
+    >>> G.add_edge("p", "q", cost=7, vacancies=5)
+    >>> G.add_edge("p", "a", cost=1, vacancies=4)
+    >>> G.add_edge("q", "d", cost=2, vacancies=3)
+    >>> G.add_edge("t", "q", cost=1, vacancies=2)
+    >>> G.add_edge("a", "t", cost=2, vacancies=4)
+    >>> G.add_edge("d", "w", cost=3, vacancies=4)
+    >>> G.add_edge("t", "w", cost=4, vacancies=1)
+    >>> flowCost, flowDict = nx.network_simplex(
+    ...     G, demand="spam", capacity="vacancies", weight="cost"
+    ... )
+    >>> flowCost
+    37
+    >>> flowDict
+    {'p': {'q': 2, 'a': 2}, 'q': {'d': 1}, 'a': {'t': 4}, 'd': {'w': 2}, 't': {'q': 1, 'w': 1}, 'w': {}}
+
+    References
+    ----------
+    .. [1] Z. Kiraly, P. Kovacs.
+           Efficient implementation of minimum-cost flow algorithms.
+           Acta Universitatis Sapientiae, Informatica 4(1):67--118. 2012.
+    .. [2] R. Barr, F. Glover, D. Klingman.
+           Enhancement of spanning tree labeling procedures for network
+           optimization.
+           INFOR 17(1):16--34. 1979.
+    """
+    ###########################################################################
+    # Problem essentials extraction and sanity check
+    ###########################################################################
+
+    if len(G) == 0:
+        raise nx.NetworkXError("graph has no nodes")
+
+    multigraph = G.is_multigraph()
+
+    # extracting data essential to problem
+    DEAF = _DataEssentialsAndFunctions(
+        G, multigraph, demand=demand, capacity=capacity, weight=weight
+    )
+
+    ###########################################################################
+    # Quick Error Detection
+    ###########################################################################
+
+    inf = float("inf")
+    for u, d in zip(DEAF.node_list, DEAF.node_demands):
+        if abs(d) == inf:
+            raise nx.NetworkXError(f"node {u!r} has infinite demand")
+    for e, w in zip(DEAF.edge_indices, DEAF.edge_weights):
+        if abs(w) == inf:
+            raise nx.NetworkXError(f"edge {e!r} has infinite weight")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if abs(e[-1].get(weight, 0)) == inf:
+            raise nx.NetworkXError(f"edge {e[:-1]!r} has infinite weight")
+
+    ###########################################################################
+    # Quick Infeasibility Detection
+    ###########################################################################
+
+    if sum(DEAF.node_demands) != 0:
+        raise nx.NetworkXUnfeasible("total node demand is not zero")
+    for e, c in zip(DEAF.edge_indices, DEAF.edge_capacities):
+        if c < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e!r} has negative capacity")
+    if not multigraph:
+        edges = nx.selfloop_edges(G, data=True)
+    else:
+        edges = nx.selfloop_edges(G, data=True, keys=True)
+    for e in edges:
+        if e[-1].get(capacity, inf) < 0:
+            raise nx.NetworkXUnfeasible(f"edge {e[:-1]!r} has negative capacity")
+
+    ###########################################################################
+    # Initialization
+    ###########################################################################
+
+    # Add a dummy node -1 and connect all existing nodes to it with infinite-
+    # capacity dummy edges. Node -1 will serve as the root of the
+    # spanning tree of the network simplex method. The new edges will be used
+    # to trivially satisfy the node demands and create an initial strongly
+    # feasible spanning tree.
+    for i, d in enumerate(DEAF.node_demands):
+        # Must be greater-than here. Zero-demand nodes must have
+        # edges pointing towards the root to ensure strong feasibility.
+        if d > 0:
+            DEAF.edge_sources.append(-1)
+            DEAF.edge_targets.append(i)
+        else:
+            DEAF.edge_sources.append(i)
+            DEAF.edge_targets.append(-1)
+    faux_inf = (
+        3
+        * max(
+            chain(
+                [
+                    sum(c for c in DEAF.edge_capacities if c < inf),
+                    sum(abs(w) for w in DEAF.edge_weights),
+                ],
+                (abs(d) for d in DEAF.node_demands),
+            )
+        )
+        or 1
+    )
+
+    n = len(DEAF.node_list)  # number of nodes
+    DEAF.edge_weights.extend(repeat(faux_inf, n))
+    DEAF.edge_capacities.extend(repeat(faux_inf, n))
+
+    # Construct the initial spanning tree.
+    DEAF.initialize_spanning_tree(n, faux_inf)
+
+    ###########################################################################
+    # Pivot loop
+    ###########################################################################
+
+    for i, p, q in DEAF.find_entering_edges():
+        Wn, We = DEAF.find_cycle(i, p, q)
+        j, s, t = DEAF.find_leaving_edge(Wn, We)
+        DEAF.augment_flow(Wn, We, DEAF.residual_capacity(j, s))
+        # Do nothing more if the entering edge is the same as the leaving edge.
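+        # Otherwise, the leaving edge j is dropped from the spanning tree
+        # and the entering edge i takes its place: the subtree hanging off
+        # the leaving edge is re-rooted at q, re-attached through edge i,
+        # and the node potentials of that subtree are updated so that all
+        # tree edges again have zero reduced cost.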
+ if i != j: + if DEAF.parent[t] != s: + # Ensure that s is the parent of t. + s, t = t, s + if We.index(i) > We.index(j): + # Ensure that q is in the subtree rooted at t. + p, q = q, p + DEAF.remove_edge(s, t) + DEAF.make_root(q) + DEAF.add_edge(i, p, q) + DEAF.update_potentials(i, p, q) + + ########################################################################### + # Infeasibility and unboundedness detection + ########################################################################### + + if any(DEAF.edge_flow[i] != 0 for i in range(-n, 0)): + raise nx.NetworkXUnfeasible("no flow satisfies all node demands") + + if any(DEAF.edge_flow[i] * 2 >= faux_inf for i in range(DEAF.edge_count)) or any( + e[-1].get(capacity, inf) == inf and e[-1].get(weight, 0) < 0 + for e in nx.selfloop_edges(G, data=True) + ): + raise nx.NetworkXUnbounded("negative cycle with infinite capacity found") + + ########################################################################### + # Flow cost calculation and flow dict construction + ########################################################################### + + del DEAF.edge_flow[DEAF.edge_count :] + flow_cost = sum(w * x for w, x in zip(DEAF.edge_weights, DEAF.edge_flow)) + flow_dict = {n: {} for n in DEAF.node_list} + + def add_entry(e): + """Add a flow dict entry.""" + d = flow_dict[e[0]] + for k in e[1:-2]: + try: + d = d[k] + except KeyError: + t = {} + d[k] = t + d = t + d[e[-2]] = e[-1] + + DEAF.edge_sources = ( + DEAF.node_list[s] for s in DEAF.edge_sources + ) # Use original nodes. + DEAF.edge_targets = ( + DEAF.node_list[t] for t in DEAF.edge_targets + ) # Use original nodes. + if not multigraph: + for e in zip(DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_flow): + add_entry(e) + edges = G.edges(data=True) + else: + for e in zip( + DEAF.edge_sources, DEAF.edge_targets, DEAF.edge_keys, DEAF.edge_flow + ): + add_entry(e) + edges = G.edges(data=True, keys=True) + for e in edges: + if e[0] != e[1]: + if e[-1].get(capacity, inf) == 0: + add_entry(e[:-1] + (0,)) + else: + w = e[-1].get(weight, 0) + if w >= 0: + add_entry(e[:-1] + (0,)) + else: + c = e[-1][capacity] + flow_cost += w * c + add_entry(e[:-1] + (c,)) + + return flow_cost, flow_dict diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/preflowpush.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/preflowpush.py new file mode 100644 index 0000000000000000000000000000000000000000..05b982ba1ff48e0d6f4ca1bb4312a26b84fb94f8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/preflowpush.py @@ -0,0 +1,429 @@ +""" +Highest-label preflow-push algorithm for maximum flow problems. 
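+
+Also known as the push-relabel method of Goldberg and Tarjan. This
+implementation always discharges an active node of highest height and
+uses the gap and global relabeling heuristics to speed up convergence.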
+""" + +from collections import deque +from itertools import islice + +import networkx as nx + +from ...utils import arbitrary_element +from .utils import ( + CurrentEdge, + GlobalRelabelThreshold, + Level, + build_residual_network, + detect_unboundedness, +) + +__all__ = ["preflow_push"] + + +def preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only): + """Implementation of the highest-label preflow-push algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if global_relabel_freq is None: + global_relabel_freq = 0 + if global_relabel_freq < 0: + raise nx.NetworkXError("global_relabel_freq must be nonnegative.") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + detect_unboundedness(R, s, t) + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + R_nodes[u]["excess"] = 0 + for e in R_succ[u].values(): + e["flow"] = 0 + + def reverse_bfs(src): + """Perform a reverse breadth-first search from src in the residual + network. + """ + heights = {src: 0} + q = deque([(src, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + return heights + + # Initialize heights of the nodes. + heights = reverse_bfs(t) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(R) + # max_height represents the height of the highest level below level n with + # at least one active node. + max_height = max(heights[u] for u in heights if u != s) + heights[s] = n + + grt = GlobalRelabelThreshold(n, R.size(), global_relabel_freq) + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + 1 + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + def push(u, v, flow): + """Push flow units of flow from u to v.""" + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + R_nodes[u]["excess"] -= flow + R_nodes[v]["excess"] += flow + + # The maximum flow must be nonzero now. Initialize the preflow by + # saturating all edges emanating from s. + for u, attr in R_succ[s].items(): + flow = attr["capacity"] + if flow > 0: + push(s, u, flow) + + # Partition nodes into levels. + levels = [Level() for i in range(2 * n)] + for u in R: + if u != s and u != t: + level = levels[R_nodes[u]["height"]] + if R_nodes[u]["excess"] > 0: + level.active.add(u) + else: + level.inactive.add(u) + + def activate(v): + """Move a node from the inactive set to the active set of its level.""" + if v != s and v != t: + level = levels[R_nodes[v]["height"]] + if v in level.inactive: + level.inactive.remove(v) + level.active.add(v) + + def relabel(u): + """Relabel a node to create an admissible edge.""" + grt.add_work(len(R_succ[u])) + return ( + min( + R_nodes[v]["height"] + for v, attr in R_succ[u].items() + if attr["flow"] < attr["capacity"] + ) + + 1 + ) + + def discharge(u, is_phase1): + """Discharge a node until it becomes inactive or, during phase 1 (see + below), its height reaches at least n. The node is known to have the + largest height among active nodes. 
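+
+        Returns the next height at which to look for active nodes.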
+ """ + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # next_height represents the next height to examine after discharging + # the current node. During phase 1, it is capped to below n. + next_height = height + levels[height].active.remove(u) + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + flow = min(R_nodes[u]["excess"], attr["capacity"] - attr["flow"]) + push(u, v, flow) + activate(v) + if R_nodes[u]["excess"] == 0: + # The node has become inactive. + levels[height].inactive.add(u) + break + try: + curr_edge.move_to_next() + except StopIteration: + # We have run off the end of the adjacency list, and there can + # be no more admissible edges. Relabel the node to create one. + height = relabel(u) + if is_phase1 and height >= n - 1: + # Although the node is still active, with a height at least + # n - 1, it is now known to be on the s side of the minimum + # s-t cut. Stop processing it until phase 2. + levels[height].active.add(u) + break + # The first relabel operation after global relabeling may not + # increase the height of the node since the 'current edge' data + # structure is not rewound. Use height instead of (height - 1) + # in case other active nodes at the same level are missed. + next_height = height + R_nodes[u]["height"] = height + return next_height + + def gap_heuristic(height): + """Apply the gap heuristic.""" + # Move all nodes at levels (height + 1) to max_height to level n + 1. + for level in islice(levels, height + 1, max_height + 1): + for u in level.active: + R_nodes[u]["height"] = n + 1 + for u in level.inactive: + R_nodes[u]["height"] = n + 1 + levels[n + 1].active.update(level.active) + level.active.clear() + levels[n + 1].inactive.update(level.inactive) + level.inactive.clear() + + def global_relabel(from_sink): + """Apply the global relabeling heuristic.""" + src = t if from_sink else s + heights = reverse_bfs(src) + if not from_sink: + # s must be reachable from t. Remove t explicitly. + del heights[t] + max_height = max(heights.values()) + if from_sink: + # Also mark nodes from which t is unreachable for relabeling. This + # serves the same purpose as the gap heuristic. + for u in R: + if u not in heights and R_nodes[u]["height"] < n: + heights[u] = n + 1 + else: + # Shift the computed heights because the height of s is n. + for u in heights: + heights[u] += n + max_height += n + del heights[src] + for u, new_height in heights.items(): + old_height = R_nodes[u]["height"] + if new_height != old_height: + if u in levels[old_height].active: + levels[old_height].active.remove(u) + levels[new_height].active.add(u) + else: + levels[old_height].inactive.remove(u) + levels[new_height].inactive.add(u) + R_nodes[u]["height"] = new_height + return max_height + + # Phase 1: Find the maximum preflow by pushing as much flow as possible to + # t. + + height = max_height + while height > 0: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + # Record the old height and level for the gap heuristic. + old_height = height + old_level = level + u = arbitrary_element(level.active) + height = discharge(u, True) + if grt.is_reached(): + # Global relabeling heuristic: Recompute the exact heights of + # all nodes. 
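+                # Exact heights make subsequent pushes head toward the sink
+                # along shortest residual paths; the amortized cost of the
+                # recomputation is kept in check by the work threshold grt.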
+ height = global_relabel(True) + max_height = height + grt.clear_work() + elif not old_level.active and not old_level.inactive: + # Gap heuristic: If the level at old_height is empty (a 'gap'), + # a minimum cut has been identified. All nodes with heights + # above old_height can have their heights set to n + 1 and not + # be further processed before a maximum preflow is found. + gap_heuristic(old_height) + height = old_height - 1 + max_height = height + else: + # Update the height of the highest level with at least one + # active node. + max_height = max(max_height, height) + + # A maximum preflow has been found. The excess at t is the maximum flow + # value. + if value_only: + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + # Phase 2: Convert the maximum preflow into a maximum flow by returning the + # excess to s. + + # Relabel all nodes so that they have accurate heights. + height = global_relabel(False) + grt.clear_work() + + # Continue to discharge the active nodes. + while height > n: + # Discharge active nodes in the current level. + while True: + level = levels[height] + if not level.active: + # All active nodes in the current level have been discharged. + # Move to the next lower level. + height -= 1 + break + u = arbitrary_element(level.active) + height = discharge(u, False) + if grt.is_reached(): + # Global relabeling heuristic. + height = global_relabel(False) + grt.clear_work() + + R.graph["flow_value"] = R_nodes[t]["excess"] + return R + + +@nx._dispatch( + graphs={"G": 0, "residual?": 4}, + edge_attrs={"capacity": float("inf")}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"residual"}, +) +def preflow_push( + G, s, t, capacity="capacity", residual=None, global_relabel_freq=1, value_only=False +): + r"""Find a maximum single-commodity flow using the highest-label + preflow-push algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 \sqrt{m})$ for $n$ nodes and + $m$ edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + global_relabel_freq : integer, float + Relative frequency of applying the global relabeling heuristic to speed + up the algorithm. If it is None, the heuristic is disabled. Default + value: 1. + + value_only : bool + If False, compute a maximum flow; otherwise, compute a maximum preflow + which is enough for computing the maximum flow value. Default value: + False. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. 
+ + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`shortest_augmenting_path` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. For each node :samp:`u` in :samp:`R`, + :samp:`R.nodes[u]['excess']` represents the difference between flow into + :samp:`u` and flow out of :samp:`u`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. Reachability to :samp:`t` using + only edges :samp:`(u, v)` such that + :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import preflow_push + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = preflow_push(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value == R.graph["flow_value"] + True + >>> # preflow_push also stores the maximum flow value + >>> # in the excess attribute of the sink node t + >>> flow_value == R.nodes["y"]["excess"] + True + >>> # For some problems, you might only want to compute a + >>> # maximum preflow. + >>> R = preflow_push(G, "x", "y", value_only=True) + >>> flow_value == R.graph["flow_value"] + True + >>> flow_value == R.nodes["y"]["excess"] + True + + """ + R = preflow_push_impl(G, s, t, capacity, residual, global_relabel_freq, value_only) + R.graph["algorithm"] = "preflow_push" + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py new file mode 100644 index 0000000000000000000000000000000000000000..d06a88b7bc84a49898272c5925088e20f8d71aef --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/shortestaugmentingpath.py @@ -0,0 +1,304 @@ +""" +Shortest augmenting path algorithm for maximum flow problems. 
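+
+Augmenting paths are located with the help of distance labels that
+lower-bound each node's distance to the sink, so the search can advance,
+retreat and relabel instead of running a fresh breadth-first search for
+every path.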
+""" + +from collections import deque + +import networkx as nx + +from .edmondskarp import edmonds_karp_core +from .utils import CurrentEdge, build_residual_network + +__all__ = ["shortest_augmenting_path"] + + +def shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff): + """Implementation of the shortest augmenting path algorithm.""" + if s not in G: + raise nx.NetworkXError(f"node {str(s)} not in graph") + if t not in G: + raise nx.NetworkXError(f"node {str(t)} not in graph") + if s == t: + raise nx.NetworkXError("source and sink are the same node") + + if residual is None: + R = build_residual_network(G, capacity) + else: + R = residual + + R_nodes = R.nodes + R_pred = R.pred + R_succ = R.succ + + # Initialize/reset the residual network. + for u in R: + for e in R_succ[u].values(): + e["flow"] = 0 + + # Initialize heights of the nodes. + heights = {t: 0} + q = deque([(t, 0)]) + while q: + u, height = q.popleft() + height += 1 + for v, attr in R_pred[u].items(): + if v not in heights and attr["flow"] < attr["capacity"]: + heights[v] = height + q.append((v, height)) + + if s not in heights: + # t is not reachable from s in the residual network. The maximum flow + # must be zero. + R.graph["flow_value"] = 0 + return R + + n = len(G) + m = R.size() / 2 + + # Initialize heights and 'current edge' data structures of the nodes. + for u in R: + R_nodes[u]["height"] = heights[u] if u in heights else n + R_nodes[u]["curr_edge"] = CurrentEdge(R_succ[u]) + + # Initialize counts of nodes in each level. + counts = [0] * (2 * n - 1) + for u in R: + counts[R_nodes[u]["height"]] += 1 + + inf = R.graph["inf"] + + def augment(path): + """Augment flow along a path from s to t.""" + # Determine the path residual capacity. + flow = inf + it = iter(path) + u = next(it) + for v in it: + attr = R_succ[u][v] + flow = min(flow, attr["capacity"] - attr["flow"]) + u = v + if flow * 2 > inf: + raise nx.NetworkXUnbounded("Infinite capacity path, flow unbounded above.") + # Augment flow along the path. + it = iter(path) + u = next(it) + for v in it: + R_succ[u][v]["flow"] += flow + R_succ[v][u]["flow"] -= flow + u = v + return flow + + def relabel(u): + """Relabel a node to create an admissible edge.""" + height = n - 1 + for v, attr in R_succ[u].items(): + if attr["flow"] < attr["capacity"]: + height = min(height, R_nodes[v]["height"]) + return height + 1 + + if cutoff is None: + cutoff = float("inf") + + # Phase 1: Look for shortest augmenting paths using depth-first search. + + flow_value = 0 + path = [s] + u = s + d = n if not two_phase else int(min(m**0.5, 2 * n ** (2.0 / 3))) + done = R_nodes[s]["height"] >= d + while not done: + height = R_nodes[u]["height"] + curr_edge = R_nodes[u]["curr_edge"] + # Depth-first search for the next node on the path to t. + while True: + v, attr = curr_edge.get() + if height == R_nodes[v]["height"] + 1 and attr["flow"] < attr["capacity"]: + # Advance to the next node following an admissible edge. + path.append(v) + u = v + break + try: + curr_edge.move_to_next() + except StopIteration: + counts[height] -= 1 + if counts[height] == 0: + # Gap heuristic: If relabeling causes a level to become + # empty, a minimum cut has been identified. The algorithm + # can now be terminated. + R.graph["flow_value"] = flow_value + return R + height = relabel(u) + if u == s and height >= d: + if not two_phase: + # t is disconnected from s in the residual network. No + # more augmenting paths exist. 
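+                            # (With the single-phase variant d == n, and a
+                            # height of n at s certifies that no residual
+                            # s-t path remains, since any such path would
+                            # have fewer than n edges.)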
+ R.graph["flow_value"] = flow_value + return R + else: + # t is at least d steps away from s. End of phase 1. + done = True + break + counts[height] += 1 + R_nodes[u]["height"] = height + if u != s: + # After relabeling, the last edge on the path is no longer + # admissible. Retreat one step to look for an alternative. + path.pop() + u = path[-1] + break + if u == t: + # t is reached. Augment flow along the path and reset it for a new + # depth-first search. + flow_value += augment(path) + if flow_value >= cutoff: + R.graph["flow_value"] = flow_value + return R + path = [s] + u = s + + # Phase 2: Look for shortest augmenting paths using breadth-first search. + flow_value += edmonds_karp_core(R, s, t, cutoff - flow_value) + + R.graph["flow_value"] = flow_value + return R + + +@nx._dispatch( + graphs={"G": 0, "residual?": 4}, + edge_attrs={"capacity": float("inf")}, + preserve_edge_attrs={"residual": {"capacity": float("inf")}}, + preserve_graph_attrs={"residual"}, +) +def shortest_augmenting_path( + G, + s, + t, + capacity="capacity", + residual=None, + value_only=False, + two_phase=False, + cutoff=None, +): + r"""Find a maximum single-commodity flow using the shortest augmenting path + algorithm. + + This function returns the residual network resulting after computing + the maximum flow. See below for details about the conventions + NetworkX uses for defining residual networks. + + This algorithm has a running time of $O(n^2 m)$ for $n$ nodes and $m$ + edges. + + + Parameters + ---------- + G : NetworkX graph + Edges of the graph are expected to have an attribute called + 'capacity'. If this attribute is not present, the edge is + considered to have infinite capacity. + + s : node + Source node for the flow. + + t : node + Sink node for the flow. + + capacity : string + Edges of the graph G are expected to have an attribute capacity + that indicates how much flow the edge can support. If this + attribute is not present, the edge is considered to have + infinite capacity. Default value: 'capacity'. + + residual : NetworkX graph + Residual network on which the algorithm is to be executed. If None, a + new residual network is created. Default value: None. + + value_only : bool + If True compute only the value of the maximum flow. This parameter + will be ignored by this algorithm because it is not applicable. + + two_phase : bool + If True, a two-phase variant is used. The two-phase variant improves + the running time on unit-capacity networks from $O(nm)$ to + $O(\min(n^{2/3}, m^{1/2}) m)$. Default value: False. + + cutoff : integer, float + If specified, the algorithm will terminate when the flow value reaches + or exceeds the cutoff. In this case, it may be unable to immediately + determine a minimum cut. Default value: None. + + Returns + ------- + R : NetworkX DiGraph + Residual network after computing the maximum flow. + + Raises + ------ + NetworkXError + The algorithm does not support MultiGraph and MultiDiGraph. If + the input graph is an instance of one of these two classes, a + NetworkXError is raised. + + NetworkXUnbounded + If the graph has a path of infinite capacity, the value of a + feasible flow on the graph is unbounded above and the function + raises a NetworkXUnbounded. + + See also + -------- + :meth:`maximum_flow` + :meth:`minimum_cut` + :meth:`edmonds_karp` + :meth:`preflow_push` + + Notes + ----- + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. 
:samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + Examples + -------- + >>> from networkx.algorithms.flow import shortest_augmenting_path + + The functions that implement flow algorithms and output a residual + network, such as this one, are not imported to the base NetworkX + namespace, so you have to explicitly import them from the flow package. + + >>> G = nx.DiGraph() + >>> G.add_edge("x", "a", capacity=3.0) + >>> G.add_edge("x", "b", capacity=1.0) + >>> G.add_edge("a", "c", capacity=3.0) + >>> G.add_edge("b", "c", capacity=5.0) + >>> G.add_edge("b", "d", capacity=4.0) + >>> G.add_edge("d", "e", capacity=2.0) + >>> G.add_edge("c", "y", capacity=2.0) + >>> G.add_edge("e", "y", capacity=3.0) + >>> R = shortest_augmenting_path(G, "x", "y") + >>> flow_value = nx.maximum_flow_value(G, "x", "y") + >>> flow_value + 3.0 + >>> flow_value == R.graph["flow_value"] + True + + """ + R = shortest_augmenting_path_impl(G, s, t, capacity, residual, two_phase, cutoff) + R.graph["algorithm"] = "shortest_augmenting_path" + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a750d2ee1f682f8298b91b023b65c6e53c4ef1a9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9087000528d5e5ac0e7cfdbb6d9895935d06e2df Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a58f2b0520d1df2c6ca1a58ddd409cc424490df4 Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4567bdbfca5fd8cf208134f70d2452d49d7f72e8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24b02cc7938be878eb147a01fe8d38f9de35d86e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..983fc4596fb7176c77180f9a25ce3c8408478a29 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..5e9291ea7aa77204bbaab28651e6a4d4f47a4bea --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gl1.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf8f81ceb5eaaee1621aa60b892d83e596a6173f6f6517359b679ff3daa1b0f8 +size 44623 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..356e5deb3d243226bd9942e3ce02129d3d7a0201 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/gw1.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f79f0e90fa4c51ec79165f15963e1ed89477576e06bcaa67ae622c260411931 +size 42248 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..9351606de26547246c807a6f74ffa81c84448456 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b17e66cdeda8edb8d1dec72626c77f1f65dd4675e3f76dc2fc4fd84aa038e30 +size 18972 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py new file mode 100644 index 0000000000000000000000000000000000000000..1649ec82c719226e9caa68268d8953f7cae6ef74 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_gomory_hu.py @@ -0,0 +1,128 @@ +from itertools import combinations + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + dinitz, + 
edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +class TestGomoryHuTree: + def minimum_edge_weight(self, T, u, v): + path = nx.shortest_path(T, u, v, weight="weight") + return min((T[u][v]["weight"], (u, v)) for (u, v) in zip(path, path[1:])) + + def compute_cutset(self, G, T_orig, edge): + T = T_orig.copy() + T.remove_edge(*edge) + U, V = list(nx.connected_components(T)) + cutset = set() + for x, nbrs in ((n, G[n]) for n in U): + cutset.update((x, y) for y in nbrs if y in V) + return cutset + + def test_default_flow_function_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_davis_southern_women_graph(self): + G = nx.davis_southern_women_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_florentine_families_graph(self): + G = nx.florentine_families_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + @pytest.mark.slow + def test_les_miserables_graph_cutset(self): + G = nx.les_miserables_graph() + nx.set_edge_attributes(G, 1, "capacity") + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v) == cut_value + + def test_karate_club_graph_cutset(self): + G = nx.karate_club_graph() + nx.set_edge_attributes(G, 1, "capacity") + T = nx.gomory_hu_tree(G) + assert nx.is_tree(T) + u, v = 0, 33 + cut_value, edge = self.minimum_edge_weight(T, u, v) + cutset = self.compute_cutset(G, T, edge) + assert cut_value == len(cutset) + + def test_wikipedia_example(self): + # Example from https://en.wikipedia.org/wiki/Gomory%E2%80%93Hu_tree + G = nx.Graph() + G.add_weighted_edges_from( + ( + (0, 1, 1), + (0, 2, 7), + (1, 2, 1), + (1, 3, 3), + (1, 4, 2), + (2, 4, 4), + (3, 4, 1), + (3, 5, 6), + (4, 5, 2), + ) + ) + for flow_func in flow_funcs: + T = nx.gomory_hu_tree(G, capacity="weight", flow_func=flow_func) + assert nx.is_tree(T) + for u, v in combinations(G, 2): + cut_value, edge = self.minimum_edge_weight(T, u, v) + assert nx.minimum_cut_value(G, u, v, capacity="weight") == cut_value + + def test_directed_raises(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + T = nx.gomory_hu_tree(G) + + def test_empty_raises(self): + with pytest.raises(nx.NetworkXError): + G = nx.empty_graph() + T = nx.gomory_hu_tree(G) 
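The Gomory-Hu tests above all exercise the same property of the tree returned by `nx.gomory_hu_tree`: the minimum s-t cut value in the original graph equals the smallest edge weight on the unique s-t path in the tree, which is exactly what the `minimum_edge_weight` helper reads off. A minimal standalone sketch of that query pattern, reusing the unit-capacity karate-club setup from the tests (illustrative only, not part of the vendored files):

```python
import networkx as nx

# Unit-capacity karate club graph, as in the tests above.
G = nx.karate_club_graph()
nx.set_edge_attributes(G, 1, "capacity")

# One tree, built from n - 1 max-flow computations, answers
# every pairwise minimum-cut query on G.
T = nx.gomory_hu_tree(G)

u, v = 0, 33
path = nx.shortest_path(T, u, v, weight="weight")
# The minimum u-v cut value is the smallest weight on the tree path.
cut_value = min(T[a][b]["weight"] for a, b in zip(path, path[1:]))
assert cut_value == nx.minimum_cut_value(G, u, v)
```

This is the invariant that each test method asserts for every algorithm in `flow_funcs`.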
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..c31d8422843cdc8407032220ab9a76de52661a6e
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow.py
@@ -0,0 +1,560 @@
+"""Maximum flow algorithms test suite.
+"""
+import pytest
+
+import networkx as nx
+from networkx.algorithms.flow import (
+    boykov_kolmogorov,
+    build_flow_dict,
+    build_residual_network,
+    dinitz,
+    edmonds_karp,
+    preflow_push,
+    shortest_augmenting_path,
+)
+
+flow_funcs = {
+    boykov_kolmogorov,
+    dinitz,
+    edmonds_karp,
+    preflow_push,
+    shortest_augmenting_path,
+}
+
+max_min_funcs = {nx.maximum_flow, nx.minimum_cut}
+flow_value_funcs = {nx.maximum_flow_value, nx.minimum_cut_value}
+# These sets are disjoint, so they must be combined with a union; an
+# intersection here would be empty and silently turn every loop over
+# interface_funcs or all_funcs into a no-op.
+interface_funcs = max_min_funcs | flow_value_funcs
+all_funcs = flow_funcs | interface_funcs
+
+
+def compute_cutset(G, partition):
+    reachable, non_reachable = partition
+    cutset = set()
+    for u, nbrs in ((n, G[n]) for n in reachable):
+        cutset.update((u, v) for v in nbrs if v in non_reachable)
+    return cutset
+
+
+def validate_flows(G, s, t, flowDict, solnValue, capacity, flow_func):
+    errmsg = f"Assertion failed in function: {flow_func.__name__}"
+    assert set(G) == set(flowDict), errmsg
+    for u in G:
+        assert set(G[u]) == set(flowDict[u]), errmsg
+    excess = {u: 0 for u in flowDict}
+    for u in flowDict:
+        for v, flow in flowDict[u].items():
+            if capacity in G[u][v]:
+                assert flow <= G[u][v][capacity], errmsg
+            assert flow >= 0, errmsg
+            excess[u] -= flow
+            excess[v] += flow
+    for u, exc in excess.items():
+        if u == s:
+            assert exc == -solnValue, errmsg
+        elif u == t:
+            assert exc == solnValue, errmsg
+        else:
+            assert exc == 0, errmsg
+
+
+def validate_cuts(G, s, t, solnValue, partition, capacity, flow_func):
+    errmsg = f"Assertion failed in function: {flow_func.__name__}"
+    assert all(n in G for n in partition[0]), errmsg
+    assert all(n in G for n in partition[1]), errmsg
+    cutset = compute_cutset(G, partition)
+    assert all(G.has_edge(u, v) for (u, v) in cutset), errmsg
+    assert solnValue == sum(G[u][v][capacity] for (u, v) in cutset), errmsg
+    H = G.copy()
+    H.remove_edges_from(cutset)
+    if not G.is_directed():
+        assert not nx.is_connected(H), errmsg
+    else:
+        assert not nx.is_strongly_connected(H), errmsg
+
+
+def compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="capacity"):
+    for flow_func in flow_funcs:
+        errmsg = f"Assertion failed in function: {flow_func.__name__}"
+        R = flow_func(G, s, t, capacity)
+        # Test both legacy and new implementations.
+ flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + assert flow_value == solnValue, errmsg + validate_flows(G, s, t, flow_dict, solnValue, capacity, flow_func) + # Minimum cut + cut_value, partition = nx.minimum_cut( + G, s, t, capacity=capacity, flow_func=flow_func + ) + validate_cuts(G, s, t, solnValue, partition, capacity, flow_func) + + +class TestMaxflowMinCutCommon: + def test_graph1(self): + # Trivial undirected graph + G = nx.Graph() + G.add_edge(1, 2, capacity=1.0) + + solnFlows = {1: {2: 1.0}, 2: {1: 1.0}} + + compare_flows_and_cuts(G, 1, 2, solnFlows, 1.0) + + def test_graph2(self): + # A more complex undirected graph + # adapted from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.Graph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 3, "b": 1}, + "a": {"c": 3, "x": 3}, + "b": {"c": 1, "d": 2, "x": 1}, + "c": {"a": 3, "b": 1, "y": 2}, + "d": {"b": 2, "e": 2}, + "e": {"d": 2, "y": 2}, + "y": {"c": 2, "e": 2}, + } + + compare_flows_and_cuts(G, "x", "y", H, 4.0) + + def test_digraph1(self): + # The classic directed graph example + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1000.0) + G.add_edge("a", "c", capacity=1000.0) + G.add_edge("b", "c", capacity=1.0) + G.add_edge("b", "d", capacity=1000.0) + G.add_edge("c", "d", capacity=1000.0) + + H = { + "a": {"b": 1000.0, "c": 1000.0}, + "b": {"c": 0, "d": 1000.0}, + "c": {"d": 1000.0}, + "d": {}, + } + + compare_flows_and_cuts(G, "a", "d", H, 2000.0) + + def test_digraph2(self): + # An example in which some edges end up with zero flow. + G = nx.DiGraph() + G.add_edge("s", "b", capacity=2) + G.add_edge("s", "c", capacity=1) + G.add_edge("c", "d", capacity=1) + G.add_edge("d", "a", capacity=1) + G.add_edge("b", "a", capacity=2) + G.add_edge("a", "t", capacity=2) + + H = { + "s": {"b": 2, "c": 0}, + "c": {"d": 0}, + "d": {"a": 0}, + "b": {"a": 2}, + "a": {"t": 2}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 2) + + def test_digraph3(self): + # A directed graph example from Cormen et al. 
+ G = nx.DiGraph() + G.add_edge("s", "v1", capacity=16.0) + G.add_edge("s", "v2", capacity=13.0) + G.add_edge("v1", "v2", capacity=10.0) + G.add_edge("v2", "v1", capacity=4.0) + G.add_edge("v1", "v3", capacity=12.0) + G.add_edge("v3", "v2", capacity=9.0) + G.add_edge("v2", "v4", capacity=14.0) + G.add_edge("v4", "v3", capacity=7.0) + G.add_edge("v3", "t", capacity=20.0) + G.add_edge("v4", "t", capacity=4.0) + + H = { + "s": {"v1": 12.0, "v2": 11.0}, + "v2": {"v1": 0, "v4": 11.0}, + "v1": {"v2": 0, "v3": 12.0}, + "v3": {"v2": 0, "t": 19.0}, + "v4": {"v3": 7.0, "t": 4.0}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 23.0) + + def test_digraph4(self): + # A more complex directed graph + # from https://web.archive.org/web/20220815055650/https://www.topcoder.com/thrive/articles/Maximum%20Flow:%20Part%20One + G = nx.DiGraph() + G.add_edge("x", "a", capacity=3.0) + G.add_edge("x", "b", capacity=1.0) + G.add_edge("a", "c", capacity=3.0) + G.add_edge("b", "c", capacity=5.0) + G.add_edge("b", "d", capacity=4.0) + G.add_edge("d", "e", capacity=2.0) + G.add_edge("c", "y", capacity=2.0) + G.add_edge("e", "y", capacity=3.0) + + H = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + + compare_flows_and_cuts(G, "x", "y", H, 3.0) + + def test_wikipedia_dinitz_example(self): + # Nice example from https://en.wikipedia.org/wiki/Dinic's_algorithm + G = nx.DiGraph() + G.add_edge("s", 1, capacity=10) + G.add_edge("s", 2, capacity=10) + G.add_edge(1, 3, capacity=4) + G.add_edge(1, 4, capacity=8) + G.add_edge(1, 2, capacity=2) + G.add_edge(2, 4, capacity=9) + G.add_edge(3, "t", capacity=10) + G.add_edge(4, 3, capacity=6) + G.add_edge(4, "t", capacity=10) + + solnFlows = { + 1: {2: 0, 3: 4, 4: 6}, + 2: {4: 9}, + 3: {"t": 9}, + 4: {3: 5, "t": 10}, + "s": {1: 10, 2: 9}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", solnFlows, 19) + + def test_optional_capacity(self): + # Test optional capacity parameter. 
+ G = nx.DiGraph() + G.add_edge("x", "a", spam=3.0) + G.add_edge("x", "b", spam=1.0) + G.add_edge("a", "c", spam=3.0) + G.add_edge("b", "c", spam=5.0) + G.add_edge("b", "d", spam=4.0) + G.add_edge("d", "e", spam=2.0) + G.add_edge("c", "y", spam=2.0) + G.add_edge("e", "y", spam=3.0) + + solnFlows = { + "x": {"a": 2.0, "b": 1.0}, + "a": {"c": 2.0}, + "b": {"c": 0, "d": 1.0}, + "c": {"y": 2.0}, + "d": {"e": 1.0}, + "e": {"y": 1.0}, + "y": {}, + } + solnValue = 3.0 + s = "x" + t = "y" + + compare_flows_and_cuts(G, s, t, solnFlows, solnValue, capacity="spam") + + def test_digraph_infcap_edges(self): + # DiGraph with infinite capacity edges + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "b": {"c": 12}, + "c": {"t": 37}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + # DiGraph with infinite capacity digon + G = nx.DiGraph() + G.add_edge("s", "a", capacity=85) + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("c", "a") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t", capacity=37) + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "t": 60}, + "c": {"a": 0, "t": 37}, + "b": {"c": 12}, + "t": {}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph_infcap_path(self): + # Graph with infinite capacity (s, t)-path + G = nx.DiGraph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c") + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + for flow_func in all_funcs: + pytest.raises(nx.NetworkXUnbounded, flow_func, G, "s", "t") + + def test_graph_infcap_edges(self): + # Undirected graph with infinite capacity edges + G = nx.Graph() + G.add_edge("s", "a") + G.add_edge("s", "b", capacity=30) + G.add_edge("a", "c", capacity=25) + G.add_edge("b", "c", capacity=12) + G.add_edge("a", "t", capacity=60) + G.add_edge("c", "t") + + H = { + "s": {"a": 85, "b": 12}, + "a": {"c": 25, "s": 85, "t": 60}, + "b": {"c": 12, "s": 12}, + "c": {"a": 25, "b": 12, "t": 37}, + "t": {"a": 60, "c": 37}, + } + + compare_flows_and_cuts(G, "s", "t", H, 97) + + def test_digraph5(self): + # From ticket #429 by mfrasca. 
+        G = nx.DiGraph()
+        G.add_edge("s", "a", capacity=2)
+        G.add_edge("s", "b", capacity=2)
+        G.add_edge("a", "b", capacity=5)
+        G.add_edge("a", "t", capacity=1)
+        G.add_edge("b", "a", capacity=1)
+        G.add_edge("b", "t", capacity=3)
+        flowSoln = {
+            "a": {"b": 1, "t": 1},
+            "b": {"a": 0, "t": 3},
+            "s": {"a": 2, "b": 2},
+            "t": {},
+        }
+        compare_flows_and_cuts(G, "s", "t", flowSoln, 4)
+
+    def test_disconnected(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
+        G.remove_node(1)
+        assert nx.maximum_flow_value(G, 0, 3) == 0
+        flowSoln = {0: {}, 2: {3: 0}, 3: {2: 0}}
+        compare_flows_and_cuts(G, 0, 3, flowSoln, 0)
+
+    def test_source_target_not_in_graph(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
+        G.remove_node(0)
+        for flow_func in all_funcs:
+            pytest.raises(nx.NetworkXError, flow_func, G, 0, 3)
+        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
+        G.remove_node(3)
+        for flow_func in all_funcs:
+            pytest.raises(nx.NetworkXError, flow_func, G, 0, 3)
+
+    def test_source_target_coincide(self):
+        G = nx.Graph()
+        G.add_node(0)
+        for flow_func in all_funcs:
+            pytest.raises(nx.NetworkXError, flow_func, G, 0, 0)
+
+    def test_multigraphs_raise(self):
+        # Both multigraph variants must be rejected by every flow function.
+        G = nx.MultiGraph()
+        M = nx.MultiDiGraph()
+        G.add_edges_from([(0, 1), (1, 0)], capacity=True)
+        M.add_edges_from([(0, 1), (1, 0)], capacity=True)
+        for flow_func in all_funcs:
+            pytest.raises(nx.NetworkXError, flow_func, G, 0, 0)
+            pytest.raises(nx.NetworkXError, flow_func, M, 0, 0)
+
+
+class TestMaxFlowMinCutInterface:
+    def setup_method(self):
+        G = nx.DiGraph()
+        G.add_edge("x", "a", capacity=3.0)
+        G.add_edge("x", "b", capacity=1.0)
+        G.add_edge("a", "c", capacity=3.0)
+        G.add_edge("b", "c", capacity=5.0)
+        G.add_edge("b", "d", capacity=4.0)
+        G.add_edge("d", "e", capacity=2.0)
+        G.add_edge("c", "y", capacity=2.0)
+        G.add_edge("e", "y", capacity=3.0)
+        self.G = G
+        H = nx.DiGraph()
+        H.add_edge(0, 1, capacity=1.0)
+        H.add_edge(1, 2, capacity=1.0)
+        self.H = H
+
+    def test_flow_func_not_callable(self):
+        elements = ["this_should_be_callable", 10, {1, 2, 3}]
+        G = nx.Graph()
+        G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1)], weight="capacity")
+        for flow_func in interface_funcs:
+            for element in elements:
+                pytest.raises(nx.NetworkXError, flow_func, G, 0, 1, flow_func=element)
+
+    def test_flow_func_parameters(self):
+        G = self.G
+        fv = 3.0
+        for interface_func in interface_funcs:
+            for flow_func in flow_funcs:
+                errmsg = (
+                    f"Assertion failed in function: {flow_func.__name__} "
+                    f"in interface {interface_func.__name__}"
+                )
+                result = interface_func(G, "x", "y", flow_func=flow_func)
+                if interface_func in max_min_funcs:
+                    result = result[0]
+                assert fv == result, errmsg
+
+    def test_minimum_cut_no_cutoff(self):
+        G = self.G
+        pytest.raises(
+            nx.NetworkXError,
+            nx.minimum_cut,
+            G,
+            "x",
+            "y",
+            flow_func=preflow_push,
+            cutoff=1.0,
+        )
+        pytest.raises(
+            nx.NetworkXError,
+            nx.minimum_cut_value,
+            G,
+            "x",
+            "y",
+            flow_func=preflow_push,
+            cutoff=1.0,
+        )
+
+    def test_kwargs(self):
+        G = self.H
+        fv = 1.0
+        to_test = (
+            (shortest_augmenting_path, {"two_phase": True}),
+            (preflow_push, {"global_relabel_freq": 5}),
+        )
+        for interface_func in interface_funcs:
+            for flow_func, kwargs in to_test:
+                errmsg = (
+                    f"Assertion failed in function: {flow_func.__name__} "
+                    f"in interface {interface_func.__name__}"
+                )
+                result = interface_func(G, 0, 2, flow_func=flow_func, **kwargs)
+                if interface_func in max_min_funcs:
result = result[0] + assert fv == result, errmsg + + def test_kwargs_default_flow_func(self): + G = self.H + for interface_func in interface_funcs: + pytest.raises( + nx.NetworkXError, interface_func, G, 0, 1, global_relabel_freq=2 + ) + + def test_reusing_residual(self): + G = self.G + fv = 3.0 + s, t = "x", "y" + R = build_residual_network(G, "capacity") + for interface_func in interface_funcs: + for flow_func in flow_funcs: + errmsg = ( + f"Assertion failed in function: {flow_func.__name__} " + f"in interface {interface_func.__name__}" + ) + for i in range(3): + result = interface_func( + G, "x", "y", flow_func=flow_func, residual=R + ) + if interface_func in max_min_funcs: + result = result[0] + assert fv == result, errmsg + + +# Tests specific to one algorithm +def test_preflow_push_global_relabel_freq(): + G = nx.DiGraph() + G.add_edge(1, 2, capacity=1) + R = preflow_push(G, 1, 2, global_relabel_freq=None) + assert R.graph["flow_value"] == 1 + pytest.raises(nx.NetworkXError, preflow_push, G, 1, 2, global_relabel_freq=-1) + + +def test_preflow_push_makes_enough_space(): + # From ticket #1542 + G = nx.DiGraph() + nx.add_path(G, [0, 1, 3], capacity=1) + nx.add_path(G, [1, 2, 3], capacity=1) + R = preflow_push(G, 0, 3, value_only=False) + assert R.graph["flow_value"] == 1 + + +def test_shortest_augmenting_path_two_phase(): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=1) + nx.add_path(G, ((i, j) for j in range(p)), capacity=1) + G.add_edge((i, p - 1), "t", capacity=1) + R = shortest_augmenting_path(G, "s", "t", two_phase=True) + assert R.graph["flow_value"] == k + R = shortest_augmenting_path(G, "s", "t", two_phase=False) + assert R.graph["flow_value"] == k + + +class TestCutoff: + def test_cutoff(self): + k = 5 + p = 1000 + G = nx.DiGraph() + for i in range(k): + G.add_edge("s", (i, 0), capacity=2) + nx.add_path(G, ((i, j) for j in range(p)), capacity=2) + G.add_edge((i, p - 1), "t", capacity=2) + R = shortest_augmenting_path(G, "s", "t", two_phase=True, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = shortest_augmenting_path(G, "s", "t", two_phase=False, cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = edmonds_karp(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = dinitz(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + R = boykov_kolmogorov(G, "s", "t", cutoff=k) + assert k <= R.graph["flow_value"] <= (2 * k) + + def test_complete_graph_cutoff(self): + G = nx.complete_graph(5) + nx.set_edge_attributes(G, {(u, v): 1 for u, v in G.edges()}, "capacity") + for flow_func in [ + shortest_augmenting_path, + edmonds_karp, + dinitz, + boykov_kolmogorov, + ]: + for cutoff in [3, 2, 1]: + result = nx.maximum_flow_value( + G, 0, 4, flow_func=flow_func, cutoff=cutoff + ) + assert cutoff == result, f"cutoff error in {flow_func.__name__}" diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..7b4859c8a8b325c6d2e3ab06430de4afbc007803 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_maxflow_large_graph.py @@ -0,0 +1,157 @@ +"""Maximum flow algorithms test suite on large graphs. 
+""" + +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx +from networkx.algorithms.flow import ( + boykov_kolmogorov, + build_flow_dict, + build_residual_network, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +) + +flow_funcs = [ + boykov_kolmogorov, + dinitz, + edmonds_karp, + preflow_push, + shortest_augmenting_path, +] + + +def gen_pyramid(N): + # This graph admits a flow of value 1 for which every arc is at + # capacity (except the arcs incident to the sink which have + # infinite capacity). + G = nx.DiGraph() + + for i in range(N - 1): + cap = 1.0 / (i + 2) + for j in range(i + 1): + G.add_edge((i, j), (i + 1, j), capacity=cap) + cap = 1.0 / (i + 1) - cap + G.add_edge((i, j), (i + 1, j + 1), capacity=cap) + cap = 1.0 / (i + 2) - cap + + for j in range(N): + G.add_edge((N - 1, j), "t") + + return G + + +def read_graph(name): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / f"{name}.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + return G + + +def validate_flows(G, s, t, soln_value, R, flow_func): + flow_value = R.graph["flow_value"] + flow_dict = build_flow_dict(G, R) + errmsg = f"Assertion failed in function: {flow_func.__name__}" + assert soln_value == flow_value, errmsg + assert set(G) == set(flow_dict), errmsg + for u in G: + assert set(G[u]) == set(flow_dict[u]), errmsg + excess = {u: 0 for u in flow_dict} + for u in flow_dict: + for v, flow in flow_dict[u].items(): + assert flow <= G[u][v].get("capacity", float("inf")), errmsg + assert flow >= 0, errmsg + excess[u] -= flow + excess[v] += flow + for u, exc in excess.items(): + if u == s: + assert exc == -soln_value, errmsg + elif u == t: + assert exc == soln_value, errmsg + else: + assert exc == 0, errmsg + + +class TestMaxflowLargeGraph: + def test_complete_graph(self): + N = 50 + G = nx.complete_graph(N) + nx.set_edge_attributes(G, 5, "capacity") + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs) + assert flow_value == 5 * (N - 1), errmsg + + def test_pyramid(self): + N = 10 + # N = 100 # this gives a graph with 5051 nodes + G = gen_pyramid(N) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + kwargs["flow_func"] = flow_func + errmsg = f"Assertion failed in function: {flow_func.__name__}" + flow_value = nx.maximum_flow_value(G, (0, 0), "t", **kwargs) + assert flow_value == pytest.approx(1.0, abs=1e-7) + + def test_gl1(self): + G = read_graph("gl1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + flow_func = flow_funcs[0] + validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs), + # flow_func) + + @pytest.mark.slow + def test_gw1(self): + G = read_graph("gw1") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + for flow_func in flow_funcs: + validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs), flow_func) + + def test_wlm3(self): + G = read_graph("wlm3") + s = 1 + t = len(G) + R = build_residual_network(G, "capacity") + kwargs = {"residual": R} + + # do one flow_func to save time + 
flow_func = flow_funcs[0] + validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), flow_func) + + # for flow_func in flow_funcs: + # validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs), + # flow_func) + + def test_preflow_push_global_relabel(self): + G = read_graph("gw1") + R = preflow_push(G, 1, len(G), global_relabel_freq=50) + assert R.graph["flow_value"] == 1202018 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_mincost.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_mincost.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1794b152f8d5523f660d3d7bd15f841e5470f9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_mincost.py @@ -0,0 +1,476 @@ +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx + + +class TestMinCostFlow: + def test_simple_digraph(self): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 24 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 24 + assert nx.cost_of_flow(G, H) == 24 + assert H == soln + + def test_negcycle_infcap(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_sum_demands_not_zero(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=4) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_no_flow_satisfying_demands(self): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_transshipment(self): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + 
G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert nx.min_cost_flow_cost(G) == 41 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 41 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 41 + assert nx.cost_of_flow(G, H) == 41 + assert H == soln + + def test_max_flow_min_cost(self): + G = nx.DiGraph() + G.add_edge("s", "a", bandwidth=6) + G.add_edge("s", "c", bandwidth=10, cost=10) + G.add_edge("a", "b", cost=6) + G.add_edge("b", "d", bandwidth=8, cost=7) + G.add_edge("c", "d", cost=10) + G.add_edge("d", "t", bandwidth=5, cost=5) + soln = { + "s": {"a": 5, "c": 0}, + "a": {"b": 5}, + "b": {"d": 5}, + "c": {"d": 0}, + "d": {"t": 5}, + "t": {}, + } + flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost") + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + G.add_edge("t", "s", cost=-100) + flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost") + G.remove_edge("t", "s") + assert flowCost == -410 + assert flow["t"]["s"] == 5 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow, weight="cost") == 90 + + def test_digraph1(self): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. + G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 150 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 150 + assert H == soln + assert nx.cost_of_flow(G, H) == 150 + + def test_digraph2(self): + # Example from ticket #430 from mfrasca. Original source: + # http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11. 
+ G = nx.DiGraph() + G.add_edge("s", 1, capacity=12) + G.add_edge("s", 2, capacity=6) + G.add_edge("s", 3, capacity=14) + G.add_edge(1, 2, capacity=11, weight=4) + G.add_edge(2, 3, capacity=9, weight=6) + G.add_edge(1, 4, capacity=5, weight=5) + G.add_edge(1, 5, capacity=2, weight=12) + G.add_edge(2, 5, capacity=4, weight=4) + G.add_edge(2, 6, capacity=2, weight=6) + G.add_edge(3, 6, capacity=31, weight=3) + G.add_edge(4, 5, capacity=18, weight=4) + G.add_edge(5, 6, capacity=9, weight=5) + G.add_edge(4, "t", capacity=3) + G.add_edge(5, "t", capacity=7) + G.add_edge(6, "t", capacity=22) + flow = nx.max_flow_min_cost(G, "s", "t") + soln = { + 1: {2: 6, 4: 5, 5: 1}, + 2: {3: 6, 5: 4, 6: 2}, + 3: {6: 20}, + 4: {5: 2, "t": 3}, + 5: {6: 0, "t": 7}, + 6: {"t": 22}, + "s": {1: 12, 2: 6, 3: 14}, + "t": {}, + } + assert flow == soln + + G.add_edge("t", "s", weight=-100) + flowCost, flow = nx.capacity_scaling(G) + G.remove_edge("t", "s") + assert flow["t"]["s"] == 32 + assert flowCost == -3007 + del flow["t"]["s"] + assert flow == soln + assert nx.cost_of_flow(G, flow) == 193 + + def test_digraph3(self): + """Combinatorial Optimization: Algorithms and Complexity, + Papadimitriou Steiglitz at page 140 has an example, 7.1, but that + admits multiple solutions, so I alter it a bit. From ticket #430 + by mfrasca.""" + + G = nx.DiGraph() + G.add_edge("s", "a") + G["s"]["a"].update({0: 2, 1: 4}) + G.add_edge("s", "b") + G["s"]["b"].update({0: 2, 1: 1}) + G.add_edge("a", "b") + G["a"]["b"].update({0: 5, 1: 2}) + G.add_edge("a", "t") + G["a"]["t"].update({0: 1, 1: 5}) + G.add_edge("b", "a") + G["b"]["a"].update({0: 1, 1: 3}) + G.add_edge("b", "t") + G["b"]["t"].update({0: 3, 1: 2}) + + "PS.ex.7.1: testing main function" + sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1) + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert 23 == nx.cost_of_flow(G, sol, weight=1) + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + + G.add_edge("t", "s") + G["t"]["s"].update({1: -100}) + flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1) + G.remove_edge("t", "s") + flow = sum(v for v in sol["s"].values()) + assert 4 == flow + assert sol["t"]["s"] == 4 + assert flowCost == -377 + del sol["t"]["s"] + assert sol["s"] == {"a": 2, "b": 2} + assert sol["a"] == {"b": 1, "t": 1} + assert sol["b"] == {"a": 0, "t": 3} + assert sol["t"] == {} + assert nx.cost_of_flow(G, sol, weight=1) == 23 + + def test_zero_capacity_edges(self): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 6 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 6 + assert H == soln + assert nx.cost_of_flow(G, H) == 6 + + def test_digon(self): + """Check if digons are handled properly. 
Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + assert nx.min_cost_flow_cost(G) == 2857140 + assert H == soln + assert nx.min_cost_flow(G) == soln + assert nx.cost_of_flow(G, H) == 2857140 + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 2857140 + assert H == soln + assert nx.cost_of_flow(G, H) == 2857140 + + def test_deadend(self): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G) + + def test_infinite_capacity_neg_digon(self): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + + def test_finite_capacity_neg_digon(self): + """The digon should receive the maximum amount of flow it can handle. + Taken from ticket #749 by @chuongdo.""" + G = nx.DiGraph() + G.add_edge("a", "b", capacity=1, weight=-1) + G.add_edge("b", "a", capacity=1, weight=-1) + min_cost = -2 + assert nx.min_cost_flow_cost(G) == min_cost + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {"a": {"b": 1}, "b": {"a": 1}} + assert nx.cost_of_flow(G, H) == -2 + + def test_multidigraph(self): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + def test_negative_selfloops(self): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G) + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + def test_bone_shaped(self): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + flowCost, H = nx.capacity_scaling(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G.add_node(0, demand=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G.nodes[0]["demand"] = 0 + G.add_node(1, demand=0) + G.add_edge(0, 1, weight=-float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["weight"] = 0 + G.add_edge(0, 0, weight=float("inf")) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + # pytest.raises(nx.NetworkXError, nx.capacity_scaling, G) + G[0][0]["weight"] = 0 + G[0][1]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + G[0][1]["capacity"] = 0 + G[0][0]["capacity"] = -1 + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + # pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G) + + def test_large(self): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + flowCost, flowDict = nx.capacity_scaling(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py new file mode 100644 index 
0000000000000000000000000000000000000000..5b3b5f6dd069e38b8be536cf4037a46da2366cc2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/test_networksimplex.py @@ -0,0 +1,387 @@ +import bz2 +import importlib.resources +import os +import pickle + +import pytest + +import networkx as nx + + +@pytest.fixture +def simple_flow_graph(): + G = nx.DiGraph() + G.add_node("a", demand=0) + G.add_node("b", demand=-5) + G.add_node("c", demand=50000000) + G.add_node("d", demand=-49999995) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + return G + + +@pytest.fixture +def simple_no_flow_graph(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("a", "c", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("c", "d", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + return G + + +def get_flowcost_from_flowdict(G, flowDict): + """Returns flow cost calculated from flow dictionary""" + flowCost = 0 + for u in flowDict: + for v in flowDict[u]: + flowCost += flowDict[u][v] * G[u][v]["weight"] + return flowCost + + +def test_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_neg_infinite_demand_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_node_attributes(G, {"a": {"demand": -inf}}) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_infinite_weight_raise(simple_flow_graph): + G = simple_flow_graph + inf = float("inf") + nx.set_edge_attributes( + G, {("a", "b"): {"weight": inf}, ("b", "d"): {"weight": inf}} + ) + pytest.raises(nx.NetworkXError, nx.network_simplex, G) + + +def test_nonzero_net_demand_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_node_attributes(G, {"b": {"demand": -4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_negative_capacity_raise(simple_flow_graph): + G = simple_flow_graph + nx.set_edge_attributes(G, {("a", "b"): {"weight": 1}, ("b", "d"): {"capacity": -9}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_no_flow_satisfying_demands(simple_no_flow_graph): + G = simple_no_flow_graph + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_sum_demands_not_zero(simple_no_flow_graph): + G = simple_no_flow_graph + nx.set_node_attributes(G, {"t": {"demand": 4}}) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_google_or_tools_example(): + """ + https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3] + supplies = [20, 0, 0, -5, -15] + answer = 150 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_google_or_tools_example2(): + """ + 
https://developers.google.com/optimization/flow/mincostflow + """ + G = nx.DiGraph() + start_nodes = [0, 0, 1, 1, 1, 2, 2, 3, 4, 3] + end_nodes = [1, 2, 2, 3, 4, 3, 4, 4, 2, 5] + capacities = [15, 8, 20, 4, 10, 15, 4, 20, 5, 10] + unit_costs = [4, 4, 2, 2, 6, 1, 3, 2, 3, 4] + supplies = [23, 0, 0, -5, -15, -3] + answer = 183 + + for i in range(len(supplies)): + G.add_node(i, demand=(-1) * supplies[i]) # supplies are negative of demand + + for i in range(len(start_nodes)): + G.add_edge( + start_nodes[i], end_nodes[i], weight=unit_costs[i], capacity=capacities[i] + ) + + flowCost, flowDict = nx.network_simplex(G) + assert flowCost == answer + assert flowCost == get_flowcost_from_flowdict(G, flowDict) + + +def test_large(): + fname = ( + importlib.resources.files("networkx.algorithms.flow.tests") + / "netgen-2.gpickle.bz2" + ) + + with bz2.BZ2File(fname, "rb") as f: + G = pickle.load(f) + flowCost, flowDict = nx.network_simplex(G) + assert 6749969302 == flowCost + assert 6749969302 == nx.cost_of_flow(G, flowDict) + + +def test_simple_digraph(): + G = nx.DiGraph() + G.add_node("a", demand=-5) + G.add_node("d", demand=5) + G.add_edge("a", "b", weight=3, capacity=4) + G.add_edge("a", "c", weight=6, capacity=10) + G.add_edge("b", "d", weight=1, capacity=9) + G.add_edge("c", "d", weight=2, capacity=5) + flowCost, H = nx.network_simplex(G) + soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}} + assert flowCost == 24 + assert nx.min_cost_flow_cost(G) == 24 + assert H == soln + + +def test_negcycle_infcap(): + G = nx.DiGraph() + G.add_node("s", demand=-5) + G.add_node("t", demand=5) + G.add_edge("s", "a", weight=1, capacity=3) + G.add_edge("a", "b", weight=3) + G.add_edge("c", "a", weight=-6) + G.add_edge("b", "d", weight=1) + G.add_edge("d", "c", weight=-2) + G.add_edge("d", "t", weight=1, capacity=3) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_transshipment(): + G = nx.DiGraph() + G.add_node("a", demand=1) + G.add_node("b", demand=-2) + G.add_node("c", demand=-2) + G.add_node("d", demand=3) + G.add_node("e", demand=-4) + G.add_node("f", demand=-4) + G.add_node("g", demand=3) + G.add_node("h", demand=2) + G.add_node("r", demand=3) + G.add_edge("a", "c", weight=3) + G.add_edge("r", "a", weight=2) + G.add_edge("b", "a", weight=9) + G.add_edge("r", "c", weight=0) + G.add_edge("b", "r", weight=-6) + G.add_edge("c", "d", weight=5) + G.add_edge("e", "r", weight=4) + G.add_edge("e", "f", weight=3) + G.add_edge("h", "b", weight=4) + G.add_edge("f", "d", weight=7) + G.add_edge("f", "h", weight=12) + G.add_edge("g", "d", weight=12) + G.add_edge("f", "g", weight=-1) + G.add_edge("h", "g", weight=-10) + flowCost, H = nx.network_simplex(G) + soln = { + "a": {"c": 0}, + "b": {"a": 0, "r": 2}, + "c": {"d": 3}, + "d": {}, + "e": {"r": 3, "f": 1}, + "f": {"d": 0, "g": 3, "h": 2}, + "g": {"d": 0}, + "h": {"b": 0, "g": 0}, + "r": {"a": 1, "c": 1}, + } + assert flowCost == 41 + assert H == soln + + +def test_digraph1(): + # From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied + # Mathematical Programming. Addison-Wesley, 1977. 
+ G = nx.DiGraph() + G.add_node(1, demand=-20) + G.add_node(4, demand=5) + G.add_node(5, demand=15) + G.add_edges_from( + [ + (1, 2, {"capacity": 15, "weight": 4}), + (1, 3, {"capacity": 8, "weight": 4}), + (2, 3, {"weight": 2}), + (2, 4, {"capacity": 4, "weight": 2}), + (2, 5, {"capacity": 10, "weight": 6}), + (3, 4, {"capacity": 15, "weight": 1}), + (3, 5, {"capacity": 5, "weight": 3}), + (4, 5, {"weight": 2}), + (5, 3, {"capacity": 4, "weight": 1}), + ] + ) + flowCost, H = nx.network_simplex(G) + soln = { + 1: {2: 12, 3: 8}, + 2: {3: 8, 4: 4, 5: 0}, + 3: {4: 11, 5: 5}, + 4: {5: 10}, + 5: {3: 0}, + } + assert flowCost == 150 + assert nx.min_cost_flow_cost(G) == 150 + assert H == soln + + +def test_zero_capacity_edges(): + """Address issue raised in ticket #617 by arv.""" + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2, {"capacity": 1, "weight": 1}), + (1, 5, {"capacity": 1, "weight": 1}), + (2, 3, {"capacity": 0, "weight": 1}), + (2, 5, {"capacity": 1, "weight": 1}), + (5, 3, {"capacity": 2, "weight": 1}), + (5, 4, {"capacity": 0, "weight": 1}), + (3, 4, {"capacity": 2, "weight": 1}), + ] + ) + G.nodes[1]["demand"] = -1 + G.nodes[2]["demand"] = -1 + G.nodes[4]["demand"] = 2 + + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}} + assert flowCost == 6 + assert nx.min_cost_flow_cost(G) == 6 + assert H == soln + + +def test_digon(): + """Check if digons are handled properly. Taken from ticket + #618 by arv.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"capacity": 3, "weight": 600000}), + (2, 1, {"capacity": 2, "weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + flowCost, H = nx.network_simplex(G) + soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}} + assert flowCost == 2857140 + + +def test_deadend(): + """Check if one-node cycles are handled properly. Taken from ticket + #2906 from @sshraven.""" + G = nx.DiGraph() + + G.add_nodes_from(range(5), demand=0) + G.nodes[4]["demand"] = -13 + G.nodes[3]["demand"] = 13 + + G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1) + pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G) + + +def test_infinite_capacity_neg_digon(): + """An infinite capacity negative cost digon results in an unbounded + instance.""" + nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})] + edges = [ + (1, 2, {"weight": -600}), + (2, 1, {"weight": 0}), + (2, 3, {"capacity": 5, "weight": 714285}), + (3, 2, {"capacity": 2, "weight": 0}), + ] + G = nx.DiGraph(edges) + G.add_nodes_from(nodes) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + +def test_multidigraph(): + """Multidigraphs are acceptable.""" + G = nx.MultiDiGraph() + G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity") + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}} + + +def test_negative_selfloops(): + """Negative selfloops should cause an exception if uncapacitated and + always be saturated otherwise. 
+ """ + G = nx.DiGraph() + G.add_edge(1, 1, weight=-1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: 2}} + + G = nx.MultiDiGraph() + G.add_edge(1, 1, "x", weight=-1) + G.add_edge(1, 1, "y", weight=1) + pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G) + + G[1][1]["x"]["capacity"] = 2 + flowCost, H = nx.network_simplex(G) + assert flowCost == -2 + assert H == {1: {1: {"x": 2, "y": 0}}} + + +def test_bone_shaped(): + # From #1283 + G = nx.DiGraph() + G.add_node(0, demand=-4) + G.add_node(1, demand=2) + G.add_node(2, demand=2) + G.add_node(3, demand=4) + G.add_node(4, demand=-2) + G.add_node(5, demand=-2) + G.add_edge(0, 1, capacity=4) + G.add_edge(0, 2, capacity=4) + G.add_edge(4, 3, capacity=4) + G.add_edge(5, 3, capacity=4) + G.add_edge(0, 3, capacity=0) + flowCost, H = nx.network_simplex(G) + assert flowCost == 0 + assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}} + + +def test_graphs_type_exceptions(): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.MultiGraph() + pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G) + G = nx.DiGraph() + pytest.raises(nx.NetworkXError, nx.network_simplex, G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..c95da5b280f27411afeeb215cac8a99219e89078 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/tests/wlm3.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccacba1e0fbfb30bec361f0e48ec88c999d3474fcda5ddf93bd444ace17cfa0e +size 88132 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/flow/utils.py b/MLPY/Lib/site-packages/networkx/algorithms/flow/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..349be4b302a089c0827d100d9e94a61801d46b54 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/flow/utils.py @@ -0,0 +1,188 @@ +""" +Utility classes and functions for network flow algorithms. +""" + +from collections import deque + +import networkx as nx + +__all__ = [ + "CurrentEdge", + "Level", + "GlobalRelabelThreshold", + "build_residual_network", + "detect_unboundedness", + "build_flow_dict", +] + + +class CurrentEdge: + """Mechanism for iterating over out-edges incident to a node in a circular + manner. StopIteration exception is raised when wraparound occurs. + """ + + __slots__ = ("_edges", "_it", "_curr") + + def __init__(self, edges): + self._edges = edges + if self._edges: + self._rewind() + + def get(self): + return self._curr + + def move_to_next(self): + try: + self._curr = next(self._it) + except StopIteration: + self._rewind() + raise + + def _rewind(self): + self._it = iter(self._edges.items()) + self._curr = next(self._it) + + +class Level: + """Active and inactive nodes in a level.""" + + __slots__ = ("active", "inactive") + + def __init__(self): + self.active = set() + self.inactive = set() + + +class GlobalRelabelThreshold: + """Measurement of work before the global relabeling heuristic should be + applied. 
+ """ + + def __init__(self, n, m, freq): + self._threshold = (n + m) / freq if freq else float("inf") + self._work = 0 + + def add_work(self, work): + self._work += work + + def is_reached(self): + return self._work >= self._threshold + + def clear_work(self): + self._work = 0 + + +@nx._dispatch(edge_attrs={"capacity": float("inf")}) +def build_residual_network(G, capacity): + """Build a residual network and initialize a zero flow. + + The residual network :samp:`R` from an input graph :samp:`G` has the + same nodes as :samp:`G`. :samp:`R` is a DiGraph that contains a pair + of edges :samp:`(u, v)` and :samp:`(v, u)` iff :samp:`(u, v)` is not a + self-loop, and at least one of :samp:`(u, v)` and :samp:`(v, u)` exists + in :samp:`G`. + + For each edge :samp:`(u, v)` in :samp:`R`, :samp:`R[u][v]['capacity']` + is equal to the capacity of :samp:`(u, v)` in :samp:`G` if it exists + in :samp:`G` or zero otherwise. If the capacity is infinite, + :samp:`R[u][v]['capacity']` will have a high arbitrary finite value + that does not affect the solution of the problem. This value is stored in + :samp:`R.graph['inf']`. For each edge :samp:`(u, v)` in :samp:`R`, + :samp:`R[u][v]['flow']` represents the flow function of :samp:`(u, v)` and + satisfies :samp:`R[u][v]['flow'] == -R[v][u]['flow']`. + + The flow value, defined as the total flow into :samp:`t`, the sink, is + stored in :samp:`R.graph['flow_value']`. If :samp:`cutoff` is not + specified, reachability to :samp:`t` using only edges :samp:`(u, v)` such + that :samp:`R[u][v]['flow'] < R[u][v]['capacity']` induces a minimum + :samp:`s`-:samp:`t` cut. + + """ + if G.is_multigraph(): + raise nx.NetworkXError("MultiGraph and MultiDiGraph not supported (yet).") + + R = nx.DiGraph() + R.add_nodes_from(G) + + inf = float("inf") + # Extract edges with positive capacities. Self loops excluded. + edge_list = [ + (u, v, attr) + for u, v, attr in G.edges(data=True) + if u != v and attr.get(capacity, inf) > 0 + ] + # Simulate infinity with three times the sum of the finite edge capacities + # or any positive value if the sum is zero. This allows the + # infinite-capacity edges to be distinguished for unboundedness detection + # and directly participate in residual capacity calculation. If the maximum + # flow is finite, these edges cannot appear in the minimum cut and thus + # guarantee correctness. Since the residual capacity of an + # infinite-capacity edge is always at least 2/3 of inf, while that of an + # finite-capacity edge is at most 1/3 of inf, if an operation moves more + # than 1/3 of inf units of flow to t, there must be an infinite-capacity + # s-t path in G. + inf = ( + 3 + * sum( + attr[capacity] + for u, v, attr in edge_list + if capacity in attr and attr[capacity] != inf + ) + or 1 + ) + if G.is_directed(): + for u, v, attr in edge_list: + r = min(attr.get(capacity, inf), inf) + if not R.has_edge(u, v): + # Both (u, v) and (v, u) must be present in the residual + # network. + R.add_edge(u, v, capacity=r) + R.add_edge(v, u, capacity=0) + else: + # The edge (u, v) was added when (v, u) was visited. + R[u][v]["capacity"] = r + else: + for u, v, attr in edge_list: + # Add a pair of edges with equal residual capacities. + r = min(attr.get(capacity, inf), inf) + R.add_edge(u, v, capacity=r) + R.add_edge(v, u, capacity=r) + + # Record the value simulating infinity. 
+ R.graph["inf"] = inf + + return R + + +@nx._dispatch( + graphs="R", + preserve_edge_attrs={"R": {"capacity": float("inf")}}, + preserve_graph_attrs=True, +) +def detect_unboundedness(R, s, t): + """Detect an infinite-capacity s-t path in R.""" + q = deque([s]) + seen = {s} + inf = R.graph["inf"] + while q: + u = q.popleft() + for v, attr in R[u].items(): + if attr["capacity"] == inf and v not in seen: + if v == t: + raise nx.NetworkXUnbounded( + "Infinite capacity path, flow unbounded above." + ) + seen.add(v) + q.append(v) + + +@nx._dispatch(graphs={"G": 0, "R": 1}, preserve_edge_attrs={"R": {"flow": None}}) +def build_flow_dict(G, R): + """Build a flow dictionary from a residual network.""" + flow_dict = {} + for u in G: + flow_dict[u] = {v: 0 for v in G[u]} + flow_dict[u].update( + (v, attr["flow"]) for v, attr in R[u].items() if attr["flow"] > 0 + ) + return flow_dict diff --git a/MLPY/Lib/site-packages/networkx/algorithms/graph_hashing.py b/MLPY/Lib/site-packages/networkx/algorithms/graph_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..d85a44a3604a70b9bac7f214e52d2e2ae12bc79a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/graph_hashing.py @@ -0,0 +1,313 @@ +""" +Functions for hashing graphs to strings. +Isomorphic graphs should be assigned identical hashes. +For now, only Weisfeiler-Lehman hashing is implemented. +""" + +from collections import Counter, defaultdict +from hashlib import blake2b + +import networkx as nx + +__all__ = ["weisfeiler_lehman_graph_hash", "weisfeiler_lehman_subgraph_hashes"] + + +def _hash_label(label, digest_size): + return blake2b(label.encode("ascii"), digest_size=digest_size).hexdigest() + + +def _init_node_labels(G, edge_attr, node_attr): + if node_attr: + return {u: str(dd[node_attr]) for u, dd in G.nodes(data=True)} + elif edge_attr: + return {u: "" for u in G} + else: + return {u: str(deg) for u, deg in G.degree()} + + +def _neighborhood_aggregate(G, node, node_labels, edge_attr=None): + """ + Compute new labels for given node by aggregating + the labels of each node's neighbors. + """ + label_list = [] + for nbr in G.neighbors(node): + prefix = "" if edge_attr is None else str(G[node][nbr][edge_attr]) + label_list.append(prefix + node_labels[nbr]) + return node_labels[node] + "".join(sorted(label_list)) + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def weisfeiler_lehman_graph_hash( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """Return Weisfeiler Lehman (WL) graph hash. + + The function iteratively aggregates and hashes neighbourhoods of each node. + After each node's neighbors are hashed to obtain updated node labels, + a hashed histogram of resulting labels is returned as the final hash. + + Hashes are identical for isomorphic graphs and strong guarantees that + non-isomorphic graphs will get different hashes. See [1]_ for details. + + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. 
+ iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. + digest_size: int, default=16 + Size (in bits) of blake2b hash digest to use for hashing node labels. + + Returns + ------- + h : string + Hexadecimal string corresponding to hash of the input graph. + + Examples + -------- + Two graphs with edge attributes that are isomorphic, except for + differences in the edge labels. + + >>> G1 = nx.Graph() + >>> G1.add_edges_from( + ... [ + ... (1, 2, {"label": "A"}), + ... (2, 3, {"label": "A"}), + ... (3, 1, {"label": "A"}), + ... (1, 4, {"label": "B"}), + ... ] + ... ) + >>> G2 = nx.Graph() + >>> G2.add_edges_from( + ... [ + ... (5, 6, {"label": "B"}), + ... (6, 7, {"label": "A"}), + ... (7, 5, {"label": "A"}), + ... (7, 8, {"label": "A"}), + ... ] + ... ) + + Omitting the `edge_attr` option, results in identical hashes. + + >>> nx.weisfeiler_lehman_graph_hash(G1) + '7bc4dde9a09d0b94c5097b219891d81a' + >>> nx.weisfeiler_lehman_graph_hash(G2) + '7bc4dde9a09d0b94c5097b219891d81a' + + With edge labels, the graphs are no longer assigned + the same hash digest. + + >>> nx.weisfeiler_lehman_graph_hash(G1, edge_attr="label") + 'c653d85538bcf041d88c011f4f905f10' + >>> nx.weisfeiler_lehman_graph_hash(G2, edge_attr="label") + '3dcd84af1ca855d0eff3c978d88e7ec7' + + Notes + ----- + To return the WL hashes of each subgraph of a graph, use + `weisfeiler_lehman_subgraph_hashes` + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + + See also + -------- + weisfeiler_lehman_subgraph_hashes + """ + + def weisfeiler_lehman_step(G, labels, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node. + """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + new_labels[node] = _hash_label(label, digest_size) + return new_labels + + # set initial node labels + node_labels = _init_node_labels(G, edge_attr, node_attr) + + subgraph_hash_counts = [] + for _ in range(iterations): + node_labels = weisfeiler_lehman_step(G, node_labels, edge_attr=edge_attr) + counter = Counter(node_labels.values()) + # sort the counter, extend total counts + subgraph_hash_counts.extend(sorted(counter.items(), key=lambda x: x[0])) + + # hash the final counter + return _hash_label(str(tuple(subgraph_hash_counts)), digest_size) + + +@nx._dispatch(edge_attrs={"edge_attr": None}, node_attrs="node_attr") +def weisfeiler_lehman_subgraph_hashes( + G, edge_attr=None, node_attr=None, iterations=3, digest_size=16 +): + """ + Return a dictionary of subgraph hashes by node. + + Dictionary keys are nodes in `G`, and values are a list of hashes. + Each hash corresponds to a subgraph rooted at a given node u in `G`. + Lists of subgraph hashes are sorted in increasing order of depth from + their root node, with the hash at index i corresponding to a subgraph + of nodes at most i edges distance from u. 
Thus, each list will contain + ``iterations + 1`` elements - a hash for a subgraph at each depth, and + additionally a hash of the initial node label (or equivalently a + subgraph of depth 0) + + The function iteratively aggregates and hashes neighbourhoods of each node. + This is achieved for each step by replacing for each node its label from + the previous iteration with its hashed 1-hop neighborhood aggregate. + The new node label is then appended to a list of node labels for each + node. + + To aggregate neighborhoods at each step for a node $n$, all labels of + nodes adjacent to $n$ are concatenated. If the `edge_attr` parameter is set, + labels for each neighboring node are prefixed with the value of this attribute + along the connecting edge from this neighbor to node $n$. The resulting string + is then hashed to compress this information into a fixed digest size. + + Thus, at the $i$-th iteration, nodes within $i$ hops influence any given + hashed node label. We can therefore say that at depth $i$ for node $n$ + we have a hash for a subgraph induced by the $2i$-hop neighborhood of $n$. + + The output can be used to to create general Weisfeiler-Lehman graph kernels, + or generate features for graphs or nodes - for example to generate 'words' in + a graph as seen in the 'graph2vec' algorithm. + See [1]_ & [2]_ respectively for details. + + Hashes are identical for isomorphic subgraphs and there exist strong + guarantees that non-isomorphic graphs will get different hashes. + See [1]_ for details. + + If no node or edge attributes are provided, the degree of each node + is used as its initial label. + Otherwise, node and/or edge labels are used to compute the hash. + + Parameters + ---------- + G: graph + The graph to be hashed. + Can have node and/or edge attributes. Can also have no attributes. + edge_attr: string, default=None + The key in edge attribute dictionary to be used for hashing. + If None, edge labels are ignored. + node_attr: string, default=None + The key in node attribute dictionary to be used for hashing. + If None, and no edge_attr given, use the degrees of the nodes as labels. + iterations: int, default=3 + Number of neighbor aggregations to perform. + Should be larger for larger graphs. + digest_size: int, default=16 + Size (in bits) of blake2b hash digest to use for hashing node labels. + The default size is 16 bits + + Returns + ------- + node_subgraph_hashes : dict + A dictionary with each key given by a node in G, and each value given + by the subgraph hashes in order of depth from the key node. + + Examples + -------- + Finding similar nodes in different graphs: + + >>> G1 = nx.Graph() + >>> G1.add_edges_from([ + ... (1, 2), (2, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 7) + ... ]) + >>> G2 = nx.Graph() + >>> G2.add_edges_from([ + ... (1, 3), (2, 3), (1, 6), (1, 5), (4, 6) + ... ]) + >>> g1_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1, iterations=3, digest_size=8) + >>> g2_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2, iterations=3, digest_size=8) + + Even though G1 and G2 are not isomorphic (they have different numbers of edges), + the hash sequence of depth 3 for node 1 in G1 and node 5 in G2 are similar: + + >>> g1_hashes[1] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '57872a7d2059c1c0'] + >>> g2_hashes[5] + ['a93b64973cfc8897', 'db1b43ae35a1878f', '1716d2a4012fa4bc'] + + The first 2 WL subgraph hashes match. 
From this we can conclude that it's very + likely the neighborhood of 4 hops around these nodes are isomorphic: each + iteration aggregates 1-hop neighbourhoods meaning hashes at depth $n$ are influenced + by every node within $2n$ hops. + + However the neighborhood of 6 hops is no longer isomorphic since their 3rd hash does + not match. + + These nodes may be candidates to be classified together since their local topology + is similar. + + Notes + ----- + To hash the full graph when subgraph hashes are not needed, use + `weisfeiler_lehman_graph_hash` for efficiency. + + Similarity between hashes does not imply similarity between graphs. + + References + ---------- + .. [1] Shervashidze, Nino, Pascal Schweitzer, Erik Jan Van Leeuwen, + Kurt Mehlhorn, and Karsten M. Borgwardt. Weisfeiler Lehman + Graph Kernels. Journal of Machine Learning Research. 2011. + http://www.jmlr.org/papers/volume12/shervashidze11a/shervashidze11a.pdf + .. [2] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan, + Lihui Chen, Yang Liu and Shantanu Jaiswa. graph2vec: Learning + Distributed Representations of Graphs. arXiv. 2017 + https://arxiv.org/pdf/1707.05005.pdf + + See also + -------- + weisfeiler_lehman_graph_hash + """ + + def weisfeiler_lehman_step(G, labels, node_subgraph_hashes, edge_attr=None): + """ + Apply neighborhood aggregation to each node + in the graph. + Computes a dictionary with labels for each node. + Appends the new hashed label to the dictionary of subgraph hashes + originating from and indexed by each node in G + """ + new_labels = {} + for node in G.nodes(): + label = _neighborhood_aggregate(G, node, labels, edge_attr=edge_attr) + hashed_label = _hash_label(label, digest_size) + new_labels[node] = hashed_label + node_subgraph_hashes[node].append(hashed_label) + return new_labels + + node_labels = _init_node_labels(G, edge_attr, node_attr) + + node_subgraph_hashes = defaultdict(list) + for _ in range(iterations): + node_labels = weisfeiler_lehman_step( + G, node_labels, node_subgraph_hashes, edge_attr + ) + + return dict(node_subgraph_hashes) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/graphical.py b/MLPY/Lib/site-packages/networkx/algorithms/graphical.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1664427fda27b52930e6e8d451000ba9d5912b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/graphical.py @@ -0,0 +1,483 @@ +"""Test sequences for graphiness. +""" +import heapq + +import networkx as nx + +__all__ = [ + "is_graphical", + "is_multigraphical", + "is_pseudographical", + "is_digraphical", + "is_valid_degree_sequence_erdos_gallai", + "is_valid_degree_sequence_havel_hakimi", +] + + +@nx._dispatch(graphs=None) +def is_graphical(sequence, method="eg"): + """Returns True if sequence is a valid degree sequence. + + A degree sequence is valid if some graph can realize it. + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + method : "eg" | "hh" (default: 'eg') + The method used to validate the degree sequence. + "eg" corresponds to the Erdős-Gallai algorithm + [EG1960]_, [choudum1986]_, and + "hh" to the Havel-Hakimi algorithm + [havel1955]_, [hakimi1962]_, [CL1996]_. + + Returns + ------- + valid : bool + True if the sequence is a valid degree sequence and False if not. 
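+        Both methods always agree for a given sequence; they differ only in
+        how the check is performed (see the individual validators below).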
+ + Examples + -------- + >>> G = nx.path_graph(4) + >>> sequence = (d for n, d in G.degree()) + >>> nx.is_graphical(sequence) + True + + To test a non-graphical sequence: + >>> sequence_list = [d for n, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_graphical(sequence_list) + False + + References + ---------- + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. + .. [choudum1986] S.A. Choudum. "A simple proof of the Erdős-Gallai theorem on + graph sequences." Bulletin of the Australian Mathematical Society, 33, + pp 67-70, 1986. https://doi.org/10.1017/S0004972700002872 + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. + """ + if method == "eg": + valid = is_valid_degree_sequence_erdos_gallai(list(sequence)) + elif method == "hh": + valid = is_valid_degree_sequence_havel_hakimi(list(sequence)) + else: + msg = "`method` must be 'eg' or 'hh'" + raise nx.NetworkXException(msg) + return valid + + +def _basic_graphical_tests(deg_sequence): + # Sort and perform some simple tests on the sequence + deg_sequence = nx.utils.make_list_of_ints(deg_sequence) + p = len(deg_sequence) + num_degs = [0] * p + dmax, dmin, dsum, n = 0, p, 0, 0 + for d in deg_sequence: + # Reject if degree is negative or larger than the sequence length + if d < 0 or d >= p: + raise nx.NetworkXUnfeasible + # Process only the non-zero integers + elif d > 0: + dmax, dmin, dsum, n = max(dmax, d), min(dmin, d), dsum + d, n + 1 + num_degs[d] += 1 + # Reject sequence if it has odd sum or is oversaturated + if dsum % 2 or dsum > n * (n - 1): + raise nx.NetworkXUnfeasible + return dmax, dmin, dsum, n, num_degs + + +@nx._dispatch(graphs=None) +def is_valid_degree_sequence_havel_hakimi(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation proceeds using the Havel-Hakimi theorem + [havel1955]_, [hakimi1962]_, [CL1996]_. + Worst-case run time is $O(s)$ where $s$ is the sum of the sequence. + + Parameters + ---------- + deg_sequence : list + A list of integers where each element specifies the degree of a node + in a graph. + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence) + True + + To test a non-valid sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_valid_degree_sequence_havel_hakimi(sequence_list) + False + + Notes + ----- + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [1]_. + + References + ---------- + .. [1] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [havel1955] Havel, V. "A Remark on the Existence of Finite Graphs" + Casopis Pest. Mat. 80, 477-480, 1955. + .. [hakimi1962] Hakimi, S. "On the Realizability of a Set of Integers as + Degrees of the Vertices of a Graph." SIAM J. Appl. Math. 10, 496-506, 1962. + .. [CL1996] G. Chartrand and L. 
Lesniak, "Graphs and Digraphs", + Chapman and Hall/CRC, 1996. + """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + modstubs = [0] * (dmax + 1) + # Successively reduce degree sequence by removing the maximum degree + while n > 0: + # Retrieve the maximum degree in the sequence + while num_degs[dmax] == 0: + dmax -= 1 + # If there are not enough stubs to connect to, then the sequence is + # not graphical + if dmax > n - 1: + return False + + # Remove largest stub in list + num_degs[dmax], n = num_degs[dmax] - 1, n - 1 + # Reduce the next dmax largest stubs + mslen = 0 + k = dmax + for i in range(dmax): + while num_degs[k] == 0: + k -= 1 + num_degs[k], n = num_degs[k] - 1, n - 1 + if k > 1: + modstubs[mslen] = k - 1 + mslen += 1 + # Add back to the list any non-zero stubs that were removed + for i in range(mslen): + stub = modstubs[i] + num_degs[stub], n = num_degs[stub] + 1, n + 1 + return True + + +@nx._dispatch(graphs=None) +def is_valid_degree_sequence_erdos_gallai(deg_sequence): + r"""Returns True if deg_sequence can be realized by a simple graph. + + The validation is done using the Erdős-Gallai theorem [EG1960]_. + + Parameters + ---------- + deg_sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is graphical and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_valid_degree_sequence_erdos_gallai(sequence) + True + + To test a non-valid sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_valid_degree_sequence_erdos_gallai(sequence_list) + False + + Notes + ----- + + This implementation uses an equivalent form of the Erdős-Gallai criterion. + Worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + Specifically, a sequence d is graphical if and only if the + sum of the sequence is even and for all strong indices k in the sequence, + + .. math:: + + \sum_{i=1}^{k} d_i \leq k(k-1) + \sum_{j=k+1}^{n} \min(d_i,k) + = k(n-1) - ( k \sum_{j=0}^{k-1} n_j - \sum_{j=0}^{k-1} j n_j ) + + A strong index k is any index where d_k >= k and the value n_j is the + number of occurrences of j in d. The maximal strong index is called the + Durfee index. + + This particular rearrangement comes from the proof of Theorem 3 in [2]_. + + The ZZ condition says that for the sequence d if + + .. math:: + |d| >= \frac{(\max(d) + \min(d) + 1)^2}{4*\min(d)} + + then d is graphical. This was shown in Theorem 6 in [2]_. + + References + ---------- + .. [1] A. Tripathi and S. Vijay. "A note on a theorem of Erdős & Gallai", + Discrete Mathematics, 265, pp. 417-420 (2003). + .. [2] I.E. Zverovich and V.E. Zverovich. "Contributions to the theory + of graphic sequences", Discrete Mathematics, 105, pp. 292-303 (1992). + .. [EG1960] Erdős and Gallai, Mat. Lapok 11 264, 1960. 
+ """ + try: + dmax, dmin, dsum, n, num_degs = _basic_graphical_tests(deg_sequence) + except nx.NetworkXUnfeasible: + return False + # Accept if sequence has no non-zero degrees or passes the ZZ condition + if n == 0 or 4 * dmin * n >= (dmax + dmin + 1) * (dmax + dmin + 1): + return True + + # Perform the EG checks using the reformulation of Zverovich and Zverovich + k, sum_deg, sum_nj, sum_jnj = 0, 0, 0, 0 + for dk in range(dmax, dmin - 1, -1): + if dk < k + 1: # Check if already past Durfee index + return True + if num_degs[dk] > 0: + run_size = num_degs[dk] # Process a run of identical-valued degrees + if dk < k + run_size: # Check if end of run is past Durfee index + run_size = dk - k # Adjust back to Durfee index + sum_deg += run_size * dk + for v in range(run_size): + sum_nj += num_degs[k + v] + sum_jnj += (k + v) * num_degs[k + v] + k += run_size + if sum_deg > k * (n - 1) - k * sum_nj + sum_jnj: + return False + return True + + +@nx._dispatch(graphs=None) +def is_multigraphical(sequence): + """Returns True if some multigraph can realize the sequence. + + Parameters + ---------- + sequence : list + A list of integers + + Returns + ------- + valid : bool + True if deg_sequence is a multigraphic degree sequence and False if not. + + Examples + -------- + >>> G = nx.MultiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_multigraphical(sequence) + True + + To test a non-multigraphical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_multigraphical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where $n$ is the length of the sequence. + + References + ---------- + .. [1] S. L. Hakimi. "On the realizability of a set of integers as + degrees of the vertices of a linear graph", J. SIAM, 10, pp. 496-506 + (1962). + """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + dsum, dmax = 0, 0 + for d in deg_sequence: + if d < 0: + return False + dsum, dmax = dsum + d, max(dmax, d) + if dsum % 2 or dsum < 2 * dmax: + return False + return True + + +@nx._dispatch(graphs=None) +def is_pseudographical(sequence): + """Returns True if some pseudograph can realize the sequence. + + Every nonnegative integer sequence with an even sum is pseudographical + (see [1]_). + + Parameters + ---------- + sequence : list or iterable container + A sequence of integer node degrees + + Returns + ------- + valid : bool + True if the sequence is a pseudographic degree sequence and False if not. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> sequence = (d for _, d in G.degree()) + >>> nx.is_pseudographical(sequence) + True + + To test a non-pseudographical sequence: + >>> sequence_list = [d for _, d in G.degree()] + >>> sequence_list[-1] += 1 + >>> nx.is_pseudographical(sequence_list) + False + + Notes + ----- + The worst-case run time is $O(n)$ where n is the length of the sequence. + + References + ---------- + .. [1] F. Boesch and F. Harary. "Line removal algorithms for graphs + and their degree lists", IEEE Trans. Circuits and Systems, CAS-23(12), + pp. 778-782 (1976). 
+ """ + try: + deg_sequence = nx.utils.make_list_of_ints(sequence) + except nx.NetworkXError: + return False + return sum(deg_sequence) % 2 == 0 and min(deg_sequence) >= 0 + + +@nx._dispatch(graphs=None) +def is_digraphical(in_sequence, out_sequence): + r"""Returns True if some directed graph can realize the in- and out-degree + sequences. + + Parameters + ---------- + in_sequence : list or iterable container + A sequence of integer node in-degrees + + out_sequence : list or iterable container + A sequence of integer node out-degrees + + Returns + ------- + valid : bool + True if in and out-sequences are digraphic False if not. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (4, 2), (5, 1), (5, 4)]) + >>> in_seq = (d for n, d in G.in_degree()) + >>> out_seq = (d for n, d in G.out_degree()) + >>> nx.is_digraphical(in_seq, out_seq) + True + + To test a non-digraphical scenario: + >>> in_seq_list = [d for n, d in G.in_degree()] + >>> in_seq_list[-1] += 1 + >>> nx.is_digraphical(in_seq_list, out_seq) + False + + Notes + ----- + This algorithm is from Kleitman and Wang [1]_. + The worst case runtime is $O(s \times \log n)$ where $s$ and $n$ are the + sum and length of the sequences respectively. + + References + ---------- + .. [1] D.J. Kleitman and D.L. Wang + Algorithms for Constructing Graphs and Digraphs with Given Valences + and Factors, Discrete Mathematics, 6(1), pp. 79-88 (1973) + """ + try: + in_deg_sequence = nx.utils.make_list_of_ints(in_sequence) + out_deg_sequence = nx.utils.make_list_of_ints(out_sequence) + except nx.NetworkXError: + return False + # Process the sequences and form two heaps to store degree pairs with + # either zero or non-zero out degrees + sumin, sumout, nin, nout = 0, 0, len(in_deg_sequence), len(out_deg_sequence) + maxn = max(nin, nout) + maxin = 0 + if maxn == 0: + return True + stubheap, zeroheap = [], [] + for n in range(maxn): + in_deg, out_deg = 0, 0 + if n < nout: + out_deg = out_deg_sequence[n] + if n < nin: + in_deg = in_deg_sequence[n] + if in_deg < 0 or out_deg < 0: + return False + sumin, sumout, maxin = sumin + in_deg, sumout + out_deg, max(maxin, in_deg) + if in_deg > 0: + stubheap.append((-1 * out_deg, -1 * in_deg)) + elif out_deg > 0: + zeroheap.append(-1 * out_deg) + if sumin != sumout: + return False + heapq.heapify(stubheap) + heapq.heapify(zeroheap) + + modstubs = [(0, 0)] * (maxin + 1) + # Successively reduce degree sequence by removing the maximum out degree + while stubheap: + # Take the first value in the sequence with non-zero in degree + (freeout, freein) = heapq.heappop(stubheap) + freein *= -1 + if freein > len(stubheap) + len(zeroheap): + return False + + # Attach out stubs to the nodes with the most in stubs + mslen = 0 + for i in range(freein): + if zeroheap and (not stubheap or stubheap[0][0] > zeroheap[0]): + stubout = heapq.heappop(zeroheap) + stubin = 0 + else: + (stubout, stubin) = heapq.heappop(stubheap) + if stubout == 0: + return False + # Check if target is now totally connected + if stubout + 1 < 0 or stubin < 0: + modstubs[mslen] = (stubout + 1, stubin) + mslen += 1 + + # Add back the nodes to the heap that still have available stubs + for i in range(mslen): + stub = modstubs[i] + if stub[1] < 0: + heapq.heappush(stubheap, stub) + else: + heapq.heappush(zeroheap, stub[0]) + if freeout < 0: + heapq.heappush(zeroheap, freeout) + return True diff --git a/MLPY/Lib/site-packages/networkx/algorithms/hierarchy.py b/MLPY/Lib/site-packages/networkx/algorithms/hierarchy.py new file mode 100644 index 
0000000000000000000000000000000000000000..6dc63a741b5cfd0aa6b58dbd68caffca7ba1135a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/hierarchy.py @@ -0,0 +1,48 @@ +""" +Flow Hierarchy. +""" +import networkx as nx + +__all__ = ["flow_hierarchy"] + + +@nx._dispatch(edge_attrs="weight") +def flow_hierarchy(G, weight=None): + """Returns the flow hierarchy of a directed network. + + Flow hierarchy is defined as the fraction of edges not participating + in cycles in a directed graph [1]_. + + Parameters + ---------- + G : DiGraph or MultiDiGraph + A directed graph + + weight : string, optional (default=None) + Attribute to use for edge weights. If None the weight defaults to 1. + + Returns + ------- + h : float + Flow hierarchy value + + Notes + ----- + The algorithm described in [1]_ computes the flow hierarchy through + exponentiation of the adjacency matrix. This function implements an + alternative approach that finds strongly connected components. + An edge is in a cycle if and only if it is in a strongly connected + component, which can be found in $O(m)$ time using Tarjan's algorithm. + + References + ---------- + .. [1] Luo, J.; Magee, C.L. (2011), + Detecting evolving patterns of self-organizing networks by flow + hierarchy measurement, Complexity, Volume 16 Issue 6 53-61. + DOI: 10.1002/cplx.20368 + http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf + """ + if not G.is_directed(): + raise nx.NetworkXError("G must be a digraph in flow_hierarchy") + scc = nx.strongly_connected_components(G) + return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/hybrid.py b/MLPY/Lib/site-packages/networkx/algorithms/hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..347f5c2f199bac838f5196d5ed544af880f8b06a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/hybrid.py @@ -0,0 +1,195 @@ +""" +Provides functions for finding and testing for locally `(k, l)`-connected +graphs. + +""" +import copy + +import networkx as nx + +__all__ = ["kl_connected_subgraph", "is_kl_connected"] + + +@nx._dispatch +def kl_connected_subgraph(G, k, l, low_memory=False, same_as_graph=False): + """Returns the maximum locally `(k, l)`-connected subgraph of `G`. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph in which to find a maximum locally `(k, l)`-connected + subgraph. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + same_as_graph : bool + If True then return a tuple of the form `(H, is_same)`, + where `H` is the maximum locally `(k, l)`-connected subgraph and + `is_same` is a Boolean representing whether `G` is locally `(k, + l)`-connected (and hence, whether `H` is simply a copy of the input + graph `G`). + + Returns + ------- + NetworkX graph or two-tuple + If `same_as_graph` is True, then this function returns a + two-tuple as described above. Otherwise, it returns only the maximum + locally `(k, l)`-connected subgraph. 
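+
+    Examples
+    --------
+    A small illustrative check: in a 4-cycle every edge lies on two
+    edge-disjoint paths (of lengths 1 and 3), so for ``k=4`` and ``l=2``
+    no edge is removed:
+
+    >>> G = nx.cycle_graph(4)
+    >>> H = nx.kl_connected_subgraph(G, 4, 2)
+    >>> sorted(H.edges()) == sorted(G.edges())
+    True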
+ + See also + -------- + is_kl_connected + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + H = copy.deepcopy(G) # subgraph we construct by removing from G + + graphOK = True + deleted_some = True # hack to start off the while loop + while deleted_some: + deleted_some = False + # We use `for edge in list(H.edges()):` instead of + # `for edge in H.edges():` because we edit the graph `H` in + # the loop. Hence using an iterator will result in + # `RuntimeError: dictionary changed size during iteration` + for edge in list(H.edges()): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + for w in verts.copy(): + verts.update(G[w]) + G2 = G.subgraph(verts).copy() + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if prev != w: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? + except nx.NetworkXNoPath: + path = False + # No Other Paths + if accept == 0: + H.remove_edge(u, v) + deleted_some = True + if graphOK: + graphOK = False + # We looked through all edges and removed none of them. + # So, H is the maximal (k,l)-connected subgraph of G + if same_as_graph: + return (H, graphOK) + return H + + +@nx._dispatch +def is_kl_connected(G, k, l, low_memory=False): + """Returns True if and only if `G` is locally `(k, l)`-connected. + + A graph is locally `(k, l)`-connected if for each edge `(u, v)` in the + graph there are at least `l` edge-disjoint paths of length at most `k` + joining `u` to `v`. + + Parameters + ---------- + G : NetworkX graph + The graph to test for local `(k, l)`-connectedness. + + k : integer + The maximum length of paths to consider. A higher number means a looser + connectivity requirement. + + l : integer + The number of edge-disjoint paths. A higher number means a stricter + connectivity requirement. + + low_memory : bool + If this is True, this function uses an algorithm that uses slightly + more time but less memory. + + Returns + ------- + bool + Whether the graph is locally `(k, l)`-connected subgraph. + + See also + -------- + kl_connected_subgraph + + References + ---------- + .. [1] Chung, Fan and Linyuan Lu. "The Small World Phenomenon in Hybrid + Power Law Graphs." *Complex Networks*. Springer Berlin Heidelberg, + 2004. 89--104. + + """ + graphOK = True + for edge in G.edges(): + (u, v) = edge + # Get copy of graph needed for this search + if low_memory: + verts = {u, v} + for i in range(k): + [verts.update(G.neighbors(w)) for w in verts.copy()] + G2 = G.subgraph(verts) + else: + G2 = copy.deepcopy(G) + ### + path = [u, v] + cnt = 0 + accept = 0 + while path: + cnt += 1 # Found a path + if cnt >= l: + accept = 1 + break + # record edges along this graph + prev = u + for w in path: + if w != prev: + G2.remove_edge(prev, w) + prev = w + # path = shortest_path(G2, u, v, k) # ??? should "Cutoff" be k+1? + try: + path = nx.shortest_path(G2, u, v) # ??? should "Cutoff" be k+1? 
+            except nx.NetworkXNoPath:
+                path = False
+        # No Other Paths
+        if accept == 0:
+            graphOK = False
+            break
+    # return status
+    return graphOK
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isolate.py b/MLPY/Lib/site-packages/networkx/algorithms/isolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9983282a635c2699eaed79446bb0ed79794c362
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/isolate.py
@@ -0,0 +1,107 @@
+"""
+Functions for identifying isolate (degree zero) nodes.
+"""
+import networkx as nx
+
+__all__ = ["is_isolate", "isolates", "number_of_isolates"]
+
+
+@nx._dispatch
+def is_isolate(G, n):
+    """Determines whether a node is an isolate.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    n : node
+        A node in `G`.
+
+    Returns
+    -------
+    is_isolate : bool
+        True if and only if `n` has no neighbors.
+
+    Examples
+    --------
+    >>> G = nx.Graph()
+    >>> G.add_edge(1, 2)
+    >>> G.add_node(3)
+    >>> nx.is_isolate(G, 2)
+    False
+    >>> nx.is_isolate(G, 3)
+    True
+    """
+    return G.degree(n) == 0
+
+
+@nx._dispatch
+def isolates(G):
+    """Iterator over isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    iterator
+        An iterator over the isolates of `G`.
+
+    Examples
+    --------
+    To get a list of all isolates of a graph, use the :class:`list`
+    constructor::
+
+        >>> G = nx.Graph()
+        >>> G.add_edge(1, 2)
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    To remove all isolates in the graph, first create a list of the
+    isolates, then use :meth:`Graph.remove_nodes_from`::
+
+        >>> G.remove_nodes_from(list(nx.isolates(G)))
+        >>> list(G)
+        [1, 2]
+
+    For digraphs, isolates have zero in-degree and zero out-degree::
+
+        >>> G = nx.DiGraph([(0, 1), (1, 2)])
+        >>> G.add_node(3)
+        >>> list(nx.isolates(G))
+        [3]
+
+    """
+    return (n for n, d in G.degree() if d == 0)
+
+
+@nx._dispatch
+def number_of_isolates(G):
+    """Returns the number of isolates in the graph.
+
+    An *isolate* is a node with no neighbors (that is, with degree
+    zero). For directed graphs, this means no in-neighbors and no
+    out-neighbors.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    Returns
+    -------
+    int
+        The number of degree zero nodes in the graph `G`.
+
+    """
+    # TODO This can be parallelized.
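+    # isolates() yields lazily, so this counts isolates without building a list.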
+ return sum(1 for v in isolates(G)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..58c22688660073a6abb59f7639871f711d1bd6ac --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__init__.py @@ -0,0 +1,7 @@ +from networkx.algorithms.isomorphism.isomorph import * +from networkx.algorithms.isomorphism.vf2userfunc import * +from networkx.algorithms.isomorphism.matchhelpers import * +from networkx.algorithms.isomorphism.temporalisomorphvf2 import * +from networkx.algorithms.isomorphism.ismags import * +from networkx.algorithms.isomorphism.tree_isomorphism import * +from networkx.algorithms.isomorphism.vf2pp import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df1ae7cd27fb2ed63e288fbfd8914109ac218282 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f6cc84264165b4284daf43782217441d700971d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/ismags.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4830f20c1bfeb7128fed32bb710fa29602b40f31 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0cf8e3db14dc0c3348e4fdecae8b8746c0c22099 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f83d70a27a260a2e4034f62ccf0242f45dd94018 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..286e28021019cffc5091746bb1c467900373993e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cf9bc4e414cd651a8b596fd299e48528913441c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a5e153ea1eef94fb822c1913089e33dac750a9e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0053df214ee25399747c340a50ae6430b534339 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/ismags.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/ismags.py new file mode 100644 index 0000000000000000000000000000000000000000..25ce94c87ec9b76ed81a5650f67ecfb5cda130ba --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/ismags.py @@ -0,0 +1,1169 @@ +""" +ISMAGS Algorithm +================ + +Provides a Python implementation of the ISMAGS algorithm. [1]_ + +It is capable of finding (subgraph) isomorphisms between two graphs, taking the +symmetry of the subgraph into account. In most cases the VF2 algorithm is +faster (at least on small graphs) than this implementation, but in some cases +there is an exponential number of isomorphisms that are symmetrically +equivalent. In that case, the ISMAGS algorithm will provide only one solution +per symmetry group. + +>>> petersen = nx.petersen_graph() +>>> ismags = nx.isomorphism.ISMAGS(petersen, petersen) +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=False)) +>>> len(isomorphisms) +120 +>>> isomorphisms = list(ismags.isomorphisms_iter(symmetry=True)) +>>> answer = [{0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}] +>>> answer == isomorphisms +True + +In addition, this implementation also provides an interface to find the +largest common induced subgraph [2]_ between any two graphs, again taking +symmetry into account. Given `graph` and `subgraph` the algorithm will remove +nodes from the `subgraph` until `subgraph` is isomorphic to a subgraph of +`graph`. Since only the symmetry of `subgraph` is taken into account it is +worth thinking about how you provide your graphs: + +>>> graph1 = nx.path_graph(4) +>>> graph2 = nx.star_graph(3) +>>> ismags = nx.isomorphism.ISMAGS(graph1, graph2) +>>> ismags.is_isomorphic() +False +>>> largest_common_subgraph = list(ismags.largest_common_subgraph()) +>>> answer = [{1: 0, 0: 1, 2: 2}, {2: 0, 1: 1, 3: 2}] +>>> answer == largest_common_subgraph +True +>>> ismags2 = nx.isomorphism.ISMAGS(graph2, graph1) +>>> largest_common_subgraph = list(ismags2.largest_common_subgraph()) +>>> answer = [ +... {1: 0, 0: 1, 2: 2}, +... {1: 0, 0: 1, 3: 2}, +... {2: 0, 0: 1, 1: 2}, +... {2: 0, 0: 1, 3: 2}, +... 
{3: 0, 0: 1, 1: 2},
+...     {3: 0, 0: 1, 2: 2},
+... ]
+>>> answer == largest_common_subgraph
+True
+
+However, when not taking symmetry into account, it doesn't matter:
+
+>>> largest_common_subgraph = list(ismags.largest_common_subgraph(symmetry=False))
+>>> answer = [
+...     {1: 0, 0: 1, 2: 2},
+...     {1: 0, 2: 1, 0: 2},
+...     {2: 0, 1: 1, 3: 2},
+...     {2: 0, 3: 1, 1: 2},
+...     {1: 0, 0: 1, 2: 3},
+...     {1: 0, 2: 1, 0: 3},
+...     {2: 0, 1: 1, 3: 3},
+...     {2: 0, 3: 1, 1: 3},
+...     {1: 0, 0: 2, 2: 3},
+...     {1: 0, 2: 2, 0: 3},
+...     {2: 0, 1: 2, 3: 3},
+...     {2: 0, 3: 2, 1: 3},
+... ]
+>>> answer == largest_common_subgraph
+True
+>>> largest_common_subgraph = list(ismags2.largest_common_subgraph(symmetry=False))
+>>> answer = [
+...     {1: 0, 0: 1, 2: 2},
+...     {1: 0, 0: 1, 3: 2},
+...     {2: 0, 0: 1, 1: 2},
+...     {2: 0, 0: 1, 3: 2},
+...     {3: 0, 0: 1, 1: 2},
+...     {3: 0, 0: 1, 2: 2},
+...     {1: 1, 0: 2, 2: 3},
+...     {1: 1, 0: 2, 3: 3},
+...     {2: 1, 0: 2, 1: 3},
+...     {2: 1, 0: 2, 3: 3},
+...     {3: 1, 0: 2, 1: 3},
+...     {3: 1, 0: 2, 2: 3},
+... ]
+>>> answer == largest_common_subgraph
+True
+
+Notes
+-----
+- The current implementation works for undirected graphs only. The algorithm
+  in general should work for directed graphs as well though.
+- Node keys for both provided graphs need to be fully orderable as well as
+  hashable.
+- Node and edge equality is assumed to be transitive: if A is equal to B, and
+  B is equal to C, then A is equal to C.
+
+References
+----------
+.. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
+   M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
+   Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
+   Enumeration", PLoS One 9(5): e97896, 2014.
+   https://doi.org/10.1371/journal.pone.0097896
+.. [2] https://en.wikipedia.org/wiki/Maximum_common_induced_subgraph
+"""
+
+__all__ = ["ISMAGS"]
+
+import itertools
+from collections import Counter, defaultdict
+from functools import reduce, wraps
+
+
+def are_all_equal(iterable):
+    """
+    Returns ``True`` if and only if all elements in `iterable` are equal, and
+    ``False`` otherwise.
+
+    Parameters
+    ----------
+    iterable: collections.abc.Iterable
+        The container whose elements will be checked.
+
+    Returns
+    -------
+    bool
+        ``True`` iff all elements in `iterable` compare equal, ``False``
+        otherwise.
+    """
+    try:
+        shape = iterable.shape
+    except AttributeError:
+        pass
+    else:
+        if len(shape) > 1:
+            message = "The function does not work on multidimensional arrays."
+            raise NotImplementedError(message) from None
+
+    iterator = iter(iterable)
+    first = next(iterator, None)
+    return all(item == first for item in iterator)
+
+
+def make_partitions(items, test):
+    """
+    Partitions items into sets based on the outcome of ``test(item1, item2)``.
+    Pairs of items for which `test` returns `True` end up in the same set.
+
+    Parameters
+    ----------
+    items : collections.abc.Iterable[collections.abc.Hashable]
+        Items to partition
+    test : collections.abc.Callable[collections.abc.Hashable, collections.abc.Hashable]
+        A function that will be called with 2 arguments, taken from items.
+        Should return `True` if those 2 items need to end up in the same
+        partition, and `False` otherwise.
+
+    Returns
+    -------
+    list[set]
+        A list of sets, with each set containing part of the items in `items`,
+        such that ``all(test(*pair) for pair in itertools.combinations(set, 2))
+        == True``
+
+    Notes
+    -----
+    The function `test` is assumed to be transitive: if ``test(a, b)`` and
+    ``test(b, c)`` return ``True``, then ``test(a, c)`` must also be ``True``.
+    """
+    partitions = []
+    for item in items:
+        for partition in partitions:
+            p_item = next(iter(partition))
+            if test(item, p_item):
+                partition.add(item)
+                break
+        else:  # No break
+            partitions.append({item})
+    return partitions
+
+
+def partition_to_color(partitions):
+    """
+    Creates a dictionary that maps each item in each partition to the index of
+    the partition to which it belongs.
+
+    Parameters
+    ----------
+    partitions: collections.abc.Sequence[collections.abc.Iterable]
+        As returned by :func:`make_partitions`.
+
+    Returns
+    -------
+    dict
+    """
+    colors = {}
+    for color, keys in enumerate(partitions):
+        for key in keys:
+            colors[key] = color
+    return colors
+
+
+def intersect(collection_of_sets):
+    """
+    Given a collection of sets, returns the intersection of those sets.
+
+    Parameters
+    ----------
+    collection_of_sets: collections.abc.Collection[set]
+        A collection of sets.
+
+    Returns
+    -------
+    set
+        An intersection of all sets in `collection_of_sets`. Will have the same
+        type as the item initially taken from `collection_of_sets`.
+    """
+    collection_of_sets = list(collection_of_sets)
+    first = collection_of_sets.pop()
+    out = reduce(set.intersection, collection_of_sets, set(first))
+    return type(first)(out)
+
+
+class ISMAGS:
+    """
+    Implements the ISMAGS subgraph matching algorithm. [1]_ ISMAGS stands for
+    "Index-based Subgraph Matching Algorithm with General Symmetries". As the
+    name implies, it is symmetry aware and will only generate non-symmetric
+    isomorphisms.
+
+    Notes
+    -----
+    The implementation imposes additional conditions compared to the VF2
+    algorithm on the graphs provided and the comparison functions
+    (:attr:`node_equality` and :attr:`edge_equality`):
+
+    - Node keys in both graphs must be orderable as well as hashable.
+    - Equality must be transitive: if A is equal to B, and B is equal to C,
+      then A must be equal to C.
+
+    Attributes
+    ----------
+    graph: networkx.Graph
+    subgraph: networkx.Graph
+    node_equality: collections.abc.Callable
+        The function called to see if two nodes should be considered equal.
+        Its signature looks like this:
+        ``f(graph1: networkx.Graph, node1, graph2: networkx.Graph, node2) -> bool``.
+        `node1` is a node in `graph1`, and `node2` a node in `graph2`.
+        Constructed from the argument `node_match`.
+    edge_equality: collections.abc.Callable
+        The function called to see if two edges should be considered equal.
+        Its signature looks like this:
+        ``f(graph1: networkx.Graph, edge1, graph2: networkx.Graph, edge2) -> bool``.
+        `edge1` is an edge in `graph1`, and `edge2` an edge in `graph2`.
+        Constructed from the argument `edge_match`.
+
+    References
+    ----------
+    .. [1] M. Houbraken, S. Demeyer, T. Michoel, P. Audenaert, D. Colle,
+       M. Pickavet, "The Index-Based Subgraph Matching Algorithm with General
+       Symmetries (ISMAGS): Exploiting Symmetry for Faster Subgraph
+       Enumeration", PLoS One 9(5): e97896, 2014.
+ https://doi.org/10.1371/journal.pone.0097896 + """ + + def __init__(self, graph, subgraph, node_match=None, edge_match=None, cache=None): + """ + Parameters + ---------- + graph: networkx.Graph + subgraph: networkx.Graph + node_match: collections.abc.Callable or None + Function used to determine whether two nodes are equivalent. Its + signature should look like ``f(n1: dict, n2: dict) -> bool``, with + `n1` and `n2` node property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_node_match` and + friends. + If `None`, all nodes are considered equal. + edge_match: collections.abc.Callable or None + Function used to determine whether two edges are equivalent. Its + signature should look like ``f(e1: dict, e2: dict) -> bool``, with + `e1` and `e2` edge property dicts. See also + :func:`~networkx.algorithms.isomorphism.categorical_edge_match` and + friends. + If `None`, all edges are considered equal. + cache: collections.abc.Mapping + A cache used for caching graph symmetries. + """ + # TODO: graph and subgraph setter methods that invalidate the caches. + # TODO: allow for precomputed partitions and colors + self.graph = graph + self.subgraph = subgraph + self._symmetry_cache = cache + # Naming conventions are taken from the original paper. For your + # sanity: + # sg: subgraph + # g: graph + # e: edge(s) + # n: node(s) + # So: sgn means "subgraph nodes". + self._sgn_partitions_ = None + self._sge_partitions_ = None + + self._sgn_colors_ = None + self._sge_colors_ = None + + self._gn_partitions_ = None + self._ge_partitions_ = None + + self._gn_colors_ = None + self._ge_colors_ = None + + self._node_compat_ = None + self._edge_compat_ = None + + if node_match is None: + self.node_equality = self._node_match_maker(lambda n1, n2: True) + self._sgn_partitions_ = [set(self.subgraph.nodes)] + self._gn_partitions_ = [set(self.graph.nodes)] + self._node_compat_ = {0: 0} + else: + self.node_equality = self._node_match_maker(node_match) + if edge_match is None: + self.edge_equality = self._edge_match_maker(lambda e1, e2: True) + self._sge_partitions_ = [set(self.subgraph.edges)] + self._ge_partitions_ = [set(self.graph.edges)] + self._edge_compat_ = {0: 0} + else: + self.edge_equality = self._edge_match_maker(edge_match) + + @property + def _sgn_partitions(self): + if self._sgn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.subgraph, node1, self.subgraph, node2) + + self._sgn_partitions_ = make_partitions(self.subgraph.nodes, nodematch) + return self._sgn_partitions_ + + @property + def _sge_partitions(self): + if self._sge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.subgraph, edge1, self.subgraph, edge2) + + self._sge_partitions_ = make_partitions(self.subgraph.edges, edgematch) + return self._sge_partitions_ + + @property + def _gn_partitions(self): + if self._gn_partitions_ is None: + + def nodematch(node1, node2): + return self.node_equality(self.graph, node1, self.graph, node2) + + self._gn_partitions_ = make_partitions(self.graph.nodes, nodematch) + return self._gn_partitions_ + + @property + def _ge_partitions(self): + if self._ge_partitions_ is None: + + def edgematch(edge1, edge2): + return self.edge_equality(self.graph, edge1, self.graph, edge2) + + self._ge_partitions_ = make_partitions(self.graph.edges, edgematch) + return self._ge_partitions_ + + @property + def _sgn_colors(self): + if self._sgn_colors_ is None: + self._sgn_colors_ = partition_to_color(self._sgn_partitions) + return 
self._sgn_colors_ + + @property + def _sge_colors(self): + if self._sge_colors_ is None: + self._sge_colors_ = partition_to_color(self._sge_partitions) + return self._sge_colors_ + + @property + def _gn_colors(self): + if self._gn_colors_ is None: + self._gn_colors_ = partition_to_color(self._gn_partitions) + return self._gn_colors_ + + @property + def _ge_colors(self): + if self._ge_colors_ is None: + self._ge_colors_ = partition_to_color(self._ge_partitions) + return self._ge_colors_ + + @property + def _node_compatibility(self): + if self._node_compat_ is not None: + return self._node_compat_ + self._node_compat_ = {} + for sgn_part_color, gn_part_color in itertools.product( + range(len(self._sgn_partitions)), range(len(self._gn_partitions)) + ): + sgn = next(iter(self._sgn_partitions[sgn_part_color])) + gn = next(iter(self._gn_partitions[gn_part_color])) + if self.node_equality(self.subgraph, sgn, self.graph, gn): + self._node_compat_[sgn_part_color] = gn_part_color + return self._node_compat_ + + @property + def _edge_compatibility(self): + if self._edge_compat_ is not None: + return self._edge_compat_ + self._edge_compat_ = {} + for sge_part_color, ge_part_color in itertools.product( + range(len(self._sge_partitions)), range(len(self._ge_partitions)) + ): + sge = next(iter(self._sge_partitions[sge_part_color])) + ge = next(iter(self._ge_partitions[ge_part_color])) + if self.edge_equality(self.subgraph, sge, self.graph, ge): + self._edge_compat_[sge_part_color] = ge_part_color + return self._edge_compat_ + + @staticmethod + def _node_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, node1, graph2, node2): + return cmp(graph1.nodes[node1], graph2.nodes[node2]) + + return comparer + + @staticmethod + def _edge_match_maker(cmp): + @wraps(cmp) + def comparer(graph1, edge1, graph2, edge2): + return cmp(graph1.edges[edge1], graph2.edges[edge2]) + + return comparer + + def find_isomorphisms(self, symmetry=True): + """Find all subgraph isomorphisms between subgraph and graph + + Finds isomorphisms where :attr:`subgraph` <= :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + isomorphisms may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + elif len(self.graph) < len(self.subgraph): + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + la_candidates = self._get_lookahead_candidates() + for sgn in self.subgraph: + extra_candidates = la_candidates[sgn] + if extra_candidates: + candidates[sgn] = candidates[sgn] | {frozenset(extra_candidates)} + + if any(candidates.values()): + start_sgn = min(candidates, key=lambda n: min(candidates[n], key=len)) + candidates[start_sgn] = (intersect(candidates[start_sgn]),) + yield from self._map_nodes(start_sgn, candidates, constraints) + else: + return + + @staticmethod + def _find_neighbor_color_count(graph, node, node_color, edge_color): + """ + For `node` in `graph`, count the number of edges of a specific color + it has to nodes of a specific color. 
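+
+        The counts are returned as a ``Counter`` keyed by
+        ``(edge_color, neighbor_color)`` pairs, which is what the one-edge
+        lookahead in ``_get_lookahead_candidates`` compares against.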
+ """ + counts = Counter() + neighbors = graph[node] + for neighbor in neighbors: + n_color = node_color[neighbor] + if (node, neighbor) in edge_color: + e_color = edge_color[node, neighbor] + else: + e_color = edge_color[neighbor, node] + counts[e_color, n_color] += 1 + return counts + + def _get_lookahead_candidates(self): + """ + Returns a mapping of {subgraph node: collection of graph nodes} for + which the graph nodes are feasible candidates for the subgraph node, as + determined by looking ahead one edge. + """ + g_counts = {} + for gn in self.graph: + g_counts[gn] = self._find_neighbor_color_count( + self.graph, gn, self._gn_colors, self._ge_colors + ) + candidates = defaultdict(set) + for sgn in self.subgraph: + sg_count = self._find_neighbor_color_count( + self.subgraph, sgn, self._sgn_colors, self._sge_colors + ) + new_sg_count = Counter() + for (sge_color, sgn_color), count in sg_count.items(): + try: + ge_color = self._edge_compatibility[sge_color] + gn_color = self._node_compatibility[sgn_color] + except KeyError: + pass + else: + new_sg_count[ge_color, gn_color] = count + + for gn, g_count in g_counts.items(): + if all(new_sg_count[x] <= g_count[x] for x in new_sg_count): + # Valid candidate + candidates[sgn].add(gn) + return candidates + + def largest_common_subgraph(self, symmetry=True): + """ + Find the largest common induced subgraphs between :attr:`subgraph` and + :attr:`graph`. + + Parameters + ---------- + symmetry: bool + Whether symmetry should be taken into account. If False, found + largest common subgraphs may be symmetrically equivalent. + + Yields + ------ + dict + The found isomorphism mappings of {graph_node: subgraph_node}. + """ + # The networkx VF2 algorithm is slightly funny in when it yields an + # empty dict and when not. + if not self.subgraph: + yield {} + return + elif not self.graph: + return + + if symmetry: + _, cosets = self.analyze_symmetry( + self.subgraph, self._sgn_partitions, self._sge_colors + ) + constraints = self._make_constraints(cosets) + else: + constraints = [] + + candidates = self._find_nodecolor_candidates() + + if any(candidates.values()): + yield from self._largest_common_subgraph(candidates, constraints) + else: + return + + def analyze_symmetry(self, graph, node_partitions, edge_colors): + """ + Find a minimal set of permutations and corresponding co-sets that + describe the symmetry of `graph`, given the node and edge equalities + given by `node_partitions` and `edge_colors`, respectively. + + Parameters + ---------- + graph : networkx.Graph + The graph whose symmetry should be analyzed. + node_partitions : list of sets + A list of sets containing node keys. Node keys in the same set + are considered equivalent. Every node key in `graph` should be in + exactly one of the sets. If all nodes are equivalent, this should + be ``[set(graph.nodes)]``. + edge_colors : dict mapping edges to their colors + A dict mapping every edge in `graph` to its corresponding color. + Edges with the same color are considered equivalent. If all edges + are equivalent, this should be ``{e: 0 for e in graph.edges}``. + + + Returns + ------- + set[frozenset] + The found permutations. This is a set of frozensets of pairs of node + keys which can be exchanged without changing :attr:`subgraph`. + dict[collections.abc.Hashable, set[collections.abc.Hashable]] + The found co-sets. The co-sets is a dictionary of + ``{node key: set of node keys}``. + Every key-value pair describes which ``values`` can be interchanged + without changing nodes less than ``key``. 
+ """ + if self._symmetry_cache is not None: + key = hash( + ( + tuple(graph.nodes), + tuple(graph.edges), + tuple(map(tuple, node_partitions)), + tuple(edge_colors.items()), + ) + ) + if key in self._symmetry_cache: + return self._symmetry_cache[key] + node_partitions = list( + self._refine_node_partitions(graph, node_partitions, edge_colors) + ) + assert len(node_partitions) == 1 + node_partitions = node_partitions[0] + permutations, cosets = self._process_ordered_pair_partitions( + graph, node_partitions, node_partitions, edge_colors + ) + if self._symmetry_cache is not None: + self._symmetry_cache[key] = permutations, cosets + return permutations, cosets + + def is_isomorphic(self, symmetry=False): + """ + Returns True if :attr:`graph` is isomorphic to :attr:`subgraph` and + False otherwise. + + Returns + ------- + bool + """ + return len(self.subgraph) == len(self.graph) and self.subgraph_is_isomorphic( + symmetry + ) + + def subgraph_is_isomorphic(self, symmetry=False): + """ + Returns True if a subgraph of :attr:`graph` is isomorphic to + :attr:`subgraph` and False otherwise. + + Returns + ------- + bool + """ + # symmetry=False, since we only need to know whether there is any + # example; figuring out all symmetry elements probably costs more time + # than it gains. + isom = next(self.subgraph_isomorphisms_iter(symmetry=symmetry), None) + return isom is not None + + def isomorphisms_iter(self, symmetry=True): + """ + Does the same as :meth:`find_isomorphisms` if :attr:`graph` and + :attr:`subgraph` have the same number of nodes. + """ + if len(self.graph) == len(self.subgraph): + yield from self.subgraph_isomorphisms_iter(symmetry=symmetry) + + def subgraph_isomorphisms_iter(self, symmetry=True): + """Alternative name for :meth:`find_isomorphisms`.""" + return self.find_isomorphisms(symmetry) + + def _find_nodecolor_candidates(self): + """ + Per node in subgraph find all nodes in graph that have the same color. + """ + candidates = defaultdict(set) + for sgn in self.subgraph.nodes: + sgn_color = self._sgn_colors[sgn] + if sgn_color in self._node_compatibility: + gn_color = self._node_compatibility[sgn_color] + candidates[sgn].add(frozenset(self._gn_partitions[gn_color])) + else: + candidates[sgn].add(frozenset()) + candidates = dict(candidates) + for sgn, options in candidates.items(): + candidates[sgn] = frozenset(options) + return candidates + + @staticmethod + def _make_constraints(cosets): + """ + Turn cosets into constraints. + """ + constraints = [] + for node_i, node_ts in cosets.items(): + for node_t in node_ts: + if node_i != node_t: + # Node i must be smaller than node t. + constraints.append((node_i, node_t)) + return constraints + + @staticmethod + def _find_node_edge_color(graph, node_colors, edge_colors): + """ + For every node in graph, come up with a color that combines 1) the + color of the node, and 2) the number of edges of a color to each type + of node. 
+ """ + counts = defaultdict(lambda: defaultdict(int)) + for node1, node2 in graph.edges: + if (node1, node2) in edge_colors: + # FIXME directed graphs + ecolor = edge_colors[node1, node2] + else: + ecolor = edge_colors[node2, node1] + # Count per node how many edges it has of what color to nodes of + # what color + counts[node1][ecolor, node_colors[node2]] += 1 + counts[node2][ecolor, node_colors[node1]] += 1 + + node_edge_colors = {} + for node in graph.nodes: + node_edge_colors[node] = node_colors[node], set(counts[node].items()) + + return node_edge_colors + + @staticmethod + def _get_permutations_by_length(items): + """ + Get all permutations of items, but only permute items with the same + length. + + >>> found = list(ISMAGS._get_permutations_by_length([[1], [2], [3, 4], [4, 5]])) + >>> answer = [ + ... (([1], [2]), ([3, 4], [4, 5])), + ... (([1], [2]), ([4, 5], [3, 4])), + ... (([2], [1]), ([3, 4], [4, 5])), + ... (([2], [1]), ([4, 5], [3, 4])), + ... ] + >>> found == answer + True + """ + by_len = defaultdict(list) + for item in items: + by_len[len(item)].append(item) + + yield from itertools.product( + *(itertools.permutations(by_len[l]) for l in sorted(by_len)) + ) + + @classmethod + def _refine_node_partitions(cls, graph, node_partitions, edge_colors, branch=False): + """ + Given a partition of nodes in graph, make the partitions smaller such + that all nodes in a partition have 1) the same color, and 2) the same + number of edges to specific other partitions. + """ + + def equal_color(node1, node2): + return node_edge_colors[node1] == node_edge_colors[node2] + + node_partitions = list(node_partitions) + node_colors = partition_to_color(node_partitions) + node_edge_colors = cls._find_node_edge_color(graph, node_colors, edge_colors) + if all( + are_all_equal(node_edge_colors[node] for node in partition) + for partition in node_partitions + ): + yield node_partitions + return + + new_partitions = [] + output = [new_partitions] + for partition in node_partitions: + if not are_all_equal(node_edge_colors[node] for node in partition): + refined = make_partitions(partition, equal_color) + if ( + branch + and len(refined) != 1 + and len({len(r) for r in refined}) != len([len(r) for r in refined]) + ): + # This is where it breaks. There are multiple new cells + # in refined with the same length, and their order + # matters. + # So option 1) Hit it with a big hammer and simply make all + # orderings. + permutations = cls._get_permutations_by_length(refined) + new_output = [] + for n_p in output: + for permutation in permutations: + new_output.append(n_p + list(permutation[0])) + output = new_output + else: + for n_p in output: + n_p.extend(sorted(refined, key=len)) + else: + for n_p in output: + n_p.append(partition) + for n_p in output: + yield from cls._refine_node_partitions(graph, n_p, edge_colors, branch) + + def _edges_of_same_color(self, sgn1, sgn2): + """ + Returns all edges in :attr:`graph` that have the same colour as the + edge between sgn1 and sgn2 in :attr:`subgraph`. + """ + if (sgn1, sgn2) in self._sge_colors: + # FIXME directed graphs + sge_color = self._sge_colors[sgn1, sgn2] + else: + sge_color = self._sge_colors[sgn2, sgn1] + if sge_color in self._edge_compatibility: + ge_color = self._edge_compatibility[sge_color] + g_edges = self._ge_partitions[ge_color] + else: + g_edges = [] + return g_edges + + def _map_nodes(self, sgn, candidates, constraints, mapping=None, to_be_mapped=None): + """ + Find all subgraph isomorphisms honoring constraints. 
+ """ + if mapping is None: + mapping = {} + else: + mapping = mapping.copy() + if to_be_mapped is None: + to_be_mapped = set(self.subgraph.nodes) + + # Note, we modify candidates here. Doesn't seem to affect results, but + # remember this. + # candidates = candidates.copy() + sgn_candidates = intersect(candidates[sgn]) + candidates[sgn] = frozenset([sgn_candidates]) + for gn in sgn_candidates: + # We're going to try to map sgn to gn. + if gn in mapping.values() or sgn not in to_be_mapped: + # gn is already mapped to something + continue # pragma: no cover + + # REDUCTION and COMBINATION + mapping[sgn] = gn + # BASECASE + if to_be_mapped == set(mapping.keys()): + yield {v: k for k, v in mapping.items()} + continue + left_to_map = to_be_mapped - set(mapping.keys()) + + new_candidates = candidates.copy() + sgn_neighbours = set(self.subgraph[sgn]) + not_gn_neighbours = set(self.graph.nodes) - set(self.graph[gn]) + for sgn2 in left_to_map: + if sgn2 not in sgn_neighbours: + gn2_options = not_gn_neighbours + else: + # Get all edges to gn of the right color: + g_edges = self._edges_of_same_color(sgn, sgn2) + # FIXME directed graphs + # And all nodes involved in those which are connected to gn + gn2_options = {n for e in g_edges for n in e if gn in e} + # Node color compatibility should be taken care of by the + # initial candidate lists made by find_subgraphs + + # Add gn2_options to the right collection. Since new_candidates + # is a dict of frozensets of frozensets of node indices it's + # a bit clunky. We can't do .add, and + also doesn't work. We + # could do |, but I deem union to be clearer. + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + if (sgn, sgn2) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 > gn} + elif (sgn2, sgn) in constraints: + gn2_options = {gn2 for gn2 in self.graph if gn2 < gn} + else: + continue # pragma: no cover + new_candidates[sgn2] = new_candidates[sgn2].union( + [frozenset(gn2_options)] + ) + + # The next node is the one that is unmapped and has fewest + # candidates + # Pylint disables because it's a one-shot function. + next_sgn = min( + left_to_map, key=lambda n: min(new_candidates[n], key=len) + ) # pylint: disable=cell-var-from-loop + yield from self._map_nodes( + next_sgn, + new_candidates, + constraints, + mapping=mapping, + to_be_mapped=to_be_mapped, + ) + # Unmap sgn-gn. Strictly not necessary since it'd get overwritten + # when making a new mapping for sgn. + # del mapping[sgn] + + def _largest_common_subgraph(self, candidates, constraints, to_be_mapped=None): + """ + Find all largest common subgraphs honoring constraints. + """ + if to_be_mapped is None: + to_be_mapped = {frozenset(self.subgraph.nodes)} + + # The LCS problem is basically a repeated subgraph isomorphism problem + # with smaller and smaller subgraphs. We store the nodes that are + # "part of" the subgraph in to_be_mapped, and we make it a little + # smaller every iteration. + + # pylint disable because it's guarded against by default value + current_size = len( + next(iter(to_be_mapped), []) + ) # pylint: disable=stop-iteration-return + + found_iso = False + if current_size <= len(self.graph): + # There's no point in trying to find isomorphisms of + # graph >= subgraph if subgraph has more nodes than graph. + + # Try the isomorphism first with the nodes with lowest ID. So sort + # them. Those are more likely to be part of the final + # correspondence. This makes finding the first answer(s) faster. In + # theory. 
+ for nodes in sorted(to_be_mapped, key=sorted): + # Find the isomorphism between subgraph[to_be_mapped] <= graph + next_sgn = min(nodes, key=lambda n: min(candidates[n], key=len)) + isomorphs = self._map_nodes( + next_sgn, candidates, constraints, to_be_mapped=nodes + ) + + # This is effectively `yield from isomorphs`, except that we look + # whether an item was yielded. + try: + item = next(isomorphs) + except StopIteration: + pass + else: + yield item + yield from isomorphs + found_iso = True + + # BASECASE + if found_iso or current_size == 1: + # Shrinking has no point because either 1) we end up with a smaller + # common subgraph (and we want the largest), or 2) there'll be no + # more subgraph. + return + + left_to_be_mapped = set() + for nodes in to_be_mapped: + for sgn in nodes: + # We're going to remove sgn from to_be_mapped, but subject to + # symmetry constraints. We know that for every constraint we + # have those subgraph nodes are equal. So whenever we would + # remove the lower part of a constraint, remove the higher + # instead. This is all dealth with by _remove_node. And because + # left_to_be_mapped is a set, we don't do double work. + + # And finally, make the subgraph one node smaller. + # REDUCTION + new_nodes = self._remove_node(sgn, nodes, constraints) + left_to_be_mapped.add(new_nodes) + # COMBINATION + yield from self._largest_common_subgraph( + candidates, constraints, to_be_mapped=left_to_be_mapped + ) + + @staticmethod + def _remove_node(node, nodes, constraints): + """ + Returns a new set where node has been removed from nodes, subject to + symmetry constraints. We know, that for every constraint we have + those subgraph nodes are equal. So whenever we would remove the + lower part of a constraint, remove the higher instead. + """ + while True: + for low, high in constraints: + if low == node and high in nodes: + node = high + break + else: # no break, couldn't find node in constraints + break + return frozenset(nodes - {node}) + + @staticmethod + def _find_permutations(top_partitions, bottom_partitions): + """ + Return the pairs of top/bottom partitions where the partitions are + different. Ensures that all partitions in both top and bottom + partitions have size 1. + """ + # Find permutations + permutations = set() + for top, bot in zip(top_partitions, bottom_partitions): + # top and bot have only one element + if len(top) != 1 or len(bot) != 1: + raise IndexError( + "Not all nodes are coupled. This is" + f" impossible: {top_partitions}, {bottom_partitions}" + ) + if top != bot: + permutations.add(frozenset((next(iter(top)), next(iter(bot))))) + return permutations + + @staticmethod + def _update_orbits(orbits, permutations): + """ + Update orbits based on permutations. Orbits is modified in place. + For every pair of items in permutations their respective orbits are + merged. + """ + for permutation in permutations: + node, node2 = permutation + # Find the orbits that contain node and node2, and replace the + # orbit containing node with the union + first = second = None + for idx, orbit in enumerate(orbits): + if first is not None and second is not None: + break + if node in orbit: + first = idx + if node2 in orbit: + second = idx + if first != second: + orbits[first].update(orbits[second]) + del orbits[second] + + def _couple_nodes( + self, + top_partitions, + bottom_partitions, + pair_idx, + t_node, + b_node, + graph, + edge_colors, + ): + """ + Generate new partitions from top and bottom_partitions where t_node is + coupled to b_node. 
pair_idx is the index of the partitions where t_ and + b_node can be found. + """ + t_partition = top_partitions[pair_idx] + b_partition = bottom_partitions[pair_idx] + assert t_node in t_partition and b_node in b_partition + # Couple node to node2. This means they get their own partition + new_top_partitions = [top.copy() for top in top_partitions] + new_bottom_partitions = [bot.copy() for bot in bottom_partitions] + new_t_groups = {t_node}, t_partition - {t_node} + new_b_groups = {b_node}, b_partition - {b_node} + # Replace the old partitions with the coupled ones + del new_top_partitions[pair_idx] + del new_bottom_partitions[pair_idx] + new_top_partitions[pair_idx:pair_idx] = new_t_groups + new_bottom_partitions[pair_idx:pair_idx] = new_b_groups + + new_top_partitions = self._refine_node_partitions( + graph, new_top_partitions, edge_colors + ) + new_bottom_partitions = self._refine_node_partitions( + graph, new_bottom_partitions, edge_colors, branch=True + ) + new_top_partitions = list(new_top_partitions) + assert len(new_top_partitions) == 1 + new_top_partitions = new_top_partitions[0] + for bot in new_bottom_partitions: + yield list(new_top_partitions), bot + + def _process_ordered_pair_partitions( + self, + graph, + top_partitions, + bottom_partitions, + edge_colors, + orbits=None, + cosets=None, + ): + """ + Processes ordered pair partitions as per the reference paper. Finds and + returns all permutations and cosets that leave the graph unchanged. + """ + if orbits is None: + orbits = [{node} for node in graph.nodes] + else: + # Note that we don't copy orbits when we are given one. This means + # we leak information between the recursive branches. This is + # intentional! + orbits = orbits + if cosets is None: + cosets = {} + else: + cosets = cosets.copy() + + assert all( + len(t_p) == len(b_p) for t_p, b_p in zip(top_partitions, bottom_partitions) + ) + + # BASECASE + if all(len(top) == 1 for top in top_partitions): + # All nodes are mapped + permutations = self._find_permutations(top_partitions, bottom_partitions) + self._update_orbits(orbits, permutations) + if permutations: + return [permutations], cosets + else: + return [], cosets + + permutations = [] + unmapped_nodes = { + (node, idx) + for idx, t_partition in enumerate(top_partitions) + for node in t_partition + if len(t_partition) > 1 + } + node, pair_idx = min(unmapped_nodes) + b_partition = bottom_partitions[pair_idx] + + for node2 in sorted(b_partition): + if len(b_partition) == 1: + # Can never result in symmetry + continue + if node != node2 and any( + node in orbit and node2 in orbit for orbit in orbits + ): + # Orbit prune branch + continue + # REDUCTION + # Couple node to node2 + partitions = self._couple_nodes( + top_partitions, + bottom_partitions, + pair_idx, + node, + node2, + graph, + edge_colors, + ) + for opp in partitions: + new_top_partitions, new_bottom_partitions = opp + + new_perms, new_cosets = self._process_ordered_pair_partitions( + graph, + new_top_partitions, + new_bottom_partitions, + edge_colors, + orbits, + cosets, + ) + # COMBINATION + permutations += new_perms + cosets.update(new_cosets) + + mapped = { + k + for top, bottom in zip(top_partitions, bottom_partitions) + for k in top + if len(top) == 1 and top == bottom + } + ks = {k for k in graph.nodes if k < node} + # Have all nodes with ID < node been mapped? 
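+        # If they have, the orbit of `node` lists exactly the nodes it can
+        # still be exchanged with; that orbit is recorded as its coset below.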
+ find_coset = ks <= mapped and node not in cosets + if find_coset: + # Find the orbit that contains node + for orbit in orbits: + if node in orbit: + cosets[node] = orbit.copy() + return permutations, cosets diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorph.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorph.py new file mode 100644 index 0000000000000000000000000000000000000000..6f562400a3b5d8195ac0ab679e08de8b60da1c13 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorph.py @@ -0,0 +1,248 @@ +""" +Graph isomorphism functions. +""" +import networkx as nx +from networkx.exception import NetworkXError + +__all__ = [ + "could_be_isomorphic", + "fast_could_be_isomorphic", + "faster_could_be_isomorphic", + "is_isomorphic", +] + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree, triangle, and number of cliques sequences. + The triangle sequence contains the number of triangles each node is part of. + The clique sequence contains for each node the number of maximal cliques + involving that node. + + """ + + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = G1.degree() + t1 = nx.triangles(G1) + clqs_1 = list(nx.find_cliques(G1)) + c1 = {n: sum(1 for c in clqs_1 if n in c) for n in G1} # number of cliques + props1 = [[d, t1[v], c1[v]] for v, d in d1] + props1.sort() + + d2 = G2.degree() + t2 = nx.triangles(G2) + clqs_2 = list(nx.find_cliques(G2)) + c2 = {n: sum(1 for c in clqs_2 if n in c) for n in G2} # number of cliques + props2 = [[d, t2[v], c2[v]] for v, d in d2] + props2.sort() + + if props1 != props2: + return False + + # OK... + return True + + +graph_could_be_isomorphic = could_be_isomorphic + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def fast_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree and triangle sequences. The triangle + sequence contains the number of triangles each node is part of. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = G1.degree() + t1 = nx.triangles(G1) + props1 = [[d, t1[v]] for v, d in d1] + props1.sort() + + d2 = G2.degree() + t2 = nx.triangles(G2) + props2 = [[d, t2[v]] for v, d in d2] + props2.sort() + + if props1 != props2: + return False + + # OK... + return True + + +fast_graph_could_be_isomorphic = fast_could_be_isomorphic + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def faster_could_be_isomorphic(G1, G2): + """Returns False if graphs are definitely not isomorphic. + + True does NOT guarantee isomorphism. + + Parameters + ---------- + G1, G2 : graphs + The two graphs G1 and G2 must be the same type. + + Notes + ----- + Checks for matching degree sequences. + """ + # Check global properties + if G1.order() != G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in G1.degree()) + d2 = sorted(d for n, d in G2.degree()) + + if d1 != d2: + return False + + # OK... 
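+    # The degree sequences match, so isomorphism cannot be ruled out.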
+ return True + + +faster_graph_could_be_isomorphic = faster_could_be_isomorphic + + +@nx._dispatch( + graphs={"G1": 0, "G2": 1}, + preserve_edge_attrs="edge_match", + preserve_node_attrs="node_match", +) +def is_isomorphic(G1, G2, node_match=None, edge_match=None): + """Returns True if the graphs G1 and G2 are isomorphic and False otherwise. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 should + be considered equal during the isomorphism test. + If node_match is not specified then node attributes are not considered. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute dictionaries + for n1 and n2 as inputs. + + edge_match : callable + A function that returns True if the edge attribute dictionary + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during the isomorphism test. If edge_match is + not specified then edge attributes are not considered. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. + + Notes + ----- + Uses the vf2 algorithm [1]_. + + Examples + -------- + >>> import networkx.algorithms.isomorphism as iso + + For digraphs G1 and G2, using 'weight' edge attribute (default: 1) + + >>> G1 = nx.DiGraph() + >>> G2 = nx.DiGraph() + >>> nx.add_path(G1, [1, 2, 3, 4], weight=1) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=2) + >>> em = iso.numerical_edge_match("weight", 1) + >>> nx.is_isomorphic(G1, G2) # no weights considered + True + >>> nx.is_isomorphic(G1, G2, edge_match=em) # match weights + False + + For multidigraphs G1 and G2, using 'fill' node attribute (default: '') + + >>> G1 = nx.MultiDiGraph() + >>> G2 = nx.MultiDiGraph() + >>> G1.add_nodes_from([1, 2, 3], fill="red") + >>> G2.add_nodes_from([10, 20, 30, 40], fill="red") + >>> nx.add_path(G1, [1, 2, 3, 4], weight=3, linewidth=2.5) + >>> nx.add_path(G2, [10, 20, 30, 40], weight=3) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, node_match=nm) + True + + For multidigraphs G1 and G2, using 'weight' edge attribute (default: 7) + + >>> G1.add_edge(1, 2, weight=7) + 1 + >>> G2.add_edge(10, 20) + 1 + >>> em = iso.numerical_multiedge_match("weight", 7, rtol=1e-6) + >>> nx.is_isomorphic(G1, G2, edge_match=em) + True + + For multigraphs G1 and G2, using 'weight' and 'linewidth' edge attributes + with default values 7 and 2.5. Also using 'fill' node attribute with + default value 'red'. + + >>> em = iso.numerical_multiedge_match(["weight", "linewidth"], [7, 2.5]) + >>> nm = iso.categorical_node_match("fill", "red") + >>> nx.is_isomorphic(G1, G2, edge_match=em, node_match=nm) + True + + See Also + -------- + numerical_node_match, numerical_edge_match, numerical_multiedge_match + categorical_node_match, categorical_edge_match, categorical_multiedge_match + + References + ---------- + .. [1] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, + "An Improved Algorithm for Matching Large Graphs", + 3rd IAPR-TC15 Workshop on Graph-based Representations in + Pattern Recognition, Cuen, pp. 149-159, 2001. 
+ https://www.researchgate.net/publication/200034365_An_Improved_Algorithm_for_Matching_Large_Graphs + """ + if G1.is_directed() and G2.is_directed(): + GM = nx.algorithms.isomorphism.DiGraphMatcher + elif (not G1.is_directed()) and (not G2.is_directed()): + GM = nx.algorithms.isomorphism.GraphMatcher + else: + raise NetworkXError("Graphs G1 and G2 are not of the same type.") + + gm = GM(G1, G2, node_match=node_match, edge_match=edge_match) + + return gm.is_isomorphic() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py new file mode 100644 index 0000000000000000000000000000000000000000..3e4bcc4dc9bc0f1891f15cd7a11171f4e64e0d3e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/isomorphvf2.py @@ -0,0 +1,1060 @@ +""" +************* +VF2 Algorithm +************* + +An implementation of VF2 algorithm for graph isomorphism testing. + +The simplest interface to use this module is to call networkx.is_isomorphic(). + +Introduction +------------ + +The GraphMatcher and DiGraphMatcher are responsible for matching +graphs or directed graphs in a predetermined manner. This +usually means a check for an isomorphism, though other checks +are also possible. For example, a subgraph of one graph +can be checked for isomorphism to a second graph. + +Matching is done via syntactic feasibility. It is also possible +to check for semantic feasibility. Feasibility, then, is defined +as the logical AND of the two functions. + +To include a semantic check, the (Di)GraphMatcher class should be +subclassed, and the semantic_feasibility() function should be +redefined. By default, the semantic feasibility function always +returns True. The effect of this is that semantics are not +considered in the matching of G1 and G2. + +Examples +-------- + +Suppose G1 and G2 are isomorphic graphs. Verification is as follows: + +>>> from networkx.algorithms import isomorphism +>>> G1 = nx.path_graph(4) +>>> G2 = nx.path_graph(4) +>>> GM = isomorphism.GraphMatcher(G1, G2) +>>> GM.is_isomorphic() +True + +GM.mapping stores the isomorphism mapping from G1 to G2. + +>>> GM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + +Suppose G1 and G2 are isomorphic directed graphs. +Verification is as follows: + +>>> G1 = nx.path_graph(4, create_using=nx.DiGraph()) +>>> G2 = nx.path_graph(4, create_using=nx.DiGraph()) +>>> DiGM = isomorphism.DiGraphMatcher(G1, G2) +>>> DiGM.is_isomorphic() +True + +DiGM.mapping stores the isomorphism mapping from G1 to G2. + +>>> DiGM.mapping +{0: 0, 1: 1, 2: 2, 3: 3} + + + +Subgraph Isomorphism +-------------------- +Graph theory literature can be ambiguous about the meaning of the +above statement, and we seek to clarify it now. + +In the VF2 literature, a mapping M is said to be a graph-subgraph +isomorphism iff M is an isomorphism between G2 and a subgraph of G1. +Thus, to say that G1 and G2 are graph-subgraph isomorphic is to say +that a subgraph of G1 is isomorphic to G2. + +Other literature uses the phrase 'subgraph isomorphic' as in 'G1 does +not have a subgraph isomorphic to G2'. Another use is as an in adverb +for isomorphic. Thus, to say that G1 and G2 are subgraph isomorphic +is to say that a subgraph of G1 is isomorphic to G2. + +Finally, the term 'subgraph' can have multiple meanings. In this +context, 'subgraph' always means a 'node-induced subgraph'. 
Edge-induced +subgraph isomorphisms are not directly supported, but one should be +able to perform the check by making use of nx.line_graph(). For +subgraphs which are not induced, the term 'monomorphism' is preferred +over 'isomorphism'. + +Let G=(N,E) be a graph with a set of nodes N and set of edges E. + +If G'=(N',E') is a subgraph, then: + N' is a subset of N + E' is a subset of E + +If G'=(N',E') is a node-induced subgraph, then: + N' is a subset of N + E' is the subset of edges in E relating nodes in N' + +If G'=(N',E') is an edge-induced subgraph, then: + N' is the subset of nodes in N related by edges in E' + E' is a subset of E + +If G'=(N',E') is a monomorphism, then: + N' is a subset of N + E' is a subset of the set of edges in E relating nodes in N' + +Note that if G' is a node-induced subgraph of G, then it is always a +subgraph monomorphism of G, but the opposite is not always true, as a +monomorphism can have fewer edges. + +References +---------- +[1] Luigi P. Cordella, Pasquale Foggia, Carlo Sansone, Mario Vento, + "A (Sub)Graph Isomorphism Algorithm for Matching Large Graphs", + IEEE Transactions on Pattern Analysis and Machine Intelligence, + vol. 26, no. 10, pp. 1367-1372, Oct., 2004. + http://ieeexplore.ieee.org/iel5/34/29305/01323804.pdf + +[2] L. P. Cordella, P. Foggia, C. Sansone, M. Vento, "An Improved + Algorithm for Matching Large Graphs", 3rd IAPR-TC15 Workshop + on Graph-based Representations in Pattern Recognition, Cuen, + pp. 149-159, 2001. + https://www.researchgate.net/publication/200034365_An_Improved_Algorithm_for_Matching_Large_Graphs + +See Also +-------- +syntactic_feasibility(), semantic_feasibility() + +Notes +----- + +The implementation handles both directed and undirected graphs as well +as multigraphs. + +In general, the subgraph isomorphism problem is NP-complete whereas the +graph isomorphism problem is most likely not NP-complete (although no +polynomial-time algorithm is known to exist). + +""" + +# This work was originally coded by Christopher Ellison +# as part of the Computational Mechanics Python (CMPy) project. +# James P. Crutchfield, principal investigator. +# Complexity Sciences Center and Physics Department, UC Davis. + +import sys + +__all__ = ["GraphMatcher", "DiGraphMatcher"] + + +class GraphMatcher: + """Implementation of VF2 algorithm for matching undirected graphs. + + Suitable for Graph and MultiGraph instances. + """ + + def __init__(self, G1, G2): + """Initialize GraphMatcher. + + Parameters + ---------- + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism. + + Examples + -------- + To create a GraphMatcher which checks for syntactic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> G1 = nx.path_graph(4) + >>> G2 = nx.path_graph(4) + >>> GM = isomorphism.GraphMatcher(G1, G2) + """ + self.G1 = G1 + self.G2 = G2 + self.G1_nodes = set(G1.nodes()) + self.G2_nodes = set(G2.nodes()) + self.G2_node_order = {n: i for i, n in enumerate(G2)} + + # Set recursion limit. + self.old_recursion_limit = sys.getrecursionlimit() + expected_max_recursion_level = len(self.G2) + if self.old_recursion_limit < 1.5 * expected_max_recursion_level: + # Give some breathing room. + sys.setrecursionlimit(int(1.5 * expected_max_recursion_level)) + + # Declare that we will be searching for a graph-graph isomorphism. 
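+        # self.test selects which feasibility rules are applied; the
+        # subgraph/monomorphism iterators overwrite it with "subgraph" or
+        # "mono".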
+ self.test = "graph" + + # Initialize state + self.initialize() + + def reset_recursion_limit(self): + """Restores the recursion limit.""" + # TODO: + # Currently, we use recursion and set the recursion level higher. + # It would be nice to restore the level, but because the + # (Di)GraphMatcher classes make use of cyclic references, garbage + # collection will never happen when we define __del__() to + # restore the recursion level. The result is a memory leak. + # So for now, we do not automatically restore the recursion level, + # and instead provide a method to do this manually. Eventually, + # we should turn this into a non-recursive implementation. + sys.setrecursionlimit(self.old_recursion_limit) + + def candidate_pairs_iter(self): + """Iterator over candidate pairs of nodes in G1 and G2.""" + + # All computations are done using the current state! + + G1_nodes = self.G1_nodes + G2_nodes = self.G2_nodes + min_key = self.G2_node_order.__getitem__ + + # First we compute the inout-terminal sets. + T1_inout = [node for node in self.inout_1 if node not in self.core_1] + T2_inout = [node for node in self.inout_2 if node not in self.core_2] + + # If T1_inout and T2_inout are both nonempty. + # P(s) = T1_inout x {min T2_inout} + if T1_inout and T2_inout: + node_2 = min(T2_inout, key=min_key) + for node_1 in T1_inout: + yield node_1, node_2 + + else: + # If T1_inout and T2_inout were both empty.... + # P(s) = (N_1 - M_1) x {min (N_2 - M_2)} + # if not (T1_inout or T2_inout): # as suggested by [2], incorrect + if 1: # as inferred from [1], correct + # First we determine the candidate node for G2 + other_node = min(G2_nodes - set(self.core_2), key=min_key) + for node in self.G1: + if node not in self.core_1: + yield node, other_node + + # For all other cases, we don't have any candidate pairs. + + def initialize(self): + """Reinitializes the state of the algorithm. + + This method should be redefined if using something other than GMState. + If only subclassing GraphMatcher, a redefinition is not necessary. + + """ + + # core_1[n] contains the index of the node paired with n, which is m, + # provided n is in the mapping. + # core_2[m] contains the index of the node paired with m, which is n, + # provided m is in the mapping. + self.core_1 = {} + self.core_2 = {} + + # See the paper for definitions of M_x and T_x^{y} + + # inout_1[n] is non-zero if n is in M_1 or in T_1^{inout} + # inout_2[m] is non-zero if m is in M_2 or in T_2^{inout} + # + # The value stored is the depth of the SSR tree when the node became + # part of the corresponding set. + self.inout_1 = {} + self.inout_2 = {} + # Practically, these sets simply store the nodes in the subgraph. + + self.state = GMState(self) + + # Provide a convenient way to access the isomorphism mapping. + self.mapping = self.core_1.copy() + + def is_isomorphic(self): + """Returns True if G1 and G2 are isomorphic graphs.""" + + # Let's do two very quick checks! + # QUESTION: Should we call faster_graph_could_be_isomorphic(G1,G2)? + # For now, I just copy the code. + + # Check global properties + if self.G1.order() != self.G2.order(): + return False + + # Check local properties + d1 = sorted(d for n, d in self.G1.degree()) + d2 = sorted(d for n, d in self.G2.degree()) + if d1 != d2: + return False + + try: + x = next(self.isomorphisms_iter()) + return True + except StopIteration: + return False + + def isomorphisms_iter(self): + """Generator over isomorphisms between G1 and G2.""" + # Declare that we are looking for a graph-graph isomorphism. 
+ self.test = "graph" + self.initialize() + yield from self.match() + + def match(self): + """Extends the isomorphism mapping. + + This function is called recursively to determine if a complete + isomorphism can be found between G1 and G2. It cleans up the class + variables after each recursive call. If an isomorphism is found, + we yield the mapping. + + """ + if len(self.core_1) == len(self.G2): + # Save the final mapping, otherwise garbage collection deletes it. + self.mapping = self.core_1.copy() + # The mapping is complete. + yield self.mapping + else: + for G1_node, G2_node in self.candidate_pairs_iter(): + if self.syntactic_feasibility(G1_node, G2_node): + if self.semantic_feasibility(G1_node, G2_node): + # Recursive call, adding the feasible state. + newstate = self.state.__class__(self, G1_node, G2_node) + yield from self.match() + + # restore data structures + newstate.restore() + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically feasible. + + The semantic feasibility function should return True if it is + acceptable to add the candidate pair (G1_node, G2_node) to the current + partial isomorphism mapping. The logic should focus on semantic + information contained in the edge data or a formalized node class. + + By acceptable, we mean that the subsequent mapping can still become a + complete isomorphism mapping. Thus, if adding the candidate pair + definitely makes it so that the subsequent mapping cannot become a + complete isomorphism mapping, then this function must return False. + + The default semantic feasibility function always returns True. The + effect is that semantics are not considered in the matching of G1 + and G2. + + The semantic checks might differ based on the what type of test is + being performed. A keyword description of the test is stored in + self.test. Here is a quick description of the currently implemented + tests:: + + test='graph' + Indicates that the graph matcher is looking for a graph-graph + isomorphism. + + test='subgraph' + Indicates that the graph matcher is looking for a subgraph-graph + isomorphism such that a subgraph of G1 is isomorphic to G2. + + test='mono' + Indicates that the graph matcher is looking for a subgraph-graph + monomorphism such that a subgraph of G1 is monomorphic to G2. + + Any subclass which redefines semantic_feasibility() must maintain + the above form to keep the match() method functional. Implementations + should consider multigraphs. + """ + return True + + def subgraph_is_isomorphic(self): + """Returns True if a subgraph of G1 is isomorphic to G2.""" + try: + x = next(self.subgraph_isomorphisms_iter()) + return True + except StopIteration: + return False + + def subgraph_is_monomorphic(self): + """Returns True if a subgraph of G1 is monomorphic to G2.""" + try: + x = next(self.subgraph_monomorphisms_iter()) + return True + except StopIteration: + return False + + # subgraph_is_isomorphic.__doc__ += "\n" + subgraph.replace('\n','\n'+indent) + + def subgraph_isomorphisms_iter(self): + """Generator over isomorphisms between a subgraph of G1 and G2.""" + # Declare that we are looking for graph-subgraph isomorphism. + self.test = "subgraph" + self.initialize() + yield from self.match() + + def subgraph_monomorphisms_iter(self): + """Generator over monomorphisms between a subgraph of G1 and G2.""" + # Declare that we are looking for graph-subgraph monomorphism. 
+ self.test = "mono" + self.initialize() + yield from self.match() + + # subgraph_isomorphisms_iter.__doc__ += "\n" + subgraph.replace('\n','\n'+indent) + + def syntactic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is syntactically feasible. + + This function returns True if it is adding the candidate pair + to the current partial isomorphism/monomorphism mapping is allowable. + The addition is allowable if the inclusion of the candidate pair does + not make it impossible for an isomorphism/monomorphism to be found. + """ + + # The VF2 algorithm was designed to work with graphs having, at most, + # one edge connecting any two nodes. This is not the case when + # dealing with an MultiGraphs. + # + # Basically, when we test the look-ahead rules R_neighbor, we will + # make sure that the number of edges are checked. We also add + # a R_self check to verify that the number of selfloops is acceptable. + # + # Users might be comparing Graph instances with MultiGraph instances. + # So the generic GraphMatcher class must work with MultiGraphs. + # Care must be taken since the value in the innermost dictionary is a + # singlet for Graph instances. For MultiGraphs, the value in the + # innermost dictionary is a list. + + ### + # Test at each step to get a return value as soon as possible. + ### + + # Look ahead 0 + + # R_self + + # The number of selfloops for G1_node must equal the number of + # self-loops for G2_node. Without this check, we would fail on + # R_neighbor at the next recursion level. But it is good to prune the + # search tree now. + + if self.test == "mono": + if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges( + G2_node, G2_node + ): + return False + else: + if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges( + G2_node, G2_node + ): + return False + + # R_neighbor + + # For each neighbor n' of n in the partial mapping, the corresponding + # node m' is a neighbor of m, and vice versa. Also, the number of + # edges must be equal. + if self.test != "mono": + for neighbor in self.G1[G1_node]: + if neighbor in self.core_1: + if self.core_1[neighbor] not in self.G2[G2_node]: + return False + elif self.G1.number_of_edges( + neighbor, G1_node + ) != self.G2.number_of_edges(self.core_1[neighbor], G2_node): + return False + + for neighbor in self.G2[G2_node]: + if neighbor in self.core_2: + if self.core_2[neighbor] not in self.G1[G1_node]: + return False + elif self.test == "mono": + if self.G1.number_of_edges( + self.core_2[neighbor], G1_node + ) < self.G2.number_of_edges(neighbor, G2_node): + return False + else: + if self.G1.number_of_edges( + self.core_2[neighbor], G1_node + ) != self.G2.number_of_edges(neighbor, G2_node): + return False + + if self.test != "mono": + # Look ahead 1 + + # R_terminout + # The number of neighbors of n in T_1^{inout} is equal to the + # number of neighbors of m that are in T_2^{inout}, and vice versa. 
+ num1 = 0 + for neighbor in self.G1[G1_node]: + if (neighbor in self.inout_1) and (neighbor not in self.core_1): + num1 += 1 + num2 = 0 + for neighbor in self.G2[G2_node]: + if (neighbor in self.inout_2) and (neighbor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Look ahead 2 + + # R_new + + # The number of neighbors of n that are neither in the core_1 nor + # T_1^{inout} is equal to the number of neighbors of m + # that are neither in core_2 nor T_2^{inout}. + num1 = 0 + for neighbor in self.G1[G1_node]: + if neighbor not in self.inout_1: + num1 += 1 + num2 = 0 + for neighbor in self.G2[G2_node]: + if neighbor not in self.inout_2: + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Otherwise, this node pair is syntactically feasible! + return True + + +class DiGraphMatcher(GraphMatcher): + """Implementation of VF2 algorithm for matching directed graphs. + + Suitable for DiGraph and MultiDiGraph instances. + """ + + def __init__(self, G1, G2): + """Initialize DiGraphMatcher. + + G1 and G2 should be nx.Graph or nx.MultiGraph instances. + + Examples + -------- + To create a GraphMatcher which checks for syntactic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + >>> DiGM = isomorphism.DiGraphMatcher(G1, G2) + """ + super().__init__(G1, G2) + + def candidate_pairs_iter(self): + """Iterator over candidate pairs of nodes in G1 and G2.""" + + # All computations are done using the current state! + + G1_nodes = self.G1_nodes + G2_nodes = self.G2_nodes + min_key = self.G2_node_order.__getitem__ + + # First we compute the out-terminal sets. + T1_out = [node for node in self.out_1 if node not in self.core_1] + T2_out = [node for node in self.out_2 if node not in self.core_2] + + # If T1_out and T2_out are both nonempty. + # P(s) = T1_out x {min T2_out} + if T1_out and T2_out: + node_2 = min(T2_out, key=min_key) + for node_1 in T1_out: + yield node_1, node_2 + + # If T1_out and T2_out were both empty.... + # We compute the in-terminal sets. + + # elif not (T1_out or T2_out): # as suggested by [2], incorrect + else: # as suggested by [1], correct + T1_in = [node for node in self.in_1 if node not in self.core_1] + T2_in = [node for node in self.in_2 if node not in self.core_2] + + # If T1_in and T2_in are both nonempty. + # P(s) = T1_out x {min T2_out} + if T1_in and T2_in: + node_2 = min(T2_in, key=min_key) + for node_1 in T1_in: + yield node_1, node_2 + + # If all terminal sets are empty... + # P(s) = (N_1 - M_1) x {min (N_2 - M_2)} + + # elif not (T1_in or T2_in): # as suggested by [2], incorrect + else: # as inferred from [1], correct + node_2 = min(G2_nodes - set(self.core_2), key=min_key) + for node_1 in G1_nodes: + if node_1 not in self.core_1: + yield node_1, node_2 + + # For all other cases, we don't have any candidate pairs. + + def initialize(self): + """Reinitializes the state of the algorithm. + + This method should be redefined if using something other than DiGMState. + If only subclassing GraphMatcher, a redefinition is not necessary. + """ + + # core_1[n] contains the index of the node paired with n, which is m, + # provided n is in the mapping. 
+        # core_2[m] contains the index of the node paired with m, which is n,
+        # provided m is in the mapping.
+        self.core_1 = {}
+        self.core_2 = {}
+
+        # See the paper for definitions of M_x and T_x^{y}
+
+        # in_1[n] is non-zero if n is in M_1 or in T_1^{in}
+        # out_1[n] is non-zero if n is in M_1 or in T_1^{out}
+        #
+        # in_2[m] is non-zero if m is in M_2 or in T_2^{in}
+        # out_2[m] is non-zero if m is in M_2 or in T_2^{out}
+        #
+        # The value stored is the depth of the search tree when the node became
+        # part of the corresponding set.
+        self.in_1 = {}
+        self.in_2 = {}
+        self.out_1 = {}
+        self.out_2 = {}
+
+        self.state = DiGMState(self)
+
+        # Provide a convenient way to access the isomorphism mapping.
+        self.mapping = self.core_1.copy()
+
+    def syntactic_feasibility(self, G1_node, G2_node):
+        """Returns True if adding (G1_node, G2_node) is syntactically feasible.
+
+        This function returns True if adding the candidate pair to the
+        current partial isomorphism/monomorphism mapping is allowable. The
+        addition is allowable if the inclusion of the candidate pair does
+        not make it impossible for an isomorphism/monomorphism to be found.
+        """
+
+        # The VF2 algorithm was designed to work with graphs having, at most,
+        # one edge connecting any two nodes. This is not the case when
+        # dealing with MultiGraphs.
+        #
+        # Basically, when we test the look-ahead rules R_pred and R_succ, we
+        # will make sure that the number of edges is checked. We also add
+        # an R_self check to verify that the number of selfloops is acceptable.
+
+        # Users might be comparing DiGraph instances with MultiDiGraph
+        # instances. So the generic DiGraphMatcher class must work with
+        # MultiDiGraphs. Care must be taken since the value in the innermost
+        # dictionary is a singlet for DiGraph instances. For MultiDiGraphs,
+        # the value in the innermost dictionary is a list.
+
+        ###
+        # Test at each step to get a return value as soon as possible.
+        ###
+
+        # Look ahead 0
+
+        # R_self
+
+        # The number of selfloops for G1_node must equal the number of
+        # self-loops for G2_node. Without this check, we would fail on R_pred
+        # at the next recursion level. This should prune the tree even further.
+        if self.test == "mono":
+            if self.G1.number_of_edges(G1_node, G1_node) < self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+        else:
+            if self.G1.number_of_edges(G1_node, G1_node) != self.G2.number_of_edges(
+                G2_node, G2_node
+            ):
+                return False
+
+        # R_pred
+
+        # For each predecessor n' of n in the partial mapping, the
+        # corresponding node m' is a predecessor of m, and vice versa. Also,
+        # the number of edges must be equal.
+        if self.test != "mono":
+            for predecessor in self.G1.pred[G1_node]:
+                if predecessor in self.core_1:
+                    if self.core_1[predecessor] not in self.G2.pred[G2_node]:
+                        return False
+                    elif self.G1.number_of_edges(
+                        predecessor, G1_node
+                    ) != self.G2.number_of_edges(self.core_1[predecessor], G2_node):
+                        return False
+
+        for predecessor in self.G2.pred[G2_node]:
+            if predecessor in self.core_2:
+                if self.core_2[predecessor] not in self.G1.pred[G1_node]:
+                    return False
+                elif self.test == "mono":
+                    if self.G1.number_of_edges(
+                        self.core_2[predecessor], G1_node
+                    ) < self.G2.number_of_edges(predecessor, G2_node):
+                        return False
+                else:
+                    if self.G1.number_of_edges(
+                        self.core_2[predecessor], G1_node
+                    ) != self.G2.number_of_edges(predecessor, G2_node):
+                        return False
+
+        # R_succ
+
+        # For each successor n' of n in the partial mapping, the corresponding
+        # node m' is a successor of m, and vice versa.
Also, the number of + # edges must be equal. + if self.test != "mono": + for successor in self.G1[G1_node]: + if successor in self.core_1: + if self.core_1[successor] not in self.G2[G2_node]: + return False + elif self.G1.number_of_edges( + G1_node, successor + ) != self.G2.number_of_edges(G2_node, self.core_1[successor]): + return False + + for successor in self.G2[G2_node]: + if successor in self.core_2: + if self.core_2[successor] not in self.G1[G1_node]: + return False + elif self.test == "mono": + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) < self.G2.number_of_edges(G2_node, successor): + return False + else: + if self.G1.number_of_edges( + G1_node, self.core_2[successor] + ) != self.G2.number_of_edges(G2_node, successor): + return False + + if self.test != "mono": + # Look ahead 1 + + # R_termin + # The number of predecessors of n that are in T_1^{in} is equal to the + # number of predecessors of m that are in T_2^{in}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.in_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.in_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{in} is equal to the + # number of successors of m that are in T_2^{in}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.in_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.in_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # R_termout + + # The number of predecessors of n that are in T_1^{out} is equal to the + # number of predecessors of m that are in T_2^{out}. + num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor in self.out_1) and (predecessor not in self.core_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor in self.out_2) and (predecessor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are in T_1^{out} is equal to the + # number of successors of m that are in T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor in self.out_1) and (successor not in self.core_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor in self.out_2) and (successor not in self.core_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Look ahead 2 + + # R_new + + # The number of predecessors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of predecessors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. 
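+            # As in the undirected matcher, the subgraph test relaxes the
+            # equalities below to ">=".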
+ num1 = 0 + for predecessor in self.G1.pred[G1_node]: + if (predecessor not in self.in_1) and (predecessor not in self.out_1): + num1 += 1 + num2 = 0 + for predecessor in self.G2.pred[G2_node]: + if (predecessor not in self.in_2) and (predecessor not in self.out_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # The number of successors of n that are neither in the core_1 nor + # T_1^{in} nor T_1^{out} is equal to the number of successors of m + # that are neither in core_2 nor T_2^{in} nor T_2^{out}. + num1 = 0 + for successor in self.G1[G1_node]: + if (successor not in self.in_1) and (successor not in self.out_1): + num1 += 1 + num2 = 0 + for successor in self.G2[G2_node]: + if (successor not in self.in_2) and (successor not in self.out_2): + num2 += 1 + if self.test == "graph": + if num1 != num2: + return False + else: # self.test == 'subgraph' + if not (num1 >= num2): + return False + + # Otherwise, this node pair is syntactically feasible! + return True + + +class GMState: + """Internal representation of state for the GraphMatcher class. + + This class is used internally by the GraphMatcher class. It is used + only to store state specific data. There will be at most G2.order() of + these objects in memory at a time, due to the depth-first search + strategy employed by the VF2 algorithm. + """ + + def __init__(self, GM, G1_node=None, G2_node=None): + """Initializes GMState object. + + Pass in the GraphMatcher to which this GMState belongs and the + new node pair that will be added to the GraphMatcher's current + isomorphism mapping. + """ + self.GM = GM + + # Initialize the last stored node pair. + self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.inout_1 = {} + GM.inout_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. + GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other two vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... + if G1_node not in GM.inout_1: + GM.inout_1[G1_node] = self.depth + if G2_node not in GM.inout_2: + GM.inout_2[G2_node] = self.depth + + # Now we add every other node... + + # Updates for T_1^{inout} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1] + ) + for node in new_nodes: + if node not in GM.inout_1: + GM.inout_1[node] = self.depth + + # Updates for T_2^{inout} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2] + ) + for node in new_nodes: + if node not in GM.inout_2: + GM.inout_2[node] = self.depth + + def restore(self): + """Deletes the GMState object and restores the class variables.""" + # First we remove the node that was added from the core vectors. + # Watch out! G1_node == 0 should evaluate to True. + if self.G1_node is not None and self.G2_node is not None: + del self.GM.core_1[self.G1_node] + del self.GM.core_2[self.G2_node] + + # Now we revert the other two vectors. 
+ # Thus, we delete all entries which have this depth level. + for vector in (self.GM.inout_1, self.GM.inout_2): + for node in list(vector.keys()): + if vector[node] == self.depth: + del vector[node] + + +class DiGMState: + """Internal representation of state for the DiGraphMatcher class. + + This class is used internally by the DiGraphMatcher class. It is used + only to store state specific data. There will be at most G2.order() of + these objects in memory at a time, due to the depth-first search + strategy employed by the VF2 algorithm. + + """ + + def __init__(self, GM, G1_node=None, G2_node=None): + """Initializes DiGMState object. + + Pass in the DiGraphMatcher to which this DiGMState belongs and the + new node pair that will be added to the GraphMatcher's current + isomorphism mapping. + """ + self.GM = GM + + # Initialize the last stored node pair. + self.G1_node = None + self.G2_node = None + self.depth = len(GM.core_1) + + if G1_node is None or G2_node is None: + # Then we reset the class variables + GM.core_1 = {} + GM.core_2 = {} + GM.in_1 = {} + GM.in_2 = {} + GM.out_1 = {} + GM.out_2 = {} + + # Watch out! G1_node == 0 should evaluate to True. + if G1_node is not None and G2_node is not None: + # Add the node pair to the isomorphism mapping. + GM.core_1[G1_node] = G2_node + GM.core_2[G2_node] = G1_node + + # Store the node that was added last. + self.G1_node = G1_node + self.G2_node = G2_node + + # Now we must update the other four vectors. + # We will add only if it is not in there already! + self.depth = len(GM.core_1) + + # First we add the new nodes... + for vector in (GM.in_1, GM.out_1): + if G1_node not in vector: + vector[G1_node] = self.depth + for vector in (GM.in_2, GM.out_2): + if G2_node not in vector: + vector[G2_node] = self.depth + + # Now we add every other node... + + # Updates for T_1^{in} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [ + predecessor + for predecessor in GM.G1.predecessors(node) + if predecessor not in GM.core_1 + ] + ) + for node in new_nodes: + if node not in GM.in_1: + GM.in_1[node] = self.depth + + # Updates for T_2^{in} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [ + predecessor + for predecessor in GM.G2.predecessors(node) + if predecessor not in GM.core_2 + ] + ) + for node in new_nodes: + if node not in GM.in_2: + GM.in_2[node] = self.depth + + # Updates for T_1^{out} + new_nodes = set() + for node in GM.core_1: + new_nodes.update( + [ + successor + for successor in GM.G1.successors(node) + if successor not in GM.core_1 + ] + ) + for node in new_nodes: + if node not in GM.out_1: + GM.out_1[node] = self.depth + + # Updates for T_2^{out} + new_nodes = set() + for node in GM.core_2: + new_nodes.update( + [ + successor + for successor in GM.G2.successors(node) + if successor not in GM.core_2 + ] + ) + for node in new_nodes: + if node not in GM.out_2: + GM.out_2[node] = self.depth + + def restore(self): + """Deletes the DiGMState object and restores the class variables.""" + + # First we remove the node that was added from the core vectors. + # Watch out! G1_node == 0 should evaluate to True. + if self.G1_node is not None and self.G2_node is not None: + del self.GM.core_1[self.G1_node] + del self.GM.core_2[self.G2_node] + + # Now we revert the other four vectors. + # Thus, we delete all entries which have this depth level. 
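+        # (Each of in_1/in_2/out_1/out_2 maps a node to the search depth at
+        # which it was first reached; the depth recorded by this state
+        # identifies exactly the entries that must be undone here.)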
+        for vector in (self.GM.in_1, self.GM.in_2, self.GM.out_1, self.GM.out_2):
+            for node in list(vector.keys()):
+                if vector[node] == self.depth:
+                    del vector[node]
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/matchhelpers.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/matchhelpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..5239ed77e6cca40fadd538c187838b7fbec65266
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/matchhelpers.py
@@ -0,0 +1,352 @@
+"""Functions which help end users define custom node_match and
+edge_match functions to use during isomorphism checks.
+"""
+import math
+import types
+from itertools import permutations
+
+__all__ = [
+    "categorical_node_match",
+    "categorical_edge_match",
+    "categorical_multiedge_match",
+    "numerical_node_match",
+    "numerical_edge_match",
+    "numerical_multiedge_match",
+    "generic_node_match",
+    "generic_edge_match",
+    "generic_multiedge_match",
+]
+
+
+def copyfunc(f, name=None):
+    """Returns a copy of a function: a new function object reusing the
+    original's code, globals, defaults, and closure."""
+    return types.FunctionType(
+        f.__code__, f.__globals__, name or f.__name__, f.__defaults__, f.__closure__
+    )
+
+
+def allclose(x, y, rtol=1.0000000000000001e-05, atol=1e-08):
+    """Returns True if x and y are sufficiently close, elementwise.
+
+    Parameters
+    ----------
+    rtol : float
+        The relative error tolerance.
+    atol : float
+        The absolute error tolerance.
+
+    """
+    # assume finite weights, see numpy.allclose() for reference
+    return all(math.isclose(xi, yi, rel_tol=rtol, abs_tol=atol) for xi, yi in zip(x, y))
+
+
+categorical_doc = """
+Returns a comparison function for a categorical node attribute.
+
+The value(s) of the attr(s) must be hashable and comparable via the ==
+operator since they are placed into a set([]) object. If the sets from
+G1 and G2 are the same, then the constructed function returns True.
+
+Parameters
+----------
+attr : string | list
+    The categorical node attribute to compare, or a list of categorical
+    node attributes to compare.
+default : value | list
+    The default value for the categorical node attribute, or a list of
+    default values for the categorical node attributes.
+
+Returns
+-------
+match : function
+    The customized, categorical `node_match` function.
+ +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.categorical_node_match("size", 1) +>>> nm = iso.categorical_node_match(["color", "size"], ["red", 2]) + +""" + + +def categorical_node_match(attr, default): + if isinstance(attr, str): + + def match(data1, data2): + return data1.get(attr, default) == data2.get(attr, default) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + return all(data1.get(attr, d) == data2.get(attr, d) for attr, d in attrs) + + return match + + +categorical_edge_match = copyfunc(categorical_node_match, "categorical_edge_match") + + +def categorical_multiedge_match(attr, default): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = {data.get(attr, default) for data in datasets1.values()} + values2 = {data.get(attr, default) for data in datasets2.values()} + return values1 == values2 + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = set() + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.add(x) + values2 = set() + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.add(x) + return values1 == values2 + + return match + + +# Docstrings for categorical functions. +categorical_node_match.__doc__ = categorical_doc +categorical_edge_match.__doc__ = categorical_doc.replace("node", "edge") +tmpdoc = categorical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("categorical_edge_match", "categorical_multiedge_match") +categorical_multiedge_match.__doc__ = tmpdoc + + +numerical_doc = """ +Returns a comparison function for a numerical node attribute. + +The value(s) of the attr(s) must be numerical and sortable. If the +sorted list of values from G1 and G2 are the same within some +tolerance, then the constructed function returns True. + +Parameters +---------- +attr : string | list + The numerical node attribute to compare, or a list of numerical + node attributes to compare. +default : value | list + The default value for the numerical node attribute, or a list of + default values for the numerical node attributes. +rtol : float + The relative error tolerance. +atol : float + The absolute error tolerance. + +Returns +------- +match : function + The customized, numerical `node_match` function. 
+ +Examples +-------- +>>> import networkx.algorithms.isomorphism as iso +>>> nm = iso.numerical_node_match("weight", 1.0) +>>> nm = iso.numerical_node_match(["weight", "linewidth"], [0.25, 0.5]) + +""" + + +def numerical_node_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(data1, data2): + return math.isclose( + data1.get(attr, default), + data2.get(attr, default), + rel_tol=rtol, + abs_tol=atol, + ) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(data1, data2): + values1 = [data1.get(attr, d) for attr, d in attrs] + values2 = [data2.get(attr, d) for attr, d in attrs] + return allclose(values1, values2, rtol=rtol, atol=atol) + + return match + + +numerical_edge_match = copyfunc(numerical_node_match, "numerical_edge_match") + + +def numerical_multiedge_match(attr, default, rtol=1.0000000000000001e-05, atol=1e-08): + if isinstance(attr, str): + + def match(datasets1, datasets2): + values1 = sorted(data.get(attr, default) for data in datasets1.values()) + values2 = sorted(data.get(attr, default) for data in datasets2.values()) + return allclose(values1, values2, rtol=rtol, atol=atol) + + else: + attrs = list(zip(attr, default)) # Python 3 + + def match(datasets1, datasets2): + values1 = [] + for data1 in datasets1.values(): + x = tuple(data1.get(attr, d) for attr, d in attrs) + values1.append(x) + values2 = [] + for data2 in datasets2.values(): + x = tuple(data2.get(attr, d) for attr, d in attrs) + values2.append(x) + values1.sort() + values2.sort() + for xi, yi in zip(values1, values2): + if not allclose(xi, yi, rtol=rtol, atol=atol): + return False + else: + return True + + return match + + +# Docstrings for numerical functions. +numerical_node_match.__doc__ = numerical_doc +numerical_edge_match.__doc__ = numerical_doc.replace("node", "edge") +tmpdoc = numerical_doc.replace("node", "edge") +tmpdoc = tmpdoc.replace("numerical_edge_match", "numerical_multiedge_match") +numerical_multiedge_match.__doc__ = tmpdoc + + +generic_doc = """ +Returns a comparison function for a generic attribute. + +The value(s) of the attr(s) are compared using the specified +operators. If all the attributes are equal, then the constructed +function returns True. + +Parameters +---------- +attr : string | list + The node attribute to compare, or a list of node attributes + to compare. +default : value | list + The default value for the node attribute, or a list of + default values for the node attributes. +op : callable | list + The operator to use when comparing attribute values, or a list + of operators to use when comparing values for each attribute. + +Returns +------- +match : function + The customized, generic `node_match` function. 
+
+Examples
+--------
+>>> import networkx.algorithms.isomorphism as iso
+>>> from operator import eq
+>>> from math import isclose
+>>> from networkx.algorithms.isomorphism import generic_node_match
+>>> nm = generic_node_match("weight", 1.0, isclose)
+>>> nm = generic_node_match("color", "red", eq)
+>>> nm = generic_node_match(["weight", "color"], [1.0, "red"], [isclose, eq])
+
+"""
+
+
+def generic_node_match(attr, default, op):
+    if isinstance(attr, str):
+
+        def match(data1, data2):
+            return op(data1.get(attr, default), data2.get(attr, default))
+
+    else:
+        attrs = list(zip(attr, default, op))  # Python 3
+
+        def match(data1, data2):
+            for attr, d, operator in attrs:
+                if not operator(data1.get(attr, d), data2.get(attr, d)):
+                    return False
+            else:
+                return True
+
+    return match
+
+
+generic_edge_match = copyfunc(generic_node_match, "generic_edge_match")
+
+
+def generic_multiedge_match(attr, default, op):
+    """Returns a comparison function for a generic attribute.
+
+    The value(s) of the attr(s) are compared using the specified
+    operators. If all the attributes are equal, then the constructed
+    function returns True. Potentially, the constructed edge_match
+    function can be slow since it must verify that no isomorphism
+    exists between the multiedges before it returns False.
+
+    Parameters
+    ----------
+    attr : string | list
+        The edge attribute to compare, or a list of edge attributes
+        to compare.
+    default : value | list
+        The default value for the edge attribute, or a list of
+        default values for the edge attributes.
+    op : callable | list
+        The operator to use when comparing attribute values, or a list
+        of operators to use when comparing values for each attribute.
+
+    Returns
+    -------
+    match : function
+        The customized, generic `edge_match` function.
+
+    Examples
+    --------
+    >>> from operator import eq
+    >>> from math import isclose
+    >>> from networkx.algorithms.isomorphism import generic_multiedge_match
+    >>> nm = generic_multiedge_match("weight", 1.0, isclose)
+    >>> nm = generic_multiedge_match("color", "red", eq)
+    >>> nm = generic_multiedge_match(["weight", "color"], [1.0, "red"], [isclose, eq])
+
+    """
+
+    # This is slow, but generic.
+    # We must test every possible isomorphism between the edges.
+    if isinstance(attr, str):
+        attr = [attr]
+        default = [default]
+        op = [op]
+    attrs = list(zip(attr, default))  # Python 3
+
+    def match(datasets1, datasets2):
+        values1 = []
+        for data1 in datasets1.values():
+            x = tuple(data1.get(attr, d) for attr, d in attrs)
+            values1.append(x)
+        values2 = []
+        for data2 in datasets2.values():
+            x = tuple(data2.get(attr, d) for attr, d in attrs)
+            values2.append(x)
+        for vals2 in permutations(values2):
+            for xi, yi in zip(values1, vals2):
+                if not all(map(lambda x, y, z: z(x, y), xi, yi, op)):
+                    # This is not an isomorphism, go to next permutation.
+                    break
+            else:
+                # Then we found an isomorphism.
+                return True
+        else:
+            # Then there are no isomorphisms between the multiedges.
+            return False
+
+    return match
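+
+
+# Illustrative sketch: the factories above are intended to be handed to the
+# matcher classes through their node_match / edge_match keywords, e.g.:
+#
+#     import networkx as nx
+#     from networkx.algorithms import isomorphism as iso
+#
+#     G1, G2 = nx.path_graph(3), nx.path_graph(3)
+#     nm = iso.categorical_node_match("color", "red")
+#     assert iso.GraphMatcher(G1, G2, node_match=nm).is_isomorphic()
+#
+#     M1, M2 = nx.MultiGraph(G1), nx.MultiGraph(G2)
+#     em = iso.numerical_multiedge_match("weight", 1.0)
+#     assert iso.MultiGraphMatcher(M1, M2, edge_match=em).is_isomorphic()
+
+
+# Docstrings for generic functions.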
+generic_node_match.__doc__ = generic_doc +generic_edge_match.__doc__ = generic_doc.replace("node", "edge") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py new file mode 100644 index 0000000000000000000000000000000000000000..62cacc77887efa99026c117687bb9ad82cebd4dd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/temporalisomorphvf2.py @@ -0,0 +1,308 @@ +""" +***************************** +Time-respecting VF2 Algorithm +***************************** + +An extension of the VF2 algorithm for time-respecting graph isomorphism +testing in temporal graphs. + +A temporal graph is one in which edges contain a datetime attribute, +denoting when interaction occurred between the incident nodes. A +time-respecting subgraph of a temporal graph is a subgraph such that +all interactions incident to a node occurred within a time threshold, +delta, of each other. A directed time-respecting subgraph has the +added constraint that incoming interactions to a node must precede +outgoing interactions from the same node - this enforces a sense of +directed flow. + +Introduction +------------ + +The TimeRespectingGraphMatcher and TimeRespectingDiGraphMatcher +extend the GraphMatcher and DiGraphMatcher classes, respectively, +to include temporal constraints on matches. This is achieved through +a semantic check, via the semantic_feasibility() function. + +As well as including G1 (the graph in which to seek embeddings) and +G2 (the subgraph structure of interest), the name of the temporal +attribute on the edges and the time threshold, delta, must be supplied +as arguments to the matching constructors. + +A delta of zero is the strictest temporal constraint on the match - +only embeddings in which all interactions occur at the same time will +be returned. A delta of one day will allow embeddings in which +adjacent interactions occur up to a day apart. + +Examples +-------- + +Examples will be provided when the datetime type has been incorporated. + + +Temporal Subgraph Isomorphism +----------------------------- + +A brief discussion of the somewhat diverse current literature will be +included here. + +References +---------- + +[1] Redmond, U. and Cunningham, P. Temporal subgraph isomorphism. In: +The 2013 IEEE/ACM International Conference on Advances in Social +Networks Analysis and Mining (ASONAM). Niagara Falls, Canada; 2013: +pages 1451 - 1452. [65] + +For a discussion of the literature on temporal networks: + +[3] P. Holme and J. Saramaki. Temporal networks. Physics Reports, +519(3):97–125, 2012. + +Notes +----- + +Handles directed and undirected graphs and graphs with parallel edges. + +""" + +import networkx as nx + +from .isomorphvf2 import DiGraphMatcher, GraphMatcher + +__all__ = ["TimeRespectingGraphMatcher", "TimeRespectingDiGraphMatcher"] + + +class TimeRespectingGraphMatcher(GraphMatcher): + def __init__(self, G1, G2, temporal_attribute_name, delta): + """Initialize TimeRespectingGraphMatcher. + + G1 and G2 should be nx.Graph or nx.MultiGraph instances. + + Examples + -------- + To create a TimeRespectingGraphMatcher which checks for + syntactic and semantic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> from datetime import timedelta + >>> G1 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) + + >>> G2 = nx.Graph(nx.path_graph(4, create_using=nx.Graph())) + + >>> GM = isomorphism.TimeRespectingGraphMatcher( + ... 
G1, G2, "date", timedelta(days=1) + ... ) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def one_hop(self, Gx, Gx_node, neighbors): + """ + Edges one hop out from a node in the mapping should be + time-respecting with respect to each other. + """ + dates = [] + for n in neighbors: + if isinstance(Gx, nx.Graph): # Graph G[u][v] returns the data dictionary. + dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edges between node pair. + dates.append(edge[self.temporal_attribute_name]) + if any(x is None for x in dates): + raise ValueError("Datetime not supplied for at least one edge.") + return not dates or max(dates) - min(dates) <= self.delta + + def two_hop(self, Gx, core_x, Gx_node, neighbors): + """ + Paths of length 2 from Gx_node should be time-respecting. + """ + return all( + self.one_hop(Gx, v, [n for n in Gx[v] if n in core_x] + [Gx_node]) + for v in neighbors + ) + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically + feasible. + + Any subclass which redefines semantic_feasibility() must + maintain the self.tests if needed, to keep the match() method + functional. Implementations should consider multigraphs. + """ + neighbors = [n for n in self.G1[G1_node] if n in self.core_1] + if not self.one_hop(self.G1, G1_node, neighbors): # Fail fast on first node. + return False + if not self.two_hop(self.G1, self.core_1, G1_node, neighbors): + return False + # Otherwise, this node is semantically feasible! + return True + + +class TimeRespectingDiGraphMatcher(DiGraphMatcher): + def __init__(self, G1, G2, temporal_attribute_name, delta): + """Initialize TimeRespectingDiGraphMatcher. + + G1 and G2 should be nx.DiGraph or nx.MultiDiGraph instances. + + Examples + -------- + To create a TimeRespectingDiGraphMatcher which checks for + syntactic and semantic feasibility: + + >>> from networkx.algorithms import isomorphism + >>> from datetime import timedelta + >>> G1 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> G2 = nx.DiGraph(nx.path_graph(4, create_using=nx.DiGraph())) + + >>> GM = isomorphism.TimeRespectingDiGraphMatcher( + ... G1, G2, "date", timedelta(days=1) + ... ) + """ + self.temporal_attribute_name = temporal_attribute_name + self.delta = delta + super().__init__(G1, G2) + + def get_pred_dates(self, Gx, Gx_node, core_x, pred): + """ + Get the dates of edges from predecessors. + """ + pred_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in pred: + pred_dates.append(Gx[n][Gx_node][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in pred: + for edge in Gx[n][ + Gx_node + ].values(): # Iterates all edge data between node pair. + pred_dates.append(edge[self.temporal_attribute_name]) + return pred_dates + + def get_succ_dates(self, Gx, Gx_node, core_x, succ): + """ + Get the dates of edges to successors. + """ + succ_dates = [] + if isinstance(Gx, nx.DiGraph): # Graph G[u][v] returns the data dictionary. + for n in succ: + succ_dates.append(Gx[Gx_node][n][self.temporal_attribute_name]) + else: # MultiGraph G[u][v] returns a dictionary of key -> data dictionary. + for n in succ: + for edge in Gx[Gx_node][ + n + ].values(): # Iterates all edge data between node pair. 
+ succ_dates.append(edge[self.temporal_attribute_name]) + return succ_dates + + def one_hop(self, Gx, Gx_node, core_x, pred, succ): + """ + The ego node. + """ + pred_dates = self.get_pred_dates(Gx, Gx_node, core_x, pred) + succ_dates = self.get_succ_dates(Gx, Gx_node, core_x, succ) + return self.test_one(pred_dates, succ_dates) and self.test_two( + pred_dates, succ_dates + ) + + def two_hop_pred(self, Gx, Gx_node, core_x, pred): + """ + The predecessors of the ego node. + """ + return all( + self.one_hop( + Gx, + p, + core_x, + self.preds(Gx, core_x, p), + self.succs(Gx, core_x, p, Gx_node), + ) + for p in pred + ) + + def two_hop_succ(self, Gx, Gx_node, core_x, succ): + """ + The successors of the ego node. + """ + return all( + self.one_hop( + Gx, + s, + core_x, + self.preds(Gx, core_x, s, Gx_node), + self.succs(Gx, core_x, s), + ) + for s in succ + ) + + def preds(self, Gx, core_x, v, Gx_node=None): + pred = [n for n in Gx.predecessors(v) if n in core_x] + if Gx_node: + pred.append(Gx_node) + return pred + + def succs(self, Gx, core_x, v, Gx_node=None): + succ = [n for n in Gx.successors(v) if n in core_x] + if Gx_node: + succ.append(Gx_node) + return succ + + def test_one(self, pred_dates, succ_dates): + """ + Edges one hop out from Gx_node in the mapping should be + time-respecting with respect to each other, regardless of + direction. + """ + time_respecting = True + dates = pred_dates + succ_dates + + if any(x is None for x in dates): + raise ValueError("Date or datetime not supplied for at least one edge.") + + dates.sort() # Small to large. + if 0 < len(dates) and not (dates[-1] - dates[0] <= self.delta): + time_respecting = False + return time_respecting + + def test_two(self, pred_dates, succ_dates): + """ + Edges from a dual Gx_node in the mapping should be ordered in + a time-respecting manner. + """ + time_respecting = True + pred_dates.sort() + succ_dates.sort() + # First out before last in; negative of the necessary condition for time-respect. + if ( + 0 < len(succ_dates) + and 0 < len(pred_dates) + and succ_dates[0] < pred_dates[-1] + ): + time_respecting = False + return time_respecting + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if adding (G1_node, G2_node) is semantically + feasible. + + Any subclass which redefines semantic_feasibility() must + maintain the self.tests if needed, to keep the match() method + functional. Implementations should consider multigraphs. + """ + pred, succ = ( + [n for n in self.G1.predecessors(G1_node) if n in self.core_1], + [n for n in self.G1.successors(G1_node) if n in self.core_1], + ) + if not self.one_hop( + self.G1, G1_node, self.core_1, pred, succ + ): # Fail fast on first node. + return False + if not self.two_hop_pred(self.G1, G1_node, self.core_1, pred): + return False + if not self.two_hop_succ(self.G1, G1_node, self.core_1, succ): + return False + # Otherwise, this node is semantically feasible! 
+ return True diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5da28f7d58f47e8e5a3e66e10347dfbc3b1ae273 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c676001c706bb99a54a5c71ac57d7d50210c94d0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f19fe88a870f29a62261fe56cf37898d1401801 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b33bf44847f08a66f9462d8dd3ff7070a1be5c8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da3bdb5122a143bbd69de6831c7fea277de17b08 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ff9efaa46c72d11af61f99dfb836c11b9ca3ad4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e011acfacbf49c4145b10926458547028661078e Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41536047295b7231f793830a05c2d59b3a247023 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49d84cda72f30790af0ad02f60873b3b1997e6f5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41063f1f6d3502696c83a8d77f03f24486d94fc4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 new file mode 100644 index 0000000000000000000000000000000000000000..dac54f0038c70e2d359ffa68de3c7641b46db21a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.A99 differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 new file mode 100644 index 0000000000000000000000000000000000000000..6c6af680031b4f30fc6da946d0344b5c27e5f05e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/iso_r01_s80.B99 differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 new file mode 100644 index 0000000000000000000000000000000000000000..60c3a3ce1bdb54a61ead043f0adaf24b1fe24e93 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.A99 differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 new file mode 100644 index 0000000000000000000000000000000000000000..0236872094d4c73b0bb132165ce3ec4d1054f5f5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/si2_b06_m200.B99 differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py new file mode 100644 index 0000000000000000000000000000000000000000..00641519978edd6676396dafcb1d1f33b5490b82 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_ismags.py @@ -0,0 +1,327 @@ +""" + Tests for ISMAGS isomorphism algorithm. 
+""" + +import pytest + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +def _matches_to_sets(matches): + """ + Helper function to facilitate comparing collections of dictionaries in + which order does not matter. + """ + return {frozenset(m.items()) for m in matches} + + +class TestSelfIsomorphism: + data = [ + ( + [ + (0, {"name": "a"}), + (1, {"name": "a"}), + (2, {"name": "b"}), + (3, {"name": "b"}), + (4, {"name": "a"}), + (5, {"name": "a"}), + ], + [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], + ), + (range(1, 5), [(1, 2), (2, 4), (4, 3), (3, 1)]), + ( + [], + [ + (0, 1), + (1, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 0), + (0, 6), + (6, 7), + (2, 8), + (8, 9), + (4, 10), + (10, 11), + ], + ), + ([], [(0, 1), (1, 2), (1, 4), (2, 3), (3, 5), (3, 6)]), + ] + + def test_self_isomorphism(self): + """ + For some small, symmetric graphs, make sure that 1) they are isomorphic + to themselves, and 2) that only the identity mapping is found. + """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + def test_edgecase_self_isomorphism(self): + """ + This edgecase is one of the cases in which it is hard to find all + symmetry elements. + """ + graph = nx.Graph() + nx.add_path(graph, range(5)) + graph.add_edges_from([(2, 5), (5, 6)]) + + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + graph = nx.relabel_nodes(graph, {0: 0, 1: 1, 2: 2, 3: 3, 4: 6, 5: 4, 6: 5}) + ismags = iso.ISMAGS(graph, graph) + ismags_answer = list(ismags.find_isomorphisms(True)) + assert ismags_answer == [{n: n for n in graph.nodes}] + + def test_directed_self_isomorphism(self): + """ + For some small, directed, symmetric graphs, make sure that 1) they are + isomorphic to themselves, and 2) that only the identity mapping is + found. 
+ """ + for node_data, edge_data in self.data: + graph = nx.Graph() + graph.add_nodes_from(node_data) + graph.add_edges_from(edge_data) + + ismags = iso.ISMAGS( + graph, graph, node_match=iso.categorical_node_match("name", None) + ) + assert ismags.is_isomorphic() + assert ismags.subgraph_is_isomorphic() + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in graph.nodes} + ] + + +class TestSubgraphIsomorphism: + def test_isomorphism(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(4)) + + g2 = nx.Graph() + nx.add_cycle(g2, range(4)) + g2.add_edges_from(list(zip(g2, range(4, 8)))) + ismags = iso.ISMAGS(g2, g1) + assert list(ismags.subgraph_isomorphisms_iter(symmetry=True)) == [ + {n: n for n in g1.nodes} + ] + + def test_isomorphism2(self): + g1 = nx.Graph() + nx.add_path(g1, range(3)) + + g2 = g1.copy() + g2.add_edge(1, 3) + + ismags = iso.ISMAGS(g2, g1) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [ + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 3: 2}, + {2: 0, 1: 1, 3: 2}, + ] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [ + {0: 2, 1: 1, 2: 0}, + {0: 2, 1: 1, 3: 0}, + {2: 2, 1: 1, 3: 0}, + ] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_nodes(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.nodes[1]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, node_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{0: 2, 1: 1, 2: 0}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + def test_labeled_edges(self): + g1 = nx.Graph() + nx.add_cycle(g1, range(3)) + g1.edges[1, 2]["attr"] = True + + g2 = g1.copy() + g2.add_edge(1, 3) + ismags = iso.ISMAGS(g2, g1, edge_match=lambda x, y: x == y) + matches = ismags.subgraph_isomorphisms_iter(symmetry=True) + expected_symmetric = [{0: 0, 1: 1, 2: 2}] + assert _matches_to_sets(matches) == _matches_to_sets(expected_symmetric) + + matches = ismags.subgraph_isomorphisms_iter(symmetry=False) + expected_asymmetric = [{1: 2, 0: 0, 2: 1}] + assert _matches_to_sets(matches) == _matches_to_sets( + expected_symmetric + expected_asymmetric + ) + + +class TestWikipediaExample: + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. 
+ # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.ISMAGS(g1, g2) + assert gm.is_isomorphic() + + +class TestLargestCommonSubgraph: + def test_mcis(self): + # Example graphs from DOI: 10.1002/spe.588 + graph1 = nx.Graph() + graph1.add_edges_from([(1, 2), (2, 3), (2, 4), (3, 4), (4, 5)]) + graph1.nodes[1]["color"] = 0 + + graph2 = nx.Graph() + graph2.add_edges_from( + [(1, 2), (2, 3), (2, 4), (3, 4), (3, 5), (5, 6), (5, 7), (6, 7)] + ) + graph2.nodes[1]["color"] = 1 + graph2.nodes[6]["color"] = 2 + graph2.nodes[7]["color"] = 2 + + ismags = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {2: 4, 3: 2, 4: 3, 5: 5}] + ) + assert expected == found_mcis + + ismags = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags.subgraph_isomorphisms_iter(True)) == [] + assert list(ismags.subgraph_isomorphisms_iter(False)) == [] + found_mcis = _matches_to_sets(ismags.largest_common_subgraph()) + # Same answer, but reversed. + expected = _matches_to_sets( + [{2: 2, 3: 4, 4: 3, 5: 5}, {4: 2, 2: 3, 3: 4, 5: 5}] + ) + assert expected == found_mcis + + def test_symmetry_mcis(self): + graph1 = nx.Graph() + nx.add_path(graph1, range(4)) + + graph2 = nx.Graph() + nx.add_path(graph2, range(3)) + graph2.add_edge(1, 3) + + # Only the symmetry of graph2 is taken into account here. + ismags1 = iso.ISMAGS( + graph1, graph2, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags1.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags1.largest_common_subgraph()) + expected = _matches_to_sets([{0: 0, 1: 1, 2: 2}, {1: 0, 3: 2, 2: 1}]) + assert expected == found_mcis + + # Only the symmetry of graph1 is taken into account here. 
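+        # (ISMAGS factors symmetry out of its second, "subgraph" argument, so
+        # swapping the two graphs changes which automorphisms are collapsed
+        # and hence the set of mappings reported.)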
+ ismags2 = iso.ISMAGS( + graph2, graph1, node_match=iso.categorical_node_match("color", None) + ) + assert list(ismags2.subgraph_isomorphisms_iter(True)) == [] + found_mcis = _matches_to_sets(ismags2.largest_common_subgraph()) + expected = _matches_to_sets( + [ + {3: 2, 0: 0, 1: 1}, + {2: 0, 0: 2, 1: 1}, + {3: 0, 0: 2, 1: 1}, + {3: 0, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + {2: 0, 3: 2, 1: 1}, + ] + ) + + assert expected == found_mcis + + found_mcis1 = _matches_to_sets(ismags1.largest_common_subgraph(False)) + found_mcis2 = ismags2.largest_common_subgraph(False) + found_mcis2 = [{v: k for k, v in d.items()} for d in found_mcis2] + found_mcis2 = _matches_to_sets(found_mcis2) + + expected = _matches_to_sets( + [ + {3: 2, 1: 3, 2: 1}, + {2: 0, 0: 2, 1: 1}, + {1: 2, 3: 3, 2: 1}, + {3: 0, 1: 3, 2: 1}, + {0: 2, 2: 3, 1: 1}, + {3: 0, 1: 2, 2: 1}, + {2: 0, 0: 3, 1: 1}, + {0: 0, 2: 3, 1: 1}, + {1: 0, 3: 3, 2: 1}, + {1: 0, 3: 2, 2: 1}, + {0: 3, 1: 1, 2: 2}, + {0: 0, 1: 1, 2: 2}, + ] + ) + assert expected == found_mcis1 + assert expected == found_mcis2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py new file mode 100644 index 0000000000000000000000000000000000000000..c669040390d504be04126efaa69d41134f72c8c9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphism.py @@ -0,0 +1,40 @@ +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +class TestIsomorph: + @classmethod + def setup_class(cls): + cls.G1 = nx.Graph() + cls.G2 = nx.Graph() + cls.G3 = nx.Graph() + cls.G4 = nx.Graph() + cls.G5 = nx.Graph() + cls.G6 = nx.Graph() + cls.G1.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 3]]) + cls.G2.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50]]) + cls.G3.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 5]]) + cls.G4.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 4]]) + cls.G5.add_edges_from([[1, 2], [1, 3]]) + cls.G6.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50], [20, 50]]) + + def test_could_be_isomorphic(self): + assert iso.could_be_isomorphic(self.G1, self.G2) + assert iso.could_be_isomorphic(self.G1, self.G3) + assert not iso.could_be_isomorphic(self.G1, self.G4) + assert iso.could_be_isomorphic(self.G3, self.G2) + assert not iso.could_be_isomorphic(self.G1, self.G6) + + def test_fast_could_be_isomorphic(self): + assert iso.fast_could_be_isomorphic(self.G3, self.G2) + assert not iso.fast_could_be_isomorphic(self.G3, self.G5) + assert not iso.fast_could_be_isomorphic(self.G1, self.G6) + + def test_faster_could_be_isomorphic(self): + assert iso.faster_could_be_isomorphic(self.G3, self.G2) + assert not iso.faster_could_be_isomorphic(self.G3, self.G5) + assert not iso.faster_could_be_isomorphic(self.G1, self.G6) + + def test_is_isomorphic(self): + assert iso.is_isomorphic(self.G1, self.G2) + assert not iso.is_isomorphic(self.G1, self.G4) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py new file mode 100644 index 0000000000000000000000000000000000000000..f658bcae4e3d0fd1945f67fd7e809933d97d4ee9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_isomorphvf2.py @@ -0,0 +1,410 @@ +""" + Tests for VF2 isomorphism algorithm. 
+""" + +import importlib.resources +import os +import random +import struct + +import networkx as nx +from networkx.algorithms import isomorphism as iso + + +class TestWikipediaExample: + # Source: https://en.wikipedia.org/wiki/Graph_isomorphism + + # Nodes 'a', 'b', 'c' and 'd' form a column. + # Nodes 'g', 'h', 'i' and 'j' form a column. + g1edges = [ + ["a", "g"], + ["a", "h"], + ["a", "i"], + ["b", "g"], + ["b", "h"], + ["b", "j"], + ["c", "g"], + ["c", "i"], + ["c", "j"], + ["d", "h"], + ["d", "i"], + ["d", "j"], + ] + + # Nodes 1,2,3,4 form the clockwise corners of a large square. + # Nodes 5,6,7,8 form the clockwise corners of a small square + g2edges = [ + [1, 2], + [2, 3], + [3, 4], + [4, 1], + [5, 6], + [6, 7], + [7, 8], + [8, 5], + [1, 5], + [2, 6], + [3, 7], + [4, 8], + ] + + def test_graph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + gm = iso.GraphMatcher(g1, g2) + assert gm.is_isomorphic() + # Just testing some cases + assert gm.subgraph_is_monomorphic() + + mapping = sorted(gm.mapping.items()) + + # this mapping is only one of the possibilities + # so this test needs to be reconsidered + # isomap = [('a', 1), ('b', 6), ('c', 3), ('d', 8), + # ('g', 2), ('h', 5), ('i', 4), ('j', 7)] + # assert_equal(mapping, isomap) + + def test_subgraph(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from(self.g2edges) + g3 = g2.subgraph([1, 2, 3, 4]) + gm = iso.GraphMatcher(g1, g3) + assert gm.subgraph_is_isomorphic() + + def test_subgraph_mono(self): + g1 = nx.Graph() + g2 = nx.Graph() + g1.add_edges_from(self.g1edges) + g2.add_edges_from([[1, 2], [2, 3], [3, 4]]) + gm = iso.GraphMatcher(g1, g2) + assert gm.subgraph_is_monomorphic() + + +class TestVF2GraphDB: + # https://web.archive.org/web/20090303210205/http://amalfi.dis.unina.it/graph/db/ + + @staticmethod + def create_graph(filename): + """Creates a Graph instance from the filename.""" + + # The file is assumed to be in the format from the VF2 graph database. + # Each file is composed of 16-bit numbers (unsigned short int). + # So we will want to read 2 bytes at a time. 
+ + # We can read the number as follows: + # number = struct.unpack(' 0 + assert check_isomorphism(t1, t2, isomorphism) + + +# run positive_single_tree over all the +# non-isomorphic trees for k from 4 to maxk +# k = 4 is the first level that has more than 1 non-isomorphic tree +# k = 13 takes about 2.86 seconds to run on my laptop +# larger values run slow down significantly +# as the number of trees grows rapidly +def test_positive(maxk=14): + print("positive test") + + for k in range(2, maxk + 1): + start_time = time.time() + trial = 0 + for t in nx.nonisomorphic_trees(k): + positive_single_tree(t) + trial += 1 + print(k, trial, time.time() - start_time) + + +# test the trivial case of a single node in each tree +# note that nonisomorphic_trees doesn't work for k = 1 +def test_trivial(): + print("trivial test") + + # back to an undirected graph + t1 = nx.Graph() + t1.add_node("a") + root1 = "a" + + t2 = nx.Graph() + t2.add_node("n") + root2 = "n" + + isomorphism = rooted_tree_isomorphism(t1, root1, t2, root2) + + assert isomorphism == [("a", "n")] + + assert check_isomorphism(t1, t2, isomorphism) + + +# test another trivial case where the two graphs have +# different numbers of nodes +def test_trivial_2(): + print("trivial test 2") + + edges_1 = [("a", "b"), ("a", "c")] + + edges_2 = [("v", "y")] + + t1 = nx.Graph() + t1.add_edges_from(edges_1) + + t2 = nx.Graph() + t2.add_edges_from(edges_2) + + isomorphism = tree_isomorphism(t1, t2) + + # they cannot be isomorphic, + # since they have different numbers of nodes + assert isomorphism == [] + + +# the function nonisomorphic_trees generates all the non-isomorphic +# trees of a given size. Take each pair of these and verify that +# they are not isomorphic +# k = 4 is the first level that has more than 1 non-isomorphic tree +# k = 11 takes about 4.76 seconds to run on my laptop +# larger values run slow down significantly +# as the number of trees grows rapidly +def test_negative(maxk=11): + print("negative test") + + for k in range(4, maxk + 1): + test_trees = list(nx.nonisomorphic_trees(k)) + start_time = time.time() + trial = 0 + for i in range(len(test_trees) - 1): + for j in range(i + 1, len(test_trees)): + trial += 1 + assert tree_isomorphism(test_trees[i], test_trees[j]) == [] + print(k, trial, time.time() - start_time) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp.py new file mode 100644 index 0000000000000000000000000000000000000000..5f3fb901cc8afdc2a73cfb7001538db49371eaf4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp.py @@ -0,0 +1,1608 @@ +import itertools as it + +import pytest + +import networkx as nx +from networkx import vf2pp_is_isomorphic, vf2pp_isomorphism + +labels_same = ["blue"] + +labels_many = [ + "white", + "red", + "blue", + "green", + "orange", + "black", + "purple", + "yellow", + "brown", + "cyan", + "solarized", + "pink", + "none", +] + + +class TestPreCheck: + def test_first_graph_empty(self): + G1 = nx.Graph() + G2 = nx.Graph([(0, 1), (1, 2)]) + assert not vf2pp_is_isomorphic(G1, G2) + + def test_second_graph_empty(self): + G1 = nx.Graph([(0, 1), (1, 2)]) + G2 = nx.Graph() + assert not vf2pp_is_isomorphic(G1, G2) + + def test_different_order1(self): + G1 = nx.path_graph(5) + G2 = nx.path_graph(6) + assert not vf2pp_is_isomorphic(G1, G2) + + def test_different_order2(self): + G1 = nx.barbell_graph(100, 20) + G2 = nx.barbell_graph(101, 20) + assert not 
vf2pp_is_isomorphic(G1, G2) + + def test_different_order3(self): + G1 = nx.complete_graph(7) + G2 = nx.complete_graph(8) + assert not vf2pp_is_isomorphic(G1, G2) + + def test_different_degree_sequences1(self): + G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (0, 4)]) + G2 = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (0, 4), (2, 5)]) + assert not vf2pp_is_isomorphic(G1, G2) + + G2.remove_node(3) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label") + + assert vf2pp_is_isomorphic(G1, G2) + + def test_different_degree_sequences2(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (0, 2), + (2, 3), + (3, 4), + (4, 5), + (5, 6), + (6, 3), + (4, 7), + (7, 8), + (8, 3), + ] + ) + G2 = G1.copy() + G2.add_edge(8, 0) + assert not vf2pp_is_isomorphic(G1, G2) + + G1.add_edge(6, 1) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label") + + assert vf2pp_is_isomorphic(G1, G2) + + def test_different_degree_sequences3(self): + G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)]) + G2 = nx.Graph( + [(0, 1), (0, 6), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)] + ) + assert not vf2pp_is_isomorphic(G1, G2) + + G1.add_edge(3, 5) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(["a"]))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle("a"))), "label") + + assert vf2pp_is_isomorphic(G1, G2) + + def test_label_distribution(self): + G1 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)]) + G2 = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3), (2, 4), (3, 4), (2, 5), (2, 6)]) + + colors1 = ["blue", "blue", "blue", "yellow", "black", "purple", "purple"] + colors2 = ["blue", "blue", "yellow", "yellow", "black", "purple", "purple"] + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(colors1[::-1]))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(colors2[::-1]))), "label") + + assert not vf2pp_is_isomorphic(G1, G2, node_label="label") + G2.nodes[3]["label"] = "blue" + assert vf2pp_is_isomorphic(G1, G2, node_label="label") + + +class TestAllGraphTypesEdgeCases: + @pytest.mark.parametrize("graph_type", (nx.Graph, nx.MultiGraph, nx.DiGraph)) + def test_both_graphs_empty(self, graph_type): + G = graph_type() + H = graph_type() + assert vf2pp_isomorphism(G, H) is None + + G.add_node(0) + + assert vf2pp_isomorphism(G, H) is None + assert vf2pp_isomorphism(H, G) is None + + H.add_node(0) + assert vf2pp_isomorphism(G, H) == {0: 0} + + @pytest.mark.parametrize("graph_type", (nx.Graph, nx.MultiGraph, nx.DiGraph)) + def test_first_graph_empty(self, graph_type): + G = graph_type() + H = graph_type([(0, 1)]) + assert vf2pp_isomorphism(G, H) is None + + @pytest.mark.parametrize("graph_type", (nx.Graph, nx.MultiGraph, nx.DiGraph)) + def test_second_graph_empty(self, graph_type): + G = graph_type([(0, 1)]) + H = graph_type() + assert vf2pp_isomorphism(G, H) is None + + +class TestGraphISOVF2pp: + def test_custom_graph1_same_labels(self): + G1 = nx.Graph() + + mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"} + edges1 = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 6), (3, 4), (5, 1), (5, 2)] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Add edge making G1 
symmetrical + G1.add_edge(3, 7) + G1.nodes[7]["label"] = "blue" + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Make G2 isomorphic to G1 + G2.add_edges_from([(mapped[3], "X"), (mapped[6], mapped[5])]) + G1.add_edge(4, 7) + G2.nodes["X"]["label"] = "blue" + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Re-structure maintaining isomorphism + G1.remove_edges_from([(1, 4), (1, 3)]) + G2.remove_edges_from([(mapped[1], mapped[5]), (mapped[1], mapped[2])]) + assert vf2pp_isomorphism(G1, G2, node_label="label") + + def test_custom_graph1_different_labels(self): + G1 = nx.Graph() + + mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"} + edges1 = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 6), (3, 4), (5, 1), (5, 2)] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + def test_custom_graph2_same_labels(self): + G1 = nx.Graph() + + mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"} + edges1 = [(1, 2), (1, 5), (5, 6), (2, 3), (2, 4), (3, 4), (4, 5), (2, 7)] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Obtain two isomorphic subgraphs from the graph + G2.remove_edge(mapped[1], mapped[2]) + G2.add_edge(mapped[1], mapped[4]) + H1 = nx.Graph(G1.subgraph([2, 3, 4, 7])) + H2 = nx.Graph(G2.subgraph([mapped[1], mapped[4], mapped[5], mapped[6]])) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + # Add edges maintaining isomorphism + H1.add_edges_from([(3, 7), (4, 7)]) + H2.add_edges_from([(mapped[1], mapped[6]), (mapped[4], mapped[6])]) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + def test_custom_graph2_different_labels(self): + G1 = nx.Graph() + + mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"} + edges1 = [(1, 2), (1, 5), (5, 6), (2, 3), (2, 4), (3, 4), (4, 5), (2, 7)] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + + # Adding new nodes + G1.add_node(0) + G2.add_node("Z") + G1.nodes[0]["label"] = G1.nodes[1]["label"] + G2.nodes["Z"]["label"] = G1.nodes[1]["label"] + mapped.update({0: "Z"}) + + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + # Change the color of one of the nodes + G2.nodes["Z"]["label"] = G1.nodes[2]["label"] + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Add an extra edge + G1.nodes[0]["label"] = "blue" + G2.nodes["Z"]["label"] = "blue" + G1.add_edge(0, 1) + + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Add extra edge to both + G2.add_edge("Z", "A") + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + def test_custom_graph3_same_labels(self): + G1 = nx.Graph() + + mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2} + edges1 = [ + (1, 2), + (1, 3), + (2, 3), + (3, 4), + (4, 5), + (4, 7), + (4, 9), + (5, 8), + (8, 9), + (5, 6), + (6, 7), + (5, 2), + ] + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + 
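+        # nx.relabel_nodes returns an isomorphic copy of G1, so `mapped` is a
+        # known-correct isomorphism between G1 and G2 for the checks below.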
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Connect nodes maintaining symmetry + G1.add_edges_from([(6, 9), (7, 8)]) + G2.add_edges_from([(mapped[6], mapped[8]), (mapped[7], mapped[9])]) + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Make isomorphic + G1.add_edges_from([(6, 8), (7, 9)]) + G2.add_edges_from([(mapped[6], mapped[9]), (mapped[7], mapped[8])]) + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Connect more nodes + G1.add_edges_from([(2, 7), (3, 6)]) + G2.add_edges_from([(mapped[2], mapped[7]), (mapped[3], mapped[6])]) + G1.add_node(10) + G2.add_node("Z") + G1.nodes[10]["label"] = "blue" + G2.nodes["Z"]["label"] = "blue" + + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Connect the newly added node, to opposite sides of the graph + G1.add_edges_from([(10, 1), (10, 5), (10, 8)]) + G2.add_edges_from([("Z", mapped[1]), ("Z", mapped[4]), ("Z", mapped[9])]) + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Get two subgraphs that are not isomorphic but are easy to make + H1 = nx.Graph(G1.subgraph([2, 3, 4, 5, 6, 7, 10])) + H2 = nx.Graph( + G2.subgraph( + [mapped[4], mapped[5], mapped[6], mapped[7], mapped[8], mapped[9], "Z"] + ) + ) + assert vf2pp_isomorphism(H1, H2, node_label="label") is None + + # Restructure both to make them isomorphic + H1.add_edges_from([(10, 2), (10, 6), (3, 6), (2, 7), (2, 6), (3, 7)]) + H2.add_edges_from( + [("Z", mapped[7]), (mapped[6], mapped[9]), (mapped[7], mapped[8])] + ) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + # Add edges with opposite direction in each Graph + H1.add_edge(3, 5) + H2.add_edge(mapped[5], mapped[7]) + assert vf2pp_isomorphism(H1, H2, node_label="label") is None + + def test_custom_graph3_different_labels(self): + G1 = nx.Graph() + + mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2} + edges1 = [ + (1, 2), + (1, 3), + (2, 3), + (3, 4), + (4, 5), + (4, 7), + (4, 9), + (5, 8), + (8, 9), + (5, 6), + (6, 7), + (5, 2), + ] + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + # Add extra edge to G1 + G1.add_edge(1, 7) + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Compensate in G2 + G2.add_edge(9, 1) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + # Add extra node + G1.add_node("A") + G2.add_node("K") + G1.nodes["A"]["label"] = "green" + G2.nodes["K"]["label"] = "green" + mapped.update({"A": "K"}) + + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + # Connect A to one side of G1 and K to the opposite + G1.add_edge("A", 6) + G2.add_edge("K", 5) + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Make the graphs symmetrical + G1.add_edge(1, 5) + G1.add_edge(2, 9) + G2.add_edge(9, 3) + G2.add_edge(8, 4) + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Assign same colors so the two opposite sides are identical + for node in G1.nodes(): + color = "red" + G1.nodes[node]["label"] = color + G2.nodes[mapped[node]]["label"] = color + + assert vf2pp_isomorphism(G1, G2, node_label="label") + + def test_custom_graph4_different_labels(self): + G1 = 
nx.Graph() + edges1 = [ + (1, 2), + (2, 3), + (3, 8), + (3, 4), + (4, 5), + (4, 6), + (3, 6), + (8, 7), + (8, 9), + (5, 9), + (10, 11), + (11, 12), + (12, 13), + (11, 13), + ] + + mapped = { + 1: "n", + 2: "m", + 3: "l", + 4: "j", + 5: "k", + 6: "i", + 7: "g", + 8: "h", + 9: "f", + 10: "b", + 11: "a", + 12: "d", + 13: "e", + } + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + def test_custom_graph4_same_labels(self): + G1 = nx.Graph() + edges1 = [ + (1, 2), + (2, 3), + (3, 8), + (3, 4), + (4, 5), + (4, 6), + (3, 6), + (8, 7), + (8, 9), + (5, 9), + (10, 11), + (11, 12), + (12, 13), + (11, 13), + ] + + mapped = { + 1: "n", + 2: "m", + 3: "l", + 4: "j", + 5: "k", + 6: "i", + 7: "g", + 8: "h", + 9: "f", + 10: "b", + 11: "a", + 12: "d", + 13: "e", + } + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Add nodes of different label + G1.add_node(0) + G2.add_node("z") + G1.nodes[0]["label"] = "green" + G2.nodes["z"]["label"] = "blue" + + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Make the labels identical + G2.nodes["z"]["label"] = "green" + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Change the structure of the graphs, keeping them isomorphic + G1.add_edge(2, 5) + G2.remove_edge("i", "l") + G2.add_edge("g", "l") + G2.add_edge("m", "f") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Change the structure of the disconnected sub-graph, keeping it isomorphic + G1.remove_node(13) + G2.remove_node("d") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Connect the newly added node to the disconnected graph, which now is just a path of size 3 + G1.add_edge(0, 10) + G2.add_edge("e", "z") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Connect the two disconnected sub-graphs, forming a single graph + G1.add_edge(11, 3) + G1.add_edge(0, 8) + G2.add_edge("a", "l") + G2.add_edge("z", "j") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + def test_custom_graph5_same_labels(self): + G1 = nx.Graph() + edges1 = [ + (1, 5), + (1, 2), + (1, 4), + (2, 3), + (2, 6), + (3, 4), + (3, 7), + (4, 8), + (5, 8), + (5, 6), + (6, 7), + (7, 8), + ] + mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"} + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Add different edges in each graph, maintaining symmetry + G1.add_edges_from([(3, 6), (2, 7), (2, 5), (1, 3), (4, 7), (6, 8)]) + G2.add_edges_from( + [ + (mapped[6], mapped[3]), + (mapped[2], mapped[7]), + (mapped[1], mapped[6]), + (mapped[5], mapped[7]), + (mapped[3], mapped[8]), + (mapped[2], mapped[4]), + ] + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") + + # Obtain two different but isomorphic subgraphs from G1 and G2 + H1 = nx.Graph(G1.subgraph([1, 5, 8, 6, 7, 3])) + H2 = nx.Graph( + G2.subgraph( + [mapped[1], mapped[4], mapped[8], mapped[7], 
mapped[3], mapped[5]] + ) + ) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + # Delete corresponding node from the two graphs + H1.remove_node(8) + H2.remove_node(mapped[7]) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + # Re-orient, maintaining isomorphism + H1.add_edge(1, 6) + H1.remove_edge(3, 6) + assert vf2pp_isomorphism(H1, H2, node_label="label") + + def test_custom_graph5_different_labels(self): + G1 = nx.Graph() + edges1 = [ + (1, 5), + (1, 2), + (1, 4), + (2, 3), + (2, 6), + (3, 4), + (3, 7), + (4, 8), + (5, 8), + (5, 6), + (6, 7), + (7, 8), + ] + mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"} + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + colors = ["red", "blue", "grey", "none", "brown", "solarized", "yellow", "pink"] + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + # Assign different colors to matching nodes + c = 0 + for node in G1.nodes(): + color1 = colors[c] + color2 = colors[(c + 3) % len(colors)] + G1.nodes[node]["label"] = color1 + G2.nodes[mapped[node]]["label"] = color2 + c += 1 + + assert vf2pp_isomorphism(G1, G2, node_label="label") is None + + # Get symmetrical sub-graphs of G1,G2 and compare them + H1 = G1.subgraph([1, 5]) + H2 = G2.subgraph(["i", "c"]) + c = 0 + for node1, node2 in zip(H1.nodes(), H2.nodes()): + H1.nodes[node1]["label"] = "red" + H2.nodes[node2]["label"] = "red" + c += 1 + + assert vf2pp_isomorphism(H1, H2, node_label="label") + + def test_disconnected_graph_all_same_labels(self): + G1 = nx.Graph() + G1.add_nodes_from(list(range(10))) + + mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0} + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + assert vf2pp_isomorphism(G1, G2, node_label="label") + + def test_disconnected_graph_all_different_labels(self): + G1 = nx.Graph() + G1.add_nodes_from(list(range(10))) + + mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0} + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + assert vf2pp_isomorphism(G1, G2, node_label="label") == mapped + + def test_disconnected_graph_some_same_labels(self): + G1 = nx.Graph() + G1.add_nodes_from(list(range(10))) + + mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0} + G2 = nx.relabel_nodes(G1, mapped) + + colors = [ + "white", + "white", + "white", + "purple", + "purple", + "red", + "red", + "pink", + "pink", + "pink", + ] + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(colors))), "label") + nx.set_node_attributes( + G2, dict(zip([mapped[n] for n in G1], it.cycle(colors))), "label" + ) + + assert vf2pp_isomorphism(G1, G2, node_label="label") + + +class TestMultiGraphISOVF2pp: + def test_custom_multigraph1_same_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"} + edges1 = [ + (1, 2), + (1, 3), + (1, 4), + (1, 4), + (1, 4), + (2, 3), + (2, 6), + (2, 6), + (3, 4), + (3, 4), + (5, 1), + (5, 1), + (5, 2), + (5, 2), + ] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + 
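# For MultiGraphs, vf2pp must also match edge multiplicities: removing one of a pair of parallel edges, or one self-loop, is enough to break an isomorphism, as the checks below exercise. +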
nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Transfer the 2-clique to the right side of G1 + G1.remove_edges_from([(2, 6), (2, 6)]) + G1.add_edges_from([(3, 6), (3, 6)]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Delete one edge from each graph, making them symmetrical, so the position of the 2-clique doesn't matter + G2.remove_edge(mapped[1], mapped[4]) + G1.remove_edge(1, 4) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Add self-loops to G1 only + G1.add_edges_from([(5, 5), (5, 5), (1, 1)]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Compensate in G2 + G2.add_edges_from( + [(mapped[1], mapped[1]), (mapped[4], mapped[4]), (mapped[4], mapped[4])] + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + def test_custom_multigraph1_different_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: "A", 2: "B", 3: "C", 4: "D", 5: "Z", 6: "E"} + edges1 = [ + (1, 2), + (1, 3), + (1, 4), + (1, 4), + (1, 4), + (2, 3), + (2, 6), + (2, 6), + (3, 4), + (3, 4), + (5, 1), + (5, 1), + (5, 2), + (5, 2), + ] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Re-structure G1, maintaining the degree sequence + G1.remove_edge(1, 4) + G1.add_edge(1, 5) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Restructure G2, making it isomorphic to G1 + G2.remove_edge("A", "D") + G2.add_edge("A", "Z") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Add edges from a node to itself + G1.add_edges_from([(6, 6), (6, 6), (6, 6)]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Same for G2 + G2.add_edges_from([("E", "E"), ("E", "E"), ("E", "E")]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + def test_custom_multigraph2_same_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"} + edges1 = [ + (1, 2), + (1, 2), + (1, 5), + (1, 5), + (1, 5), + (5, 6), + (2, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 4), + (4, 5), + (4, 5), + (4, 5), + (2, 7), + (2, 7), + (2, 7), + ] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Obtain two non-isomorphic subgraphs from the graph + G2.remove_edges_from([(mapped[1], mapped[2]), (mapped[1], mapped[2])]) + G2.add_edge(mapped[1], mapped[4]) + H1 = nx.MultiGraph(G1.subgraph([2, 3, 4, 7])) + H2 = nx.MultiGraph(G2.subgraph([mapped[1], mapped[4], mapped[5], mapped[6]])) + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Make them isomorphic + H1.remove_edge(3, 4) + H1.add_edges_from([(2, 3), (2, 4), (2, 4)]) + H2.add_edges_from([(mapped[5], mapped[6]), (mapped[5], mapped[6])]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Remove the parallel triangle edges + H1.remove_edges_from([(2, 3), (2, 3), (2, 3)]) +
H2.remove_edges_from([(mapped[5], mapped[4])] * 3) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Change the edge orientation such that H1 is rotated H2 + H1.remove_edges_from([(2, 7), (2, 7)]) + H1.add_edges_from([(3, 4), (3, 4)]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Add extra edges maintaining degree sequence, but in a non-symmetrical manner + H2.add_edge(mapped[5], mapped[1]) + H1.add_edge(3, 4) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + def test_custom_multigraph2_different_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: "A", 2: "C", 3: "D", 4: "E", 5: "G", 7: "B", 6: "F"} + edges1 = [ + (1, 2), + (1, 2), + (1, 5), + (1, 5), + (1, 5), + (5, 6), + (2, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 4), + (4, 5), + (4, 5), + (4, 5), + (2, 7), + (2, 7), + (2, 7), + ] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Re-structure G1 + G1.remove_edge(2, 7) + G1.add_edge(5, 6) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Same for G2 + G2.remove_edge("B", "C") + G2.add_edge("G", "F") + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Delete node from G1 and G2, keeping them isomorphic + G1.remove_node(3) + G2.remove_node("D") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Change G1 edges + G1.remove_edge(1, 2) + G1.remove_edge(2, 7) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Make G2 identical to G1, but with different edge orientation and different labels + G2.add_edges_from([("A", "C"), ("C", "E"), ("C", "E")]) + G2.remove_edges_from( + [("A", "G"), ("A", "G"), ("F", "G"), ("E", "G"), ("E", "G")] + ) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Make all labels the same, so G1 and G2 are also isomorphic + for n1, n2 in zip(G1.nodes(), G2.nodes()): + G1.nodes[n1]["label"] = "blue" + G2.nodes[n2]["label"] = "blue" + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + def test_custom_multigraph3_same_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2} + edges1 = [ + (1, 2), + (1, 3), + (1, 3), + (2, 3), + (2, 3), + (3, 4), + (4, 5), + (4, 7), + (4, 9), + (4, 9), + (4, 9), + (5, 8), + (5, 8), + (8, 9), + (8, 9), + (5, 6), + (6, 7), + (6, 7), + (6, 7), + (5, 2), + ] + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Connect nodes maintaining symmetry + G1.add_edges_from([(6, 9), (7, 8), (5, 8), (4, 9), (4, 9)]) + G2.add_edges_from( + [ + (mapped[6], mapped[8]), + (mapped[7], mapped[9]), + (mapped[5], mapped[8]), + (mapped[4], mapped[9]), + (mapped[4], mapped[9]), + ] + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Make isomorphic + G1.add_edges_from([(6, 8), (6, 8), (7, 9), (7, 9), (7, 9)]) + G2.add_edges_from( + [ + (mapped[6], mapped[8]), + (mapped[6], mapped[9]), + (mapped[7], mapped[8]), + (mapped[7], mapped[9]), + (mapped[7], mapped[9]), + ] + ) + m = 
vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Connect more nodes + G1.add_edges_from([(2, 7), (2, 7), (3, 6), (3, 6)]) + G2.add_edges_from( + [ + (mapped[2], mapped[7]), + (mapped[2], mapped[7]), + (mapped[3], mapped[6]), + (mapped[3], mapped[6]), + ] + ) + G1.add_node(10) + G2.add_node("Z") + G1.nodes[10]["label"] = "blue" + G2.nodes["Z"]["label"] = "blue" + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Connect the newly added node to opposite sides of the graph + G1.add_edges_from([(10, 1), (10, 5), (10, 8), (10, 10), (10, 10)]) + G2.add_edges_from( + [ + ("Z", mapped[1]), + ("Z", mapped[4]), + ("Z", mapped[9]), + ("Z", "Z"), + ("Z", "Z"), + ] + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # The new node was connected to opposite sides of the two graphs, so they are no longer isomorphic. Re-structure them so that G1 is again symmetrical to G2 + G1.remove_edges_from([(1, 3), (4, 9), (4, 9), (7, 9)]) + G2.remove_edges_from( + [ + (mapped[1], mapped[3]), + (mapped[4], mapped[9]), + (mapped[4], mapped[9]), + (mapped[7], mapped[9]), + ] + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Get two subgraphs that are not isomorphic but are easy to make isomorphic + H1 = nx.Graph(G1.subgraph([2, 3, 4, 5, 6, 7, 10])) + H2 = nx.Graph( + G2.subgraph( + [mapped[4], mapped[5], mapped[6], mapped[7], mapped[8], mapped[9], "Z"] + ) + ) + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Restructure both to make them isomorphic + H1.add_edges_from([(10, 2), (10, 6), (3, 6), (2, 7), (2, 6), (3, 7)]) + H2.add_edges_from( + [("Z", mapped[7]), (mapped[6], mapped[9]), (mapped[7], mapped[8])] + ) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Remove one self-loop in H2 + H2.remove_edge("Z", "Z") + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Compensate in H1 + H1.remove_edge(10, 10) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + def test_custom_multigraph3_different_labels(self): + G1 = nx.MultiGraph() + + mapped = {1: 9, 2: 8, 3: 7, 4: 6, 5: 3, 8: 5, 9: 4, 7: 1, 6: 2} + edges1 = [ + (1, 2), + (1, 3), + (1, 3), + (2, 3), + (2, 3), + (3, 4), + (4, 5), + (4, 7), + (4, 9), + (4, 9), + (4, 9), + (5, 8), + (5, 8), + (8, 9), + (8, 9), + (5, 6), + (6, 7), + (6, 7), + (6, 7), + (5, 2), + ] + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Delete the corresponding edge from each graph, maintaining isomorphism + G1.remove_edge(4, 9) + G2.remove_edge(4, 6) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Re-arrange edges so that G1 mirrors G2 + G1.add_edges_from([(4, 9), (1, 2), (1, 2)]) + G1.remove_edges_from([(1, 3), (1, 3)]) + G2.add_edges_from([(3, 5), (7, 9)]) + G2.remove_edge(8, 9) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Make all labels the same, so G1 and G2 are also isomorphic + for n1, n2 in zip(G1.nodes(), G2.nodes()): + G1.nodes[n1]["label"] = "blue" + G2.nodes[n2]["label"] = "blue" + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + G1.add_node(10) + G2.add_node("Z") + G1.nodes[10]["label"] = "green" + G2.nodes["Z"]["label"] = "green" + + # Add a different number of self-loops to each of the two new nodes + G1.add_edges_from([(10, 10),
(10, 10)]) + G2.add_edges_from([("Z", "Z")]) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Make the number of self-edges equal + G1.remove_edge(10, 10) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Connect the new node to the graph + G1.add_edges_from([(10, 3), (10, 4)]) + G2.add_edges_from([("Z", 8), ("Z", 3)]) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Remove central node + G1.remove_node(4) + G2.remove_node(3) + G1.add_edges_from([(5, 6), (5, 6), (5, 7)]) + G2.add_edges_from([(1, 6), (1, 6), (6, 2)]) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + def test_custom_multigraph4_same_labels(self): + G1 = nx.MultiGraph() + edges1 = [ + (1, 2), + (1, 2), + (2, 2), + (2, 3), + (3, 8), + (3, 8), + (3, 4), + (4, 5), + (4, 5), + (4, 5), + (4, 6), + (3, 6), + (3, 6), + (6, 6), + (8, 7), + (7, 7), + (8, 9), + (9, 9), + (8, 9), + (8, 9), + (5, 9), + (10, 11), + (11, 12), + (12, 13), + (11, 13), + (10, 10), + (10, 11), + (11, 13), + ] + + mapped = { + 1: "n", + 2: "m", + 3: "l", + 4: "j", + 5: "k", + 6: "i", + 7: "g", + 8: "h", + 9: "f", + 10: "b", + 11: "a", + 12: "d", + 13: "e", + } + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Add extra but corresponding edges to both graphs + G1.add_edges_from([(2, 2), (2, 3), (2, 8), (3, 4)]) + G2.add_edges_from([("m", "m"), ("m", "l"), ("m", "h"), ("l", "j")]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Obtain subgraphs + H1 = nx.MultiGraph(G1.subgraph([2, 3, 4, 6, 10, 11, 12, 13])) + H2 = nx.MultiGraph( + G2.subgraph( + [ + mapped[2], + mapped[3], + mapped[8], + mapped[9], + mapped[10], + mapped[11], + mapped[12], + mapped[13], + ] + ) + ) + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Make them isomorphic + H2.remove_edges_from( + [(mapped[3], mapped[2]), (mapped[9], mapped[8]), (mapped[2], mapped[2])] + ) + H2.add_edges_from([(mapped[9], mapped[9]), (mapped[2], mapped[8])]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Re-structure the disconnected sub-graph + H1.remove_node(12) + H2.remove_node(mapped[12]) + H1.add_edge(13, 13) + H2.add_edge(mapped[13], mapped[13]) + + # Connect the two disconnected components, forming a single graph + H1.add_edges_from([(3, 13), (6, 11)]) + H2.add_edges_from([(mapped[8], mapped[10]), (mapped[2], mapped[11])]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Change orientation of self-loops in one graph, maintaining the degree sequence + H1.remove_edges_from([(2, 2), (3, 6)]) + H1.add_edges_from([(6, 6), (2, 3)]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + def test_custom_multigraph4_different_labels(self): + G1 = nx.MultiGraph() + edges1 = [ + (1, 2), + (1, 2), + (2, 2), + (2, 3), + (3, 8), + (3, 8), + (3, 4), + (4, 5), + (4, 5), + (4, 5), + (4, 6), + (3, 6), + (3, 6), + (6, 6), + (8, 7), + (7, 7), + (8, 9), + (9, 9), + (8, 9), + (8, 9), + (5, 9), + (10, 11), + (11, 12), + (12, 13), + (11, 13), + ] + + mapped = { + 1: "n", + 2: "m", + 3: "l", + 4: "j", + 5: "k", + 6: "i", + 7: "g", + 8: "h", + 9: "f", + 10: "b", + 11: "a", + 12: "d", + 13: "e", + } + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, 
it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m == mapped + + # Add extra but corresponding edges to both graphs + G1.add_edges_from([(2, 2), (2, 3), (2, 8), (3, 4)]) + G2.add_edges_from([("m", "m"), ("m", "l"), ("m", "h"), ("l", "j")]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m == mapped + + # Obtain isomorphic subgraphs + H1 = nx.MultiGraph(G1.subgraph([2, 3, 4, 6])) + H2 = nx.MultiGraph(G2.subgraph(["m", "l", "j", "i"])) + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Delete the 3-clique, keeping only the path graph; H1 now mirrors H2 + H1.remove_node(4) + H2.remove_node("j") + H1.remove_edges_from([(2, 2), (2, 3), (6, 6)]) + H2.remove_edges_from([("l", "i"), ("m", "m"), ("m", "m")]) + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Assign the same labels, so that the mirrored graphs become isomorphic + for n1, n2 in zip(H1.nodes(), H2.nodes()): + H1.nodes[n1]["label"] = "red" + H2.nodes[n2]["label"] = "red" + + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Leave only one node with a self-loop + H1.remove_nodes_from([3, 6]) + H2.remove_nodes_from(["m", "l"]) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Remove one self-loop from H1 + H1.remove_edge(2, 2) + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert not m + + # Same for H2 + H2.remove_edge("i", "i") + m = vf2pp_isomorphism(H1, H2, node_label="label") + assert m + + # Compose H1 with the disconnected sub-graph of G1. Same for H2 + S1 = nx.compose(H1, nx.MultiGraph(G1.subgraph([10, 11, 12, 13]))) + S2 = nx.compose(H2, nx.MultiGraph(G2.subgraph(["a", "b", "d", "e"]))) + + m = vf2pp_isomorphism(S1, S2, node_label="label") + assert m + + # Connect the two components; the new self-loops land on non-corresponding nodes, breaking the isomorphism + S1.add_edges_from([(13, 13), (13, 13), (2, 13)]) + S2.add_edges_from([("a", "a"), ("a", "a"), ("i", "e")]) + m = vf2pp_isomorphism(S1, S2, node_label="label") + assert not m + + def test_custom_multigraph5_same_labels(self): + G1 = nx.MultiGraph() + + edges1 = [ + (1, 5), + (1, 2), + (1, 4), + (2, 3), + (2, 6), + (3, 4), + (3, 7), + (4, 8), + (5, 8), + (5, 6), + (6, 7), + (7, 8), + ] + mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"} + + G1.add_edges_from(edges1) + G2 = nx.relabel_nodes(G1, mapped) + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Add multiple edges and self-loops, maintaining isomorphism + G1.add_edges_from( + [(1, 2), (1, 2), (3, 7), (8, 8), (8, 8), (7, 8), (2, 3), (5, 6)] + ) + G2.add_edges_from( + [ + ("a", "h"), + ("a", "h"), + ("d", "j"), + ("c", "c"), + ("c", "c"), + ("j", "c"), + ("d", "h"), + ("g", "b"), + ] + ) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Make G2 a rotated version of G1 + G2.remove_edges_from( + [ + ("a", "h"), + ("a", "h"), + ("d", "j"), + ("c", "c"), + ("c", "c"), + ("j", "c"), + ("d", "h"), + ("g", "b"), + ] + ) + G2.add_edges_from( + [ + ("d", "i"), + ("a", "h"), + ("g", "b"), + ("g", "b"), + ("i", "i"), + ("i", "i"), + ("b", "j"), + ("d", "j"), + ] + ) + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + def test_disconnected_multigraph_all_same_labels(self): + G1 = nx.MultiGraph() + G1.add_nodes_from(list(range(10))) +
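# The graph is edgeless apart from one self-loop per node, so any bijection that preserves loop counts is an isomorphism. +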
G1.add_edges_from([(i, i) for i in range(10)]) + + mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0} + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_same))), "label") + nx.set_node_attributes(G2, dict(zip(G2, it.cycle(labels_same))), "label") + + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Add extra self-loops in G1 only; the graphs are no longer isomorphic + G1.add_edges_from([(i, i) for i in range(5, 8)] * 3) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Compensate in G2 on non-corresponding nodes. That is enough here: the graph is disconnected and all labels are equal, so only the multiset of loop counts matters + G2.add_edges_from([(i, i) for i in range(3)] * 3) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + # Add three more self-loops in G2 only + G2.add_edges_from([(0, 0), (1, 1), (1, 1)]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Compensate in G1 + G1.add_edges_from([(5, 5), (7, 7), (7, 7)]) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + def test_disconnected_multigraph_all_different_labels(self): + G1 = nx.MultiGraph() + G1.add_nodes_from(list(range(10))) + G1.add_edges_from([(i, i) for i in range(10)]) + + mapped = {0: 9, 1: 8, 2: 7, 3: 6, 4: 5, 5: 4, 6: 3, 7: 2, 8: 1, 9: 0} + G2 = nx.relabel_nodes(G1, mapped) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip([mapped[n] for n in G1], it.cycle(labels_many))), + "label", + ) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + assert m == mapped + + # Add self-loops in G1 only. With distinct labels, compensating on non-corresponding nodes will not work + G1.add_edges_from([(i, i) for i in range(5, 8)] * 3) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Add self-loops to non-corresponding nodes in G2 as well + G2.add_edges_from([(mapped[i], mapped[i]) for i in range(3)] * 7) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Add self-loops to the corresponding nodes in G2 + G2.add_edges_from([(mapped[i], mapped[i]) for i in range(5, 8)] * 3) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert not m + + # Add self-loops to G1 so that the loop counts on corresponding nodes match in both graphs + G1.add_edges_from([(i, i) for i in range(3)] * 7) + m = vf2pp_isomorphism(G1, G2, node_label="label") + assert m + + +class TestDiGraphISOVF2pp: + def test_wikipedia_graph(self): + edges1 = [ + (1, 5), + (1, 2), + (1, 4), + (3, 2), + (6, 2), + (3, 4), + (7, 3), + (4, 8), + (5, 8), + (6, 5), + (6, 7), + (7, 8), + ] + mapped = {1: "a", 2: "h", 3: "d", 4: "i", 5: "g", 6: "b", 7: "j", 8: "c"} + + G1 = nx.DiGraph(edges1) + G2 = nx.relabel_nodes(G1, mapped) + + assert vf2pp_isomorphism(G1, G2) == mapped + + # Change the direction of an edge + G1.remove_edge(1, 5) + G1.add_edge(5, 1) + assert vf2pp_isomorphism(G1, G2) is None + + def test_non_isomorphic_same_degree_sequence(self): + r""" + G1 G2 + x--------------x x--------------x + | \ | | \ | + | x-------x | | x-------x | + | | | | | | | | + | x-------x | | x-------x | + | / | | \ | + x--------------x x--------------x + """ + edges1 = [ + (1, 5), + (1, 2), + (4, 1), + (3, 2), + (3, 4), + (4, 8), + (5, 8), + (6, 5), + (6, 7), + (7, 8), + ] + edges2 = [ + (1, 5), + (1, 2), + (4, 1), + (3, 2), + (4, 3), + (5, 8), + (6, 5), + (6, 7), + (3, 7), + (8, 7), + ] + + G1 = nx.DiGraph(edges1) + G2 = nx.DiGraph(edges2) + assert vf2pp_isomorphism(G1, G2) is None diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py
b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..1a46c38f5012045f21d3fe5cc5058246ad312167 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py @@ -0,0 +1,3103 @@ +import itertools as it + +import pytest + +import networkx as nx +from networkx import vf2pp_is_isomorphic, vf2pp_isomorphism +from networkx.algorithms.isomorphism.vf2pp import ( + _consistent_PT, + _cut_PT, + _feasibility, + _find_candidates, + _find_candidates_Di, + _GraphParameters, + _initialize_parameters, + _matching_order, + _restore_Tinout, + _restore_Tinout_Di, + _StateParameters, + _update_Tinout, +) + +labels_same = ["blue"] + +labels_many = [ + "white", + "red", + "blue", + "green", + "orange", + "black", + "purple", + "yellow", + "brown", + "cyan", + "solarized", + "pink", + "none", +] + + +class TestNodeOrdering: + def test_empty_graph(self): + G1 = nx.Graph() + G2 = nx.Graph() + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + assert len(set(_matching_order(gparams))) == 0 + + def test_single_node(self): + G1 = nx.Graph() + G2 = nx.Graph() + G1.add_node(1) + G2.add_node(1) + + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels_many))), "label") + nx.set_node_attributes( + G2, + dict(zip(G2, it.cycle(labels_many))), + "label", + ) + l1, l2 = nx.get_node_attributes(G1, "label"), nx.get_node_attributes( + G2, "label" + ) + + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + m = _matching_order(gparams) + assert m == [1] + + def test_matching_order(self): + labels = [ + "blue", + "blue", + "red", + "red", + "red", + "red", + "green", + "green", + "green", + "yellow", + "purple", + "purple", + "blue", + "blue", + ] + G1 = nx.Graph( + [ + (0, 1), + (0, 2), + (1, 2), + (2, 5), + (2, 4), + (1, 3), + (1, 4), + (3, 6), + (4, 6), + (6, 7), + (7, 8), + (9, 10), + (9, 11), + (11, 12), + (11, 13), + (12, 13), + (10, 13), + ] + ) + G2 = G1.copy() + nx.set_node_attributes(G1, dict(zip(G1, it.cycle(labels))), "label") + nx.set_node_attributes( + G2, + dict(zip(G2, it.cycle(labels))), + "label", + ) + l1, l2 = nx.get_node_attributes(G1, "label"), nx.get_node_attributes( + G2, "label" + ) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + expected = [9, 11, 10, 13, 12, 1, 2, 4, 0, 3, 6, 5, 7, 8] + assert _matching_order(gparams) == expected + + def test_matching_order_all_branches(self): + G1 = nx.Graph( + [(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 4), (3, 4)] + ) + G1.add_node(5) + G2 = G1.copy() + + G1.nodes[0]["label"] = "black" + G1.nodes[1]["label"] = "blue" + G1.nodes[2]["label"] = "blue" + G1.nodes[3]["label"] = "red" + G1.nodes[4]["label"] = "red" + G1.nodes[5]["label"] = "blue" + + G2.nodes[0]["label"] = "black" + G2.nodes[1]["label"] = "blue" + G2.nodes[2]["label"] = "blue" + G2.nodes[3]["label"] = "red" + G2.nodes[4]["label"] = "red" + G2.nodes[5]["label"] = "blue" + + l1, l2 = nx.get_node_attributes(G1, "label"), nx.get_node_attributes( + G2, "label" + ) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + expected = [0, 4, 1, 3, 2, 5] + assert _matching_order(gparams) == expected + + +class TestGraphCandidateSelection: + G1_edges = [ + (1, 2), + (1, 4), 
+ (1, 5), + (2, 3), + (2, 4), + (3, 4), + (4, 5), + (1, 6), + (6, 7), + (6, 8), + (8, 9), + (7, 9), + ] + mapped = { + 0: "x", + 1: "a", + 2: "b", + 3: "c", + 4: "d", + 5: "e", + 6: "f", + 7: "g", + 8: "h", + 9: "i", + } + + def test_no_covered_neighbors_no_labels(self): + G1 = nx.Graph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = dict(G1.degree) + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1 = {7, 8, 2, 4, 5} + T1_tilde = {0, 3, 6} + T2 = {"g", "h", "b", "d", "e"} + T2_tilde = {"x", "c", "f"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 3 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 0 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + m.pop(9) + m_rev.pop(self.mapped[9]) + + T1 = {2, 4, 5, 6} + T1_tilde = {0, 3, 7, 8, 9} + T2 = {"g", "h", "b", "d", "e", "f"} + T2_tilde = {"x", "c", "g", "h", "i"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 7 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == { + self.mapped[u], + self.mapped[8], + self.mapped[3], + self.mapped[9], + } + + def test_no_covered_neighbors_with_labels(self): + G1 = nx.Graph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = dict(G1.degree) + nx.set_node_attributes( + G1, + dict(zip(G1, it.cycle(labels_many))), + "label", + ) + nx.set_node_attributes( + G2, + dict( + zip( + [self.mapped[n] for n in G1], + it.cycle(labels_many), + ) + ), + "label", + ) + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1 = {7, 8, 2, 4, 5, 6} + T1_tilde = {0, 3} + T2 = {"g", "h", "b", "d", "e", "f"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 3 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 0 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + # Change label of disconnected node + G1.nodes[u]["label"] = "blue" + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + # No candidate + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == set() + + m.pop(9) + m_rev.pop(self.mapped[9]) + + T1 = {2, 4, 5, 6} + T1_tilde = {0, 3, 7, 8, 9} + T2 = {"b", "d", "e", "f"} + T2_tilde = {"x", "c", "g", "h", "i"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 7 + candidates = _find_candidates(u, gparams, sparams, 
G1_degree) + assert candidates == {self.mapped[u]} + + G1.nodes[8]["label"] = G1.nodes[7]["label"] + G2.nodes[self.mapped[8]]["label"] = G1.nodes[7]["label"] + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[8]} + + def test_covered_neighbors_no_labels(self): + G1 = nx.Graph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = dict(G1.degree) + l1 = dict(G1.nodes(data=None, default=-1)) + l2 = dict(G2.nodes(data=None, default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1 = {7, 8, 2, 4, 5, 6} + T1_tilde = {0, 3} + T2 = {"g", "h", "b", "d", "e", "f"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 5 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 6 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[2]} + + def test_covered_neighbors_with_labels(self): + G1 = nx.Graph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = dict(G1.degree) + nx.set_node_attributes( + G1, + dict(zip(G1, it.cycle(labels_many))), + "label", + ) + nx.set_node_attributes( + G2, + dict( + zip( + [self.mapped[n] for n in G1], + it.cycle(labels_many), + ) + ), + "label", + ) + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1 = {7, 8, 2, 4, 5, 6} + T1_tilde = {0, 3} + T2 = {"g", "h", "b", "d", "e", "f"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + u = 5 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 6 + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + # Assign to 2, the same label as 6 + G1.nodes[2]["label"] = G1.nodes[u]["label"] + G2.nodes[self.mapped[2]]["label"] = G1.nodes[u]["label"] + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups(dict(G2.degree())), + ) + + candidates = _find_candidates(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[2]} + + +class TestDiGraphCandidateSelection: + G1_edges = [ + (1, 2), + (1, 4), + (5, 1), + (2, 3), + (4, 2), + (3, 4), + (4, 5), + (1, 6), + (6, 7), + (6, 8), + (8, 9), + (7, 9), + ] + mapped = { + 0: "x", + 1: "a", + 2: "b", + 3: "c", + 4: "d", + 5: "e", + 6: "f", + 7: "g", + 8: "h", + 9: "i", + } + + def test_no_covered_neighbors_no_labels(self): + G1 = nx.DiGraph() + 
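# For directed graphs, candidate selection keys on (in_degree, out_degree) pairs rather than plain degree; G1_degree below is built accordingly from both degree views. +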
G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1_out = {2, 4, 6} + T1_in = {5, 7, 8} + T1_tilde = {0, 3} + T2_out = {"b", "d", "f"} + T2_in = {"e", "g", "h"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 3 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 0 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + m.pop(9) + m_rev.pop(self.mapped[9]) + + T1_out = {2, 4, 6} + T1_in = {5} + T1_tilde = {0, 3, 7, 8, 9} + T2_out = {"b", "d", "f"} + T2_in = {"e"} + T2_tilde = {"x", "c", "g", "h", "i"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 7 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[8], self.mapped[3]} + + def test_no_covered_neighbors_with_labels(self): + G1 = nx.DiGraph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + nx.set_node_attributes( + G1, + dict(zip(G1, it.cycle(labels_many))), + "label", + ) + nx.set_node_attributes( + G2, + dict( + zip( + [self.mapped[n] for n in G1], + it.cycle(labels_many), + ) + ), + "label", + ) + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1_out = {2, 4, 6} + T1_in = {5, 7, 8} + T1_tilde = {0, 3} + T2_out = {"b", "d", "f"} + T2_in = {"e", "g", "h"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 3 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 0 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + # Change label of disconnected node + G1.nodes[u]["label"] = "blue" + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + # No candidate + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert 
candidates == set() + + m.pop(9) + m_rev.pop(self.mapped[9]) + + T1_out = {2, 4, 6} + T1_in = {5} + T1_tilde = {0, 3, 7, 8, 9} + T2_out = {"b", "d", "f"} + T2_in = {"e"} + T2_tilde = {"x", "c", "g", "h", "i"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 7 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + G1.nodes[8]["label"] = G1.nodes[7]["label"] + G2.nodes[self.mapped[8]]["label"] = G1.nodes[7]["label"] + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[8]} + + def test_covered_neighbors_no_labels(self): + G1 = nx.DiGraph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + + l1 = dict(G1.nodes(data=None, default=-1)) + l2 = dict(G2.nodes(data=None, default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1_out = {2, 4, 6} + T1_in = {5, 7, 8} + T1_tilde = {0, 3} + T2_out = {"b", "d", "f"} + T2_in = {"e", "g", "h"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 5 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 6 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + # Change the direction of an edge to make the degree orientation same as first candidate of u. 
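+ # Reversing (4, 2) to (2, 4) below gives node 2 the in/out degree pair (1, 2), the same as node 6, which is why node 2 joins the candidate set for u = 6.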
+ G1.remove_edge(4, 2) + G1.add_edge(2, 4) + G2.remove_edge("d", "b") + G2.add_edge("b", "d") + + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[2]} + + def test_covered_neighbors_with_labels(self): + G1 = nx.DiGraph() + G1.add_edges_from(self.G1_edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, self.mapped) + + G1.remove_edge(4, 2) + G1.add_edge(2, 4) + G2.remove_edge("d", "b") + G2.add_edge("b", "d") + + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + + nx.set_node_attributes( + G1, + dict(zip(G1, it.cycle(labels_many))), + "label", + ) + nx.set_node_attributes( + G2, + dict( + zip( + [self.mapped[n] for n in G1], + it.cycle(labels_many), + ) + ), + "label", + ) + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + m = {9: self.mapped[9], 1: self.mapped[1]} + m_rev = {self.mapped[9]: 9, self.mapped[1]: 1} + + T1_out = {2, 4, 6} + T1_in = {5, 7, 8} + T1_tilde = {0, 3} + T2_out = {"b", "d", "f"} + T2_in = {"e", "g", "h"} + T2_tilde = {"x", "c"} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 5 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + u = 6 + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + # Assign to 2, the same label as 6 + G1.nodes[2]["label"] = G1.nodes[u]["label"] + G2.nodes[self.mapped[2]]["label"] = G1.nodes[u]["label"] + l1 = dict(G1.nodes(data="label", default=-1)) + l2 = dict(G2.nodes(data="label", default=-1)) + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u], self.mapped[2]} + + # Change the direction of an edge to make the degree orientation same as first candidate of u. 
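+ # Restoring the original (4, 2) orientation breaks the (in, out) degree match between node 2 and node 6, so node 2 drops back out of the candidate set even though it shares u's label.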
+ G1.remove_edge(2, 4) + G1.add_edge(4, 2) + G2.remove_edge("b", "d") + G2.add_edge("d", "b") + + gparams = _GraphParameters( + G1, + G2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + G2.in_degree(), G2.out_degree() + ) + } + ), + ) + + candidates = _find_candidates_Di(u, gparams, sparams, G1_degree) + assert candidates == {self.mapped[u]} + + def test_same_in_out_degrees_no_candidate(self): + g1 = nx.DiGraph([(4, 1), (4, 2), (3, 4), (5, 4), (6, 4)]) + g2 = nx.DiGraph([(1, 4), (2, 4), (3, 4), (4, 5), (4, 6)]) + + l1 = dict(g1.nodes(data=None, default=-1)) + l2 = dict(g2.nodes(data=None, default=-1)) + gparams = _GraphParameters( + g1, + g2, + l1, + l2, + nx.utils.groups(l1), + nx.utils.groups(l2), + nx.utils.groups( + { + node: (in_degree, out_degree) + for (node, in_degree), (_, out_degree) in zip( + g2.in_degree(), g2.out_degree() + ) + } + ), + ) + + g1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(g1.in_degree, g1.out_degree) + } + + m = {1: 1, 2: 2, 3: 3} + m_rev = m.copy() + + T1_out = {4} + T1_in = {4} + T1_tilde = {5, 6} + T2_out = {4} + T2_in = {4} + T2_tilde = {5, 6} + + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + u = 4 + # despite the same in and out degree, there's no candidate for u=4 + candidates = _find_candidates_Di(u, gparams, sparams, g1_degree) + assert candidates == set() + # Notice how the regular candidate selection method returns wrong result. + assert _find_candidates(u, gparams, sparams, g1_degree) == {4} + + +class TestGraphISOFeasibility: + def test_const_covered_neighbors(self): + G1 = nx.Graph([(0, 1), (1, 2), (3, 0), (3, 2)]) + G2 = nx.Graph([("a", "b"), ("b", "c"), ("k", "a"), ("k", "c")]) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_no_covered_neighbors(self): + G1 = nx.Graph([(0, 1), (1, 2), (3, 4), (3, 5)]) + G2 = nx.Graph([("a", "b"), ("b", "c"), ("k", "w"), ("k", "z")]) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_mixed_covered_uncovered_neighbors(self): + G1 = nx.Graph([(0, 1), (1, 2), (3, 0), (3, 2), (3, 4), (3, 5)]) + G2 = nx.Graph( + [("a", "b"), ("b", "c"), ("k", "a"), ("k", "c"), ("k", "w"), ("k", "z")] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_fail_cases(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (10, 0), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (4, 1), + (5, 3), + ] + ) + G2 = nx.Graph( + [ + ("a", "b"), + ("b", "c"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("k", "f"), + ("k", "g"), + ("e", "b"), + ("f", "d"), + ] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: 
"d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 10, "k" + assert _consistent_PT(u, v, gparams, sparams) + + # Delete one uncovered neighbor of u. Notice how it still passes the test. + # Two reasons for this: + # 1. If u, v had different degrees from the beginning, they wouldn't + # be selected as candidates in the first place. + # 2. Even if they are selected, consistency is basically 1-look-ahead, + # meaning that we take into consideration the relation of the + # candidates with their mapped neighbors. The node we deleted is + # not a covered neighbor. + # Such nodes will be checked by the cut_PT function, which is + # basically the 2-look-ahead, checking the relation of the + # candidates with T1, T2 (in which belongs the node we just deleted). + G1.remove_node(6) + assert _consistent_PT(u, v, gparams, sparams) + + # Add one more covered neighbor of u in G1 + G1.add_edge(u, 2) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G2 + G2.add_edge(v, "c") + assert _consistent_PT(u, v, gparams, sparams) + + # Add one more covered neighbor of v in G2 + G2.add_edge(v, "x") + G1.add_node(7) + sparams.mapping.update({7: "x"}) + sparams.reverse_mapping.update({"x": 7}) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compendate in G1 + G1.add_edge(u, 7) + assert _consistent_PT(u, v, gparams, sparams) + + @pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) + def test_cut_inconsistent_labels(self, graph_type): + G1 = graph_type( + [ + (0, 1), + (1, 2), + (10, 0), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (4, 1), + (5, 3), + ] + ) + G2 = graph_type( + [ + ("a", "b"), + ("b", "c"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("k", "f"), + ("k", "g"), + ("e", "b"), + ("f", "d"), + ] + ) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + l1.update({6: "green"}) # Change the label of one neighbor of u + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + + u, v = 10, "k" + assert _cut_PT(u, v, gparams, sparams) + + def test_cut_consistent_labels(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (10, 0), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (4, 1), + (5, 3), + ] + ) + G2 = nx.Graph( + [ + ("a", "b"), + ("b", "c"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("k", "f"), + ("k", "g"), + ("e", "b"), + ("f", "d"), + ] + ) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5}, + None, + {6}, + None, + {"e", "f"}, + None, + {"g"}, + None, + ) + + u, v = 10, "k" + assert not _cut_PT(u, v, gparams, sparams) + + def test_cut_same_labels(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (10, 0), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (4, 1), + (5, 3), + ] + ) + mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"} + G2 = nx.relabel_nodes(G1, mapped) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: 
"d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5}, + None, + {6}, + None, + {"e", "f"}, + None, + {"g"}, + None, + ) + + u, v = 10, "k" + assert not _cut_PT(u, v, gparams, sparams) + + # Change intersection between G1[u] and T1, so it's not the same as the one between G2[v] and T2 + G1.remove_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + # Compensate in G2 + G2.remove_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + # Change intersection between G2[v] and T2_tilde, so it's not the same as the one between G1[u] and T1_tilde + G2.remove_edge(v, mapped[6]) + assert _cut_PT(u, v, gparams, sparams) + + # Compensate in G1 + G1.remove_edge(u, 6) + assert not _cut_PT(u, v, gparams, sparams) + + # Add disconnected nodes, which will form the new Ti_out + G1.add_nodes_from([6, 7, 8]) + G2.add_nodes_from(["g", "y", "z"]) + sparams.T1_tilde.update({6, 7, 8}) + sparams.T2_tilde.update({"g", "y", "z"}) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + + assert not _cut_PT(u, v, gparams, sparams) + + # Add some new nodes to the mapping + sparams.mapping.update({6: "g", 7: "y"}) + sparams.reverse_mapping.update({"g": 6, "y": 7}) + + # Add more nodes to T1, T2. + G1.add_edges_from([(6, 20), (7, 20), (6, 21)]) + G2.add_edges_from([("g", "i"), ("g", "j"), ("y", "j")]) + + sparams.mapping.update({20: "j", 21: "i"}) + sparams.reverse_mapping.update({"j": 20, "i": 21}) + sparams.T1.update({20, 21}) + sparams.T2.update({"i", "j"}) + sparams.T1_tilde.difference_update({6, 7}) + sparams.T2_tilde.difference_update({"g", "y"}) + + assert not _cut_PT(u, v, gparams, sparams) + + # Add nodes from the new T1 and T2, as neighbors of u and v respectively + G1.add_edges_from([(u, 20), (u, 21)]) + G2.add_edges_from([(v, "i"), (v, "j")]) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + + assert not _cut_PT(u, v, gparams, sparams) + + # Change the edges, maintaining the G1[u]-T1 intersection + G1.remove_edge(u, 20) + G1.add_edge(u, 4) + assert not _cut_PT(u, v, gparams, sparams) + + # Connect u to 8 which is still in T1_tilde + G1.add_edge(u, 8) + assert _cut_PT(u, v, gparams, sparams) + + # Same for v and z, so that inters(G1[u], T1out) == inters(G2[v], T2out) + G2.add_edge(v, "z") + assert not _cut_PT(u, v, gparams, sparams) + + def test_cut_different_labels(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 15), + (20, 12), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 3), + (20, 5), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + + l1 = {n: "none" for n in G1.nodes()} + l2 = {} + + l1.update( + { + 9: "blue", + 15: "blue", + 12: "blue", + 11: "green", + 3: "green", + 8: "red", + 0: "red", + 5: "yellow", + } + ) + l2.update({mapped[n]: l for n, l in l1.items()}) + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, 
+ {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change the orientation of the labels on neighbors of u compared to neighbors of v. Leave the structure intact + l1.update({9: "red"}) + assert _cut_PT(u, v, gparams, sparams) + + # compensate in G2 + l2.update({mapped[9]: "red"}) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G1[u] and T1 + G1.add_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G2[v] and T2 + G2.add_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G2[v] and T2_tilde + G2.remove_edge(v, mapped[8]) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G1[u] and T1_tilde + G1.remove_edge(u, 8) + assert not _cut_PT(u, v, gparams, sparams) + + # Place 8 and mapped[8] in T1 and T2 respectively, by connecting it to covered nodes + G1.add_edge(8, 3) + G2.add_edge(mapped[8], mapped[3]) + sparams.T1.add(8) + sparams.T2.add(mapped[8]) + sparams.T1_tilde.remove(8) + sparams.T2_tilde.remove(mapped[8]) + + assert not _cut_PT(u, v, gparams, sparams) + + # Remove neighbor of u from T1 + G1.remove_node(5) + l1.pop(5) + sparams.T1.remove(5) + assert _cut_PT(u, v, gparams, sparams) + + # Same in G2 + G2.remove_node(mapped[5]) + l2.pop(mapped[5]) + sparams.T2.remove(mapped[5]) + assert not _cut_PT(u, v, gparams, sparams) + + def test_feasibility_same_labels(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 15), + (20, 12), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 2), + (20, 5), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {mapped[n]: "blue" for n in G1.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change structure in G2 such that, ONLY consistency is harmed + G2.remove_edge(mapped[20], mapped[2]) + G2.add_edge(mapped[20], mapped[3]) + + # Consistency check fails, while the cutting rules are satisfied! 
+ assert not _cut_PT(u, v, gparams, sparams) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G1 and make it consistent + G1.remove_edge(20, 2) + G1.add_edge(20, 3) + assert not _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + # ONLY fail the cutting check + G2.add_edge(v, mapped[10]) + assert _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + def test_feasibility_different_labels(self): + G1 = nx.Graph( + [ + (0, 1), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 15), + (20, 12), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 2), + (20, 5), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + + l1 = {n: "none" for n in G1.nodes()} + l2 = {} + + l1.update( + { + 9: "blue", + 15: "blue", + 12: "blue", + 11: "green", + 2: "green", + 8: "red", + 0: "red", + 5: "yellow", + } + ) + l2.update({mapped[n]: l for n, l in l1.items()}) + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change structure in G2 such that, ONLY consistency is harmed + G2.remove_edge(mapped[20], mapped[2]) + G2.add_edge(mapped[20], mapped[3]) + l2.update({mapped[3]: "green"}) + + # Consistency check fails, while the cutting rules are satisfied! 
+ assert not _cut_PT(u, v, gparams, sparams) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G1 and make it consistent + G1.remove_edge(20, 2) + G1.add_edge(20, 3) + l1.update({3: "green"}) + assert not _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + # ONLY fail the cutting check + l1.update({5: "red"}) + assert _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + +class TestMultiGraphISOFeasibility: + def test_const_covered_neighbors(self): + G1 = nx.MultiGraph( + [(0, 1), (0, 1), (1, 2), (3, 0), (3, 0), (3, 0), (3, 2), (3, 2)] + ) + G2 = nx.MultiGraph( + [ + ("a", "b"), + ("a", "b"), + ("b", "c"), + ("k", "a"), + ("k", "a"), + ("k", "a"), + ("k", "c"), + ("k", "c"), + ] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_no_covered_neighbors(self): + G1 = nx.MultiGraph([(0, 1), (0, 1), (1, 2), (3, 4), (3, 4), (3, 5)]) + G2 = nx.MultiGraph([("a", "b"), ("b", "c"), ("k", "w"), ("k", "w"), ("k", "z")]) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_mixed_covered_uncovered_neighbors(self): + G1 = nx.MultiGraph( + [(0, 1), (1, 2), (3, 0), (3, 0), (3, 0), (3, 2), (3, 2), (3, 4), (3, 5)] + ) + G2 = nx.MultiGraph( + [ + ("a", "b"), + ("b", "c"), + ("k", "a"), + ("k", "a"), + ("k", "a"), + ("k", "c"), + ("k", "c"), + ("k", "w"), + ("k", "z"), + ] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_fail_cases(self): + G1 = nx.MultiGraph( + [ + (0, 1), + (1, 2), + (10, 0), + (10, 0), + (10, 0), + (10, 3), + (10, 3), + (10, 4), + (10, 5), + (10, 6), + (10, 6), + (4, 1), + (5, 3), + ] + ) + mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"} + G2 = nx.relabel_nodes(G1, mapped) + + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 10, "k" + assert _consistent_PT(u, v, gparams, sparams) + + # Delete one uncovered neighbor of u. Notice how it still passes the test. Two reasons for this: + # 1. If u, v had different degrees from the beginning, they wouldn't be selected as candidates in the first + # place. + # 2. Even if they are selected, consistency is basically 1-look-ahead, meaning that we take into consideration + # the relation of the candidates with their mapped neighbors. The node we deleted is not a covered neighbor. + # Such nodes will be checked by the cut_PT function, which is basically the 2-look-ahead, checking the + # relation of the candidates with T1, T2 (in which belongs the node we just deleted). 
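+        # In this particular test the T1/T2 and T1_tilde/T2_tilde sets are
+        # left as None, so _cut_PT is never exercised here; only the
+        # 1-look-ahead consistency check is.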
+        G1.remove_node(6)
+        assert _consistent_PT(u, v, gparams, sparams)
+
+        # Add one more covered neighbor of u in G1
+        G1.add_edge(u, 2)
+        assert not _consistent_PT(u, v, gparams, sparams)
+
+        # Compensate in G2
+        G2.add_edge(v, "c")
+        assert _consistent_PT(u, v, gparams, sparams)
+
+        # Add one more covered neighbor of v in G2
+        G2.add_edge(v, "x")
+        G1.add_node(7)
+        sparams.mapping.update({7: "x"})
+        sparams.reverse_mapping.update({"x": 7})
+        assert not _consistent_PT(u, v, gparams, sparams)
+
+        # Compensate in G1
+        G1.add_edge(u, 7)
+        assert _consistent_PT(u, v, gparams, sparams)
+
+        # Delete an edge between u and a covered neighbor
+        G1.remove_edges_from([(u, 0), (u, 0)])
+        assert not _consistent_PT(u, v, gparams, sparams)
+
+        # Compensate in G2
+        G2.remove_edges_from([(v, mapped[0]), (v, mapped[0])])
+        assert _consistent_PT(u, v, gparams, sparams)
+
+        # Remove an edge between v and a covered neighbor
+        G2.remove_edge(v, mapped[3])
+        assert not _consistent_PT(u, v, gparams, sparams)
+
+        # Compensate in G1
+        G1.remove_edge(u, 3)
+        assert _consistent_PT(u, v, gparams, sparams)
+
+    def test_cut_same_labels(self):
+        G1 = nx.MultiGraph(
+            [
+                (0, 1),
+                (1, 2),
+                (10, 0),
+                (10, 0),
+                (10, 0),
+                (10, 3),
+                (10, 3),
+                (10, 4),
+                (10, 4),
+                (10, 5),
+                (10, 5),
+                (10, 5),
+                (10, 5),
+                (10, 6),
+                (10, 6),
+                (4, 1),
+                (5, 3),
+            ]
+        )
+        mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"}
+        G2 = nx.relabel_nodes(G1, mapped)
+        l1 = {n: "blue" for n in G1.nodes()}
+        l2 = {n: "blue" for n in G2.nodes()}
+
+        gparams = _GraphParameters(
+            G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
+        )
+        sparams = _StateParameters(
+            {0: "a", 1: "b", 2: "c", 3: "d"},
+            {"a": 0, "b": 1, "c": 2, "d": 3},
+            {4, 5},
+            None,
+            {6},
+            None,
+            {"e", "f"},
+            None,
+            {"g"},
+            None,
+        )
+
+        u, v = 10, "k"
+        assert not _cut_PT(u, v, gparams, sparams)
+
+        # Remove one of the multiple edges between u and a neighbor
+        G1.remove_edge(u, 4)
+        assert _cut_PT(u, v, gparams, sparams)
+
+        # Compensate in G2
+        G1.remove_edge(u, 4)
+        G2.remove_edges_from([(v, mapped[4]), (v, mapped[4])])
+        assert not _cut_PT(u, v, gparams, sparams)
+
+        # Change intersection between G2[v] and T2_tilde, so it's not the same as the one between G1[u] and T1_tilde
+        G2.remove_edge(v, mapped[6])
+        assert _cut_PT(u, v, gparams, sparams)
+
+        # Compensate in G1
+        G1.remove_edge(u, 6)
+        assert not _cut_PT(u, v, gparams, sparams)
+
+        # Add more edges between u and a neighbor that belongs to T1_tilde
+        G1.add_edges_from([(u, 5), (u, 5), (u, 5)])
+        assert _cut_PT(u, v, gparams, sparams)
+
+        # Compensate in G2
+        G2.add_edges_from([(v, mapped[5]), (v, mapped[5]), (v, mapped[5])])
+        assert not _cut_PT(u, v, gparams, sparams)
+
+        # Add disconnected nodes, which will form the new Ti_tilde
+        G1.add_nodes_from([6, 7, 8])
+        G2.add_nodes_from(["g", "y", "z"])
+        G1.add_edges_from([(u, 6), (u, 6), (u, 6), (u, 8)])
+        G2.add_edges_from([(v, "g"), (v, "g"), (v, "g"), (v, "z")])
+
+        sparams.T1_tilde.update({6, 7, 8})
+        sparams.T2_tilde.update({"g", "y", "z"})
+
+        l1 = {n: "blue" for n in G1.nodes()}
+        l2 = {n: "blue" for n in G2.nodes()}
+        gparams = _GraphParameters(
+            G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None
+        )
+
+        assert not _cut_PT(u, v, gparams, sparams)
+
+        # Add some new nodes to the mapping
+        sparams.mapping.update({6: "g", 7: "y"})
+        sparams.reverse_mapping.update({"g": 6, "y": 7})
+
+        # Add more nodes to T1, T2.
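+        # (6 and 7 are now covered, so the fresh neighbors attached to them
+        # below, 20 and 21 in G1 and "i", "j" in G2, will belong in T1 and T2.)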
+ G1.add_edges_from([(6, 20), (7, 20), (6, 21)]) + G2.add_edges_from([("g", "i"), ("g", "j"), ("y", "j")]) + + sparams.T1.update({20, 21}) + sparams.T2.update({"i", "j"}) + sparams.T1_tilde.difference_update({6, 7}) + sparams.T2_tilde.difference_update({"g", "y"}) + + assert not _cut_PT(u, v, gparams, sparams) + + # Remove some edges + G2.remove_edge(v, "g") + assert _cut_PT(u, v, gparams, sparams) + + G1.remove_edge(u, 6) + G1.add_edge(u, 8) + G2.add_edge(v, "z") + assert not _cut_PT(u, v, gparams, sparams) + + # Add nodes from the new T1 and T2, as neighbors of u and v respectively + G1.add_edges_from([(u, 20), (u, 20), (u, 20), (u, 21)]) + G2.add_edges_from([(v, "i"), (v, "i"), (v, "i"), (v, "j")]) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + + assert not _cut_PT(u, v, gparams, sparams) + + # Change the edges + G1.remove_edge(u, 20) + G1.add_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + G2.remove_edge(v, "i") + G2.add_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + def test_cut_different_labels(self): + G1 = nx.MultiGraph( + [ + (0, 1), + (0, 1), + (1, 2), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 9), + (20, 9), + (20, 15), + (20, 15), + (20, 12), + (20, 11), + (20, 11), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 8), + (20, 3), + (20, 3), + (20, 5), + (20, 5), + (20, 5), + (20, 0), + (20, 0), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + + l1 = {n: "none" for n in G1.nodes()} + l2 = {} + + l1.update( + { + 9: "blue", + 15: "blue", + 12: "blue", + 11: "green", + 3: "green", + 8: "red", + 0: "red", + 5: "yellow", + } + ) + l2.update({mapped[n]: l for n, l in l1.items()}) + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change the orientation of the labels on neighbors of u compared to neighbors of v. 
Leave the structure intact + l1.update({9: "red"}) + assert _cut_PT(u, v, gparams, sparams) + + # compensate in G2 + l2.update({mapped[9]: "red"}) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G1[u] and T1 + G1.add_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G2[v] and T2 + G2.add_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + # Delete one from the multiple edges + G2.remove_edge(v, mapped[8]) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G1[u] and T1_tilde + G1.remove_edge(u, 8) + assert not _cut_PT(u, v, gparams, sparams) + + # Place 8 and mapped[8] in T1 and T2 respectively, by connecting it to covered nodes + G1.add_edges_from([(8, 3), (8, 3), (8, u)]) + G2.add_edges_from([(mapped[8], mapped[3]), (mapped[8], mapped[3])]) + sparams.T1.add(8) + sparams.T2.add(mapped[8]) + sparams.T1_tilde.remove(8) + sparams.T2_tilde.remove(mapped[8]) + + assert _cut_PT(u, v, gparams, sparams) + + # Fix uneven edges + G1.remove_edge(8, u) + assert not _cut_PT(u, v, gparams, sparams) + + # Remove neighbor of u from T1 + G1.remove_node(5) + l1.pop(5) + sparams.T1.remove(5) + assert _cut_PT(u, v, gparams, sparams) + + # Same in G2 + G2.remove_node(mapped[5]) + l2.pop(mapped[5]) + sparams.T2.remove(mapped[5]) + assert not _cut_PT(u, v, gparams, sparams) + + def test_feasibility_same_labels(self): + G1 = nx.MultiGraph( + [ + (0, 1), + (0, 1), + (1, 2), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 9), + (20, 9), + (20, 15), + (20, 15), + (20, 12), + (20, 11), + (20, 11), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 8), + (20, 3), + (20, 3), + (20, 5), + (20, 5), + (20, 5), + (20, 0), + (20, 0), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {mapped[n]: "blue" for n in G1.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change structure in G2 such that, ONLY consistency is harmed + G2.remove_edges_from([(mapped[20], mapped[3]), (mapped[20], mapped[3])]) + G2.add_edges_from([(mapped[20], mapped[2]), (mapped[20], mapped[2])]) + + # Consistency check fails, while the cutting rules are satisfied! 
+ assert not _cut_PT(u, v, gparams, sparams) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G1 and make it consistent + G1.remove_edges_from([(20, 3), (20, 3)]) + G1.add_edges_from([(20, 2), (20, 2)]) + assert not _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + # ONLY fail the cutting check + G2.add_edges_from([(v, mapped[10])] * 5) + assert _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + # Pass all tests + G1.add_edges_from([(u, 10)] * 5) + assert not _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + def test_feasibility_different_labels(self): + G1 = nx.MultiGraph( + [ + (0, 1), + (0, 1), + (1, 2), + (1, 2), + (1, 14), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (4, 10), + (4, 9), + (6, 10), + (20, 9), + (20, 9), + (20, 9), + (20, 15), + (20, 15), + (20, 12), + (20, 11), + (20, 11), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 8), + (20, 2), + (20, 2), + (20, 5), + (20, 5), + (20, 5), + (20, 0), + (20, 0), + (20, 0), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + l1 = {n: "none" for n in G1.nodes()} + l2 = {} + + l1.update( + { + 9: "blue", + 15: "blue", + 12: "blue", + 11: "green", + 2: "green", + 8: "red", + 0: "red", + 5: "yellow", + } + ) + l2.update({mapped[n]: l for n, l in l1.items()}) + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 14}, + None, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "h", "o"}, + None, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change structure in G2 such that, ONLY consistency is harmed + G2.remove_edges_from([(mapped[20], mapped[2]), (mapped[20], mapped[2])]) + G2.add_edges_from([(mapped[20], mapped[3]), (mapped[20], mapped[3])]) + l2.update({mapped[3]: "green"}) + + # Consistency check fails, while the cutting rules are satisfied! 
+ assert not _cut_PT(u, v, gparams, sparams) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G1 and make it consistent + G1.remove_edges_from([(20, 2), (20, 2)]) + G1.add_edges_from([(20, 3), (20, 3)]) + l1.update({3: "green"}) + assert not _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + # ONLY fail the cutting check + l1.update({5: "red"}) + assert _cut_PT(u, v, gparams, sparams) + assert _consistent_PT(u, v, gparams, sparams) + + +class TestDiGraphISOFeasibility: + def test_const_covered_neighbors(self): + G1 = nx.DiGraph([(0, 1), (1, 2), (0, 3), (2, 3)]) + G2 = nx.DiGraph([("a", "b"), ("b", "c"), ("a", "k"), ("c", "k")]) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_no_covered_neighbors(self): + G1 = nx.DiGraph([(0, 1), (1, 2), (3, 4), (3, 5)]) + G2 = nx.DiGraph([("a", "b"), ("b", "c"), ("k", "w"), ("k", "z")]) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_mixed_covered_uncovered_neighbors(self): + G1 = nx.DiGraph([(0, 1), (1, 2), (3, 0), (3, 2), (3, 4), (3, 5)]) + G2 = nx.DiGraph( + [("a", "b"), ("b", "c"), ("k", "a"), ("k", "c"), ("k", "w"), ("k", "z")] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 3, "k" + assert _consistent_PT(u, v, gparams, sparams) + + def test_const_fail_cases(self): + G1 = nx.DiGraph( + [ + (0, 1), + (2, 1), + (10, 0), + (10, 3), + (10, 4), + (5, 10), + (10, 6), + (1, 4), + (5, 3), + ] + ) + G2 = nx.DiGraph( + [ + ("a", "b"), + ("c", "b"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("f", "k"), + ("k", "g"), + ("b", "e"), + ("f", "d"), + ] + ) + gparams = _GraphParameters(G1, G2, None, None, None, None, None) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + u, v = 10, "k" + assert _consistent_PT(u, v, gparams, sparams) + + # Delete one uncovered neighbor of u. Notice how it still passes the + # test. Two reasons for this: + # 1. If u, v had different degrees from the beginning, they wouldn't + # be selected as candidates in the first place. + # 2. Even if they are selected, consistency is basically + # 1-look-ahead, meaning that we take into consideration the + # relation of the candidates with their mapped neighbors. + # The node we deleted is not a covered neighbor. + # Such nodes will be checked by the cut_PT function, which is + # basically the 2-look-ahead, checking the relation of the + # candidates with T1, T2 (in which belongs the node we just deleted). 
+ G1.remove_node(6) + assert _consistent_PT(u, v, gparams, sparams) + + # Add one more covered neighbor of u in G1 + G1.add_edge(u, 2) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G2 + G2.add_edge(v, "c") + assert _consistent_PT(u, v, gparams, sparams) + + # Add one more covered neighbor of v in G2 + G2.add_edge(v, "x") + G1.add_node(7) + sparams.mapping.update({7: "x"}) + sparams.reverse_mapping.update({"x": 7}) + assert not _consistent_PT(u, v, gparams, sparams) + + # Compensate in G1 + G1.add_edge(u, 7) + assert _consistent_PT(u, v, gparams, sparams) + + def test_cut_inconsistent_labels(self): + G1 = nx.DiGraph( + [ + (0, 1), + (2, 1), + (10, 0), + (10, 3), + (10, 4), + (5, 10), + (10, 6), + (1, 4), + (5, 3), + ] + ) + G2 = nx.DiGraph( + [ + ("a", "b"), + ("c", "b"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("f", "k"), + ("k", "g"), + ("b", "e"), + ("f", "d"), + ] + ) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + l1.update({5: "green"}) # Change the label of one neighbor of u + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + None, + None, + None, + None, + None, + None, + None, + None, + ) + + u, v = 10, "k" + assert _cut_PT(u, v, gparams, sparams) + + def test_cut_consistent_labels(self): + G1 = nx.DiGraph( + [ + (0, 1), + (2, 1), + (10, 0), + (10, 3), + (10, 4), + (5, 10), + (10, 6), + (1, 4), + (5, 3), + ] + ) + G2 = nx.DiGraph( + [ + ("a", "b"), + ("c", "b"), + ("k", "a"), + ("k", "d"), + ("k", "e"), + ("f", "k"), + ("k", "g"), + ("b", "e"), + ("f", "d"), + ] + ) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4}, + {5, 10}, + {6}, + None, + {"e"}, + {"f", "k"}, + {"g"}, + None, + ) + + u, v = 10, "k" + assert not _cut_PT(u, v, gparams, sparams) + + def test_cut_same_labels(self): + G1 = nx.DiGraph( + [ + (0, 1), + (2, 1), + (10, 0), + (10, 3), + (10, 4), + (5, 10), + (10, 6), + (1, 4), + (5, 3), + ] + ) + mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g", 10: "k"} + G2 = nx.relabel_nodes(G1, mapped) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4}, + {5, 10}, + {6}, + None, + {"e"}, + {"f", "k"}, + {"g"}, + None, + ) + + u, v = 10, "k" + assert not _cut_PT(u, v, gparams, sparams) + + # Change intersection between G1[u] and T1_out, so it's not the same as the one between G2[v] and T2_out + G1.remove_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + # Compensate in G2 + G2.remove_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + # Change intersection between G1[u] and T1_in, so it's not the same as the one between G2[v] and T2_in + G1.remove_edge(5, u) + assert _cut_PT(u, v, gparams, sparams) + + # Compensate in G2 + G2.remove_edge(mapped[5], v) + assert not _cut_PT(u, v, gparams, sparams) + + # Change intersection between G2[v] and T2_tilde, so it's not the same as the one between G1[u] and T1_tilde + G2.remove_edge(v, mapped[6]) + assert _cut_PT(u, v, gparams, 
sparams) + + # Compensate in G1 + G1.remove_edge(u, 6) + assert not _cut_PT(u, v, gparams, sparams) + + # Add disconnected nodes, which will form the new Ti_tilde + G1.add_nodes_from([6, 7, 8]) + G2.add_nodes_from(["g", "y", "z"]) + sparams.T1_tilde.update({6, 7, 8}) + sparams.T2_tilde.update({"g", "y", "z"}) + + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + + assert not _cut_PT(u, v, gparams, sparams) + + def test_cut_different_labels(self): + G1 = nx.DiGraph( + [ + (0, 1), + (1, 2), + (14, 1), + (0, 4), + (1, 5), + (2, 6), + (3, 7), + (3, 6), + (10, 4), + (4, 9), + (6, 10), + (20, 9), + (20, 15), + (20, 12), + (20, 11), + (12, 13), + (11, 13), + (20, 8), + (20, 3), + (20, 5), + (0, 20), + ] + ) + mapped = { + 0: "a", + 1: "b", + 2: "c", + 3: "d", + 4: "e", + 5: "f", + 6: "g", + 7: "h", + 8: "i", + 9: "j", + 10: "k", + 11: "l", + 12: "m", + 13: "n", + 14: "o", + 15: "p", + 20: "x", + } + G2 = nx.relabel_nodes(G1, mapped) + + l1 = {n: "none" for n in G1.nodes()} + l2 = {} + + l1.update( + { + 9: "blue", + 15: "blue", + 12: "blue", + 11: "green", + 3: "green", + 8: "red", + 0: "red", + 5: "yellow", + } + ) + l2.update({mapped[n]: l for n, l in l1.items()}) + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c", 3: "d"}, + {"a": 0, "b": 1, "c": 2, "d": 3}, + {4, 5, 6, 7, 20}, + {14, 20}, + {9, 10, 15, 12, 11, 13, 8}, + None, + {"e", "f", "g", "x"}, + {"o", "x"}, + {"j", "k", "l", "m", "n", "i", "p"}, + None, + ) + + u, v = 20, "x" + assert not _cut_PT(u, v, gparams, sparams) + + # Change the orientation of the labels on neighbors of u compared to neighbors of v. 
Leave the structure intact + l1.update({9: "red"}) + assert _cut_PT(u, v, gparams, sparams) + + # compensate in G2 + l2.update({mapped[9]: "red"}) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G1[u] and T1_out + G1.add_edge(u, 4) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G2[v] and T2_out + G2.add_edge(v, mapped[4]) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G1[u] and T1_in + G1.add_edge(u, 14) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G2[v] and T2_in + G2.add_edge(v, mapped[14]) + assert not _cut_PT(u, v, gparams, sparams) + + # Change the intersection of G2[v] and T2_tilde + G2.remove_edge(v, mapped[8]) + assert _cut_PT(u, v, gparams, sparams) + + # Same for G1[u] and T1_tilde + G1.remove_edge(u, 8) + assert not _cut_PT(u, v, gparams, sparams) + + # Place 8 and mapped[8] in T1 and T2 respectively, by connecting it to covered nodes + G1.add_edge(8, 3) + G2.add_edge(mapped[8], mapped[3]) + sparams.T1.add(8) + sparams.T2.add(mapped[8]) + sparams.T1_tilde.remove(8) + sparams.T2_tilde.remove(mapped[8]) + + assert not _cut_PT(u, v, gparams, sparams) + + # Remove neighbor of u from T1 + G1.remove_node(5) + l1.pop(5) + sparams.T1.remove(5) + assert _cut_PT(u, v, gparams, sparams) + + # Same in G2 + G2.remove_node(mapped[5]) + l2.pop(mapped[5]) + sparams.T2.remove(mapped[5]) + assert not _cut_PT(u, v, gparams, sparams) + + def test_predecessor_T1_in_fail(self): + G1 = nx.DiGraph( + [(0, 1), (0, 3), (4, 0), (1, 5), (5, 2), (3, 6), (4, 6), (6, 5)] + ) + mapped = {0: "a", 1: "b", 2: "c", 3: "d", 4: "e", 5: "f", 6: "g"} + G2 = nx.relabel_nodes(G1, mapped) + l1 = {n: "blue" for n in G1.nodes()} + l2 = {n: "blue" for n in G2.nodes()} + + gparams = _GraphParameters( + G1, G2, l1, l2, nx.utils.groups(l1), nx.utils.groups(l2), None + ) + sparams = _StateParameters( + {0: "a", 1: "b", 2: "c"}, + {"a": 0, "b": 1, "c": 2}, + {3, 5}, + {4, 5}, + {6}, + None, + {"d", "f"}, + {"f"}, # mapped[4] is missing from T2_in + {"g"}, + None, + ) + + u, v = 6, "g" + assert _cut_PT(u, v, gparams, sparams) + + sparams.T2_in.add("e") + assert not _cut_PT(u, v, gparams, sparams) + + +class TestGraphTinoutUpdating: + edges = [ + (1, 3), + (2, 3), + (3, 4), + (4, 9), + (4, 5), + (3, 9), + (5, 8), + (5, 7), + (8, 7), + (6, 7), + ] + mapped = { + 0: "x", + 1: "a", + 2: "b", + 3: "c", + 4: "d", + 5: "e", + 6: "f", + 7: "g", + 8: "h", + 9: "i", + } + G1 = nx.Graph() + G1.add_edges_from(edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, mapping=mapped) + + def test_updating(self): + G2_degree = dict(self.G2.degree) + gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree) + m, m_rev, T1, _, T1_tilde, _, T2, _, T2_tilde, _ = sparams + + # Add node to the mapping + m[4] = self.mapped[4] + m_rev[self.mapped[4]] = 4 + _update_Tinout(4, self.mapped[4], gparams, sparams) + + assert T1 == {3, 5, 9} + assert T2 == {"c", "i", "e"} + assert T1_tilde == {0, 1, 2, 6, 7, 8} + assert T2_tilde == {"x", "a", "b", "f", "g", "h"} + + # Add node to the mapping + m[5] = self.mapped[5] + m_rev.update({self.mapped[5]: 5}) + _update_Tinout(5, self.mapped[5], gparams, sparams) + + assert T1 == {3, 9, 8, 7} + assert T2 == {"c", "i", "h", "g"} + assert T1_tilde == {0, 1, 2, 6} + assert T2_tilde == {"x", "a", "b", "f"} + + # Add node to the mapping + m[6] = self.mapped[6] + m_rev.update({self.mapped[6]: 6}) + _update_Tinout(6, self.mapped[6], gparams, sparams) + + assert T1 == {3, 9, 8, 7} + assert T2 == {"c", "i", "h", "g"} + assert T1_tilde == {0, 
1, 2} + assert T2_tilde == {"x", "a", "b"} + + # Add node to the mapping + m[3] = self.mapped[3] + m_rev.update({self.mapped[3]: 3}) + _update_Tinout(3, self.mapped[3], gparams, sparams) + + assert T1 == {1, 2, 9, 8, 7} + assert T2 == {"a", "b", "i", "h", "g"} + assert T1_tilde == {0} + assert T2_tilde == {"x"} + + # Add node to the mapping + m[0] = self.mapped[0] + m_rev.update({self.mapped[0]: 0}) + _update_Tinout(0, self.mapped[0], gparams, sparams) + + assert T1 == {1, 2, 9, 8, 7} + assert T2 == {"a", "b", "i", "h", "g"} + assert T1_tilde == set() + assert T2_tilde == set() + + def test_restoring(self): + m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"} + m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6} + + T1 = {1, 2, 7, 9, 8} + T2 = {"a", "b", "g", "i", "h"} + T1_tilde = set() + T2_tilde = set() + + gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {}) + sparams = _StateParameters( + m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None + ) + + # Remove a node from the mapping + m.pop(0) + m_rev.pop("x") + _restore_Tinout(0, self.mapped[0], gparams, sparams) + + assert T1 == {1, 2, 7, 9, 8} + assert T2 == {"a", "b", "g", "i", "h"} + assert T1_tilde == {0} + assert T2_tilde == {"x"} + + # Remove a node from the mapping + m.pop(6) + m_rev.pop("f") + _restore_Tinout(6, self.mapped[6], gparams, sparams) + + assert T1 == {1, 2, 7, 9, 8} + assert T2 == {"a", "b", "g", "i", "h"} + assert T1_tilde == {0, 6} + assert T2_tilde == {"x", "f"} + + # Remove a node from the mapping + m.pop(3) + m_rev.pop("c") + _restore_Tinout(3, self.mapped[3], gparams, sparams) + + assert T1 == {7, 9, 8, 3} + assert T2 == {"g", "i", "h", "c"} + assert T1_tilde == {0, 6, 1, 2} + assert T2_tilde == {"x", "f", "a", "b"} + + # Remove a node from the mapping + m.pop(5) + m_rev.pop("e") + _restore_Tinout(5, self.mapped[5], gparams, sparams) + + assert T1 == {9, 3, 5} + assert T2 == {"i", "c", "e"} + assert T1_tilde == {0, 6, 1, 2, 7, 8} + assert T2_tilde == {"x", "f", "a", "b", "g", "h"} + + # Remove a node from the mapping + m.pop(4) + m_rev.pop("d") + _restore_Tinout(4, self.mapped[4], gparams, sparams) + + assert T1 == set() + assert T2 == set() + assert T1_tilde == set(self.G1.nodes()) + assert T2_tilde == set(self.G2.nodes()) + + +class TestDiGraphTinoutUpdating: + edges = [ + (1, 3), + (3, 2), + (3, 4), + (4, 9), + (4, 5), + (3, 9), + (5, 8), + (5, 7), + (8, 7), + (7, 6), + ] + mapped = { + 0: "x", + 1: "a", + 2: "b", + 3: "c", + 4: "d", + 5: "e", + 6: "f", + 7: "g", + 8: "h", + 9: "i", + } + G1 = nx.DiGraph(edges) + G1.add_node(0) + G2 = nx.relabel_nodes(G1, mapping=mapped) + + def test_updating(self): + G2_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip( + self.G2.in_degree, self.G2.out_degree + ) + } + gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree) + m, m_rev, T1_out, T1_in, T1_tilde, _, T2_out, T2_in, T2_tilde, _ = sparams + + # Add node to the mapping + m[4] = self.mapped[4] + m_rev[self.mapped[4]] = 4 + _update_Tinout(4, self.mapped[4], gparams, sparams) + + assert T1_out == {5, 9} + assert T1_in == {3} + assert T2_out == {"i", "e"} + assert T2_in == {"c"} + assert T1_tilde == {0, 1, 2, 6, 7, 8} + assert T2_tilde == {"x", "a", "b", "f", "g", "h"} + + # Add node to the mapping + m[5] = self.mapped[5] + m_rev[self.mapped[5]] = 5 + _update_Tinout(5, self.mapped[5], gparams, sparams) + + assert T1_out == {9, 8, 7} + assert T1_in == {3} + assert T2_out == {"i", "g", "h"} + assert T2_in == {"c"} + assert T1_tilde == {0, 1, 2, 6} + assert 
T2_tilde == {"x", "a", "b", "f"} + + # Add node to the mapping + m[6] = self.mapped[6] + m_rev[self.mapped[6]] = 6 + _update_Tinout(6, self.mapped[6], gparams, sparams) + + assert T1_out == {9, 8, 7} + assert T1_in == {3, 7} + assert T2_out == {"i", "g", "h"} + assert T2_in == {"c", "g"} + assert T1_tilde == {0, 1, 2} + assert T2_tilde == {"x", "a", "b"} + + # Add node to the mapping + m[3] = self.mapped[3] + m_rev[self.mapped[3]] = 3 + _update_Tinout(3, self.mapped[3], gparams, sparams) + + assert T1_out == {9, 8, 7, 2} + assert T1_in == {7, 1} + assert T2_out == {"i", "g", "h", "b"} + assert T2_in == {"g", "a"} + assert T1_tilde == {0} + assert T2_tilde == {"x"} + + # Add node to the mapping + m[0] = self.mapped[0] + m_rev[self.mapped[0]] = 0 + _update_Tinout(0, self.mapped[0], gparams, sparams) + + assert T1_out == {9, 8, 7, 2} + assert T1_in == {7, 1} + assert T2_out == {"i", "g", "h", "b"} + assert T2_in == {"g", "a"} + assert T1_tilde == set() + assert T2_tilde == set() + + def test_restoring(self): + m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"} + m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6} + + T1_out = {2, 7, 9, 8} + T1_in = {1, 7} + T2_out = {"b", "g", "i", "h"} + T2_in = {"a", "g"} + T1_tilde = set() + T2_tilde = set() + + gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {}) + sparams = _StateParameters( + m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None + ) + + # Remove a node from the mapping + m.pop(0) + m_rev.pop("x") + _restore_Tinout_Di(0, self.mapped[0], gparams, sparams) + + assert T1_out == {2, 7, 9, 8} + assert T1_in == {1, 7} + assert T2_out == {"b", "g", "i", "h"} + assert T2_in == {"a", "g"} + assert T1_tilde == {0} + assert T2_tilde == {"x"} + + # Remove a node from the mapping + m.pop(6) + m_rev.pop("f") + _restore_Tinout_Di(6, self.mapped[6], gparams, sparams) + + assert T1_out == {2, 9, 8, 7} + assert T1_in == {1} + assert T2_out == {"b", "i", "h", "g"} + assert T2_in == {"a"} + assert T1_tilde == {0, 6} + assert T2_tilde == {"x", "f"} + + # Remove a node from the mapping + m.pop(3) + m_rev.pop("c") + _restore_Tinout_Di(3, self.mapped[3], gparams, sparams) + + assert T1_out == {9, 8, 7} + assert T1_in == {3} + assert T2_out == {"i", "h", "g"} + assert T2_in == {"c"} + assert T1_tilde == {0, 6, 1, 2} + assert T2_tilde == {"x", "f", "a", "b"} + + # Remove a node from the mapping + m.pop(5) + m_rev.pop("e") + _restore_Tinout_Di(5, self.mapped[5], gparams, sparams) + + assert T1_out == {9, 5} + assert T1_in == {3} + assert T2_out == {"i", "e"} + assert T2_in == {"c"} + assert T1_tilde == {0, 6, 1, 2, 8, 7} + assert T2_tilde == {"x", "f", "a", "b", "h", "g"} + + # Remove a node from the mapping + m.pop(4) + m_rev.pop("d") + _restore_Tinout_Di(4, self.mapped[4], gparams, sparams) + + assert T1_out == set() + assert T1_in == set() + assert T2_out == set() + assert T2_in == set() + assert T1_tilde == set(self.G1.nodes()) + assert T2_tilde == set(self.G2.nodes()) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..66a434e05ceb7b42bb4e4893d26298001046386f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tests/test_vf2userfunc.py @@ -0,0 +1,200 @@ +""" + Tests for VF2 isomorphism algorithm for weighted graphs. 
+""" + +import math +from operator import eq + +import networkx as nx +import networkx.algorithms.isomorphism as iso + + +def test_simple(): + # 16 simple tests + w = "weight" + edges = [(0, 0, 1), (0, 0, 1.5), (0, 1, 2), (1, 0, 3)] + for g1 in [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()]: + g1.add_weighted_edges_from(edges) + g2 = g1.subgraph(g1.nodes()) + if g1.is_multigraph(): + em = iso.numerical_multiedge_match("weight", 1) + else: + em = iso.numerical_edge_match("weight", 1) + assert nx.is_isomorphic(g1, g2, edge_match=em) + + for mod1, mod2 in [(False, True), (True, False), (True, True)]: + # mod1 tests a regular edge + # mod2 tests a selfloop + if g2.is_multigraph(): + if mod1: + data1 = {0: {"weight": 10}} + if mod2: + data2 = {0: {"weight": 1}, 1: {"weight": 2.5}} + else: + if mod1: + data1 = {"weight": 10} + if mod2: + data2 = {"weight": 2.5} + + g2 = g1.subgraph(g1.nodes()).copy() + if mod1: + if not g1.is_directed(): + g2._adj[1][0] = data1 + g2._adj[0][1] = data1 + else: + g2._succ[1][0] = data1 + g2._pred[0][1] = data1 + if mod2: + if not g1.is_directed(): + g2._adj[0][0] = data2 + else: + g2._succ[0][0] = data2 + g2._pred[0][0] = data2 + + assert not nx.is_isomorphic(g1, g2, edge_match=em) + + +def test_weightkey(): + g1 = nx.DiGraph() + g2 = nx.DiGraph() + + g1.add_edge("A", "B", weight=1) + g2.add_edge("C", "D", weight=0) + + assert nx.is_isomorphic(g1, g2) + em = iso.numerical_edge_match("nonexistent attribute", 1) + assert nx.is_isomorphic(g1, g2, edge_match=em) + em = iso.numerical_edge_match("weight", 1) + assert not nx.is_isomorphic(g1, g2, edge_match=em) + + g2 = nx.DiGraph() + g2.add_edge("C", "D") + assert nx.is_isomorphic(g1, g2, edge_match=em) + + +class TestNodeMatch_Graph: + def setup_method(self): + self.g1 = nx.Graph() + self.g2 = nx.Graph() + self.build() + + def build(self): + self.nm = iso.categorical_node_match("color", "") + self.em = iso.numerical_edge_match("weight", 1) + + self.g1.add_node("A", color="red") + self.g2.add_node("C", color="blue") + + self.g1.add_edge("A", "B", weight=1) + self.g2.add_edge("C", "D", weight=1) + + def test_noweight_nocolor(self): + assert nx.is_isomorphic(self.g1, self.g2) + + def test_color1(self): + assert not nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) + + def test_color2(self): + self.g1.nodes["A"]["color"] = "blue" + assert nx.is_isomorphic(self.g1, self.g2, node_match=self.nm) + + def test_weight1(self): + assert nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_weight2(self): + self.g1.add_edge("A", "B", weight=2) + assert not nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_colorsandweights1(self): + iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) + assert not iso + + def test_colorsandweights2(self): + self.g1.nodes["A"]["color"] = "blue" + iso = nx.is_isomorphic(self.g1, self.g2, node_match=self.nm, edge_match=self.em) + assert iso + + def test_colorsandweights3(self): + # make the weights disagree + self.g1.add_edge("A", "B", weight=2) + assert not nx.is_isomorphic( + self.g1, self.g2, node_match=self.nm, edge_match=self.em + ) + + +class TestEdgeMatch_MultiGraph: + def setup_method(self): + self.g1 = nx.MultiGraph() + self.g2 = nx.MultiGraph() + self.GM = iso.MultiGraphMatcher + self.build() + + def build(self): + g1 = self.g1 + g2 = self.g2 + + # We will assume integer weights only. 
+ g1.add_edge("A", "B", color="green", weight=0, size=0.5) + g1.add_edge("A", "B", color="red", weight=1, size=0.35) + g1.add_edge("A", "B", color="red", weight=2, size=0.65) + + g2.add_edge("C", "D", color="green", weight=1, size=0.5) + g2.add_edge("C", "D", color="red", weight=0, size=0.45) + g2.add_edge("C", "D", color="red", weight=2, size=0.65) + + if g1.is_multigraph(): + self.em = iso.numerical_multiedge_match("weight", 1) + self.emc = iso.categorical_multiedge_match("color", "") + self.emcm = iso.categorical_multiedge_match(["color", "weight"], ["", 1]) + self.emg1 = iso.generic_multiedge_match("color", "red", eq) + self.emg2 = iso.generic_multiedge_match( + ["color", "weight", "size"], + ["red", 1, 0.5], + [eq, eq, math.isclose], + ) + else: + self.em = iso.numerical_edge_match("weight", 1) + self.emc = iso.categorical_edge_match("color", "") + self.emcm = iso.categorical_edge_match(["color", "weight"], ["", 1]) + self.emg1 = iso.generic_multiedge_match("color", "red", eq) + self.emg2 = iso.generic_edge_match( + ["color", "weight", "size"], + ["red", 1, 0.5], + [eq, eq, math.isclose], + ) + + def test_weights_only(self): + assert nx.is_isomorphic(self.g1, self.g2, edge_match=self.em) + + def test_colors_only(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emc) + assert gm.is_isomorphic() + + def test_colorsandweights(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emcm) + assert not gm.is_isomorphic() + + def test_generic1(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emg1) + assert gm.is_isomorphic() + + def test_generic2(self): + gm = self.GM(self.g1, self.g2, edge_match=self.emg2) + assert not gm.is_isomorphic() + + +class TestEdgeMatch_DiGraph(TestNodeMatch_Graph): + def setup_method(self): + TestNodeMatch_Graph.setup_method(self) + self.g1 = nx.DiGraph() + self.g2 = nx.DiGraph() + self.build() + + +class TestEdgeMatch_MultiDiGraph(TestEdgeMatch_MultiGraph): + def setup_method(self): + TestEdgeMatch_MultiGraph.setup_method(self) + self.g1 = nx.MultiDiGraph() + self.g2 = nx.MultiDiGraph() + self.GM = iso.MultiDiGraphMatcher + self.build() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py new file mode 100644 index 0000000000000000000000000000000000000000..6e935063336d7f1a84cff099fdedcf5f79fad558 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/tree_isomorphism.py @@ -0,0 +1,283 @@ +""" +An algorithm for finding if two undirected trees are isomorphic, +and if so returns an isomorphism between the two sets of nodes. + +This algorithm uses a routine to tell if two rooted trees (trees with a +specified root node) are isomorphic, which may be independently useful. + +This implements an algorithm from: +The Design and Analysis of Computer Algorithms +by Aho, Hopcroft, and Ullman +Addison-Wesley Publishing 1974 +Example 3.2 pp. 84-86. 
+
+A more understandable version of this algorithm is described in:
+Homework Assignment 5
+McGill University SOCS 308-250B, Winter 2002
+by Matthew Suderman
+http://crypto.cs.mcgill.ca/~crepeau/CS250/2004/HW5+.pdf
+"""
+
+import networkx as nx
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["rooted_tree_isomorphism", "tree_isomorphism"]
+
+
+@nx._dispatch(graphs={"t1": 0, "t2": 2})
+def root_trees(t1, root1, t2, root2):
+    """Create a single digraph dT from the free trees t1 and t2,
+    with roots root1 and root2 respectively.
+
+    The nodes are renamed with consecutive integers, so that every node
+    gets a unique name across both trees:
+
+    * our new "fake" root node is 0
+    * t1 is numbered from 1 to n
+    * t2 is numbered from n+1 to 2n
+    """
+
+    dT = nx.DiGraph()
+
+    newroot1 = 1  # left root will be 1
+    newroot2 = nx.number_of_nodes(t1) + 1  # right will be n+1
+
+    # there may be overlap in node names, so we need separate maps
+    # giving the new name for each old one
+    namemap1 = {root1: newroot1}
+    namemap2 = {root2: newroot2}
+
+    # add an edge from our new root to root1 and root2
+    dT.add_edge(0, namemap1[root1])
+    dT.add_edge(0, namemap2[root2])
+
+    for i, (v1, v2) in enumerate(nx.bfs_edges(t1, root1)):
+        namemap1[v2] = i + namemap1[root1] + 1
+        dT.add_edge(namemap1[v1], namemap1[v2])
+
+    for i, (v1, v2) in enumerate(nx.bfs_edges(t2, root2)):
+        namemap2[v2] = i + namemap2[root2] + 1
+        dT.add_edge(namemap2[v1], namemap2[v2])
+
+    # now we really want the inverse of namemap1 and namemap2,
+    # giving the old name for each new one; since the values of
+    # namemap1 and namemap2 are unique, there won't be collisions
+    namemap = {}
+    for old, new in namemap1.items():
+        namemap[new] = old
+    for old, new in namemap2.items():
+        namemap[new] = old
+
+    return (dT, namemap, newroot1, newroot2)
+
+
+# figure out the level of each node, with 0 at the root
+@nx._dispatch
+def assign_levels(G, root):
+    level = {}
+    level[root] = 0
+    for v1, v2 in nx.bfs_edges(G, root):
+        level[v2] = level[v1] + 1
+
+    return level
+
+
+# now group the nodes at each level
+def group_by_levels(levels):
+    L = {}
+    for n, lev in levels.items():
+        if lev not in L:
+            L[lev] = []
+        L[lev].append(n)
+
+    return L
+
+
+# now let's get the isomorphism by walking the ordered_children
+def generate_isomorphism(v, w, M, ordered_children):
+    # make sure tree1 comes first
+    assert v < w
+    M.append((v, w))
+    for x, y in zip(ordered_children[v], ordered_children[w]):
+        generate_isomorphism(x, y, M, ordered_children)
+
+
+@nx._dispatch(graphs={"t1": 0, "t2": 2})
+def rooted_tree_isomorphism(t1, root1, t2, root2):
+    """
+    Given two rooted trees `t1` and `t2`,
+    with roots `root1` and `root2` respectively,
+    this routine will determine if they are isomorphic.
+
+    These trees may be either directed or undirected,
+    but if they are directed, all edges should flow from the root.
+
+    It returns the isomorphism, a mapping of the nodes of `t1` onto the nodes
+    of `t2`, such that the two trees are then identical.
+
+    Note that two trees may have more than one isomorphism, and this
+    routine just returns one valid mapping.
+
+    Parameters
+    ----------
+    t1 : NetworkX graph
+        One of the trees being compared
+
+    root1 : a node of `t1` which is the root of the tree
+
+    t2 : undirected NetworkX graph
+        The other tree being compared
+
+    root2 : a node of `t2` which is the root of the tree
+
+    This is a subroutine used to implement `tree_isomorphism`, but will
+    be somewhat faster if you already have rooted trees.
+
+    Returns
+    -------
+    isomorphism : list
+        A list of pairs in which the left element is a node in `t1`
+        and the right element is a node in `t2`. The pairs are in
+        arbitrary order. If the nodes in one tree are mapped to the names in
+        the other, the trees will be identical. Note that an isomorphism
+        will not necessarily be unique.
+
+    If `t1` and `t2` are not isomorphic, then it returns the empty list.
+    """
+
+    assert nx.is_tree(t1)
+    assert nx.is_tree(t2)
+
+    # get the rooted tree formed by combining them
+    # with unique names
+    (dT, namemap, newroot1, newroot2) = root_trees(t1, root1, t2, root2)
+
+    # compute the distance from the root, with 0 for our fake root
+    levels = assign_levels(dT, 0)
+
+    # height
+    h = max(levels.values())
+
+    # collect nodes into a dict by level
+    L = group_by_levels(levels)
+
+    # each node has a label, initially set to 0
+    label = {v: 0 for v in dT}
+    # and also ordered_labels and ordered_children
+    # which will store ordered tuples
+    ordered_labels = {v: () for v in dT}
+    ordered_children = {v: () for v in dT}
+
+    # nothing to do on the last level, so start on h-1
+    # also nothing to do for our fake level 0, so skip that
+    for i in range(h - 1, 0, -1):
+        # update the ordered_labels and ordered_children
+        # for any children
+        for v in L[i]:
+            # nothing to do if no children
+            if dT.out_degree(v) > 0:
+                # get all the pairs of labels and nodes of children
+                # and sort by labels
+                s = sorted((label[u], u) for u in dT.successors(v))
+
+                # invert to give a list of two tuples:
+                # the sorted labels, and the corresponding children
+                ordered_labels[v], ordered_children[v] = list(zip(*s))
+
+        # now collect and sort the sorted ordered_labels
+        # for all nodes in L[i], carrying along the node
+        forlabel = sorted((ordered_labels[v], v) for v in L[i])
+
+        # now assign labels to these nodes, according to the sorted order
+        # starting from 0, where identical ordered_labels get the same label
+        current = 0
+        for j, (ol, v) in enumerate(forlabel):
+            # advance to the next label if not first, and different from previous
+            if (j != 0) and (ol != forlabel[j - 1][0]):
+                current += 1
+            label[v] = current
+
+    # they are isomorphic if the labels of newroot1 and newroot2 are both 0
+    isomorphism = []
+    if label[newroot1] == 0 and label[newroot2] == 0:
+        generate_isomorphism(newroot1, newroot2, isomorphism, ordered_children)
+
+    # get the mapping back in terms of the old names
+    # return in sorted order for neatness
+    isomorphism = [(namemap[u], namemap[v]) for (u, v) in isomorphism]
+
+    return isomorphism
+
+
+@not_implemented_for("directed", "multigraph")
+@nx._dispatch(graphs={"t1": 0, "t2": 1})
+def tree_isomorphism(t1, t2):
+    """
+    Given two undirected (or free) trees `t1` and `t2`,
+    this routine will determine if they are isomorphic.
+    It returns the isomorphism, a mapping of the nodes of `t1` onto the nodes
+    of `t2`, such that the two trees are then identical.
+
+    Note that two trees may have more than one isomorphism, and this
+    routine just returns one valid mapping.
+
+    Parameters
+    ----------
+    t1 : undirected NetworkX graph
+        One of the trees being compared
+
+    t2 : undirected NetworkX graph
+        The other tree being compared
+
+    Returns
+    -------
+    isomorphism : list
+        A list of pairs in which the left element is a node in `t1`
+        and the right element is a node in `t2`. The pairs are in
+        arbitrary order. If the nodes in one tree are mapped to the names in
+        the other, the trees will be identical. Note that an isomorphism
+        will not necessarily be unique.
+
+    If `t1` and `t2` are not isomorphic, then it returns the empty list.
+
+    Notes
+    -----
+    This runs in O(n*log(n)) time for trees with n nodes.
+    """
+
+    assert nx.is_tree(t1)
+    assert nx.is_tree(t2)
+
+    # To be isomorphic, t1 and t2 must have the same number of nodes.
+    if nx.number_of_nodes(t1) != nx.number_of_nodes(t2):
+        return []
+
+    # Another shortcut is that the sorted degree sequences need to be the same.
+    degree_sequence1 = sorted(d for (n, d) in t1.degree())
+    degree_sequence2 = sorted(d for (n, d) in t2.degree())
+
+    if degree_sequence1 != degree_sequence2:
+        return []
+
+    # A tree can have either 1 or 2 centers.
+    # If the number doesn't match, then t1 and t2 are not isomorphic.
+    center1 = nx.center(t1)
+    center2 = nx.center(t2)
+
+    if len(center1) != len(center2):
+        return []
+
+    # If there is only 1 center in each, then use it.
+    if len(center1) == 1:
+        return rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If they both have 2 centers, then try the first of t1
+    # with the first of t2.
+    attempts = rooted_tree_isomorphism(t1, center1[0], t2, center2[0])
+
+    # If that worked, we're done.
+    if len(attempts) > 0:
+        return attempts
+
+    # Otherwise, try center1[0] with center2[1], and see if that works
+    return rooted_tree_isomorphism(t1, center1[0], t2, center2[1])
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2pp.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2pp.py
new file mode 100644
index 0000000000000000000000000000000000000000..953204f669b76f9d86ef82849f7f39d0f9a11d88
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2pp.py
@@ -0,0 +1,1068 @@
+"""
+***************
+VF2++ Algorithm
+***************
+
+An implementation of the VF2++ algorithm [1]_ for graph isomorphism testing.
+
+The simplest interface to use this module is to call:
+
+`vf2pp_is_isomorphic`: to check whether two graphs are isomorphic.
+`vf2pp_isomorphism`: to obtain the node mapping between two graphs,
+in case they are isomorphic.
+`vf2pp_all_isomorphisms`: to generate all possible mappings between two graphs,
+if isomorphic.
+
+Introduction
+------------
+The VF2++ algorithm follows a similar logic to that of VF2, while also
+introducing new easy-to-check cutting rules and determining the optimal access
+order of nodes. It is also implemented in a non-recursive manner, which saves
+both time and space when compared to its recursive predecessor.
+
+The optimal node ordering is obtained by taking into consideration both the
+degree and the label rarity of each node.
+This way, the nodes that are more likely to match are placed first in the
+order, so the most promising branches are examined at the beginning.
+The rules also consider node labels, making it easier to prune unfruitful
+branches early in the process.
+
+Examples
+--------
+
+Suppose G1 and G2 are isomorphic graphs.
Verification is as follows: + +Without node labels: + +>>> import networkx as nx +>>> G1 = nx.path_graph(4) +>>> G2 = nx.path_graph(4) +>>> nx.vf2pp_is_isomorphic(G1, G2, node_label=None) +True +>>> nx.vf2pp_isomorphism(G1, G2, node_label=None) +{1: 1, 2: 2, 0: 0, 3: 3} + +With node labels: + +>>> G1 = nx.path_graph(4) +>>> G2 = nx.path_graph(4) +>>> mapped = {1: 1, 2: 2, 3: 3, 0: 0} +>>> nx.set_node_attributes(G1, dict(zip(G1, ["blue", "red", "green", "yellow"])), "label") +>>> nx.set_node_attributes(G2, dict(zip([mapped[u] for u in G1], ["blue", "red", "green", "yellow"])), "label") +>>> nx.vf2pp_is_isomorphic(G1, G2, node_label="label") +True +>>> nx.vf2pp_isomorphism(G1, G2, node_label="label") +{1: 1, 2: 2, 0: 0, 3: 3} + +References +---------- +.. [1] Jüttner, Alpár & Madarasi, Péter. (2018). "VF2++—An improved subgraph + isomorphism algorithm". Discrete Applied Mathematics. 242. + https://doi.org/10.1016/j.dam.2018.02.018 + +""" +import collections + +import networkx as nx + +__all__ = ["vf2pp_isomorphism", "vf2pp_is_isomorphic", "vf2pp_all_isomorphisms"] + +_GraphParameters = collections.namedtuple( + "_GraphParameters", + [ + "G1", + "G2", + "G1_labels", + "G2_labels", + "nodes_of_G1Labels", + "nodes_of_G2Labels", + "G2_nodes_of_degree", + ], +) + +_StateParameters = collections.namedtuple( + "_StateParameters", + [ + "mapping", + "reverse_mapping", + "T1", + "T1_in", + "T1_tilde", + "T1_tilde_in", + "T2", + "T2_in", + "T2_tilde", + "T2_tilde_in", + ], +) + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"}) +def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None): + """Return an isomorphic mapping between `G1` and `G2` if it exists. + + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Returns + ------- + dict or None + Node mapping if the two graphs are isomorphic. None otherwise. + """ + try: + mapping = next(vf2pp_all_isomorphisms(G1, G2, node_label, default_label)) + return mapping + except StopIteration: + return None + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"}) +def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None): + """Examines whether G1 and G2 are isomorphic. + + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Returns + ------- + bool + True if the two graphs are isomorphic, False otherwise. 
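+
+    Examples
+    --------
+    A minimal usage sketch (label-free comparison of two small graphs):
+
+    >>> import networkx as nx
+    >>> nx.vf2pp_is_isomorphic(nx.path_graph(4), nx.path_graph(4))
+    True
+    >>> nx.vf2pp_is_isomorphic(nx.path_graph(4), nx.star_graph(3))
+    False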
+ """ + if vf2pp_isomorphism(G1, G2, node_label, default_label) is not None: + return True + return False + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}, node_attrs={"node_label": "default_label"}) +def vf2pp_all_isomorphisms(G1, G2, node_label=None, default_label=None): + """Yields all the possible mappings between G1 and G2. + + Parameters + ---------- + G1, G2 : NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism. + + node_label : str, optional + The name of the node attribute to be used when comparing nodes. + The default is `None`, meaning node attributes are not considered + in the comparison. Any node that doesn't have the `node_label` + attribute uses `default_label` instead. + + default_label : scalar + Default value to use when a node doesn't have an attribute + named `node_label`. Default is `None`. + + Yields + ------ + dict + Isomorphic mapping between the nodes in `G1` and `G2`. + """ + if G1.number_of_nodes() == 0 or G2.number_of_nodes() == 0: + return False + + # Create the degree dicts based on graph type + if G1.is_directed(): + G1_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G1.in_degree, G1.out_degree) + } + G2_degree = { + n: (in_degree, out_degree) + for (n, in_degree), (_, out_degree) in zip(G2.in_degree, G2.out_degree) + } + else: + G1_degree = dict(G1.degree) + G2_degree = dict(G2.degree) + + if not G1.is_directed(): + find_candidates = _find_candidates + restore_Tinout = _restore_Tinout + else: + find_candidates = _find_candidates_Di + restore_Tinout = _restore_Tinout_Di + + # Check that both graphs have the same number of nodes and degree sequence + if G1.order() != G2.order(): + return False + if sorted(G1_degree.values()) != sorted(G2_degree.values()): + return False + + # Initialize parameters and cache necessary information about degree and labels + graph_params, state_params = _initialize_parameters( + G1, G2, G2_degree, node_label, default_label + ) + + # Check if G1 and G2 have the same labels, and that number of nodes per label is equal between the two graphs + if not _precheck_label_properties(graph_params): + return False + + # Calculate the optimal node ordering + node_order = _matching_order(graph_params) + + # Initialize the stack + stack = [] + candidates = iter( + find_candidates(node_order[0], graph_params, state_params, G1_degree) + ) + stack.append((node_order[0], candidates)) + + mapping = state_params.mapping + reverse_mapping = state_params.reverse_mapping + + # Index of the node from the order, currently being examined + matching_node = 1 + + while stack: + current_node, candidate_nodes = stack[-1] + + try: + candidate = next(candidate_nodes) + except StopIteration: + # If no remaining candidates, return to a previous state, and follow another branch + stack.pop() + matching_node -= 1 + if stack: + # Pop the previously added u-v pair, and look for a different candidate _v for u + popped_node1, _ = stack[-1] + popped_node2 = mapping[popped_node1] + mapping.pop(popped_node1) + reverse_mapping.pop(popped_node2) + restore_Tinout(popped_node1, popped_node2, graph_params, state_params) + continue + + if _feasibility(current_node, candidate, graph_params, state_params): + # Terminate if mapping is extended to its full + if len(mapping) == G2.number_of_nodes() - 1: + cp_mapping = mapping.copy() + cp_mapping[current_node] = candidate + yield cp_mapping + continue + + # Feasibility rules pass, so extend the mapping and update the parameters + mapping[current_node] = candidate + 
            reverse_mapping[candidate] = current_node
+            _update_Tinout(current_node, candidate, graph_params, state_params)
+            # Append the next node and its candidates to the stack
+            candidates = iter(
+                find_candidates(
+                    node_order[matching_node], graph_params, state_params, G1_degree
+                )
+            )
+            stack.append((node_order[matching_node], candidates))
+            matching_node += 1
+
+
+def _precheck_label_properties(graph_params):
+    G1, G2, G1_labels, G2_labels, nodes_of_G1Labels, nodes_of_G2Labels, _ = graph_params
+    if any(
+        label not in nodes_of_G1Labels or len(nodes_of_G1Labels[label]) != len(nodes)
+        for label, nodes in nodes_of_G2Labels.items()
+    ):
+        return False
+    return True
+
+
+def _initialize_parameters(G1, G2, G2_degree, node_label=None, default_label=-1):
+    """Initializes all the necessary parameters for VF2++
+
+    Parameters
+    ----------
+    G1, G2: NetworkX Graph or MultiGraph instances.
+        The two graphs to check for isomorphism or monomorphism
+
+    G2_degree: dict
+        The degree of every node in G2
+
+    node_label: str, optional
+        The name of the node attribute to be used when comparing nodes
+
+    default_label: scalar
+        Default value to use when a node doesn't have an attribute named `node_label`
+
+    Returns
+    -------
+    graph_params: namedtuple
+        Contains all the Graph-related parameters:
+
+        G1, G2
+        G1_labels, G2_labels: dict
+
+    state_params: namedtuple
+        Contains all the State-related parameters:
+
+        mapping: dict
+            The mapping as extended so far. Maps nodes of G1 to nodes of G2
+
+        reverse_mapping: dict
+            The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed
+
+        T1, T2: set
+            Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are
+            neighbors of nodes that are.
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+    """
+    G1_labels = dict(G1.nodes(data=node_label, default=default_label))
+    G2_labels = dict(G2.nodes(data=node_label, default=default_label))
+
+    graph_params = _GraphParameters(
+        G1,
+        G2,
+        G1_labels,
+        G2_labels,
+        nx.utils.groups(G1_labels),
+        nx.utils.groups(G2_labels),
+        nx.utils.groups(G2_degree),
+    )
+
+    T1, T1_in = set(), set()
+    T2, T2_in = set(), set()
+    # Initially the mapping is empty, so every node starts in Ti_tilde.
+    # todo: do we need Ti_tilde_in? What nodes does it have?
+    T1_tilde, T1_tilde_in = set(G1.nodes()), set()
+    T2_tilde, T2_tilde_in = set(G2.nodes()), set()
+
+    state_params = _StateParameters(
+        {},
+        {},
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    )
+
+    return graph_params, state_params
+
+
+def _matching_order(graph_params):
+    """The node ordering as introduced in VF2++.
+
+    Notes
+    -----
+    Taking into account the structure of the Graph and the node labeling, the nodes are placed in an order such that
+    most of the unfruitful/infeasible branches of the search space can be pruned on high levels, significantly
+    decreasing the number of visited states. The premise is that the algorithm will be able to recognize
+    inconsistencies early, proceeding to go deep into the search tree only if it's needed.
+
+    Parameters
+    ----------
+    graph_params: namedtuple
+        Contains:
+
+        G1, G2: NetworkX Graph or MultiGraph instances.
+            The two graphs to check for isomorphism or monomorphism.
+
+        G1_labels, G2_labels: dict
+            The label of every node in G1 and G2 respectively.
+
+    Returns
+    -------
+    node_order: list
+        The ordering of the nodes.
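+
+    Examples
+    --------
+    An illustrative sketch using the private helpers defined in this file
+    (for non-integer nodes the exact order may vary with hashing): on an
+    unlabeled 4-node path graph the ordering starts at a highest-degree node.
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms.isomorphism.vf2pp import (
+    ...     _initialize_parameters,
+    ...     _matching_order,
+    ... )
+    >>> G1, G2 = nx.path_graph(4), nx.path_graph(4)
+    >>> gp, _ = _initialize_parameters(G1, G2, dict(G2.degree))
+    >>> _matching_order(gp)
+    [1, 2, 0, 3]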
+ """ + G1, G2, G1_labels, _, _, nodes_of_G2Labels, _ = graph_params + if not G1 and not G2: + return {} + + if G1.is_directed(): + G1 = G1.to_undirected(as_view=True) + + V1_unordered = set(G1.nodes()) + label_rarity = {label: len(nodes) for label, nodes in nodes_of_G2Labels.items()} + used_degrees = {node: 0 for node in G1} + node_order = [] + + while V1_unordered: + max_rarity = min(label_rarity[G1_labels[x]] for x in V1_unordered) + rarest_nodes = [ + n for n in V1_unordered if label_rarity[G1_labels[n]] == max_rarity + ] + max_node = max(rarest_nodes, key=G1.degree) + + for dlevel_nodes in nx.bfs_layers(G1, max_node): + nodes_to_add = dlevel_nodes.copy() + while nodes_to_add: + max_used_degree = max(used_degrees[n] for n in nodes_to_add) + max_used_degree_nodes = [ + n for n in nodes_to_add if used_degrees[n] == max_used_degree + ] + max_degree = max(G1.degree[n] for n in max_used_degree_nodes) + max_degree_nodes = [ + n for n in max_used_degree_nodes if G1.degree[n] == max_degree + ] + next_node = min( + max_degree_nodes, key=lambda x: label_rarity[G1_labels[x]] + ) + + node_order.append(next_node) + for node in G1.neighbors(next_node): + used_degrees[node] += 1 + + nodes_to_add.remove(next_node) + label_rarity[G1_labels[next_node]] -= 1 + V1_unordered.discard(next_node) + + return node_order + + +def _find_candidates( + u, graph_params, state_params, G1_degree +): # todo: make the 4th argument the degree of u + """Given node u of G1, finds the candidates of u from G2. + + Parameters + ---------- + u: Graph node + The node from G1 for which to find the candidates from G2. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are + neighbors of nodes that are. + + T1_tilde, T2_tilde: set + Ti_tilde contains all the nodes from Gi, that are neither in the mapping nor in Ti + + Returns + ------- + candidates: set + The nodes from G2 which are candidates for u. 
+ """ + G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params + mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params + + covered_neighbors = [nbr for nbr in G1[u] if nbr in mapping] + if not covered_neighbors: + candidates = set(nodes_of_G2Labels[G1_labels[u]]) + candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + candidates.intersection_update(T2_tilde) + candidates.difference_update(reverse_mapping) + if G1.is_multigraph(): + candidates.difference_update( + { + node + for node in candidates + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return candidates + + nbr1 = covered_neighbors[0] + common_nodes = set(G2[mapping[nbr1]]) + + for nbr1 in covered_neighbors[1:]: + common_nodes.intersection_update(G2[mapping[nbr1]]) + + common_nodes.difference_update(reverse_mapping) + common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]]) + if G1.is_multigraph(): + common_nodes.difference_update( + { + node + for node in common_nodes + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return common_nodes + + +def _find_candidates_Di(u, graph_params, state_params, G1_degree): + G1, G2, G1_labels, _, _, nodes_of_G2Labels, G2_nodes_of_degree = graph_params + mapping, reverse_mapping, _, _, _, _, _, _, T2_tilde, _ = state_params + + covered_successors = [succ for succ in G1[u] if succ in mapping] + covered_predecessors = [pred for pred in G1.pred[u] if pred in mapping] + + if not (covered_successors or covered_predecessors): + candidates = set(nodes_of_G2Labels[G1_labels[u]]) + candidates.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + candidates.intersection_update(T2_tilde) + candidates.difference_update(reverse_mapping) + if G1.is_multigraph(): + candidates.difference_update( + { + node + for node in candidates + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return candidates + + if covered_successors: + succ1 = covered_successors[0] + common_nodes = set(G2.pred[mapping[succ1]]) + + for succ1 in covered_successors[1:]: + common_nodes.intersection_update(G2.pred[mapping[succ1]]) + else: + pred1 = covered_predecessors.pop() + common_nodes = set(G2[mapping[pred1]]) + + for pred1 in covered_predecessors: + common_nodes.intersection_update(G2[mapping[pred1]]) + + common_nodes.difference_update(reverse_mapping) + common_nodes.intersection_update(G2_nodes_of_degree[G1_degree[u]]) + common_nodes.intersection_update(nodes_of_G2Labels[G1_labels[u]]) + if G1.is_multigraph(): + common_nodes.difference_update( + { + node + for node in common_nodes + if G1.number_of_edges(u, u) != G2.number_of_edges(node, node) + } + ) + return common_nodes + + +def _feasibility(node1, node2, graph_params, state_params): + """Given a candidate pair of nodes u and v from G1 and G2 respectively, checks if it's feasible to extend the + mapping, i.e. if u and v can be matched. + + Notes + ----- + This function performs all the necessary checking by applying both consistency and cutting rules. + + Parameters + ---------- + node1, node2: Graph node + The candidate pair of nodes being checked for matching + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. 
+            The two graphs to check for isomorphism or monomorphism
+
+        G1_labels, G2_labels: dict
+            The label of every node in G1 and G2 respectively
+
+    state_params: namedtuple
+        Contains all the State-related parameters:
+
+        mapping: dict
+            The mapping as extended so far. Maps nodes of G1 to nodes of G2
+
+        reverse_mapping: dict
+            The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed
+
+        T1, T2: set
+            Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are
+            neighbors of nodes that are.
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+
+    Returns
+    -------
+    True if all checks are successful, False otherwise.
+    """
+    G1 = graph_params.G1
+
+    if _cut_PT(node1, node2, graph_params, state_params):
+        return False
+
+    if G1.is_multigraph():
+        if not _consistent_PT(node1, node2, graph_params, state_params):
+            return False
+
+    return True
+
+
+def _cut_PT(u, v, graph_params, state_params):
+    """Implements the cutting rules for the ISO problem.
+
+    Parameters
+    ----------
+    u, v: Graph node
+        The two candidate nodes being examined.
+
+    graph_params: namedtuple
+        Contains all the Graph-related parameters:
+
+        G1, G2: NetworkX Graph or MultiGraph instances.
+            The two graphs to check for isomorphism or monomorphism
+
+        G1_labels, G2_labels: dict
+            The label of every node in G1 and G2 respectively
+
+    state_params: namedtuple
+        Contains all the State-related parameters:
+
+        mapping: dict
+            The mapping as extended so far. Maps nodes of G1 to nodes of G2
+
+        reverse_mapping: dict
+            The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed
+
+        T1, T2: set
+            Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are
+            neighbors of nodes that are.
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+
+    Returns
+    -------
+    True if we should prune this branch, i.e. the node pair failed the cutting checks. False otherwise.
+    """
+    G1, G2, G1_labels, G2_labels, _, _, _ = graph_params
+    (
+        _,
+        _,
+        T1,
+        T1_in,
+        T1_tilde,
+        _,
+        T2,
+        T2_in,
+        T2_tilde,
+        _,
+    ) = state_params
+
+    u_labels_predecessors, v_labels_predecessors = {}, {}
+    if G1.is_directed():
+        u_labels_predecessors = nx.utils.groups(
+            {n1: G1_labels[n1] for n1 in G1.pred[u]}
+        )
+        v_labels_predecessors = nx.utils.groups(
+            {n2: G2_labels[n2] for n2 in G2.pred[v]}
+        )
+
+        if set(u_labels_predecessors.keys()) != set(v_labels_predecessors.keys()):
+            return True
+
+    u_labels_successors = nx.utils.groups({n1: G1_labels[n1] for n1 in G1[u]})
+    v_labels_successors = nx.utils.groups({n2: G2_labels[n2] for n2 in G2[v]})
+
+    # If the neighbors of u do not have the same labels as those of v, this is not feasible.
+    if set(u_labels_successors.keys()) != set(v_labels_successors.keys()):
+        return True
+
+    for label, G1_nbh in u_labels_successors.items():
+        G2_nbh = v_labels_successors[label]
+
+        if G1.is_multigraph():
+            # Check, for every neighbor in the neighborhood, that u-nbr1 has the same number of edges as v-nbr2
+            u_nbrs_edges = sorted(G1.number_of_edges(u, x) for x in G1_nbh)
+            v_nbrs_edges = sorted(G2.number_of_edges(v, x) for x in G2_nbh)
+            if any(
+                u_nbr_edges != v_nbr_edges
+                for u_nbr_edges, v_nbr_edges in zip(u_nbrs_edges, v_nbrs_edges)
+            ):
+                return True
+
+        if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)):
+            return True
+        if len(T1_tilde.intersection(G1_nbh)) != len(T2_tilde.intersection(G2_nbh)):
+            return True
+        if G1.is_directed() and len(T1_in.intersection(G1_nbh)) != len(
+            T2_in.intersection(G2_nbh)
+        ):
+            return True
+
+    if not G1.is_directed():
+        return False
+
+    for label, G1_pred in u_labels_predecessors.items():
+        G2_pred = v_labels_predecessors[label]
+
+        if G1.is_multigraph():
+            # Check, for every predecessor in the neighborhood, that u-pred1 has the same number of edges as v-pred2
+            u_pred_edges = sorted(G1.number_of_edges(u, x) for x in G1_pred)
+            v_pred_edges = sorted(G2.number_of_edges(v, x) for x in G2_pred)
+            if any(
+                u_nbr_edges != v_nbr_edges
+                for u_nbr_edges, v_nbr_edges in zip(u_pred_edges, v_pred_edges)
+            ):
+                return True
+
+        if len(T1.intersection(G1_pred)) != len(T2.intersection(G2_pred)):
+            return True
+        if len(T1_tilde.intersection(G1_pred)) != len(T2_tilde.intersection(G2_pred)):
+            return True
+        if len(T1_in.intersection(G1_pred)) != len(T2_in.intersection(G2_pred)):
+            return True
+
+    return False
+
+
+def _consistent_PT(u, v, graph_params, state_params):
+    """Checks the consistency of extending the mapping using the current node pair.
+
+    Parameters
+    ----------
+    u, v: Graph node
+        The two candidate nodes being examined.
+
+    graph_params: namedtuple
+        Contains all the Graph-related parameters:
+
+        G1, G2: NetworkX Graph or MultiGraph instances.
+            The two graphs to check for isomorphism or monomorphism
+
+        G1_labels, G2_labels: dict
+            The label of every node in G1 and G2 respectively
+
+    state_params: namedtuple
+        Contains all the State-related parameters:
+
+        mapping: dict
+            The mapping as extended so far. Maps nodes of G1 to nodes of G2
+
+        reverse_mapping: dict
+            The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed
+
+        T1, T2: set
+            Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are
+            neighbors of nodes that are.
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+
+    Returns
+    -------
+    True if the pair passes all the consistency checks successfully. False otherwise.
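+
+    Examples
+    --------
+    A doctest-style sketch with the private helpers defined in this file:
+    after mapping 0 -> 0 on two path graphs, pairing 1 with 1 is consistent
+    (the covered edge 1-0 exists in both graphs), while pairing 1 with 3 is
+    not (G2 has no edge between 3 and 0).
+
+    >>> import networkx as nx
+    >>> from networkx.algorithms.isomorphism.vf2pp import (
+    ...     _initialize_parameters,
+    ...     _consistent_PT,
+    ... )
+    >>> G1, G2 = nx.path_graph(4), nx.path_graph(4)
+    >>> gp, sp = _initialize_parameters(G1, G2, dict(G2.degree))
+    >>> sp.mapping[0] = 0
+    >>> sp.reverse_mapping[0] = 0
+    >>> _consistent_PT(1, 1, gp, sp)
+    True
+    >>> _consistent_PT(1, 3, gp, sp)
+    False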
+ """ + G1, G2 = graph_params.G1, graph_params.G2 + mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping + + for neighbor in G1[u]: + if neighbor in mapping: + if G1.number_of_edges(u, neighbor) != G2.number_of_edges( + v, mapping[neighbor] + ): + return False + + for neighbor in G2[v]: + if neighbor in reverse_mapping: + if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges( + v, neighbor + ): + return False + + if not G1.is_directed(): + return True + + for predecessor in G1.pred[u]: + if predecessor in mapping: + if G1.number_of_edges(predecessor, u) != G2.number_of_edges( + mapping[predecessor], v + ): + return False + + for predecessor in G2.pred[v]: + if predecessor in reverse_mapping: + if G1.number_of_edges( + reverse_mapping[predecessor], u + ) != G2.number_of_edges(predecessor, v): + return False + + return True + + +def _update_Tinout(new_node1, new_node2, graph_params, state_params): + """Updates the Ti/Ti_out (i=1,2) when a new node pair u-v is added to the mapping. + + Notes + ----- + This function should be called right after the feasibility checks are passed, and node1 is mapped to node2. The + purpose of this function is to avoid brute force computing of Ti/Ti_out by iterating over all nodes of the graph + and checking which nodes satisfy the necessary conditions. Instead, in every step of the algorithm we focus + exclusively on the two nodes that are being added to the mapping, incrementally updating Ti/Ti_out. + + Parameters + ---------- + new_node1, new_node2: Graph node + The two new nodes, added to the mapping. + + graph_params: namedtuple + Contains all the Graph-related parameters: + + G1,G2: NetworkX Graph or MultiGraph instances. + The two graphs to check for isomorphism or monomorphism + + G1_labels,G2_labels: dict + The label of every node in G1 and G2 respectively + + state_params: namedtuple + Contains all the State-related parameters: + + mapping: dict + The mapping as extended so far. Maps nodes of G1 to nodes of G2 + + reverse_mapping: dict + The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed + + T1, T2: set + Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are + neighbors of nodes that are. 
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+    """
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    uncovered_successors_G1 = {succ for succ in G1[new_node1] if succ not in mapping}
+    uncovered_successors_G2 = {
+        succ for succ in G2[new_node2] if succ not in reverse_mapping
+    }
+
+    # Add the uncovered neighbors of node1 and node2 in T1 and T2 respectively
+    T1.update(uncovered_successors_G1)
+    T2.update(uncovered_successors_G2)
+    T1.discard(new_node1)
+    T2.discard(new_node2)
+
+    T1_tilde.difference_update(uncovered_successors_G1)
+    T2_tilde.difference_update(uncovered_successors_G2)
+    T1_tilde.discard(new_node1)
+    T2_tilde.discard(new_node2)
+
+    if not G1.is_directed():
+        return
+
+    uncovered_predecessors_G1 = {
+        pred for pred in G1.pred[new_node1] if pred not in mapping
+    }
+    uncovered_predecessors_G2 = {
+        pred for pred in G2.pred[new_node2] if pred not in reverse_mapping
+    }
+
+    T1_in.update(uncovered_predecessors_G1)
+    T2_in.update(uncovered_predecessors_G2)
+    T1_in.discard(new_node1)
+    T2_in.discard(new_node2)
+
+    T1_tilde.difference_update(uncovered_predecessors_G1)
+    T2_tilde.difference_update(uncovered_predecessors_G2)
+    T1_tilde.discard(new_node1)
+    T2_tilde.discard(new_node2)
+
+
+def _restore_Tinout(popped_node1, popped_node2, graph_params, state_params):
+    """Restores the previous version of Ti/Ti_tilde when a node pair is deleted from the mapping.
+
+    Parameters
+    ----------
+    popped_node1, popped_node2: Graph node
+        The two nodes deleted from the mapping.
+
+    graph_params: namedtuple
+        Contains all the Graph-related parameters:
+
+        G1, G2: NetworkX Graph or MultiGraph instances.
+            The two graphs to check for isomorphism or monomorphism
+
+        G1_labels, G2_labels: dict
+            The label of every node in G1 and G2 respectively
+
+    state_params: namedtuple
+        Contains all the State-related parameters:
+
+        mapping: dict
+            The mapping as extended so far. Maps nodes of G1 to nodes of G2
+
+        reverse_mapping: dict
+            The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically "mapping" reversed
+
+        T1, T2: set
+            Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are
+            neighbors of nodes that are.
+
+        T1_tilde, T2_tilde: set
+            Ti_tilde contains all the nodes from Gi that are neither in the mapping nor in Ti
+    """
+    # If the node we want to remove from the mapping has at least one covered neighbor, add it to T1.
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    is_added = False
+    for neighbor in G1[popped_node1]:
+        if neighbor in mapping:
+            # If a neighbor of the excluded node1 is in the mapping, keep node1 in T1
+            is_added = True
+            T1.add(popped_node1)
+        else:
+            # Check if this neighbor has another connection with a covered node. If not, only then exclude it from T1
+            if any(nbr in mapping for nbr in G1[neighbor]):
+                continue
+            T1.discard(neighbor)
+            T1_tilde.add(neighbor)
+
+    # Case where the node is present in neither the mapping nor T1: by definition, it belongs to T1_tilde.
+    if not is_added:
+        T1_tilde.add(popped_node1)
+
+    is_added = False
+    for neighbor in G2[popped_node2]:
+        if neighbor in reverse_mapping:
+            is_added = True
+            T2.add(popped_node2)
+        else:
+            if any(nbr in reverse_mapping for nbr in G2[neighbor]):
+                continue
+            T2.discard(neighbor)
+            T2_tilde.add(neighbor)
+
+    if not is_added:
+        T2_tilde.add(popped_node2)
+
+
+def _restore_Tinout_Di(popped_node1, popped_node2, graph_params, state_params):
+    # If the node we want to remove from the mapping has at least one covered successor or predecessor,
+    # keep it in T1_in or T1 respectively.
+    G1, G2, _, _, _, _, _ = graph_params
+    (
+        mapping,
+        reverse_mapping,
+        T1,
+        T1_in,
+        T1_tilde,
+        T1_tilde_in,
+        T2,
+        T2_in,
+        T2_tilde,
+        T2_tilde_in,
+    ) = state_params
+
+    is_added = False
+    for successor in G1[popped_node1]:
+        if successor in mapping:
+            # If a successor of the excluded node1 is in the mapping, keep node1 in T1_in
+            is_added = True
+            T1_in.add(popped_node1)
+        else:
+            # Check if the successor has another connection with a covered node. If not, only then exclude it
+            if not any(pred in mapping for pred in G1.pred[successor]):
+                T1.discard(successor)
+
+            if not any(succ in mapping for succ in G1[successor]):
+                T1_in.discard(successor)
+
+            if successor not in T1:
+                if successor not in T1_in:
+                    T1_tilde.add(successor)
+
+    for predecessor in G1.pred[popped_node1]:
+        if predecessor in mapping:
+            # If a predecessor of the excluded node1 is in the mapping, keep node1 in T1
+            is_added = True
+            T1.add(popped_node1)
+        else:
+            # Check if the predecessor has another connection with a covered node. If not, only then exclude it
+            if not any(pred in mapping for pred in G1.pred[predecessor]):
+                T1.discard(predecessor)
+
+            if not any(succ in mapping for succ in G1[predecessor]):
+                T1_in.discard(predecessor)
+
+            if not (predecessor in T1 or predecessor in T1_in):
+                T1_tilde.add(predecessor)
+
+    # Case where the node is present in neither the mapping nor T1: by definition, it belongs to T1_tilde.
+    if not is_added:
+        T1_tilde.add(popped_node1)
+
+    is_added = False
+    for successor in G2[popped_node2]:
+        if successor in reverse_mapping:
+            is_added = True
+            T2_in.add(popped_node2)
+        else:
+            if not any(pred in reverse_mapping for pred in G2.pred[successor]):
+                T2.discard(successor)
+
+            if not any(succ in reverse_mapping for succ in G2[successor]):
+                T2_in.discard(successor)
+
+            if successor not in T2:
+                if successor not in T2_in:
+                    T2_tilde.add(successor)
+
+    for predecessor in G2.pred[popped_node2]:
+        if predecessor in reverse_mapping:
+            # If a predecessor of the excluded node2 is in the reverse mapping, keep node2 in T2
+            is_added = True
+            T2.add(popped_node2)
+        else:
+            # Check if the predecessor has another connection with a covered node.
If not, only then exclude it from T1 + if not any(pred in reverse_mapping for pred in G2.pred[predecessor]): + T2.discard(predecessor) + + if not any(succ in reverse_mapping for succ in G2[predecessor]): + T2_in.discard(predecessor) + + if not (predecessor in T2 or predecessor in T2_in): + T2_tilde.add(predecessor) + + if not is_added: + T2_tilde.add(popped_node2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py new file mode 100644 index 0000000000000000000000000000000000000000..9484edc042ac127451b7141f9da659e3e429b679 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/isomorphism/vf2userfunc.py @@ -0,0 +1,192 @@ +""" + Module to simplify the specification of user-defined equality functions for + node and edge attributes during isomorphism checks. + + During the construction of an isomorphism, the algorithm considers two + candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then + compared with respect to properties involving n1 and n2, and if the outcome + is good, then the candidate nodes are considered isomorphic. NetworkX + provides a simple mechanism for users to extend the comparisons to include + node and edge attributes. + + Node attributes are handled by the node_match keyword. When considering + n1 and n2, the algorithm passes their node attribute dictionaries to + node_match, and if it returns False, then n1 and n2 cannot be + considered to be isomorphic. + + Edge attributes are handled by the edge_match keyword. When considering + n1 and n2, the algorithm must verify that outgoing edges from n1 are + commensurate with the outgoing edges for n2. If the graph is directed, + then a similar check is also performed for incoming edges. + + Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from + G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge + between (n1, v1) and only one edge between (n2, v2). Those edge attribute + dictionaries are passed to edge_match, and if it returns False, then + n1 and n2 cannot be considered isomorphic. For multigraphs and + multidigraphs, there can be multiple edges between (n1, v1) and also + multiple edges between (n2, v2). Now, there must exist an isomorphism + from "all the edges between (n1, v1)" to "all the edges between (n2, v2)". + So, all of the edge attribute dictionaries are passed to edge_match, and + it must determine if there is an isomorphism between the two sets of edges. +""" + +from . 
import isomorphvf2 as vf2 + +__all__ = ["GraphMatcher", "DiGraphMatcher", "MultiGraphMatcher", "MultiDiGraphMatcher"] + + +def _semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + # Make sure the nodes match + if self.node_match is not None: + nm = self.node_match(self.G1.nodes[G1_node], self.G2.nodes[G2_node]) + if not nm: + return False + + # Make sure the edges match + if self.edge_match is not None: + # Cached lookups + G1nbrs = self.G1_adj[G1_node] + G2nbrs = self.G2_adj[G2_node] + core_1 = self.core_1 + edge_match = self.edge_match + + for neighbor in G1nbrs: + # G1_node is not in core_1, so we must handle R_self separately + if neighbor == G1_node: + if G2_node in G2nbrs and not edge_match( + G1nbrs[G1_node], G2nbrs[G2_node] + ): + return False + elif neighbor in core_1: + G2_nbr = core_1[neighbor] + if G2_nbr in G2nbrs and not edge_match( + G1nbrs[neighbor], G2nbrs[G2_nbr] + ): + return False + # syntactic check has already verified that neighbors are symmetric + + return True + + +class GraphMatcher(vf2.GraphMatcher): + """VF2 isomorphism checker for undirected graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2: graph + The graphs to be tested. + + node_match: callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match: callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.GraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + semantic_feasibility = _semantic_feasibility + + +class DiGraphMatcher(vf2.DiGraphMatcher): + """VF2 isomorphism checker for directed graphs.""" + + def __init__(self, G1, G2, node_match=None, edge_match=None): + """Initialize graph matcher. + + Parameters + ---------- + G1, G2 : graph + The graphs to be tested. + + node_match : callable + A function that returns True iff node n1 in G1 and n2 in G2 + should be considered equal during the isomorphism test. The + function will be called like:: + + node_match(G1.nodes[n1], G2.nodes[n2]) + + That is, the function will receive the node attribute dictionaries + of the nodes under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + edge_match : callable + A function that returns True iff the edge attribute dictionary for + the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be + considered equal during the isomorphism test. 
The function will be + called like:: + + edge_match(G1[u1][v1], G2[u2][v2]) + + That is, the function will receive the edge attribute dictionaries + of the edges under consideration. If None, then no attributes are + considered when testing for an isomorphism. + + """ + vf2.DiGraphMatcher.__init__(self, G1, G2) + + self.node_match = node_match + self.edge_match = edge_match + + # These will be modified during checks to minimize code repeat. + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + def semantic_feasibility(self, G1_node, G2_node): + """Returns True if mapping G1_node to G2_node is semantically feasible.""" + + # Test node_match and also test edge_match on successors + feasible = _semantic_feasibility(self, G1_node, G2_node) + if not feasible: + return False + + # Test edge_match on predecessors + self.G1_adj = self.G1.pred + self.G2_adj = self.G2.pred + feasible = _semantic_feasibility(self, G1_node, G2_node) + self.G1_adj = self.G1.adj + self.G2_adj = self.G2.adj + + return feasible + + +# The "semantics" of edge_match are different for multi(di)graphs, but +# the implementation is the same. So, technically we do not need to +# provide "multi" versions, but we do so to match NetworkX's base classes. + + +class MultiGraphMatcher(GraphMatcher): + """VF2 isomorphism checker for undirected multigraphs.""" + + +class MultiDiGraphMatcher(DiGraphMatcher): + """VF2 isomorphism checker for directed multigraphs.""" diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6009f000814753ab436278e2d2cc38e961e80f3f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__init__.py @@ -0,0 +1,2 @@ +from networkx.algorithms.link_analysis.hits_alg import * +from networkx.algorithms.link_analysis.pagerank_alg import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ea3b3a2161a6ba745045424286fe71a00b6c05 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7ede86d46737b270619ecc697b7a54fa537bb6b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0e58127cb36cdff60a49cf25e87f9bda951ff44 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/hits_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/hits_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..6d557f522143023fc4130148d518fddfab64a302 --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/hits_alg.py
@@ -0,0 +1,334 @@
+"""Hubs and authorities analysis of graph structure.
+"""
+import networkx as nx
+
+__all__ = ["hits"]
+
+
+@nx._dispatch
+def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True):
+    """Returns HITS hubs and authorities values for nodes.
+
+    The HITS algorithm computes two numbers for a node.
+    The authority score estimates the node value based on incoming links.
+    The hub score estimates the node value based on outgoing links.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    max_iter : integer, optional
+      Maximum number of iterations in power method.
+
+    tol : float, optional
+      Error tolerance used to check convergence in power method iteration.
+
+    nstart : dictionary, optional
+      Starting value of each node for power method iteration.
+
+    normalized : bool (default=True)
+       Normalize results by the sum of all of the values.
+
+    Returns
+    -------
+    (hubs, authorities) : two-tuple of dictionaries
+       Two dictionaries keyed by node containing the hub and authority
+       values.
+
+    Raises
+    ------
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> h, a = nx.hits(G)
+
+    Notes
+    -----
+    The eigenvector calculation is done by the power iteration method
+    and has no guarantee of convergence. The iteration will stop
+    after max_iter iterations or an error tolerance of
+    number_of_nodes(G)*tol has been reached.
+
+    The HITS algorithm was designed for directed graphs but this
+    algorithm does not check if the input graph is directed and will
+    execute on undirected graphs.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Jon Kleinberg,
+       Authoritative sources in a hyperlinked environment
+       Journal of the ACM 46 (5): 604-632, 1999.
+       doi:10.1145/324133.324140.
+       http://www.cs.cornell.edu/home/kleinber/auth.pdf.
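+
+    With the default ``normalized=True``, the hub and authority scores each
+    sum to one (a quick sanity check):
+
+    >>> G = nx.path_graph(4)
+    >>> h, a = nx.hits(G)
+    >>> round(sum(h.values()), 10), round(sum(a.values()), 10)
+    (1.0, 1.0)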
+ """ + import numpy as np + import scipy as sp + + if len(G) == 0: + return {}, {} + A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) + + if nstart is None: + _, _, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol) + else: + nstart = np.array(list(nstart.values())) + _, _, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) + + a = vt.flatten().real + h = A @ a + if normalized: + h /= h.sum() + a /= a.sum() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities + + +def _hits_python(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): + if isinstance(G, (nx.MultiGraph, nx.MultiDiGraph)): + raise Exception("hits() not defined for graphs with multiedges.") + if len(G) == 0: + return {}, {} + # choose fixed starting vector if not given + if nstart is None: + h = dict.fromkeys(G, 1.0 / G.number_of_nodes()) + else: + h = nstart + # normalize starting vector + s = 1.0 / sum(h.values()) + for k in h: + h[k] *= s + for _ in range(max_iter): # power iteration: make up to max_iter iterations + hlast = h + h = dict.fromkeys(hlast.keys(), 0) + a = dict.fromkeys(hlast.keys(), 0) + # this "matrix multiply" looks odd because it is + # doing a left multiply a^T=hlast^T*G + for n in h: + for nbr in G[n]: + a[nbr] += hlast[n] * G[n][nbr].get("weight", 1) + # now multiply h=Ga + for n in h: + for nbr in G[n]: + h[n] += a[nbr] * G[n][nbr].get("weight", 1) + # normalize vector + s = 1.0 / max(h.values()) + for n in h: + h[n] *= s + # normalize vector + s = 1.0 / max(a.values()) + for n in a: + a[n] *= s + # check convergence, l1 norm + err = sum(abs(h[n] - hlast[n]) for n in h) + if err < tol: + break + else: + raise nx.PowerIterationFailedConvergence(max_iter) + if normalized: + s = 1.0 / sum(a.values()) + for n in a: + a[n] *= s + s = 1.0 / sum(h.values()) + for n in h: + h[n] *= s + return h, a + + +def _hits_numpy(G, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Examples + -------- + >>> G = nx.path_graph(4) + + The `hubs` and `authorities` are given by the eigenvectors corresponding to the + maximum eigenvalues of the hubs_matrix and the authority_matrix, respectively. + + The ``hubs`` and ``authority`` matrices are computed from the adjacency + matrix: + + >>> adj_ary = nx.to_numpy_array(G) + >>> hubs_matrix = adj_ary @ adj_ary.T + >>> authority_matrix = adj_ary.T @ adj_ary + + `_hits_numpy` maps the eigenvector corresponding to the maximum eigenvalue + of the respective matrices to the nodes in `G`: + + >>> from networkx.algorithms.link_analysis.hits_alg import _hits_numpy + >>> hubs, authority = _hits_numpy(G) + + Notes + ----- + The eigenvector calculation uses NumPy's interface to LAPACK. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." 
+ http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-32, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. + """ + import numpy as np + + if len(G) == 0: + return {}, {} + adj_ary = nx.to_numpy_array(G) + # Hub matrix + H = adj_ary @ adj_ary.T + e, ev = np.linalg.eig(H) + h = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue + # Authority matrix + A = adj_ary.T @ adj_ary + e, ev = np.linalg.eig(A) + a = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue + if normalized: + h /= h.sum() + a /= a.sum() + else: + h /= h.max() + a /= a.max() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities + + +def _hits_scipy(G, max_iter=100, tol=1.0e-6, nstart=None, normalized=True): + """Returns HITS hubs and authorities values for nodes. + + + The HITS algorithm computes two numbers for a node. + Authorities estimates the node value based on the incoming links. + Hubs estimates the node value based on outgoing links. + + Parameters + ---------- + G : graph + A NetworkX graph + + max_iter : integer, optional + Maximum number of iterations in power method. + + tol : float, optional + Error tolerance used to check convergence in power method iteration. + + nstart : dictionary, optional + Starting value of each node for power method iteration. + + normalized : bool (default=True) + Normalize results by the sum of all of the values. + + Returns + ------- + (hubs,authorities) : two-tuple of dictionaries + Two dictionaries keyed by node containing the hub and authority + values. + + Examples + -------- + >>> from networkx.algorithms.link_analysis.hits_alg import _hits_scipy + >>> G = nx.path_graph(4) + >>> h, a = _hits_scipy(G) + + Notes + ----- + This implementation uses SciPy sparse matrices. + + The eigenvector calculation is done by the power iteration method + and has no guarantee of convergence. The iteration will stop + after max_iter iterations or an error tolerance of + number_of_nodes(G)*tol has been reached. + + The HITS algorithm was designed for directed graphs but this + algorithm does not check if the input graph is directed and will + execute on undirected graphs. + + Raises + ------ + PowerIterationFailedConvergence + If the algorithm fails to converge to the specified tolerance + within the specified number of iterations of the power iteration + method. + + References + ---------- + .. [1] A. Langville and C. Meyer, + "A survey of eigenvector methods of web information retrieval." + http://citeseer.ist.psu.edu/713792.html + .. [2] Jon Kleinberg, + Authoritative sources in a hyperlinked environment + Journal of the ACM 46 (5): 604-632, 1999. + doi:10.1145/324133.324140. + http://www.cs.cornell.edu/home/kleinber/auth.pdf. 
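+
+    As with ``hits``, the default ``normalized=True`` makes each score
+    vector sum to one (a quick sanity check, reusing ``h`` and ``a`` from
+    the example above):
+
+    >>> round(sum(h.values()), 10), round(sum(a.values()), 10)
+    (1.0, 1.0)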
+ """ + import numpy as np + + if len(G) == 0: + return {}, {} + A = nx.to_scipy_sparse_array(G, nodelist=list(G)) + (n, _) = A.shape # should be square + ATA = A.T @ A # authority matrix + # choose fixed starting vector if not given + if nstart is None: + x = np.ones((n, 1)) / n + else: + x = np.array([nstart.get(n, 0) for n in list(G)], dtype=float) + x /= x.sum() + + # power iteration on authority matrix + i = 0 + while True: + xlast = x + x = ATA @ x + x /= x.max() + # check convergence, l1 norm + err = np.absolute(x - xlast).sum() + if err < tol: + break + if i > max_iter: + raise nx.PowerIterationFailedConvergence(max_iter) + i += 1 + + a = x.flatten() + h = A @ a + if normalized: + h /= h.sum() + a /= a.sum() + hubs = dict(zip(G, map(float, h))) + authorities = dict(zip(G, map(float, a))) + return hubs, authorities diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py new file mode 100644 index 0000000000000000000000000000000000000000..371dd60edd9113ffee7224b9a42965f8c7c57bbd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/pagerank_alg.py @@ -0,0 +1,499 @@ +"""PageRank analysis of graph structure. """ +from warnings import warn + +import networkx as nx + +__all__ = ["pagerank", "google_matrix"] + + +@nx._dispatch(edge_attrs="weight") +def pagerank( + G, + alpha=0.85, + personalization=None, + max_iter=100, + tol=1.0e-6, + nstart=None, + weight="weight", + dangling=None, +): + """Returns the PageRank of the nodes in the graph. + + PageRank computes a ranking of the nodes in the graph G based on + the structure of the incoming links. It was originally designed as + an algorithm to rank web pages. + + Parameters + ---------- + G : graph + A NetworkX graph. Undirected graphs will be converted to a directed + graph with two directed edges for each undirected edge. + + alpha : float, optional + Damping parameter for PageRank, default=0.85. + + personalization: dict, optional + The "personalization vector" consisting of a dictionary with a + key some subset of graph nodes and personalization value each of those. + At least one personalization value must be non-zero. + If not specified, a nodes personalization value will be zero. + By default, a uniform distribution is used. + + max_iter : integer, optional + Maximum number of iterations in power method eigenvalue solver. + + tol : float, optional + Error tolerance used to check convergence in power method solver. + The iteration will stop after a tolerance of ``len(G) * tol`` is reached. + + nstart : dictionary, optional + Starting value of PageRank iteration for each node. + + weight : key, optional + Edge data key to use as weight. If None weights are set to 1. + + dangling: dict, optional + The outedges to be assigned to any "dangling" nodes, i.e., nodes without + any outedges. The dict key is the node the outedge points to and the dict + value is the weight of that outedge. By default, dangling nodes are given + outedges according to the personalization vector (uniform if not + specified). This must be selected to result in an irreducible transition + matrix (see notes under google_matrix). It may be common to have the + dangling dict to be the same as the personalization dict. 
+
+
+    Returns
+    -------
+    pagerank : dictionary
+       Dictionary of nodes with PageRank as value.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> pr = nx.pagerank(G, alpha=0.9)
+
+    Notes
+    -----
+    The eigenvector calculation is done by the power iteration method
+    and has no guarantee of convergence. The iteration will stop after
+    an error tolerance of ``len(G) * tol`` has been reached. If the
+    number of iterations exceeds `max_iter`, a
+    :exc:`networkx.exception.PowerIterationFailedConvergence` exception
+    is raised.
+
+    The PageRank algorithm was designed for directed graphs but this
+    algorithm does not check if the input graph is directed and will
+    execute on undirected graphs by converting each edge in the
+    directed graph to two edges.
+
+    See Also
+    --------
+    google_matrix
+
+    Raises
+    ------
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+       The PageRank citation ranking: Bringing order to the Web. 1999
+       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+
+    """
+    return _pagerank_scipy(
+        G, alpha, personalization, max_iter, tol, nstart, weight, dangling
+    )
+
+
+def _pagerank_python(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    if len(G) == 0:
+        return {}
+
+    D = G.to_directed()
+
+    # Create a copy in (right) stochastic form
+    W = nx.stochastic_graph(D, weight=weight)
+    N = W.number_of_nodes()
+
+    # Choose fixed starting vector if not given
+    if nstart is None:
+        x = dict.fromkeys(W, 1.0 / N)
+    else:
+        # Normalized nstart vector
+        s = sum(nstart.values())
+        x = {k: v / s for k, v in nstart.items()}
+
+    if personalization is None:
+        # Assign uniform personalization vector if not given
+        p = dict.fromkeys(W, 1.0 / N)
+    else:
+        s = sum(personalization.values())
+        p = {k: v / s for k, v in personalization.items()}
+
+    if dangling is None:
+        # Use personalization vector if dangling vector not specified
+        dangling_weights = p
+    else:
+        s = sum(dangling.values())
+        dangling_weights = {k: v / s for k, v in dangling.items()}
+    dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
+
+    # power iteration: make up to max_iter iterations
+    for _ in range(max_iter):
+        xlast = x
+        x = dict.fromkeys(xlast.keys(), 0)
+        danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
+        for n in x:
+            # this matrix multiply looks odd because it is
+            # doing a left multiply x^T=xlast^T*W
+            for _, nbr, wt in W.edges(n, data=weight):
+                x[nbr] += alpha * xlast[n] * wt
+            x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
+        # check convergence, l1 norm
+        err = sum(abs(x[n] - xlast[n]) for n in x)
+        if err < N * tol:
+            return x
+    raise nx.PowerIterationFailedConvergence(max_iter)
+
+
+@nx._dispatch(edge_attrs="weight")
+def google_matrix(
+    G, alpha=0.85, personalization=None, nodelist=None, weight="weight", dangling=None
+):
+    """Returns the Google matrix of the graph.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float
+      The damping factor.
+
+    personalization: dict, optional
+      The "personalization vector", a dictionary keyed by a subset of the
+      graph's nodes whose values are those nodes' personalization weights.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    nodelist : list, optional
+      The rows and columns are ordered according to the nodes in nodelist.
+      If nodelist is None, then the ordering is produced by G.nodes().
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes below). It may be common to have the dangling dict
+      be the same as the personalization dict.
+
+    Returns
+    -------
+    A : 2D NumPy ndarray
+       Google matrix of the graph
+
+    Notes
+    -----
+    The array returned represents the transition matrix that describes the
+    Markov chain used in PageRank. For PageRank to converge to a unique
+    solution (i.e., a unique stationary distribution in a Markov chain), the
+    transition matrix must be irreducible. In other words, it must be that
+    there exists a path between every pair of nodes in the graph, or else there
+    is the potential of "rank sinks."
+
+    This implementation works with Multi(Di)Graphs. For multigraphs the
+    weight between two nodes is set to be the sum of all edge weights
+    between those nodes.
+
+    See Also
+    --------
+    pagerank
+    """
+    import numpy as np
+
+    if nodelist is None:
+        nodelist = list(G)
+
+    A = nx.to_numpy_array(G, nodelist=nodelist, weight=weight)
+    N = len(G)
+    if N == 0:
+        return A
+
+    # Personalization vector
+    if personalization is None:
+        p = np.repeat(1.0 / N, N)
+    else:
+        p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float)
+        if p.sum() == 0:
+            raise ZeroDivisionError
+        p /= p.sum()
+
+    # Dangling nodes
+    if dangling is None:
+        dangling_weights = p
+    else:
+        # Convert the dangling dictionary into an array in nodelist order
+        dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float)
+        dangling_weights /= dangling_weights.sum()
+    dangling_nodes = np.where(A.sum(axis=1) == 0)[0]
+
+    # Assign dangling_weights to any dangling nodes (nodes with no out links)
+    A[dangling_nodes] = dangling_weights
+
+    A /= A.sum(axis=1)[:, np.newaxis]  # Normalize rows to sum to 1
+
+    return alpha * A + (1 - alpha) * p
+
+
+def _pagerank_numpy(
+    G, alpha=0.85, personalization=None, weight="weight", dangling=None
+):
+    """Returns the PageRank of the nodes in the graph.
+
+    PageRank computes a ranking of the nodes in the graph G based on
+    the structure of the incoming links. It was originally designed as
+    an algorithm to rank web pages.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float, optional
+      Damping parameter for PageRank, default=0.85.
+
+    personalization: dict, optional
+      The "personalization vector", a dictionary keyed by a subset of the
+      graph's nodes whose values are those nodes' personalization weights.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    weight : key, optional
+      Edge data key to use as weight. If None weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes under google_matrix). It may be common to have the
+      dangling dict be the same as the personalization dict.
+
+    Returns
+    -------
+    pagerank : dictionary
+       Dictionary of nodes with PageRank as value.
+
+    Examples
+    --------
+    >>> from networkx.algorithms.link_analysis.pagerank_alg import _pagerank_numpy
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> pr = _pagerank_numpy(G, alpha=0.9)
+
+    Notes
+    -----
+    The eigenvector calculation uses NumPy's interface to the LAPACK
+    eigenvalue solvers. This will be the fastest and most accurate
+    for small graphs.
+
+    This implementation works with Multi(Di)Graphs. For multigraphs the
+    weight between two nodes is set to be the sum of all edge weights
+    between those nodes.
+
+    See Also
+    --------
+    pagerank, google_matrix
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+       The PageRank citation ranking: Bringing order to the Web. 1999
+       http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
+    """
+    import numpy as np
+
+    if len(G) == 0:
+        return {}
+    M = google_matrix(
+        G, alpha, personalization=personalization, weight=weight, dangling=dangling
+    )
+    # use numpy LAPACK solver
+    eigenvalues, eigenvectors = np.linalg.eig(M.T)
+    ind = np.argmax(eigenvalues)
+    # eigenvector of largest eigenvalue is at ind, normalized
+    largest = np.array(eigenvectors[:, ind]).flatten().real
+    norm = largest.sum()
+    return dict(zip(G, map(float, largest / norm)))
+
+
+def _pagerank_scipy(
+    G,
+    alpha=0.85,
+    personalization=None,
+    max_iter=100,
+    tol=1.0e-6,
+    nstart=None,
+    weight="weight",
+    dangling=None,
+):
+    """Returns the PageRank of the nodes in the graph.
+
+    PageRank computes a ranking of the nodes in the graph G based on
+    the structure of the incoming links. It was originally designed as
+    an algorithm to rank web pages.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph. Undirected graphs will be converted to a directed
+      graph with two directed edges for each undirected edge.
+
+    alpha : float, optional
+      Damping parameter for PageRank, default=0.85.
+
+    personalization: dict, optional
+      The "personalization vector", a dictionary keyed by a subset of the
+      graph's nodes whose values are those nodes' personalization weights.
+      At least one personalization value must be non-zero.
+      If not specified, a node's personalization value will be zero.
+      By default, a uniform distribution is used.
+
+    max_iter : integer, optional
+      Maximum number of iterations in power method eigenvalue solver.
+
+    tol : float, optional
+      Error tolerance used to check convergence in power method solver.
+      The iteration will stop after a tolerance of ``len(G) * tol`` is reached.
+
+    nstart : dictionary, optional
+      Starting value of PageRank iteration for each node.
+
+    weight : key, optional
+      Edge data key to use as weight. If None, all edge weights are set to 1.
+
+    dangling: dict, optional
+      The outedges to be assigned to any "dangling" nodes, i.e., nodes without
+      any outedges. The dict key is the node the outedge points to and the dict
+      value is the weight of that outedge. By default, dangling nodes are given
+      outedges according to the personalization vector (uniform if not
+      specified). This must be selected to result in an irreducible transition
+      matrix (see notes under google_matrix). It is common for the dangling
+      dict to be the same as the personalization dict.
+
+    Returns
+    -------
+    pagerank : dictionary
+       Dictionary of nodes with PageRank as value.
+
+    Examples
+    --------
+    >>> from networkx.algorithms.link_analysis.pagerank_alg import _pagerank_scipy
+    >>> G = nx.DiGraph(nx.path_graph(4))
+    >>> pr = _pagerank_scipy(G, alpha=0.9)
+
+    Notes
+    -----
+    The eigenvector calculation uses power iteration with a SciPy
+    sparse matrix representation.
+
+    This implementation works with Multi(Di)Graphs. For multigraphs the
+    weight between two nodes is set to be the sum of all edge weights
+    between those nodes.
+
+    See Also
+    --------
+    pagerank
+
+    Raises
+    ------
+    PowerIterationFailedConvergence
+        If the algorithm fails to converge to the specified tolerance
+        within the specified number of iterations of the power iteration
+        method.
+
+    References
+    ----------
+    .. [1] A. Langville and C. Meyer,
+       "A survey of eigenvector methods of web information retrieval."
+       http://citeseer.ist.psu.edu/713792.html
+    .. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
+       The PageRank citation ranking: Bringing order to the Web. 
1999 + http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf + """ + import numpy as np + import scipy as sp + + N = len(G) + if N == 0: + return {} + + nodelist = list(G) + A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, dtype=float) + S = A.sum(axis=1) + S[S != 0] = 1.0 / S[S != 0] + # TODO: csr_array + Q = sp.sparse.csr_array(sp.sparse.spdiags(S.T, 0, *A.shape)) + A = Q @ A + + # initial vector + if nstart is None: + x = np.repeat(1.0 / N, N) + else: + x = np.array([nstart.get(n, 0) for n in nodelist], dtype=float) + x /= x.sum() + + # Personalization vector + if personalization is None: + p = np.repeat(1.0 / N, N) + else: + p = np.array([personalization.get(n, 0) for n in nodelist], dtype=float) + if p.sum() == 0: + raise ZeroDivisionError + p /= p.sum() + # Dangling nodes + if dangling is None: + dangling_weights = p + else: + # Convert the dangling dictionary into an array in nodelist order + dangling_weights = np.array([dangling.get(n, 0) for n in nodelist], dtype=float) + dangling_weights /= dangling_weights.sum() + is_dangling = np.where(S == 0)[0] + + # power iteration: make up to max_iter iterations + for _ in range(max_iter): + xlast = x + x = alpha * (x @ A + sum(x[is_dangling]) * dangling_weights) + (1 - alpha) * p + # check convergence, l1 norm + err = np.absolute(x - xlast).sum() + if err < N * tol: + return dict(zip(nodelist, map(float, x))) + raise nx.PowerIterationFailedConvergence(max_iter) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d650c07ce83b28246456c8479f124f028336ed3c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..054ea4f19b8e553c40be3858f11df6939973f22a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9915349d672681dc2dd748de58a21548003d9a01 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py new file mode 100644 index 0000000000000000000000000000000000000000..880803d89f8523753e1321543fa06cfd630da3f0 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_hits.py @@ -0,0 +1,78 @@ +import pytest 
+ +import networkx as nx + +np = pytest.importorskip("numpy") +sp = pytest.importorskip("scipy") + +from networkx.algorithms.link_analysis.hits_alg import ( + _hits_numpy, + _hits_python, + _hits_scipy, +) + +# Example from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." http://citeseer.ist.psu.edu/713792.html + + +class TestHITS: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + + edges = [(1, 3), (1, 5), (2, 1), (3, 5), (5, 4), (5, 3), (6, 5)] + + G.add_edges_from(edges, weight=1) + cls.G = G + cls.G.a = dict( + zip(sorted(G), [0.000000, 0.000000, 0.366025, 0.133975, 0.500000, 0.000000]) + ) + cls.G.h = dict( + zip(sorted(G), [0.366025, 0.000000, 0.211325, 0.000000, 0.211325, 0.211325]) + ) + + def test_hits_numpy(self): + G = self.G + h, a = _hits_numpy(G) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + @pytest.mark.parametrize("hits_alg", (nx.hits, _hits_python, _hits_scipy)) + def test_hits(self, hits_alg): + G = self.G + h, a = hits_alg(G, tol=1.0e-08) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + nstart = {i: 1.0 / 2 for i in G} + h, a = hits_alg(G, nstart=nstart) + for n in G: + assert h[n] == pytest.approx(G.h[n], abs=1e-4) + for n in G: + assert a[n] == pytest.approx(G.a[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.hits(G) == ({}, {}) + assert _hits_numpy(G) == ({}, {}) + assert _hits_python(G) == ({}, {}) + assert _hits_scipy(G) == ({}, {}) + + def test_hits_not_convergent(self): + G = nx.path_graph(50) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_scipy(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=1) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_scipy(G, max_iter=0) + with pytest.raises(nx.PowerIterationFailedConvergence): + _hits_python(G, max_iter=0) + with pytest.raises(ValueError): + nx.hits(G, max_iter=0) + with pytest.raises(sp.sparse.linalg.ArpackNoConvergence): + nx.hits(G, max_iter=1) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py new file mode 100644 index 0000000000000000000000000000000000000000..6a30f0cd12c3af40f11235d9d8232347df31f56f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/link_analysis/tests/test_pagerank.py @@ -0,0 +1,217 @@ +import random + +import pytest + +import networkx as nx +from networkx.classes.tests import dispatch_interface + +np = pytest.importorskip("numpy") +pytest.importorskip("scipy") + +from networkx.algorithms.link_analysis.pagerank_alg import ( + _pagerank_numpy, + _pagerank_python, + _pagerank_scipy, +) + +# Example from +# A. Langville and C. Meyer, "A survey of eigenvector methods of web +# information retrieval." 
http://citeseer.ist.psu.edu/713792.html + + +class TestPageRank: + @classmethod + def setup_class(cls): + G = nx.DiGraph() + edges = [ + (1, 2), + (1, 3), + # 2 is a dangling node + (3, 1), + (3, 2), + (3, 5), + (4, 5), + (4, 6), + (5, 4), + (5, 6), + (6, 4), + ] + G.add_edges_from(edges) + cls.G = G + cls.G.pagerank = dict( + zip( + sorted(G), + [ + 0.03721197, + 0.05395735, + 0.04150565, + 0.37508082, + 0.20599833, + 0.28624589, + ], + ) + ) + cls.dangling_node_index = 1 + cls.dangling_edges = {1: 2, 2: 3, 3: 0, 4: 0, 5: 0, 6: 0} + cls.G.dangling_pagerank = dict( + zip( + sorted(G), + [0.10844518, 0.18618601, 0.0710892, 0.2683668, 0.15919783, 0.20671497], + ) + ) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank(self, alg): + G = self.G + p = alg(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + nstart = {n: random.random() for n in G} + p = alg(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_pagerank_max_iter(self, alg): + with pytest.raises(nx.PowerIterationFailedConvergence): + alg(self.G, max_iter=0) + + def test_numpy_pagerank(self): + G = self.G + p = _pagerank_numpy(G, alpha=0.9) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + # This additionally tests the @nx._dispatch mechanism, treating + # nx.google_matrix as if it were a re-implementation from another package + @pytest.mark.parametrize("wrapper", [lambda x: x, dispatch_interface.convert]) + def test_google_matrix(self, wrapper): + G = wrapper(self.G) + M = nx.google_matrix(G, alpha=0.9, nodelist=sorted(G)) + _, ev = np.linalg.eig(M.T) + p = ev[:, 0] / ev[:, 0].sum() + for a, b in zip(p, self.G.pagerank.values()): + assert a == pytest.approx(b, abs=1e-7) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) + def test_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {0: 1, 1: 1, 2: 4, 3: 4} + answer = { + 0: 0.23246732615667579, + 1: 0.23246732615667579, + 2: 0.267532673843324, + 3: 0.2675326738433241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, nx.google_matrix)) + def test_zero_personalization_vector(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 0} + pytest.raises(ZeroDivisionError, alg, G, personalization=personalize) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_one_nonzero_personalization_value(self, alg): + G = nx.complete_graph(4) + personalize = {0: 0, 1: 0, 2: 0, 3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_incomplete_personalization(self, alg): + G = nx.complete_graph(4) + personalize = {3: 1} + answer = { + 0: 0.22077931820379187, + 1: 0.22077931820379187, + 2: 0.22077931820379187, + 3: 0.3376620453886241, + } + p = alg(G, alpha=0.85, personalization=personalize) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + def test_dangling_matrix(self): + """ + Tests that the google_matrix doesn't change except 
for the dangling + nodes. + """ + G = self.G + dangling = self.dangling_edges + dangling_sum = sum(dangling.values()) + M1 = nx.google_matrix(G, personalization=dangling) + M2 = nx.google_matrix(G, personalization=dangling, dangling=dangling) + for i in range(len(G)): + for j in range(len(G)): + if i == self.dangling_node_index and (j + 1) in dangling: + assert M2[i, j] == pytest.approx( + dangling[j + 1] / dangling_sum, abs=1e-4 + ) + else: + assert M2[i, j] == pytest.approx(M1[i, j], abs=1e-4) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python, _pagerank_numpy)) + def test_dangling_pagerank(self, alg): + pr = alg(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty(self): + G = nx.Graph() + assert nx.pagerank(G) == {} + assert _pagerank_python(G) == {} + assert _pagerank_numpy(G) == {} + assert nx.google_matrix(G).shape == (0, 0) + + @pytest.mark.parametrize("alg", (nx.pagerank, _pagerank_python)) + def test_multigraph(self, alg): + G = nx.MultiGraph() + G.add_edges_from([(1, 2), (1, 2), (1, 2), (2, 3), (2, 3), ("3", 3), ("3", 3)]) + answer = { + 1: 0.21066048614468322, + 2: 0.3395308825985378, + 3: 0.28933951385531687, + "3": 0.16046911740146227, + } + p = alg(G) + for n in G: + assert p[n] == pytest.approx(answer[n], abs=1e-4) + + +class TestPageRankScipy(TestPageRank): + def test_scipy_pagerank(self): + G = self.G + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + personalize = {n: random.random() for n in G} + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, personalization=personalize) + + nstart = {n: random.random() for n in G} + p = _pagerank_scipy(G, alpha=0.9, tol=1.0e-08, nstart=nstart) + for n in G: + assert p[n] == pytest.approx(G.pagerank[n], abs=1e-4) + + def test_scipy_pagerank_max_iter(self): + with pytest.raises(nx.PowerIterationFailedConvergence): + _pagerank_scipy(self.G, max_iter=0) + + def test_dangling_scipy_pagerank(self): + pr = _pagerank_scipy(self.G, dangling=self.dangling_edges) + for n in self.G: + assert pr[n] == pytest.approx(self.G.dangling_pagerank[n], abs=1e-4) + + def test_empty_scipy(self): + G = nx.Graph() + assert _pagerank_scipy(G) == {} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/link_prediction.py b/MLPY/Lib/site-packages/networkx/algorithms/link_prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..7335a77f5e0ac0e93d873d91bf1e4f25feeb6381 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/link_prediction.py @@ -0,0 +1,604 @@ +""" +Link prediction algorithms. +""" + + +from math import log + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "resource_allocation_index", + "jaccard_coefficient", + "adamic_adar_index", + "preferential_attachment", + "cn_soundarajan_hopcroft", + "ra_index_soundarajan_hopcroft", + "within_inter_cluster", + "common_neighbor_centrality", +] + + +def _apply_prediction(G, func, ebunch=None): + """Applies the given function to each edge in the specified iterable + of edges. + + `G` is an instance of :class:`networkx.Graph`. + + `func` is a function on two inputs, each of which is a node in the + graph. The function can return anything, but it should return a + value representing a prediction of the likelihood of a "link" + joining the two nodes. + + `ebunch` is an iterable of pairs of nodes. If not specified, all + non-edges in the graph `G` will be used. 
+ + """ + if ebunch is None: + ebunch = nx.non_edges(G) + return ((u, v, func(u, v)) for u, v in ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def resource_allocation_index(G, ebunch=None): + r"""Compute the resource allocation index of all node pairs in ebunch. + + Resource allocation index of `u` and `v` is defined as + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{|\Gamma(w)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Resource allocation index will be computed for each pair of + nodes given in the iterable. The pairs must be given as + 2-tuples (u, v) where u and v are nodes in the graph. If ebunch + is None then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their resource allocation index. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.resource_allocation_index(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.75000000 + (2, 3) -> 0.75000000 + + References + ---------- + .. [1] T. Zhou, L. Lu, Y.-C. Zhang. + Predicting missing links via local information. + Eur. Phys. J. B 71 (2009) 623. + https://arxiv.org/pdf/0901.0553.pdf + """ + + def predict(u, v): + return sum(1 / G.degree(w) for w in nx.common_neighbors(G, u, v)) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def jaccard_coefficient(G, ebunch=None): + r"""Compute the Jaccard coefficient of all node pairs in ebunch. + + Jaccard coefficient of nodes `u` and `v` is defined as + + .. math:: + + \frac{|\Gamma(u) \cap \Gamma(v)|}{|\Gamma(u) \cup \Gamma(v)|} + + where $\Gamma(u)$ denotes the set of neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + Jaccard coefficient will be computed for each pair of nodes + given in the iterable. The pairs must be given as 2-tuples + (u, v) where u and v are nodes in the graph. If ebunch is None + then all nonexistent edges in the graph will be used. + Default value: None. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their Jaccard coefficient. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> preds = nx.jaccard_coefficient(G, [(0, 1), (2, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 1) -> 0.60000000 + (2, 3) -> 0.60000000 + + References + ---------- + .. [1] D. Liben-Nowell, J. Kleinberg. + The Link Prediction Problem for Social Networks (2004). + http://www.cs.cornell.edu/home/kleinber/link-pred.pdf + """ + + def predict(u, v): + union_size = len(set(G[u]) | set(G[v])) + if union_size == 0: + return 0 + return len(list(nx.common_neighbors(G, u, v))) / union_size + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def adamic_adar_index(G, ebunch=None): + r"""Compute the Adamic-Adar index of all node pairs in ebunch. + + Adamic-Adar index of `u` and `v` is defined as + + .. 
math::
+
+        \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{1}{\log |\Gamma(w)|}
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.
+    This index leads to zero-division for nodes only connected via self-loops.
+    It is intended to be used when no self-loops are present.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        Adamic-Adar index will be computed for each pair of nodes given
+        in the iterable. The pairs must be given as 2-tuples (u, v)
+        where u and v are nodes in the graph. If ebunch is None then all
+        nonexistent edges in the graph will be used.
+        Default value: None.
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their Adamic-Adar index.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.adamic_adar_index(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p:.8f}")
+    (0, 1) -> 2.16404256
+    (2, 3) -> 2.16404256
+
+    References
+    ----------
+    .. [1] D. Liben-Nowell, J. Kleinberg.
+           The Link Prediction Problem for Social Networks (2004).
+           http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
+    """
+
+    def predict(u, v):
+        return sum(1 / log(G.degree(w)) for w in nx.common_neighbors(G, u, v))
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def common_neighbor_centrality(G, ebunch=None, alpha=0.8):
+    r"""Return the CCPA score for each pair of nodes.
+
+    Compute the Common Neighbor and Centrality based Parameterized Algorithm
+    (CCPA) score of all node pairs in ebunch.
+
+    CCPA score of `u` and `v` is defined as
+
+    .. math::
+
+        \alpha \cdot |\Gamma(u) \cap \Gamma(v)| + (1 - \alpha) \cdot \frac{N}{d_{uv}}
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$, $\Gamma(v)$ denotes the
+    set of neighbors of $v$, $\alpha$ is a parameter that varies between [0,1],
+    $N$ denotes the total number of nodes in the graph and $d_{uv}$ denotes the
+    shortest distance between $u$ and $v$.
+
+    This algorithm is based on two vital properties of nodes, namely the number
+    of common neighbors and their centrality. Common neighbor refers to the common
+    nodes between two nodes. Centrality refers to the prestige that a node enjoys
+    in a network.
+
+    .. seealso::
+
+        :func:`common_neighbors`
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        The CCPA score will be computed for each pair of nodes given in
+        the iterable. The pairs must be given as 2-tuples (u, v) where u
+        and v are nodes in the graph. If ebunch is None then all
+        nonexistent edges in the graph will be used.
+        Default value: None.
+
+    alpha : float, optional (default = 0.8)
+        Parameter controlling the relative weight of the common-neighbor
+        term against the centrality term. Values should normally lie
+        between 0 and 1. The default of 0.8 is the value the authors
+        report as performing best on their datasets.
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their Common Neighbor and Centrality based
+        Parameterized Algorithm (CCPA) score.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.common_neighbor_centrality(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p}")
+    (0, 1) -> 3.4000000000000004
+    (2, 3) -> 3.4000000000000004
+
+    References
+    ----------
+    .. [1] Ahmad, I., Akhtar, M.U., Noor, S. et al.
+           Missing Link Prediction using Common Neighbor and Centrality based
+           Parameterized Algorithm. Sci Rep 10, 364 (2020).
+           https://doi.org/10.1038/s41598-019-57304-y
+    """
+
+    # When alpha == 1, the CCPA score simplifies to the number of common neighbors.
+    if alpha == 1:
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self links are not supported")
+
+            return sum(1 for _ in nx.common_neighbors(G, u, v))
+
+    else:
+        spl = dict(nx.shortest_path_length(G))
+        inf = float("inf")
+
+        def predict(u, v):
+            if u == v:
+                raise nx.NetworkXAlgorithmError("Self links are not supported")
+            path_len = spl[u].get(v, inf)
+
+            return alpha * sum(1 for _ in nx.common_neighbors(G, u, v)) + (
+                1 - alpha
+            ) * (G.number_of_nodes() / path_len)
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def preferential_attachment(G, ebunch=None):
+    r"""Compute the preferential attachment score of all node pairs in ebunch.
+
+    Preferential attachment score of `u` and `v` is defined as
+
+    .. math::
+
+        |\Gamma(u)| |\Gamma(v)|
+
+    where $\Gamma(u)$ denotes the set of neighbors of $u$.
+
+    Parameters
+    ----------
+    G : graph
+        NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        Preferential attachment score will be computed for each pair of
+        nodes given in the iterable. The pairs must be given as
+        2-tuples (u, v) where u and v are nodes in the graph. If ebunch
+        is None then all nonexistent edges in the graph will be used.
+        Default value: None.
+
+    Returns
+    -------
+    piter : iterator
+        An iterator of 3-tuples in the form (u, v, p) where (u, v) is a
+        pair of nodes and p is their preferential attachment score.
+
+    Examples
+    --------
+    >>> G = nx.complete_graph(5)
+    >>> preds = nx.preferential_attachment(G, [(0, 1), (2, 3)])
+    >>> for u, v, p in preds:
+    ...     print(f"({u}, {v}) -> {p}")
+    (0, 1) -> 16
+    (2, 3) -> 16
+
+    References
+    ----------
+    .. [1] D. Liben-Nowell, J. Kleinberg.
+           The Link Prediction Problem for Social Networks (2004).
+           http://www.cs.cornell.edu/home/kleinber/link-pred.pdf
+    """
+
+    def predict(u, v):
+        return G.degree(u) * G.degree(v)
+
+    return _apply_prediction(G, predict, ebunch)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch(node_attrs="community")
+def cn_soundarajan_hopcroft(G, ebunch=None, community="community"):
+    r"""Count the number of common neighbors of all node pairs in ebunch
+        using community information.
+
+    For two nodes $u$ and $v$, this function computes the number of
+    common neighbors, with a bonus of one for each common neighbor
+    belonging to the same community as $u$ and $v$. Mathematically,
+
+    .. math::
+
+        |\Gamma(u) \cap \Gamma(v)| + \sum_{w \in \Gamma(u) \cap \Gamma(v)} f(w)
+
+    where $f(w)$ equals 1 if $w$ belongs to the same community as $u$
+    and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of
+    neighbors of $u$.
+
+    Parameters
+    ----------
+    G : graph
+        A NetworkX undirected graph.
+
+    ebunch : iterable of node pairs, optional (default = None)
+        The score will be computed for each pair of nodes given in the
+        iterable. The pairs must be given as 2-tuples (u, v) where u
+        and v are nodes in the graph. If ebunch is None then all
+        nonexistent edges in the graph will be used.
+        Default value: None.
+ + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 0 + >>> preds = nx.cn_soundarajan_hopcroft(G, [(0, 2)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p}") + (0, 2) -> 2 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. + http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + cnbors = list(nx.common_neighbors(G, u, v)) + neighbors = ( + sum(_community(G, w, community) == Cu for w in cnbors) if Cu == Cv else 0 + ) + return len(cnbors) + neighbors + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs="community") +def ra_index_soundarajan_hopcroft(G, ebunch=None, community="community"): + r"""Compute the resource allocation index of all node pairs in + ebunch using community information. + + For two nodes $u$ and $v$, this function computes the resource + allocation index considering only common neighbors belonging to the + same community as $u$ and $v$. Mathematically, + + .. math:: + + \sum_{w \in \Gamma(u) \cap \Gamma(v)} \frac{f(w)}{|\Gamma(w)|} + + where $f(w)$ equals 1 if $w$ belongs to the same community as $u$ + and $v$ or 0 otherwise and $\Gamma(u)$ denotes the set of + neighbors of $u$. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The score will be computed for each pair of nodes given in the + iterable. The pairs must be given as 2-tuples (u, v) where u + and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their score. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 0 + >>> G.nodes[2]["community"] = 1 + >>> G.nodes[3]["community"] = 0 + >>> preds = nx.ra_index_soundarajan_hopcroft(G, [(0, 3)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 3) -> 0.50000000 + + References + ---------- + .. [1] Sucheta Soundarajan and John Hopcroft. + Using community information to improve the precision of link + prediction methods. + In Proceedings of the 21st international conference companion on + World Wide Web (WWW '12 Companion). ACM, New York, NY, USA, 607-608. 
+ http://doi.acm.org/10.1145/2187980.2188150 + """ + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = nx.common_neighbors(G, u, v) + return sum(1 / G.degree(w) for w in cnbors if _community(G, w, community) == Cu) + + return _apply_prediction(G, predict, ebunch) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs="community") +def within_inter_cluster(G, ebunch=None, delta=0.001, community="community"): + """Compute the ratio of within- and inter-cluster common neighbors + of all node pairs in ebunch. + + For two nodes `u` and `v`, if a common neighbor `w` belongs to the + same community as them, `w` is considered as within-cluster common + neighbor of `u` and `v`. Otherwise, it is considered as + inter-cluster common neighbor of `u` and `v`. The ratio between the + size of the set of within- and inter-cluster common neighbors is + defined as the WIC measure. [1]_ + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + ebunch : iterable of node pairs, optional (default = None) + The WIC measure will be computed for each pair of nodes given in + the iterable. The pairs must be given as 2-tuples (u, v) where + u and v are nodes in the graph. If ebunch is None then all + nonexistent edges in the graph will be used. + Default value: None. + + delta : float, optional (default = 0.001) + Value to prevent division by zero in case there is no + inter-cluster common neighbor between two nodes. See [1]_ for + details. Default value: 0.001. + + community : string, optional (default = 'community') + Nodes attribute name containing the community information. + G[u][community] identifies which community u belongs to. Each + node belongs to at most one community. Default value: 'community'. + + Returns + ------- + piter : iterator + An iterator of 3-tuples in the form (u, v, p) where (u, v) is a + pair of nodes and p is their WIC measure. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4)]) + >>> G.nodes[0]["community"] = 0 + >>> G.nodes[1]["community"] = 1 + >>> G.nodes[2]["community"] = 0 + >>> G.nodes[3]["community"] = 0 + >>> G.nodes[4]["community"] = 0 + >>> preds = nx.within_inter_cluster(G, [(0, 4)]) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.99800200 + >>> preds = nx.within_inter_cluster(G, [(0, 4)], delta=0.5) + >>> for u, v, p in preds: + ... print(f"({u}, {v}) -> {p:.8f}") + (0, 4) -> 1.33333333 + + References + ---------- + .. [1] Jorge Carlos Valverde-Rebaza and Alneu de Andrade Lopes. + Link prediction in complex networks based on cluster information. 
+ In Proceedings of the 21st Brazilian conference on Advances in + Artificial Intelligence (SBIA'12) + https://doi.org/10.1007/978-3-642-34459-6_10 + """ + if delta <= 0: + raise nx.NetworkXAlgorithmError("Delta must be greater than zero") + + def predict(u, v): + Cu = _community(G, u, community) + Cv = _community(G, v, community) + if Cu != Cv: + return 0 + cnbors = set(nx.common_neighbors(G, u, v)) + within = {w for w in cnbors if _community(G, w, community) == Cu} + inter = cnbors - within + return len(within) / (len(inter) + delta) + + return _apply_prediction(G, predict, ebunch) + + +def _community(G, u, community): + """Get the community of the given node.""" + node_u = G.nodes[u] + try: + return node_u[community] + except KeyError as err: + raise nx.NetworkXAlgorithmError("No community information") from err diff --git a/MLPY/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py b/MLPY/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py new file mode 100644 index 0000000000000000000000000000000000000000..ca21d73766f2287103689ea6bcf6d86dd3eebced --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/lowest_common_ancestors.py @@ -0,0 +1,268 @@ +"""Algorithms for finding the lowest common ancestor of trees and DAGs.""" +from collections import defaultdict +from collections.abc import Mapping, Set +from itertools import combinations_with_replacement + +import networkx as nx +from networkx.utils import UnionFind, arbitrary_element, not_implemented_for + +__all__ = [ + "all_pairs_lowest_common_ancestor", + "tree_all_pairs_lowest_common_ancestor", + "lowest_common_ancestor", +] + + +@not_implemented_for("undirected") +@nx._dispatch +def all_pairs_lowest_common_ancestor(G, pairs=None): + """Return the lowest common ancestor of all pairs or the provided pairs + + Parameters + ---------- + G : NetworkX directed graph + + pairs : iterable of pairs of nodes, optional (default: all pairs) + The pairs of nodes of interest. + If None, will find the LCA of all pairs of nodes. + + Yields + ------ + ((node1, node2), lca) : 2-tuple + Where lca is least common ancestor of node1 and node2. + Note that for the default case, the order of the node pair is not considered, + e.g. you will not get both ``(a, b)`` and ``(b, a)`` + + Raises + ------ + NetworkXPointlessConcept + If `G` is null. + NetworkXError + If `G` is not a DAG. + + Examples + -------- + The default behavior is to yield the lowest common ancestor for all + possible combinations of nodes in `G`, including self-pairings: + + >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)]) + >>> dict(nx.all_pairs_lowest_common_ancestor(G)) + {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2} + + The pairs argument can be used to limit the output to only the + specified node pairings: + + >>> dict(nx.all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)])) + {(1, 2): 1, (2, 3): 0} + + Notes + ----- + Only defined on non-null directed acyclic graphs. + + See Also + -------- + lowest_common_ancestor + """ + if not nx.is_directed_acyclic_graph(G): + raise nx.NetworkXError("LCA only defined on directed acyclic graphs.") + if len(G) == 0: + raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.") + + if pairs is None: + pairs = combinations_with_replacement(G, 2) + else: + # Convert iterator to iterable, if necessary. Trim duplicates. 
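+        # dict.fromkeys keeps the first-seen order of the pairs (dicts
+        # preserve insertion order) while silently dropping duplicates.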
+ pairs = dict.fromkeys(pairs) + # Verify that each of the nodes in the provided pairs is in G + nodeset = set(G) + for pair in pairs: + if set(pair) - nodeset: + raise nx.NodeNotFound( + f"Node(s) {set(pair) - nodeset} from pair {pair} not in G." + ) + + # Once input validation is done, construct the generator + def generate_lca_from_pairs(G, pairs): + ancestor_cache = {} + + for v, w in pairs: + if v not in ancestor_cache: + ancestor_cache[v] = nx.ancestors(G, v) + ancestor_cache[v].add(v) + if w not in ancestor_cache: + ancestor_cache[w] = nx.ancestors(G, w) + ancestor_cache[w].add(w) + + common_ancestors = ancestor_cache[v] & ancestor_cache[w] + + if common_ancestors: + common_ancestor = next(iter(common_ancestors)) + while True: + successor = None + for lower_ancestor in G.successors(common_ancestor): + if lower_ancestor in common_ancestors: + successor = lower_ancestor + break + if successor is None: + break + common_ancestor = successor + yield ((v, w), common_ancestor) + + return generate_lca_from_pairs(G, pairs) + + +@not_implemented_for("undirected") +@nx._dispatch +def lowest_common_ancestor(G, node1, node2, default=None): + """Compute the lowest common ancestor of the given pair of nodes. + + Parameters + ---------- + G : NetworkX directed graph + + node1, node2 : nodes in the graph. + + default : object + Returned if no common ancestor between `node1` and `node2` + + Returns + ------- + The lowest common ancestor of node1 and node2, + or default if they have no common ancestors. + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, (0, 1, 2, 3)) + >>> nx.add_path(G, (0, 4, 3)) + >>> nx.lowest_common_ancestor(G, 2, 4) + 0 + + See Also + -------- + all_pairs_lowest_common_ancestor""" + + ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)])) + if ans: + assert len(ans) == 1 + return ans[0][1] + return default + + +@not_implemented_for("undirected") +@nx._dispatch +def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None): + r"""Yield the lowest common ancestor for sets of pairs in a tree. + + Parameters + ---------- + G : NetworkX directed graph (must be a tree) + + root : node, optional (default: None) + The root of the subtree to operate on. + If None, assume the entire graph has exactly one source and use that. + + pairs : iterable or iterator of pairs of nodes, optional (default: None) + The pairs of interest. If None, Defaults to all pairs of nodes + under `root` that have a lowest common ancestor. + + Returns + ------- + lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes + in `pairs` and `lca` is their lowest common ancestor. + + Examples + -------- + >>> import pprint + >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)]) + >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G))) + {(1, 1): 1, + (2, 1): 1, + (2, 2): 2, + (3, 1): 1, + (3, 2): 1, + (3, 3): 3, + (3, 4): 1, + (4, 1): 1, + (4, 2): 2, + (4, 4): 4} + + We can also use `pairs` argument to specify the pairs of nodes for which we + want to compute lowest common ancestors. Here is an example: + + >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)])) + {(2, 3): 1, (1, 4): 1} + + Notes + ----- + Only defined on non-null trees represented with directed edges from + parents to children. Uses Tarjan's off-line lowest-common-ancestors + algorithm. 
Runs in $O(4 \times (V + E + P))$ time, where 4 is the largest
+    value of the inverse Ackermann function likely to ever come up in actual
+    use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).
+
+    Tarjan, R. E. (1979), "Applications of path compression on balanced trees",
+    Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161.
+
+    See Also
+    --------
+    all_pairs_lowest_common_ancestor: similar routine for general DAGs
+    lowest_common_ancestor: just a single pair for general DAGs
+    """
+    if len(G) == 0:
+        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")
+
+    # Index pairs of interest for efficient lookup from either side.
+    if pairs is not None:
+        pair_dict = defaultdict(set)
+        # See note on all_pairs_lowest_common_ancestor.
+        if not isinstance(pairs, (Mapping, Set)):
+            pairs = set(pairs)
+        for u, v in pairs:
+            for n in (u, v):
+                if n not in G:
+                    msg = f"The node {str(n)} is not in the digraph."
+                    raise nx.NodeNotFound(msg)
+            pair_dict[u].add(v)
+            pair_dict[v].add(u)
+
+    # If root is not specified, find the unique node with in-degree 0 and
+    # use it. Raise an error if none is found, or if more than one is. Also
+    # check for any nodes with in-degree larger than 1, which would imply G
+    # is not a tree.
+    if root is None:
+        for n, deg in G.in_degree:
+            if deg == 0:
+                if root is not None:
+                    msg = "No root specified and tree has multiple sources."
+                    raise nx.NetworkXError(msg)
+                root = n
+            # checking deg>1 is not sufficient for MultiDiGraphs
+            elif deg > 1 and len(G.pred[n]) > 1:
+                msg = "Tree LCA only defined on trees; use DAG routine."
+                raise nx.NetworkXError(msg)
+    if root is None:
+        raise nx.NetworkXError("Graph contains a cycle.")
+
+    # Iterative implementation of Tarjan's offline LCA algorithm
+    # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition)
+    uf = UnionFind()
+    ancestors = {}
+    for node in G:
+        ancestors[node] = uf[node]
+
+    colors = defaultdict(bool)
+    for node in nx.dfs_postorder_nodes(G, root):
+        colors[node] = True
+        for v in pair_dict[node] if pairs is not None else G:
+            if colors[v]:
+                # If the user requested both directions of a pair, give it.
+                # Otherwise, just give one.
+                if pairs is not None and (node, v) in pairs:
+                    yield (node, v), ancestors[uf[v]]
+                if pairs is None or (v, node) in pairs:
+                    yield (v, node), ancestors[uf[v]]
+        if node != root:
+            parent = arbitrary_element(G.pred[node])
+            uf.union(parent, node)
+            ancestors[uf[parent]] = parent
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/matching.py b/MLPY/Lib/site-packages/networkx/algorithms/matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..b20d7f6970cd3a3c3373c8ced4d31ac4dc85c1e2
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/matching.py
@@ -0,0 +1,1151 @@
+"""Functions for computing and verifying matchings in a graph."""
+from collections import Counter
+from itertools import combinations, repeat
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+    "is_matching",
+    "is_maximal_matching",
+    "is_perfect_matching",
+    "max_weight_matching",
+    "min_weight_matching",
+    "maximal_matching",
+]
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatch
+def maximal_matching(G):
+    r"""Find a maximal matching in the graph.
+
+    A matching is a subset of edges in which no node occurs more than once.
+    A maximal matching cannot add more edges and still be a matching.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        Undirected graph
+
+    Returns
+    -------
+    matching : set
+        A maximal matching of the graph.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)])
+    >>> sorted(nx.maximal_matching(G))
+    [(1, 2), (3, 5)]
+
+    Notes
+    -----
+    The algorithm greedily selects a maximal matching M of the graph G
+    (i.e., no proper superset of M is also a matching). It runs in
+    $O(|E|)$ time.
+    """
+    matching = set()
+    nodes = set()
+    for edge in G.edges():
+        # If the edge isn't covered, add it to the matching
+        # then remove neighborhood of u and v from consideration.
+        u, v = edge
+        if u not in nodes and v not in nodes and u != v:
+            matching.add(edge)
+            nodes.update(edge)
+    return matching
+
+
+def matching_dict_to_set(matching):
+    """Converts matching dict format to matching set format
+
+    Converts a dictionary representing a matching (as returned by
+    :func:`max_weight_matching`) to a set representing a matching (as
+    returned by :func:`maximal_matching`).
+
+    In the definition of maximal matching adopted by NetworkX,
+    self-loops are not allowed, so the provided dictionary is expected
+    to never have any mapping from a key to itself. However, the
+    dictionary is expected to have mirrored key/value pairs, for
+    example, key ``u`` with value ``v`` and key ``v`` with value ``u``.
+
+    """
+    edges = set()
+    for edge in matching.items():
+        u, v = edge
+        if (v, u) in edges or edge in edges:
+            continue
+        if u == v:
+            raise nx.NetworkXError(f"Selfloops cannot appear in matchings {edge}")
+        edges.add(edge)
+    return edges
+
+
+@nx._dispatch
+def is_matching(G, matching):
+    """Return True if ``matching`` is a valid matching of ``G``
+
+    A *matching* in a graph is a set of edges in which no two distinct
+    edges share a common endpoint. Each node is incident to at most one
+    edge in the matching. The edges are said to be independent.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    matching : dict or set
+        A dictionary or set representing a matching. If a dictionary, it
+        must have ``matching[u] == v`` and ``matching[v] == u`` for each
+        edge ``(u, v)`` in the matching. If a set, it must have elements
+        of the form ``(u, v)``, where ``(u, v)`` is an edge in the
+        matching.
+
+    Returns
+    -------
+    bool
+        Whether the given set or dictionary represents a valid matching
+        in the graph.
+
+    Raises
+    ------
+    NetworkXError
+        If the proposed matching has an edge to a node not in G, or if
+        the matching is not a collection of 2-tuple edges.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5)])
+    >>> nx.is_matching(G, {1: 3, 2: 4})  # using dict to represent matching
+    True
+
+    >>> nx.is_matching(G, {(1, 3), (2, 4)})  # using set to represent matching
+    True
+
+    """
+    if isinstance(matching, dict):
+        matching = matching_dict_to_set(matching)
+
+    nodes = set()
+    for edge in matching:
+        if len(edge) != 2:
+            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
+        u, v = edge
+        if u not in G or v not in G:
+            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
+        if u == v:
+            return False
+        if not G.has_edge(u, v):
+            return False
+        if u in nodes or v in nodes:
+            return False
+        nodes.update(edge)
+    return True
+
+
+@nx._dispatch
+def is_maximal_matching(G, matching):
+    """Return True if ``matching`` is a maximal matching of ``G``
+
+    A *maximal matching* in a graph is a matching in which adding any
+    edge would cause the set to no longer be a valid matching.
+ + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid maximal + matching in the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> nx.is_maximal_matching(G, {(1, 2), (3, 4)}) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + # If the given set is not a matching, then it is not a maximal matching. + edges = set() + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + edges.add(edge) + edges.add((v, u)) + # A matching is maximal if adding any new edge from G to it + # causes the resulting set to match some node twice. + # Be careful to check for adding selfloops + for u, v in G.edges: + if (u, v) not in edges: + # could add edge (u, v) to edges and have a bigger matching + if u not in nodes and v not in nodes and u != v: + return False + return True + + +@nx._dispatch +def is_perfect_matching(G, matching): + """Return True if ``matching`` is a perfect matching for ``G`` + + A *perfect matching* in a graph is a matching in which exactly one edge + is incident upon each vertex. + + Parameters + ---------- + G : NetworkX graph + + matching : dict or set + A dictionary or set representing a matching. If a dictionary, it + must have ``matching[u] == v`` and ``matching[v] == u`` for each + edge ``(u, v)`` in the matching. If a set, it must have elements + of the form ``(u, v)``, where ``(u, v)`` is an edge in the + matching. + + Returns + ------- + bool + Whether the given set or dictionary represents a valid perfect + matching in the graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 5), (4, 5), (4, 6)]) + >>> my_match = {1: 2, 3: 5, 4: 6} + >>> nx.is_perfect_matching(G, my_match) + True + + """ + if isinstance(matching, dict): + matching = matching_dict_to_set(matching) + + nodes = set() + for edge in matching: + if len(edge) != 2: + raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}") + u, v = edge + if u not in G or v not in G: + raise nx.NetworkXError(f"matching contains edge {edge} with node not in G") + if u == v: + return False + if not G.has_edge(u, v): + return False + if u in nodes or v in nodes: + return False + nodes.update(edge) + return len(nodes) == len(G) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def min_weight_matching(G, weight="weight"): + """Computing a minimum-weight maximal matching of G. + + Use the maximum-weight algorithm with edge weights subtracted + from the maximum weight of all edges. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. 
+ + This method replaces the edge weights with 1 plus the maximum edge weight + minus the original edge weight. + + new_weight = (max_weight + 1) - edge_weight + + then runs :func:`max_weight_matching` with the new weights. + The max weight matching with these new weights corresponds + to the min weight matching using the original weights. + Adding 1 to the max edge weight keeps all edge weights positive + and as integers if they started as integers. + + You might worry that adding 1 to each weight would make the algorithm + favor matchings with more edges. But we use the parameter + `maxcardinality=True` in `max_weight_matching` to ensure that the + number of edges in the competing matchings are the same and thus + the optimum does not change due to changes in the number of edges. + + Read the documentation of `max_weight_matching` for more information. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + Returns + ------- + matching : set + A minimal weight matching of the graph. + + See Also + -------- + max_weight_matching + """ + if len(G.edges) == 0: + return max_weight_matching(G, maxcardinality=True, weight=weight) + G_edges = G.edges(data=weight, default=1) + max_weight = 1 + max(w for _, _, w in G_edges) + InvG = nx.Graph() + edges = ((u, v, max_weight - w) for u, v, w in G_edges) + InvG.add_weighted_edges_from(edges, weight=weight) + return max_weight_matching(InvG, maxcardinality=True, weight=weight) + + +@not_implemented_for("multigraph") +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight") +def max_weight_matching(G, maxcardinality=False, weight="weight"): + """Compute a maximum-weighted matching of G. + + A matching is a subset of edges in which no node occurs more than once. + The weight of a matching is the sum of the weights of its edges. + A maximal matching cannot add more edges and still be a matching. + The cardinality of a matching is the number of matched edges. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + maxcardinality: bool, optional (default=False) + If maxcardinality is True, compute the maximum-cardinality matching + with maximum weight among all maximum-cardinality matchings. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + If key not found, uses 1 as weight. + + + Returns + ------- + matching : set + A maximal matching of the graph. + + Examples + -------- + >>> G = nx.Graph() + >>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)] + >>> G.add_weighted_edges_from(edges) + >>> sorted(nx.max_weight_matching(G)) + [(2, 4), (5, 3)] + + Notes + ----- + If G has edges with weight attributes the edge data are used as + weight values else the weights are assumed to be 1. + + This function takes time O(number_of_nodes ** 3). + + If all edge weights are integers, the algorithm uses only integer + computations. If floating point weights are used, the algorithm + could return a slightly suboptimal matching due to numeric + precision errors. + + This method is based on the "blossom" method for finding augmenting + paths and the "primal-dual" method for finding a matching of maximum + weight, both methods invented by Jack Edmonds [1]_. + + Bipartite graphs can also be matched using the functions present in + :mod:`networkx.algorithms.bipartite.matching`. + + References + ---------- + .. 
[1] "Efficient Algorithms for Finding Maximum Matching in Graphs", + Zvi Galil, ACM Computing Surveys, 1986. + """ + # + # The algorithm is taken from "Efficient Algorithms for Finding Maximum + # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986. + # It is based on the "blossom" method for finding augmenting paths and + # the "primal-dual" method for finding a matching of maximum weight, both + # methods invented by Jack Edmonds. + # + # A C program for maximum weight matching by Ed Rothberg was used + # extensively to validate this new code. + # + # Many terms used in the code comments are explained in the paper + # by Galil. You will probably need the paper to make sense of this code. + # + + class NoNode: + """Dummy value which is different from any node.""" + + class Blossom: + """Representation of a non-trivial blossom or sub-blossom.""" + + __slots__ = ["childs", "edges", "mybestedges"] + + # b.childs is an ordered list of b's sub-blossoms, starting with + # the base and going round the blossom. + + # b.edges is the list of b's connecting edges, such that + # b.edges[i] = (v, w) where v is a vertex in b.childs[i] + # and w is a vertex in b.childs[wrap(i+1)]. + + # If b is a top-level S-blossom, + # b.mybestedges is a list of least-slack edges to neighbouring + # S-blossoms, or None if no such list has been computed yet. + # This is used for efficient computation of delta3. + + # Generate the blossom's leaf vertices. + def leaves(self): + stack = [*self.childs] + while stack: + t = stack.pop() + if isinstance(t, Blossom): + stack.extend(t.childs) + else: + yield t + + # Get a list of vertices. + gnodes = list(G) + if not gnodes: + return set() # don't bother with empty graphs + + # Find the maximum edge weight. + maxweight = 0 + allinteger = True + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i != j and wt > maxweight: + maxweight = wt + allinteger = allinteger and (str(type(wt)).split("'")[1] in ("int", "long")) + + # If v is a matched vertex, mate[v] is its partner vertex. + # If v is a single vertex, v does not occur as a key in mate. + # Initially all vertices are single; updated during augmentation. + mate = {} + + # If b is a top-level blossom, + # label.get(b) is None if b is unlabeled (free), + # 1 if b is an S-blossom, + # 2 if b is a T-blossom. + # The label of a vertex is found by looking at the label of its top-level + # containing blossom. + # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable + # from an S-vertex outside the blossom. + # Labels are assigned during a stage and reset after each augmentation. + label = {} + + # If b is a labeled top-level blossom, + # labeledge[b] = (v, w) is the edge through which b obtained its label + # such that w is a vertex in b, or None if b's base vertex is single. + # If w is a vertex inside a T-blossom and label[w] == 2, + # labeledge[w] = (v, w) is an edge through which w is reachable from + # outside the blossom. + labeledge = {} + + # If v is a vertex, inblossom[v] is the top-level blossom to which v + # belongs. + # If v is a top-level vertex, inblossom[v] == v since v is itself + # a (trivial) top-level blossom. + # Initially all vertices are top-level trivial blossoms. + inblossom = dict(zip(gnodes, gnodes)) + + # If b is a sub-blossom, + # blossomparent[b] is its immediate parent (sub-)blossom. + # If b is a top-level blossom, blossomparent[b] is None. 
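+    # Initially each vertex is its own trivial top-level blossom, so no
+    # blossom has a parent yet.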
+ blossomparent = dict(zip(gnodes, repeat(None))) + + # If b is a (sub-)blossom, + # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom). + blossombase = dict(zip(gnodes, gnodes)) + + # If w is a free vertex (or an unreached vertex inside a T-blossom), + # bestedge[w] = (v, w) is the least-slack edge from an S-vertex, + # or None if there is no such edge. + # If b is a (possibly trivial) top-level S-blossom, + # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom + # (v inside b), or None if there is no such edge. + # This is used for efficient computation of delta2 and delta3. + bestedge = {} + + # If v is a vertex, + # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual + # optimization problem (if all edge weights are integers, multiplication + # by two ensures that all values remain integers throughout the algorithm). + # Initially, u(v) = maxweight / 2. + dualvar = dict(zip(gnodes, repeat(maxweight))) + + # If b is a non-trivial blossom, + # blossomdual[b] = z(b) where z(b) is b's variable in the dual + # optimization problem. + blossomdual = {} + + # If (v, w) in allowedge or (w, v) in allowedg, then the edge + # (v, w) is known to have zero slack in the optimization problem; + # otherwise the edge may or may not have zero slack. + allowedge = {} + + # Queue of newly discovered S-vertices. + queue = [] + + # Return 2 * slack of edge (v, w) (does not work inside blossoms). + def slack(v, w): + return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1) + + # Assign label t to the top-level blossom containing vertex w, + # coming through an edge from vertex v. + def assignLabel(w, t, v): + b = inblossom[w] + assert label.get(w) is None and label.get(b) is None + label[w] = label[b] = t + if v is not None: + labeledge[w] = labeledge[b] = (v, w) + else: + labeledge[w] = labeledge[b] = None + bestedge[w] = bestedge[b] = None + if t == 1: + # b became an S-vertex/blossom; add it(s vertices) to the queue. + if isinstance(b, Blossom): + queue.extend(b.leaves()) + else: + queue.append(b) + elif t == 2: + # b became a T-vertex/blossom; assign label S to its mate. + # (If b is a non-trivial blossom, its base is the only vertex + # with an external mate.) + base = blossombase[b] + assignLabel(mate[base], 1, base) + + # Trace back from vertices v and w to discover either a new blossom + # or an augmenting path. Return the base vertex of the new blossom, + # or NoNode if an augmenting path was found. + def scanBlossom(v, w): + # Trace back from v and w, placing breadcrumbs as we go. + path = [] + base = NoNode + while v is not NoNode: + # Look for a breadcrumb in v's blossom or put a new breadcrumb. + b = inblossom[v] + if label[b] & 4: + base = blossombase[b] + break + assert label[b] == 1 + path.append(b) + label[b] = 5 + # Trace one step back. + if labeledge[b] is None: + # The base of blossom b is single; stop tracing this path. + assert blossombase[b] not in mate + v = NoNode + else: + assert labeledge[b][0] == mate[blossombase[b]] + v = labeledge[b][0] + b = inblossom[v] + assert label[b] == 2 + # b is a T-blossom; trace one more step back. + v = labeledge[b][0] + # Swap v and w so that we alternate between both paths. + if w is not NoNode: + v, w = w, v + # Remove breadcrumbs. + for b in path: + label[b] = 1 + # Return base vertex, if we found one. + return base + + # Construct a new blossom with given base, through S-vertices v and w. 
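+    # (The base is the vertex returned by scanBlossom, where the two
+    # alternating paths from v and w first meet.)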
+ # Label the new blossom as S; set its dual variable to zero; + # relabel its T-vertices to S and add them to the queue. + def addBlossom(base, v, w): + bb = inblossom[base] + bv = inblossom[v] + bw = inblossom[w] + # Create blossom. + b = Blossom() + blossombase[b] = base + blossomparent[b] = None + blossomparent[bb] = b + # Make list of sub-blossoms and their interconnecting edge endpoints. + b.childs = path = [] + b.edges = edgs = [(v, w)] + # Trace back from v to base. + while bv != bb: + # Add bv to the new blossom. + blossomparent[bv] = b + path.append(bv) + edgs.append(labeledge[bv]) + assert label[bv] == 2 or ( + label[bv] == 1 and labeledge[bv][0] == mate[blossombase[bv]] + ) + # Trace one step back. + v = labeledge[bv][0] + bv = inblossom[v] + # Add base sub-blossom; reverse lists. + path.append(bb) + path.reverse() + edgs.reverse() + # Trace back from w to base. + while bw != bb: + # Add bw to the new blossom. + blossomparent[bw] = b + path.append(bw) + edgs.append((labeledge[bw][1], labeledge[bw][0])) + assert label[bw] == 2 or ( + label[bw] == 1 and labeledge[bw][0] == mate[blossombase[bw]] + ) + # Trace one step back. + w = labeledge[bw][0] + bw = inblossom[w] + # Set label to S. + assert label[bb] == 1 + label[b] = 1 + labeledge[b] = labeledge[bb] + # Set dual variable to zero. + blossomdual[b] = 0 + # Relabel vertices. + for v in b.leaves(): + if label[inblossom[v]] == 2: + # This T-vertex now turns into an S-vertex because it becomes + # part of an S-blossom; add it to the queue. + queue.append(v) + inblossom[v] = b + # Compute b.mybestedges. + bestedgeto = {} + for bv in path: + if isinstance(bv, Blossom): + if bv.mybestedges is not None: + # Walk this subblossom's least-slack edges. + nblist = bv.mybestedges + # The sub-blossom won't need this data again. + bv.mybestedges = None + else: + # This subblossom does not have a list of least-slack + # edges; get the information from the vertices. + nblist = [ + (v, w) for v in bv.leaves() for w in G.neighbors(v) if v != w + ] + else: + nblist = [(bv, w) for w in G.neighbors(bv) if bv != w] + for k in nblist: + (i, j) = k + if inblossom[j] == b: + i, j = j, i + bj = inblossom[j] + if ( + bj != b + and label.get(bj) == 1 + and ((bj not in bestedgeto) or slack(i, j) < slack(*bestedgeto[bj])) + ): + bestedgeto[bj] = k + # Forget about least-slack edge of the subblossom. + bestedge[bv] = None + b.mybestedges = list(bestedgeto.values()) + # Select bestedge[b]. + mybestedge = None + bestedge[b] = None + for k in b.mybestedges: + kslack = slack(*k) + if mybestedge is None or kslack < mybestslack: + mybestedge = k + mybestslack = kslack + bestedge[b] = mybestedge + + # Expand the given top-level blossom. + def expandBlossom(b, endstage): + # This is an obnoxiously complicated recursive function for the sake of + # a stack-transformation. So, we hack around the complexity by using + # a trampoline pattern. By yielding the arguments to each recursive + # call, we keep the actual callstack flat. + + def _recurse(b, endstage): + # Convert sub-blossoms into top-level blossoms. + for s in b.childs: + blossomparent[s] = None + if isinstance(s, Blossom): + if endstage and blossomdual[s] == 0: + # Recursively expand this sub-blossom. + yield s + else: + for v in s.leaves(): + inblossom[v] = s + else: + inblossom[s] = s + # If we expand a T-blossom during a stage, its sub-blossoms must be + # relabeled. 
+            # relabeled.
+            if (not endstage) and label.get(b) == 2:
+                # Start at the sub-blossom through which the expanding
+                # blossom obtained its label, and relabel sub-blossoms until
+                # we reach the base.
+                # Figure out through which sub-blossom the expanding blossom
+                # obtained its label initially.
+                entrychild = inblossom[labeledge[b][1]]
+                # Decide in which direction we will go round the blossom.
+                j = b.childs.index(entrychild)
+                if j & 1:
+                    # Start index is odd; go forward and wrap.
+                    j -= len(b.childs)
+                    jstep = 1
+                else:
+                    # Start index is even; go backward.
+                    jstep = -1
+                # Move along the blossom until we get to the base.
+                v, w = labeledge[b]
+                while j != 0:
+                    # Relabel the T-sub-blossom.
+                    if jstep == 1:
+                        p, q = b.edges[j]
+                    else:
+                        q, p = b.edges[j - 1]
+                    label[w] = None
+                    label[q] = None
+                    assignLabel(w, 2, v)
+                    # Step to the next S-sub-blossom and note its forward edge.
+                    allowedge[(p, q)] = allowedge[(q, p)] = True
+                    j += jstep
+                    if jstep == 1:
+                        v, w = b.edges[j]
+                    else:
+                        w, v = b.edges[j - 1]
+                    # Step to the next T-sub-blossom.
+                    allowedge[(v, w)] = allowedge[(w, v)] = True
+                    j += jstep
+                # Relabel the base T-sub-blossom WITHOUT stepping through to
+                # its mate (so don't call assignLabel).
+                bw = b.childs[j]
+                label[w] = label[bw] = 2
+                labeledge[w] = labeledge[bw] = (v, w)
+                bestedge[bw] = None
+                # Continue along the blossom until we get back to entrychild.
+                j += jstep
+                while b.childs[j] != entrychild:
+                    # Examine the vertices of the sub-blossom to see whether
+                    # it is reachable from a neighbouring S-vertex outside the
+                    # expanding blossom.
+                    bv = b.childs[j]
+                    if label.get(bv) == 1:
+                        # This sub-blossom just got label S through one of its
+                        # neighbours; leave it be.
+                        j += jstep
+                        continue
+                    if isinstance(bv, Blossom):
+                        for v in bv.leaves():
+                            if label.get(v):
+                                break
+                    else:
+                        v = bv
+                    # If the sub-blossom contains a reachable vertex, assign
+                    # label T to the sub-blossom.
+                    if label.get(v):
+                        assert label[v] == 2
+                        assert inblossom[v] == bv
+                        label[v] = None
+                        label[mate[blossombase[bv]]] = None
+                        assignLabel(v, 2, labeledge[v][0])
+                    j += jstep
+            # Remove the expanded blossom entirely.
+            label.pop(b, None)
+            labeledge.pop(b, None)
+            bestedge.pop(b, None)
+            del blossomparent[b]
+            del blossombase[b]
+            del blossomdual[b]
+
+        # Now, we apply the trampoline pattern. We simulate a recursive
+        # callstack by maintaining a stack of generators, each yielding a
+        # sequence of function arguments. We grow the stack by appending a call
+        # to _recurse on each argument tuple, and shrink the stack whenever a
+        # generator is exhausted.
+        stack = [_recurse(b, endstage)]
+        while stack:
+            top = stack[-1]
+            for s in top:
+                stack.append(_recurse(s, endstage))
+                break
+            else:
+                stack.pop()
+
+    # Swap matched/unmatched edges over an alternating path through blossom b
+    # between vertex v and the base vertex. Keep blossom bookkeeping
+    # consistent.
+    def augmentBlossom(b, v):
+        # This is an obnoxiously complicated recursive function for the sake of
+        # a stack-transformation. So, we hack around the complexity by using
+        # a trampoline pattern. By yielding the arguments to each recursive
+        # call, we keep the actual callstack flat.
+
+        def _recurse(b, v):
+            # Bubble up through the blossom tree from vertex v to an immediate
+            # sub-blossom of b.
+            t = v
+            while blossomparent[t] != b:
+                t = blossomparent[t]
+            # Recursively deal with the first sub-blossom.
+            if isinstance(t, Blossom):
+                yield (t, v)
+            # Decide in which direction we will go round the blossom.
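+            # (A blossom always has an odd number of sub-blossoms, so exactly
+            # one of the two directions from t back to the base crosses an
+            # even number of connecting edges; that is the direction along
+            # which matched and unmatched edges can be swapped consistently.)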
+ i = j = b.childs.index(t) + if i & 1: + # Start index is odd; go forward and wrap. + j -= len(b.childs) + jstep = 1 + else: + # Start index is even; go backward. + jstep = -1 + # Move along the blossom until we get to the base. + while j != 0: + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if jstep == 1: + w, x = b.edges[j] + else: + x, w = b.edges[j - 1] + if isinstance(t, Blossom): + yield (t, w) + # Step to the next sub-blossom and augment it recursively. + j += jstep + t = b.childs[j] + if isinstance(t, Blossom): + yield (t, x) + # Match the edge connecting those sub-blossoms. + mate[w] = x + mate[x] = w + # Rotate the list of sub-blossoms to put the new base at the front. + b.childs = b.childs[i:] + b.childs[:i] + b.edges = b.edges[i:] + b.edges[:i] + blossombase[b] = blossombase[b.childs[0]] + assert blossombase[b] == v + + # Now, we apply the trampoline pattern. We simulate a recursive + # callstack by maintaining a stack of generators, each yielding a + # sequence of function arguments. We grow the stack by appending a call + # to _recurse on each argument tuple, and shrink the stack whenever a + # generator is exhausted. + stack = [_recurse(b, v)] + while stack: + top = stack[-1] + for args in top: + stack.append(_recurse(*args)) + break + else: + stack.pop() + + # Swap matched/unmatched edges over an alternating path between two + # single vertices. The augmenting path runs through S-vertices v and w. + def augmentMatching(v, w): + for s, j in ((v, w), (w, v)): + # Match vertex s to vertex j. Then trace back from s + # until we find a single vertex, swapping matched and unmatched + # edges as we go. + while 1: + bs = inblossom[s] + assert label[bs] == 1 + assert (labeledge[bs] is None and blossombase[bs] not in mate) or ( + labeledge[bs][0] == mate[blossombase[bs]] + ) + # Augment through the S-blossom from s to base. + if isinstance(bs, Blossom): + augmentBlossom(bs, s) + # Update mate[s] + mate[s] = j + # Trace one step back. + if labeledge[bs] is None: + # Reached single vertex; stop. + break + t = labeledge[bs][0] + bt = inblossom[t] + assert label[bt] == 2 + # Trace one more step back. + s, j = labeledge[bt] + # Augment through the T-blossom from j to base. + assert blossombase[bt] == t + if isinstance(bt, Blossom): + augmentBlossom(bt, j) + # Update mate[j] + mate[j] = s + + # Verify that the optimum solution has been reached. + def verifyOptimum(): + if maxcardinality: + # Vertices may have negative dual; + # find a constant non-negative number to add to all vertex duals. + vdualoffset = max(0, -min(dualvar.values())) + else: + vdualoffset = 0 + # 0. all dual variables are non-negative + assert min(dualvar.values()) + vdualoffset >= 0 + assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0 + # 0. all edges have non-negative slack and + # 1. all matched edges have zero slack; + for i, j, d in G.edges(data=True): + wt = d.get(weight, 1) + if i == j: + continue # ignore self-loops + s = dualvar[i] + dualvar[j] - 2 * wt + iblossoms = [i] + jblossoms = [j] + while blossomparent[iblossoms[-1]] is not None: + iblossoms.append(blossomparent[iblossoms[-1]]) + while blossomparent[jblossoms[-1]] is not None: + jblossoms.append(blossomparent[jblossoms[-1]]) + iblossoms.reverse() + jblossoms.reverse() + for bi, bj in zip(iblossoms, jblossoms): + if bi != bj: + break + s += 2 * blossomdual[bi] + assert s >= 0 + if mate.get(i) == j or mate.get(j) == i: + assert mate[i] == j and mate[j] == i + assert s == 0 + # 2. 
all single vertices have zero dual value; + for v in gnodes: + assert (v in mate) or dualvar[v] + vdualoffset == 0 + # 3. all blossoms with positive dual value are full. + for b in blossomdual: + if blossomdual[b] > 0: + assert len(b.edges) % 2 == 1 + for i, j in b.edges[1::2]: + assert mate[i] == j and mate[j] == i + # Ok. + + # Main loop: continue until no further improvement is possible. + while 1: + # Each iteration of this loop is a "stage". + # A stage finds an augmenting path and uses that to improve + # the matching. + + # Remove labels from top-level blossoms/vertices. + label.clear() + labeledge.clear() + + # Forget all about least-slack edges. + bestedge.clear() + for b in blossomdual: + b.mybestedges = None + + # Loss of labeling means that we can not be sure that currently + # allowable edges remain allowable throughout this stage. + allowedge.clear() + + # Make queue empty. + queue[:] = [] + + # Label single blossoms/vertices with S and put them in the queue. + for v in gnodes: + if (v not in mate) and label.get(inblossom[v]) is None: + assignLabel(v, 1, None) + + # Loop until we succeed in augmenting the matching. + augmented = 0 + while 1: + # Each iteration of this loop is a "substage". + # A substage tries to find an augmenting path; + # if found, the path is used to improve the matching and + # the stage ends. If there is no augmenting path, the + # primal-dual method is used to pump some slack out of + # the dual variables. + + # Continue labeling until all vertices which are reachable + # through an alternating path have got a label. + while queue and not augmented: + # Take an S vertex from the queue. + v = queue.pop() + assert label[inblossom[v]] == 1 + + # Scan its neighbours: + for w in G.neighbors(v): + if w == v: + continue # ignore self-loops + # w is a neighbour to v + bv = inblossom[v] + bw = inblossom[w] + if bv == bw: + # this edge is internal to a blossom; ignore it + continue + if (v, w) not in allowedge: + kslack = slack(v, w) + if kslack <= 0: + # edge k has zero slack => it is allowable + allowedge[(v, w)] = allowedge[(w, v)] = True + if (v, w) in allowedge: + if label.get(bw) is None: + # (C1) w is a free vertex; + # label w with T and label its mate with S (R12). + assignLabel(w, 2, v) + elif label.get(bw) == 1: + # (C2) w is an S-vertex (not in the same blossom); + # follow back-links to discover either an + # augmenting path or a new blossom. + base = scanBlossom(v, w) + if base is not NoNode: + # Found a new blossom; add it to the blossom + # bookkeeping and turn it into an S-blossom. + addBlossom(base, v, w) + else: + # Found an augmenting path; augment the + # matching and end this stage. + augmentMatching(v, w) + augmented = 1 + break + elif label.get(w) is None: + # w is inside a T-blossom, but w itself has not + # yet been reached from outside the blossom; + # mark it as reached (we need this to relabel + # during T-blossom expansion). + assert label[bw] == 2 + label[w] = 2 + labeledge[w] = (v, w) + elif label.get(bw) == 1: + # keep track of the least-slack non-allowable edge to + # a different S-blossom. + if bestedge.get(bv) is None or kslack < slack(*bestedge[bv]): + bestedge[bv] = (v, w) + elif label.get(w) is None: + # w is a free vertex (or an unreached vertex inside + # a T-blossom) but we can not reach it yet; + # keep track of the least-slack edge that reaches w. 
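+                        # (These bestedge entries for free vertices are
+                        # exactly what the delta2 computation below
+                        # minimizes over.)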
+                        if bestedge.get(w) is None or kslack < slack(*bestedge[w]):
+                            bestedge[w] = (v, w)
+
+            if augmented:
+                break
+
+            # There is no augmenting path under these constraints;
+            # compute delta and reduce slack in the optimization problem.
+            # (Note that our vertex dual variables, edge slacks and deltas
+            # are pre-multiplied by two.)
+            deltatype = -1
+            delta = deltaedge = deltablossom = None
+
+            # Compute delta1: the minimum value of any vertex dual.
+            if not maxcardinality:
+                deltatype = 1
+                delta = min(dualvar.values())
+
+            # Compute delta2: the minimum slack on any edge between
+            # an S-vertex and a free vertex.
+            for v in G.nodes():
+                if label.get(inblossom[v]) is None and bestedge.get(v) is not None:
+                    d = slack(*bestedge[v])
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 2
+                        deltaedge = bestedge[v]
+
+            # Compute delta3: half the minimum slack on any edge between
+            # a pair of S-blossoms.
+            for b in blossomparent:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 1
+                    and bestedge.get(b) is not None
+                ):
+                    kslack = slack(*bestedge[b])
+                    if allinteger:
+                        assert (kslack % 2) == 0
+                        d = kslack // 2
+                    else:
+                        d = kslack / 2.0
+                    if deltatype == -1 or d < delta:
+                        delta = d
+                        deltatype = 3
+                        deltaedge = bestedge[b]
+
+            # Compute delta4: minimum z variable of any T-blossom.
+            for b in blossomdual:
+                if (
+                    blossomparent[b] is None
+                    and label.get(b) == 2
+                    and (deltatype == -1 or blossomdual[b] < delta)
+                ):
+                    delta = blossomdual[b]
+                    deltatype = 4
+                    deltablossom = b
+
+            if deltatype == -1:
+                # No further improvement possible; max-cardinality optimum
+                # reached. Do a final delta update to make the optimum
+                # verifiable.
+                assert maxcardinality
+                deltatype = 1
+                delta = max(0, min(dualvar.values()))
+
+            # Update dual variables according to delta.
+            for v in gnodes:
+                if label.get(inblossom[v]) == 1:
+                    # S-vertex: 2*u = 2*u - 2*delta
+                    dualvar[v] -= delta
+                elif label.get(inblossom[v]) == 2:
+                    # T-vertex: 2*u = 2*u + 2*delta
+                    dualvar[v] += delta
+            for b in blossomdual:
+                if blossomparent[b] is None:
+                    if label.get(b) == 1:
+                        # top-level S-blossom: z = z + 2*delta
+                        blossomdual[b] += delta
+                    elif label.get(b) == 2:
+                        # top-level T-blossom: z = z - 2*delta
+                        blossomdual[b] -= delta
+
+            # Take action at the point where minimum delta occurred.
+            if deltatype == 1:
+                # No further improvement possible; optimum reached.
+                break
+            elif deltatype == 2:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                assert label[inblossom[v]] == 1
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                queue.append(v)
+            elif deltatype == 3:
+                # Use the least-slack edge to continue the search.
+                (v, w) = deltaedge
+                allowedge[(v, w)] = allowedge[(w, v)] = True
+                assert label[inblossom[v]] == 1
+                queue.append(v)
+            elif deltatype == 4:
+                # Expand the least-z blossom.
+                expandBlossom(deltablossom, False)
+
+            # End of this substage.
+
+        # Paranoia check that the matching is symmetric.
+        for v in mate:
+            assert mate[mate[v]] == v
+
+        # Stop when no more augmenting path can be found.
+        if not augmented:
+            break
+
+        # End of a stage; expand all S-blossoms which have zero dual.
+        for b in list(blossomdual.keys()):
+            if b not in blossomdual:
+                continue  # already expanded
+            if blossomparent[b] is None and label.get(b) == 1 and blossomdual[b] == 0:
+                expandBlossom(b, True)
+
+    # Verify that we reached the optimum solution (only for integer weights).
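+    # (verifyOptimum() checks the complementary-slackness conditions with
+    # exact equality tests such as ``s == 0``; those are only reliable in
+    # integer arithmetic, so the check is skipped for float weights, where
+    # rounding error could trigger spurious assertion failures.)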
+ if allinteger: + verifyOptimum() + + return matching_dict_to_set(mate) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/minors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cf15ddb592541a959149842a4d581cf9f0a3e5e1 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/minors/__init__.py @@ -0,0 +1,27 @@ +""" +Subpackages related to graph-minor problems. + +In graph theory, an undirected graph H is called a minor of the graph G if H +can be formed from G by deleting edges and vertices and by contracting edges +[1]_. + +References +---------- +.. [1] https://en.wikipedia.org/wiki/Graph_minor +""" + +from networkx.algorithms.minors.contraction import ( + contracted_edge, + contracted_nodes, + equivalence_classes, + identified_nodes, + quotient_graph, +) + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e30db53b97cd38029ac8941ec0201f8271f3b5a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a60dedd2984ea01dcdb35f4e95f9feacc8d53fd1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/minors/__pycache__/contraction.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/contraction.py b/MLPY/Lib/site-packages/networkx/algorithms/minors/contraction.py new file mode 100644 index 0000000000000000000000000000000000000000..1b4da352296a6c99a5e2dcd67f495a8542f25ee0 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/minors/contraction.py @@ -0,0 +1,630 @@ +"""Provides functions for computing minors of a graph.""" +from itertools import chain, combinations, permutations, product + +import networkx as nx +from networkx import density +from networkx.exception import NetworkXException +from networkx.utils import arbitrary_element + +__all__ = [ + "contracted_edge", + "contracted_nodes", + "equivalence_classes", + "identified_nodes", + "quotient_graph", +] + +chaini = chain.from_iterable + + +def equivalence_classes(iterable, relation): + """Returns equivalence classes of `relation` when applied to `iterable`. + + The equivalence classes, or blocks, consist of objects from `iterable` + which are all equivalent. They are defined to be equivalent if the + `relation` function returns `True` when passed any two objects from that + class, and `False` otherwise. To define an equivalence relation the + function must be reflexive, symmetric and transitive. + + Parameters + ---------- + iterable : list, tuple, or set + An iterable of elements/nodes. + + relation : function + A Boolean-valued function that implements an equivalence relation + (reflexive, symmetric, transitive binary relation) on the elements + of `iterable` - it must take two elements and return `True` if + they are related, or `False` if not. 
+ + Returns + ------- + set of frozensets + A set of frozensets representing the partition induced by the equivalence + relation function `relation` on the elements of `iterable`. Each + member set in the return set represents an equivalence class, or + block, of the partition. + + Duplicate elements will be ignored so it makes the most sense for + `iterable` to be a :class:`set`. + + Notes + ----- + This function does not check that `relation` represents an equivalence + relation. You can check that your equivalence classes provide a partition + using `is_partition`. + + Examples + -------- + Let `X` be the set of integers from `0` to `9`, and consider an equivalence + relation `R` on `X` of congruence modulo `3`: this means that two integers + `x` and `y` in `X` are equivalent under `R` if they leave the same + remainder when divided by `3`, i.e. `(x - y) mod 3 = 0`. + + The equivalence classes of this relation are `{0, 3, 6, 9}`, `{1, 4, 7}`, + `{2, 5, 8}`: `0`, `3`, `6`, `9` are all divisible by `3` and leave zero + remainder; `1`, `4`, `7` leave remainder `1`; while `2`, `5` and `8` leave + remainder `2`. We can see this by calling `equivalence_classes` with + `X` and a function implementation of `R`. + + >>> X = set(range(10)) + >>> def mod3(x, y): return (x - y) % 3 == 0 + >>> equivalence_classes(X, mod3) # doctest: +SKIP + {frozenset({1, 4, 7}), frozenset({8, 2, 5}), frozenset({0, 9, 3, 6})} + """ + # For simplicity of implementation, we initialize the return value as a + # list of lists, then convert it to a set of sets at the end of the + # function. + blocks = [] + # Determine the equivalence class for each element of the iterable. + for y in iterable: + # Each element y must be in *exactly one* equivalence class. + # + # Each block is guaranteed to be non-empty + for block in blocks: + x = arbitrary_element(block) + if relation(x, y): + block.append(y) + break + else: + # If the element y is not part of any known equivalence class, it + # must be in its own, so we create a new singleton equivalence + # class for it. + blocks.append([y]) + return {frozenset(block) for block in blocks} + + +@nx._dispatch(edge_attrs="weight") +def quotient_graph( + G, + partition, + edge_relation=None, + node_data=None, + edge_data=None, + weight="weight", + relabel=False, + create_using=None, +): + """Returns the quotient graph of `G` under the specified equivalence + relation on nodes. + + Parameters + ---------- + G : NetworkX graph + The graph for which to return the quotient graph with the + specified node relation. + + partition : function, or dict or list of lists, tuples or sets + If a function, this function must represent an equivalence + relation on the nodes of `G`. It must take two arguments *u* + and *v* and return True exactly when *u* and *v* are in the + same equivalence class. The equivalence classes form the nodes + in the returned graph. + + If a dict of lists/tuples/sets, the keys can be any meaningful + block labels, but the values must be the block lists/tuples/sets + (one list/tuple/set per block), and the blocks must form a valid + partition of the nodes of the graph. That is, each node must be + in exactly one block of the partition. + + If a list of sets, the list must form a valid partition of + the nodes of the graph. That is, each node must be in exactly + one block of the partition. + + edge_relation : Boolean function with two arguments + This function must represent an edge relation on the *blocks* of + the `partition` of `G`. 
It must take two arguments, *B* and *C*, + each one a set of nodes, and return True exactly when there should be + an edge joining block *B* to block *C* in the returned graph. + + If `edge_relation` is not specified, it is assumed to be the + following relation. Block *B* is related to block *C* if and + only if some node in *B* is adjacent to some node in *C*, + according to the edge set of `G`. + + node_data : function + This function takes one argument, *B*, a set of nodes in `G`, + and must return a dictionary representing the node data + attributes to set on the node representing *B* in the quotient graph. + If None, the following node attributes will be set: + + * 'graph', the subgraph of the graph `G` that this block + represents, + * 'nnodes', the number of nodes in this block, + * 'nedges', the number of edges within this block, + * 'density', the density of the subgraph of `G` that this + block represents. + + edge_data : function + This function takes two arguments, *B* and *C*, each one a set + of nodes, and must return a dictionary representing the edge + data attributes to set on the edge joining *B* and *C*, should + there be an edge joining *B* and *C* in the quotient graph (if + no such edge occurs in the quotient graph as determined by + `edge_relation`, then the output of this function is ignored). + + If the quotient graph would be a multigraph, this function is + not applied, since the edge data from each edge in the graph + `G` appears in the edges of the quotient graph. + + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + relabel : bool + If True, relabel the nodes of the quotient graph to be + nonnegative integers. Otherwise, the nodes are identified with + :class:`frozenset` instances representing the blocks given in + `partition`. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Returns + ------- + NetworkX graph + The quotient graph of `G` under the equivalence relation + specified by `partition`. If the partition were given as a + list of :class:`set` instances and `relabel` is False, + each node will be a :class:`frozenset` corresponding to the same + :class:`set`. + + Raises + ------ + NetworkXException + If the given partition is not a valid partition of the nodes of + `G`. + + Examples + -------- + The quotient graph of the complete bipartite graph under the "same + neighbors" equivalence relation is `K_2`. Under this relation, two nodes + are equivalent if they are not adjacent but have the same neighbor set. + + >>> G = nx.complete_bipartite_graph(2, 3) + >>> same_neighbors = lambda u, v: ( + ... u not in G[v] and v not in G[u] and G[u] == G[v] + ... ) + >>> Q = nx.quotient_graph(G, same_neighbors) + >>> K2 = nx.complete_graph(2) + >>> nx.is_isomorphic(Q, K2) + True + + The quotient graph of a directed graph under the "same strongly connected + component" equivalence relation is the condensation of the graph (see + :func:`condensation`). This example comes from the Wikipedia article + *`Strongly connected component`_*. + + >>> G = nx.DiGraph() + >>> edges = [ + ... "ab", + ... "be", + ... "bf", + ... "bc", + ... "cg", + ... "cd", + ... "dc", + ... "dh", + ... "ea", + ... "ef", + ... "fg", + ... "gf", + ... "hd", + ... "hf", + ... 
]
+    >>> G.add_edges_from(tuple(x) for x in edges)
+    >>> components = list(nx.strongly_connected_components(G))
+    >>> sorted(sorted(component) for component in components)
+    [['a', 'b', 'e'], ['c', 'd', 'h'], ['f', 'g']]
+    >>>
+    >>> C = nx.condensation(G, components)
+    >>> component_of = C.graph["mapping"]
+    >>> same_component = lambda u, v: component_of[u] == component_of[v]
+    >>> Q = nx.quotient_graph(G, same_component)
+    >>> nx.is_isomorphic(C, Q)
+    True
+
+    Node identification can be represented as the quotient of a graph under the
+    equivalence relation that places the two nodes in one block and each other
+    node in its own singleton block.
+
+    >>> K24 = nx.complete_bipartite_graph(2, 4)
+    >>> K34 = nx.complete_bipartite_graph(3, 4)
+    >>> C = nx.contracted_nodes(K34, 1, 2)
+    >>> nodes = {1, 2}
+    >>> is_contracted = lambda u, v: u in nodes and v in nodes
+    >>> Q = nx.quotient_graph(K34, is_contracted)
+    >>> nx.is_isomorphic(Q, C)
+    True
+    >>> nx.is_isomorphic(Q, K24)
+    True
+
+    The blockmodeling technique described in [1]_ can be implemented as a
+    quotient graph.
+
+    >>> G = nx.path_graph(6)
+    >>> partition = [{0, 1}, {2, 3}, {4, 5}]
+    >>> M = nx.quotient_graph(G, partition, relabel=True)
+    >>> list(M.edges())
+    [(0, 1), (1, 2)]
+
+    Here is the same example, but with the partition given as a dict of block sets.
+
+    >>> G = nx.path_graph(6)
+    >>> partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}}
+    >>> M = nx.quotient_graph(G, partition, relabel=True)
+    >>> list(M.edges())
+    [(0, 1), (1, 2)]
+
+    Partitions can be represented in various ways:
+
+    0. a list/tuple/set of block lists/tuples/sets
+    1. a dict with block labels as keys and block lists/tuples/sets as values
+    2. a dict with block lists/tuples/sets as keys and block labels as values
+    3. a function from nodes in the original iterable to block labels
+    4. an equivalence relation function on the target iterable
+
+    As `quotient_graph` is designed to accept partitions represented as (0), (1) or
+    (4) only, the `equivalence_classes` function can be used to get the partitions
+    in the right form, in order to call `quotient_graph`.
+
+    .. _Strongly connected component: https://en.wikipedia.org/wiki/Strongly_connected_component
+
+    References
+    ----------
+    .. [1] Patrick Doreian, Vladimir Batagelj, and Anuska Ferligoj.
+           *Generalized Blockmodeling*.
+           Cambridge University Press, 2004.
+
+    """
+    # If the user provided an equivalence relation as a function, compute the
+    # blocks of the partition on the nodes of G induced by that relation.
+    if callable(partition):
+        # equivalence_classes always returns a partition of all of G's nodes.
+        partition = equivalence_classes(G, partition)
+        if not nx.community.is_partition(G, partition):
+            raise nx.NetworkXException(
+                "Input `partition` is not an equivalence relation for nodes of G"
+            )
+        return _quotient_graph(
+            G,
+            partition,
+            edge_relation,
+            node_data,
+            edge_data,
+            weight,
+            relabel,
+            create_using,
+        )
+
+    # If the partition is a dict, it is assumed to be one where the keys are
+    # user-defined block labels, and values are block lists, tuples or sets.
+    if isinstance(partition, dict):
+        partition = list(partition.values())
+
+    # If the user provided the partition as a collection of sets, check
+    # whether it covers all of G's nodes; if it does not, build a suitable
+    # subgraph view first.
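+    # For example, with G = nx.path_graph(6) and
+    # partition = [[0, 1], [2, 3], [5]], node 4 is not covered, so the
+    # quotient is built over G.subgraph({0, 1, 2, 3, 5}) rather than over G
+    # itself (test_quotient_graph_incomplete_partition exercises this case).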
+ partition_nodes = set().union(*partition) + if len(partition_nodes) != len(G): + G = G.subgraph(partition_nodes) + # Each node in the graph/subgraph must be in exactly one block. + if not nx.community.is_partition(G, partition): + raise NetworkXException("each node must be in exactly one part of `partition`") + return _quotient_graph( + G, + partition, + edge_relation, + node_data, + edge_data, + weight, + relabel, + create_using, + ) + + +def _quotient_graph( + G, partition, edge_relation, node_data, edge_data, weight, relabel, create_using +): + """Construct the quotient graph assuming input has been checked""" + if create_using is None: + H = G.__class__() + else: + H = nx.empty_graph(0, create_using) + # By default set some basic information about the subgraph that each block + # represents on the nodes in the quotient graph. + if node_data is None: + + def node_data(b): + S = G.subgraph(b) + return { + "graph": S, + "nnodes": len(S), + "nedges": S.number_of_edges(), + "density": density(S), + } + + # Each block of the partition becomes a node in the quotient graph. + partition = [frozenset(b) for b in partition] + H.add_nodes_from((b, node_data(b)) for b in partition) + # By default, the edge relation is the relation defined as follows. B is + # adjacent to C if a node in B is adjacent to a node in C, according to the + # edge set of G. + # + # This is not a particularly efficient implementation of this relation: + # there are O(n^2) pairs to check and each check may require O(log n) time + # (to check set membership). This can certainly be parallelized. + if edge_relation is None: + + def edge_relation(b, c): + return any(v in G[u] for u, v in product(b, c)) + + # By default, sum the weights of the edges joining pairs of nodes across + # blocks to get the weight of the edge joining those two blocks. + if edge_data is None: + + def edge_data(b, c): + edgedata = ( + d + for u, v, d in G.edges(b | c, data=True) + if (u in b and v in c) or (u in c and v in b) + ) + return {"weight": sum(d.get(weight, 1) for d in edgedata)} + + block_pairs = permutations(H, 2) if H.is_directed() else combinations(H, 2) + # In a multigraph, add one edge in the quotient graph for each edge + # in the original graph. + if H.is_multigraph(): + edges = chaini( + ( + (b, c, G.get_edge_data(u, v, default={})) + for u, v in product(b, c) + if v in G[u] + ) + for b, c in block_pairs + if edge_relation(b, c) + ) + # In a simple graph, apply the edge data function to each pair of + # blocks to determine the edge data attributes to apply to each edge + # in the quotient graph. + else: + edges = ( + (b, c, edge_data(b, c)) for (b, c) in block_pairs if edge_relation(b, c) + ) + H.add_edges_from(edges) + # If requested by the user, relabel the nodes to be integers, + # numbered in increasing order from zero in the same order as the + # iteration order of `partition`. + if relabel: + # Can't use nx.convert_node_labels_to_integers() here since we + # want the order of iteration to be the same for backward + # compatibility with the nx.blockmodel() function. + labels = {b: i for i, b in enumerate(partition)} + H = nx.relabel_nodes(H, labels) + return H + + +@nx._dispatch(preserve_all_attrs=True) +def contracted_nodes(G, u, v, self_loops=True, copy=True): + """Returns the graph that results from contracting `u` and `v`. + + Node contraction identifies the two nodes as a single node incident to any + edge that was incident to the original two nodes. 
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph whose nodes will be contracted.
+
+    u, v : nodes
+        Must be nodes in `G`.
+
+    self_loops : Boolean
+        If this is True, any edges joining `u` and `v` in `G` become
+        self-loops on the new node in the returned graph.
+
+    copy : Boolean
+        If this is True (default True), make a copy of
+        `G` and return that instead of directly changing `G`.
+
+
+    Returns
+    -------
+    NetworkX graph
+        If copy is True,
+        A new graph object of the same type as `G` (leaving `G` unmodified)
+        with `u` and `v` identified in a single node. The right node `v`
+        will be merged into the node `u`, so only `u` will appear in the
+        returned graph.
+        If copy is False,
+        Modifies `G` with `u` and `v` identified in a single node.
+        The right node `v` will be merged into the node `u`, so
+        only `u` will appear in the returned graph.
+
+    Notes
+    -----
+    For multigraphs, the edge keys for the realigned edges may
+    not be the same as the edge keys for the old edges. This is
+    natural because edge keys are unique only within each pair of nodes.
+
+    For non-multigraphs where `u` and `v` are adjacent to a third node
+    `w`, the edge (`v`, `w`) will be contracted into the edge (`u`,
+    `w`) with its attributes stored into a "contraction" attribute.
+
+    This function is also available as `identified_nodes`.
+
+    Examples
+    --------
+    Contracting two nonadjacent nodes of the cycle graph on four nodes `C_4`
+    yields the path graph (ignoring parallel edges):
+
+    >>> G = nx.cycle_graph(4)
+    >>> M = nx.contracted_nodes(G, 1, 3)
+    >>> P3 = nx.path_graph(3)
+    >>> nx.is_isomorphic(M, P3)
+    True
+
+    >>> G = nx.MultiGraph(P3)
+    >>> M = nx.contracted_nodes(G, 0, 2)
+    >>> M.edges
+    MultiEdgeView([(0, 1, 0), (0, 1, 1)])
+
+    >>> G = nx.Graph([(1, 2), (2, 2)])
+    >>> H = nx.contracted_nodes(G, 1, 2, self_loops=False)
+    >>> list(H.nodes())
+    [1]
+    >>> list(H.edges())
+    [(1, 1)]
+
+    In a ``MultiDiGraph`` with a self loop, the in and out edges will
+    be treated separately as edges, so while contracting a node which
+    has a self loop the contraction will add multiple edges:
+
+    >>> G = nx.MultiDiGraph([(1, 2), (2, 2)])
+    >>> H = nx.contracted_nodes(G, 1, 2)
+    >>> list(H.edges())  # edge 1->2, 2->2, 2<-2 from the original Graph G
+    [(1, 1), (1, 1), (1, 1)]
+    >>> H = nx.contracted_nodes(G, 1, 2, self_loops=False)
+    >>> list(H.edges())  # edge 2->2, 2<-2 from the original Graph G
+    [(1, 1), (1, 1)]
+
+    See Also
+    --------
+    contracted_edge
+    quotient_graph
+
+    """
+    # Copying has significant overhead and can be disabled if needed
+    if copy:
+        H = G.copy()
+    else:
+        H = G
+
+    # edge code uses G.edges(v) instead of G.adj[v] to handle multiedges
+    if H.is_directed():
+        edges_to_remap = chain(G.in_edges(v, data=True), G.out_edges(v, data=True))
+    else:
+        edges_to_remap = G.edges(v, data=True)
+
+    # If H = G (copy=False), the generators change as H changes,
+    # so make edges_to_remap independent of H.
+    if not copy:
+        edges_to_remap = list(edges_to_remap)
+
+    v_data = H.nodes[v]
+    H.remove_node(v)
+
+    for prev_w, prev_x, d in edges_to_remap:
+        w = prev_w if prev_w != v else u
+        x = prev_x if prev_x != v else u
+
+        if ({prev_w, prev_x} == {u, v}) and not self_loops:
+            continue
+
+        if not H.has_edge(w, x) or G.is_multigraph():
+            H.add_edge(w, x, **d)
+        else:
+            if "contraction" in H.edges[(w, x)]:
+                H.edges[(w, x)]["contraction"][(prev_w, prev_x)] = d
+            else:
+                H.edges[(w, x)]["contraction"] = {(prev_w, prev_x): d}
+
+    if "contraction" in H.nodes[u]:
+        H.nodes[u]["contraction"][v] = v_data
+    else:
H.nodes[u]["contraction"] = {v: v_data} + return H + + +identified_nodes = contracted_nodes + + +@nx._dispatch(preserve_edge_attrs=True) +def contracted_edge(G, edge, self_loops=True, copy=True): + """Returns the graph that results from contracting the specified edge. + + Edge contraction identifies the two endpoints of the edge as a single node + incident to any edge that was incident to the original two nodes. A graph + that results from edge contraction is called a *minor* of the original + graph. + + Parameters + ---------- + G : NetworkX graph + The graph whose edge will be contracted. + + edge : tuple + Must be a pair of nodes in `G`. + + self_loops : Boolean + If this is True, any edges (including `edge`) joining the + endpoints of `edge` in `G` become self-loops on the new node in the + returned graph. + + copy : Boolean (default True) + If this is True, a the contraction will be performed on a copy of `G`, + otherwise the contraction will happen in place. + + Returns + ------- + Networkx graph + A new graph object of the same type as `G` (leaving `G` unmodified) + with endpoints of `edge` identified in a single node. The right node + of `edge` will be merged into the left one, so only the left one will + appear in the returned graph. + + Raises + ------ + ValueError + If `edge` is not an edge in `G`. + + Examples + -------- + Attempting to contract two nonadjacent nodes yields an error: + + >>> G = nx.cycle_graph(4) + >>> nx.contracted_edge(G, (1, 3)) + Traceback (most recent call last): + ... + ValueError: Edge (1, 3) does not exist in graph G; cannot contract it + + Contracting two adjacent nodes in the cycle graph on *n* nodes yields the + cycle graph on *n - 1* nodes: + + >>> C5 = nx.cycle_graph(5) + >>> C4 = nx.cycle_graph(4) + >>> M = nx.contracted_edge(C5, (0, 1), self_loops=False) + >>> nx.is_isomorphic(M, C4) + True + + See also + -------- + contracted_nodes + quotient_graph + + """ + u, v = edge[:2] + if not G.has_edge(u, v): + raise ValueError(f"Edge {edge} does not exist in graph G; cannot contract it") + return contracted_nodes(G, u, v, self_loops=self_loops, copy=copy) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c13b804ac686a39ed44391869667f53a1ce9ffc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/test_contraction.py b/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/test_contraction.py new file mode 100644 index 0000000000000000000000000000000000000000..5b7b7ccd3ea96019af8ec34cab2715e80c6b88ba --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/minors/tests/test_contraction.py @@ -0,0 +1,445 @@ +"""Unit tests for the :mod:`networkx.algorithms.minors.contraction` module.""" +import pytest + +import networkx as nx +from networkx.utils import arbitrary_element, edges_equal, nodes_equal + + +def test_quotient_graph_complete_multipartite(): + """Tests that the quotient graph of the complete *n*-partite graph + under the "same neighbors" node relation is the complete graph on *n* + nodes. + + """ + G = nx.complete_multipartite_graph(2, 3, 4) + # Two nodes are equivalent if they are not adjacent but have the same + # neighbor set. 
+ + def same_neighbors(u, v): + return u not in G[v] and v not in G[u] and G[u] == G[v] + + expected = nx.complete_graph(3) + actual = nx.quotient_graph(G, same_neighbors) + # It won't take too long to run a graph isomorphism algorithm on such + # small graphs. + assert nx.is_isomorphic(expected, actual) + + +def test_quotient_graph_complete_bipartite(): + """Tests that the quotient graph of the complete bipartite graph under + the "same neighbors" node relation is `K_2`. + + """ + G = nx.complete_bipartite_graph(2, 3) + # Two nodes are equivalent if they are not adjacent but have the same + # neighbor set. + + def same_neighbors(u, v): + return u not in G[v] and v not in G[u] and G[u] == G[v] + + expected = nx.complete_graph(2) + actual = nx.quotient_graph(G, same_neighbors) + # It won't take too long to run a graph isomorphism algorithm on such + # small graphs. + assert nx.is_isomorphic(expected, actual) + + +def test_quotient_graph_edge_relation(): + """Tests for specifying an alternate edge relation for the quotient + graph. + + """ + G = nx.path_graph(5) + + def identity(u, v): + return u == v + + def same_parity(b, c): + return arbitrary_element(b) % 2 == arbitrary_element(c) % 2 + + actual = nx.quotient_graph(G, identity, same_parity) + expected = nx.Graph() + expected.add_edges_from([(0, 2), (0, 4), (2, 4)]) + expected.add_edge(1, 3) + assert nx.is_isomorphic(actual, expected) + + +def test_condensation_as_quotient(): + """This tests that the condensation of a graph can be viewed as the + quotient graph under the "in the same connected component" equivalence + relation. + + """ + # This example graph comes from the file `test_strongly_connected.py`. + G = nx.DiGraph() + G.add_edges_from( + [ + (1, 2), + (2, 3), + (2, 11), + (2, 12), + (3, 4), + (4, 3), + (4, 5), + (5, 6), + (6, 5), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 9), + (9, 7), + (10, 6), + (11, 2), + (11, 4), + (11, 6), + (12, 6), + (12, 11), + ] + ) + scc = list(nx.strongly_connected_components(G)) + C = nx.condensation(G, scc) + component_of = C.graph["mapping"] + # Two nodes are equivalent if they are in the same connected component. 
+ + def same_component(u, v): + return component_of[u] == component_of[v] + + Q = nx.quotient_graph(G, same_component) + assert nx.is_isomorphic(C, Q) + + +def test_path(): + G = nx.path_graph(6) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_lists(): + G = nx.path_graph(6) + partition = {0: [0, 1], 2: [2, 3], 4: [4, 5]} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_tuples(): + G = nx.path_graph(6) + partition = {0: (0, 1), 2: (2, 3), 4: (4, 5)} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_path__partition_provided_as_dict_of_sets(): + G = nx.path_graph(6) + partition = {0: {0, 1}, 2: {2, 3}, 4: {4, 5}} + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_multigraph_path(): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_directed_path(): + G = nx.DiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + +def test_directed_multigraph_path(): + G = nx.MultiDiGraph() + nx.add_path(G, range(6)) + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 0.5 + + +def test_overlapping_blocks(): + with pytest.raises(nx.NetworkXException): + G = nx.path_graph(6) + partition = [{0, 1, 2}, {2, 3}, {4, 5}] + nx.quotient_graph(G, partition) + + +def test_weighted_path(): + G = nx.path_graph(6) + for i in range(5): + G[i][i + 1]["w"] = i + 1 + partition = [{0, 1}, {2, 3}, {4, 5}] + M = nx.quotient_graph(G, partition, weight="w", relabel=True) + assert nodes_equal(M, [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + assert M[0][1]["weight"] == 2 + assert M[1][2]["weight"] == 4 + for n in M: + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1 + + +def test_barbell(): + G = nx.barbell_graph(3, 0) + partition = [{0, 1, 2}, {3, 4, 5}] + M = 
nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + +def test_barbell_plus(): + G = nx.barbell_graph(3, 0) + # Add an extra edge joining the bells. + G.add_edge(0, 5) + partition = [{0, 1, 2}, {3, 4, 5}] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M, [0, 1]) + assert edges_equal(M.edges(), [(0, 1)]) + assert M[0][1]["weight"] == 2 + for n in M: + assert M.nodes[n]["nedges"] == 3 + assert M.nodes[n]["nnodes"] == 3 + assert M.nodes[n]["density"] == 1 + + +def test_blockmodel(): + G = nx.path_graph(6) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + +def test_multigraph_blockmodel(): + G = nx.MultiGraph(nx.path_graph(6)) + partition = [[0, 1], [2, 3], [4, 5]] + M = nx.quotient_graph(G, partition, create_using=nx.MultiGraph(), relabel=True) + assert nodes_equal(M.nodes(), [0, 1, 2]) + assert edges_equal(M.edges(), [(0, 1), (1, 2)]) + for n in M.nodes(): + assert M.nodes[n]["nedges"] == 1 + assert M.nodes[n]["nnodes"] == 2 + assert M.nodes[n]["density"] == 1.0 + + +def test_quotient_graph_incomplete_partition(): + G = nx.path_graph(6) + partition = [] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), []) + assert edges_equal(H.edges(), []) + + partition = [[0, 1], [2, 3], [5]] + H = nx.quotient_graph(G, partition, relabel=True) + assert nodes_equal(H.nodes(), [0, 1, 2]) + assert edges_equal(H.edges(), [(0, 1)]) + + +def test_undirected_node_contraction(): + """Tests for node contraction in an undirected graph.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes(G, 0, 1) + expected = nx.cycle_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + +def test_directed_node_contraction(): + """Tests for node contraction in a directed graph.""" + G = nx.DiGraph(nx.cycle_graph(4)) + actual = nx.contracted_nodes(G, 0, 1) + expected = nx.DiGraph(nx.cycle_graph(3)) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, expected) + + +def test_undirected_node_contraction_no_copy(): + """Tests for node contraction in an undirected graph + by making changes in place.""" + G = nx.cycle_graph(4) + actual = nx.contracted_nodes(G, 0, 1, copy=False) + expected = nx.cycle_graph(3) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, G) + assert nx.is_isomorphic(actual, expected) + + +def test_directed_node_contraction_no_copy(): + """Tests for node contraction in a directed graph + by making changes in place.""" + G = nx.DiGraph(nx.cycle_graph(4)) + actual = nx.contracted_nodes(G, 0, 1, copy=False) + expected = nx.DiGraph(nx.cycle_graph(3)) + expected.add_edge(0, 0) + expected.add_edge(0, 0) + assert nx.is_isomorphic(actual, G) + assert nx.is_isomorphic(actual, expected) + + +def test_create_multigraph(): + """Tests that using a MultiGraph creates multiple edges.""" + G = nx.path_graph(3, create_using=nx.MultiGraph()) + G.add_edge(0, 1) + G.add_edge(0, 0) + G.add_edge(0, 2) + actual = nx.contracted_nodes(G, 0, 2) + expected = nx.MultiGraph() + expected.add_edge(0, 1) + expected.add_edge(0, 1) + expected.add_edge(0, 1) + 
expected.add_edge(0, 0)
+    expected.add_edge(0, 0)
+    assert edges_equal(actual.edges, expected.edges)
+
+
+def test_multigraph_keys():
+    """Tests that multiedge keys are reset in new graph."""
+    G = nx.path_graph(3, create_using=nx.MultiGraph())
+    G.add_edge(0, 1, 5)
+    G.add_edge(0, 0, 0)
+    G.add_edge(0, 2, 5)
+    actual = nx.contracted_nodes(G, 0, 2)
+    expected = nx.MultiGraph()
+    expected.add_edge(0, 1, 0)
+    expected.add_edge(0, 1, 5)
+    expected.add_edge(0, 1, 2)  # keyed as 2 b/c 2 edges already in G
+    expected.add_edge(0, 0, 0)
+    expected.add_edge(0, 0, 1)  # this comes from (0, 2, 5)
+    assert edges_equal(actual.edges, expected.edges)
+
+
+def test_node_attributes():
+    """Tests that node contraction preserves node attributes."""
+    G = nx.cycle_graph(4)
+    # Add some data to the two nodes being contracted.
+    G.nodes[0]["foo"] = "bar"
+    G.nodes[1]["baz"] = "xyzzy"
+    actual = nx.contracted_nodes(G, 0, 1)
+    # We expect that contracting the nodes 0 and 1 in C_4 yields K_3, but
+    # with nodes labeled 0, 2, and 3, and with a self-loop on 0.
+    expected = nx.complete_graph(3)
+    expected = nx.relabel_nodes(expected, {1: 2, 2: 3})
+    expected.add_edge(0, 0)
+    cdict = {1: {"baz": "xyzzy"}}
+    expected.nodes[0].update({"foo": "bar", "contraction": cdict})
+    assert nx.is_isomorphic(actual, expected)
+    assert actual.nodes == expected.nodes
+
+
+def test_edge_attributes():
+    """Tests that node contraction preserves edge attributes."""
+    # Shape: src1 --> dest <-- src2
+    G = nx.DiGraph([("src1", "dest"), ("src2", "dest")])
+    G["src1"]["dest"]["value"] = "src1-->dest"
+    G["src2"]["dest"]["value"] = "src2-->dest"
+    H = nx.MultiDiGraph(G)
+
+    G = nx.contracted_nodes(G, "src1", "src2")  # New Shape: src1 --> dest
+    assert G.edges[("src1", "dest")]["value"] == "src1-->dest"
+    assert (
+        G.edges[("src1", "dest")]["contraction"][("src2", "dest")]["value"]
+        == "src2-->dest"
+    )
+
+    H = nx.contracted_nodes(H, "src1", "src2")  # New Shape: src1 -(x2)-> dest
+    assert len(H.edges(("src1", "dest"))) == 2
+
+
+def test_without_self_loops():
+    """Tests for node contraction without preserving self-loops."""
+    G = nx.cycle_graph(4)
+    actual = nx.contracted_nodes(G, 0, 1, self_loops=False)
+    expected = nx.complete_graph(3)
+    assert nx.is_isomorphic(actual, expected)
+
+
+def test_contract_loop_graph():
+    """Tests for node contraction when nodes have loops."""
+    G = nx.cycle_graph(4)
+    G.add_edge(0, 0)
+    actual = nx.contracted_nodes(G, 0, 1)
+    expected = nx.complete_graph([0, 2, 3])
+    expected.add_edge(0, 0)
+    expected.add_edge(0, 0)
+    assert edges_equal(actual.edges, expected.edges)
+    actual = nx.contracted_nodes(G, 1, 0)
+    expected = nx.complete_graph([1, 2, 3])
+    expected.add_edge(1, 1)
+    expected.add_edge(1, 1)
+    assert edges_equal(actual.edges, expected.edges)
+
+
+def test_undirected_edge_contraction():
+    """Tests for edge contraction in an undirected graph."""
+    G = nx.cycle_graph(4)
+    actual = nx.contracted_edge(G, (0, 1))
+    expected = nx.complete_graph(3)
+    expected.add_edge(0, 0)
+    assert nx.is_isomorphic(actual, expected)
+
+
+def test_multigraph_edge_contraction():
+    """Tests for edge contraction in a multigraph."""
+    G = nx.cycle_graph(4)
+    actual = nx.contracted_edge(G, (0, 1, 0))
+    expected = nx.complete_graph(3)
+    expected.add_edge(0, 0)
+    assert nx.is_isomorphic(actual, expected)
+
+
+def test_nonexistent_edge():
+    """Tests that attempting to contract a nonexistent edge raises an
+    exception.
+ + """ + with pytest.raises(ValueError): + G = nx.cycle_graph(4) + nx.contracted_edge(G, (0, 2)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/mis.py b/MLPY/Lib/site-packages/networkx/algorithms/mis.py new file mode 100644 index 0000000000000000000000000000000000000000..00d101c52887b01f35e43043431ab749b01f6867 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/mis.py @@ -0,0 +1,77 @@ +""" +Algorithm to find a maximal (not maximum) independent set. + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["maximal_independent_set"] + + +@not_implemented_for("directed") +@py_random_state(2) +@nx._dispatch +def maximal_independent_set(G, nodes=None, seed=None): + """Returns a random maximal independent set guaranteed to contain + a given set of nodes. + + An independent set is a set of nodes such that the subgraph + of G induced by these nodes contains no edges. A maximal + independent set is an independent set such that it is not possible + to add a new node and still get an independent set. + + Parameters + ---------- + G : NetworkX graph + + nodes : list or iterable + Nodes that must be part of the independent set. This set of nodes + must be independent. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + indep_nodes : list + List of nodes that are part of a maximal independent set. + + Raises + ------ + NetworkXUnfeasible + If the nodes in the provided list are not part of the graph or + do not form an independent set, an exception is raised. + + NetworkXNotImplemented + If `G` is directed. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.maximal_independent_set(G) # doctest: +SKIP + [4, 0, 2] + >>> nx.maximal_independent_set(G, [1]) # doctest: +SKIP + [1, 3] + + Notes + ----- + This algorithm does not solve the maximum independent set problem. + + """ + if not nodes: + nodes = {seed.choice(list(G))} + else: + nodes = set(nodes) + if not nodes.issubset(G): + raise nx.NetworkXUnfeasible(f"{nodes} is not a subset of the nodes of G") + neighbors = set.union(*[set(G.adj[v]) for v in nodes]) + if set.intersection(neighbors, nodes): + raise nx.NetworkXUnfeasible(f"{nodes} is not an independent set of G") + indep_nodes = list(nodes) + available_nodes = set(G.nodes()).difference(neighbors.union(nodes)) + while available_nodes: + node = seed.choice(list(available_nodes)) + indep_nodes.append(node) + available_nodes.difference_update(list(G.adj[node]) + [node]) + return indep_nodes diff --git a/MLPY/Lib/site-packages/networkx/algorithms/moral.py b/MLPY/Lib/site-packages/networkx/algorithms/moral.py new file mode 100644 index 0000000000000000000000000000000000000000..af187259251111f8ea4332412af36e06380a8df8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/moral.py @@ -0,0 +1,59 @@ +r"""Function for computing the moral graph of a directed graph.""" + +import itertools + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["moral_graph"] + + +@not_implemented_for("undirected") +@nx._dispatch +def moral_graph(G): + r"""Return the Moral Graph + + Returns the moralized graph of a given directed graph. + + Parameters + ---------- + G : NetworkX graph + Directed graph + + Returns + ------- + H : NetworkX graph + The undirected moralized graph of G + + Raises + ------ + NetworkXNotImplemented + If `G` is undirected. 
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (2, 5), (3, 4), (4, 3)]) + >>> G_moral = nx.moral_graph(G) + >>> G_moral.edges() + EdgeView([(1, 2), (2, 3), (2, 5), (2, 4), (3, 4)]) + + Notes + ----- + A moral graph is an undirected graph H = (V, E) generated from a + directed Graph, where if a node has more than one parent node, edges + between these parent nodes are inserted and all directed edges become + undirected. + + https://en.wikipedia.org/wiki/Moral_graph + + References + ---------- + .. [1] Wray L. Buntine. 1995. Chain graphs for learning. + In Proceedings of the Eleventh conference on Uncertainty + in artificial intelligence (UAI'95) + """ + H = G.to_undirected() + for preds in G.pred.values(): + predecessors_combinations = itertools.combinations(preds, r=2) + H.add_edges_from(predecessors_combinations) + return H diff --git a/MLPY/Lib/site-packages/networkx/algorithms/node_classification.py b/MLPY/Lib/site-packages/networkx/algorithms/node_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b46545abbfcc2b0b99b91c56f00c415b70f53b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/node_classification.py @@ -0,0 +1,218 @@ +""" This module provides the functions for node classification problem. + +The functions in this module are not imported +into the top level `networkx` namespace. +You can access these functions by importing +the `networkx.algorithms.node_classification` modules, +then accessing the functions as attributes of `node_classification`. +For example: + + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> node_classification.harmonic_function(G) + ['A', 'A', 'B', 'B'] + +References +---------- +Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August). +Semi-supervised learning using gaussian fields and harmonic functions. +In ICML (Vol. 3, pp. 912-919). +""" +import networkx as nx + +__all__ = ["harmonic_function", "local_and_global_consistency"] + + +@nx.utils.not_implemented_for("directed") +@nx._dispatch(node_attrs="label_name") +def harmonic_function(G, max_iter=30, label_name="label"): + """Node classification by Harmonic function + + Function for computing Harmonic function algorithm by Zhu et al. + + Parameters + ---------- + G : NetworkX Graph + max_iter : int + maximum number of iterations allowed + label_name : string + name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. + + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.harmonic_function(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhu, X., Ghahramani, Z., & Lafferty, J. (2003, August). + Semi-supervised learning using gaussian fields and harmonic functions. + In ICML (Vol. 3, pp. 912-919). 
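+
+    In outline, the implementation below builds a degree-normalized
+    propagation matrix ``P`` (with the rows of labeled nodes zeroed out, so
+    labeled nodes keep their labels) and a base matrix ``B`` encoding the
+    known labels, then iterates ``F = (P @ F) + B`` for ``max_iter`` steps;
+    each node finally receives the label with the highest score in ``F``.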
+ """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." + ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D = sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0)) + P = (D @ X).tolil() + P[labels[:, 0]] = 0 # labels[:, 0] indicates IDs of labeled nodes + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() + + +@nx.utils.not_implemented_for("directed") +@nx._dispatch(node_attrs="label_name") +def local_and_global_consistency(G, alpha=0.99, max_iter=30, label_name="label"): + """Node classification by Local and Global Consistency + + Function for computing Local and global consistency algorithm by Zhou et al. + + Parameters + ---------- + G : NetworkX Graph + alpha : float + Clamping factor + max_iter : int + Maximum number of iterations allowed + label_name : string + Name of target labels to predict + + Returns + ------- + predicted : list + List of length ``len(G)`` with the predicted labels for each node. + + Raises + ------ + NetworkXError + If no nodes in `G` have attribute `label_name`. + + Examples + -------- + >>> from networkx.algorithms import node_classification + >>> G = nx.path_graph(4) + >>> G.nodes[0]["label"] = "A" + >>> G.nodes[3]["label"] = "B" + >>> G.nodes(data=True) + NodeDataView({0: {'label': 'A'}, 1: {}, 2: {}, 3: {'label': 'B'}}) + >>> G.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + >>> predicted = node_classification.local_and_global_consistency(G) + >>> predicted + ['A', 'A', 'B', 'B'] + + References + ---------- + Zhou, D., Bousquet, O., Lal, T. N., Weston, J., & Schölkopf, B. (2004). + Learning with local and global consistency. + Advances in neural information processing systems, 16(16), 321-328. + """ + import numpy as np + import scipy as sp + + X = nx.to_scipy_sparse_array(G) # adjacency matrix + labels, label_dict = _get_label_info(G, label_name) + + if labels.shape[0] == 0: + raise nx.NetworkXError( + f"No node on the input graph is labeled by '{label_name}'." 
+ ) + + n_samples = X.shape[0] + n_classes = label_dict.shape[0] + F = np.zeros((n_samples, n_classes)) + + # Build propagation matrix + degrees = X.sum(axis=0) + degrees[degrees == 0] = 1 # Avoid division by 0 + # TODO: csr_array + D2 = np.sqrt(sp.sparse.csr_array(sp.sparse.diags((1.0 / degrees), offsets=0))) + P = alpha * ((D2 @ X) @ D2) + # Build base matrix + B = np.zeros((n_samples, n_classes)) + B[labels[:, 0], labels[:, 1]] = 1 - alpha + + for _ in range(max_iter): + F = (P @ F) + B + + return label_dict[np.argmax(F, axis=1)].tolist() + + +def _get_label_info(G, label_name): + """Get and return information of labels from the input graph + + Parameters + ---------- + G : Network X graph + label_name : string + Name of the target label + + Returns + ------- + labels : numpy array, shape = [n_labeled_samples, 2] + Array of pairs of labeled node ID and label ID + label_dict : numpy array, shape = [n_classes] + Array of labels + i-th element contains the label corresponding label ID `i` + """ + import numpy as np + + labels = [] + label_to_id = {} + lid = 0 + for i, n in enumerate(G.nodes(data=True)): + if label_name in n[1]: + label = n[1][label_name] + if label not in label_to_id: + label_to_id[label] = lid + lid += 1 + labels.append([i, label_to_id[label]]) + labels = np.array(labels) + label_dict = np.array( + [label for label, _ in sorted(label_to_id.items(), key=lambda x: x[1])] + ) + return (labels, label_dict) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/non_randomness.py b/MLPY/Lib/site-packages/networkx/algorithms/non_randomness.py new file mode 100644 index 0000000000000000000000000000000000000000..777cecbc96948b2336cb06393345de2c9a4a0dad --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/non_randomness.py @@ -0,0 +1,96 @@ +r""" Computation of graph non-randomness +""" + +import math + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["non_randomness"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def non_randomness(G, k=None, weight="weight"): + """Compute the non-randomness of graph G. + + The first returned value nr is the sum of non-randomness values of all + edges within the graph (where the non-randomness of an edge tends to be + small when the two nodes linked by that edge are from two different + communities). + + The second computed value nr_rd is a relative measure that indicates + to what extent graph G is different from random graphs in terms + of probability. When it is close to 0, the graph tends to be more + likely generated by an Erdos Renyi model. + + Parameters + ---------- + G : NetworkX graph + Graph must be symmetric, connected, and without self-loops. + + k : int + The number of communities in G. + If k is not set, the function will use a default community + detection algorithm to set it. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1, i.e., the graph is + binary. + + Returns + ------- + non-randomness : (float, float) tuple + Non-randomness, Relative non-randomness w.r.t. + Erdos Renyi random graphs. + + Raises + ------ + NetworkXException + if the input graph is not connected. + NetworkXError + if the input graph contains self-loops. 
+ + Examples + -------- + >>> G = nx.karate_club_graph() + >>> nr, nr_rd = nx.non_randomness(G, 2) + >>> nr, nr_rd = nx.non_randomness(G, 2, 'weight') + + Notes + ----- + This computes Eq. (4.4) and (4.5) in Ref. [1]_. + + If a weight field is passed, this algorithm will use the eigenvalues + of the weighted adjacency matrix to compute Eq. (4.4) and (4.5). + + References + ---------- + .. [1] Xiaowei Ying and Xintao Wu, + On Randomness Measures for Social Networks, + SIAM International Conference on Data Mining. 2009 + """ + import numpy as np + + if not nx.is_connected(G): + raise nx.NetworkXException("Non connected graph.") + if len(list(nx.selfloop_edges(G))) > 0: + raise nx.NetworkXError("Graph must not contain self-loops") + + if k is None: + k = len(tuple(nx.community.label_propagation_communities(G))) + + # eq. 4.4 + eigenvalues = np.linalg.eigvals(nx.to_numpy_array(G, weight=weight)) + nr = np.real(np.sum(eigenvalues[:k])) + + n = G.number_of_nodes() + m = G.number_of_edges() + p = (2 * k * m) / (n * (n - k)) + + # eq. 4.5 + nr_rd = (nr - ((n - 2 * k) * p + k)) / math.sqrt(2 * k * p * (1 - p)) + + return nr, nr_rd diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebc6ab9998db144234c2601c24861b2c48fa339 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/__init__.py @@ -0,0 +1,4 @@ +from networkx.algorithms.operators.all import * +from networkx.algorithms.operators.binary import * +from networkx.algorithms.operators.product import * +from networkx.algorithms.operators.unary import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47b806d0a3a1bd7d8048bf0b6a6883d4313ff48b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb22a472bf73e6db99bfad7f6d8204d0553d06e8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/all.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..293435eb94424b83a6f986c785edefeeee1d8bcc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/binary.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64b2ebee9f6e62f791add2ca58ca09fac35930fc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/product.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1929148220b31b72395141c4a2278889e4fac034
Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/__pycache__/unary.cpython-39.pyc differ
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/all.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/all.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9498c0a34d712e2a06531219ada75e603677d82
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/all.py
@@ -0,0 +1,319 @@
+"""Operations on many graphs.
+"""
+from itertools import chain, repeat
+
+import networkx as nx
+
+__all__ = ["union_all", "compose_all", "disjoint_union_all", "intersection_all"]
+
+
+@nx._dispatch(graphs="[graphs]", preserve_all_attrs=True)
+def union_all(graphs, rename=()):
+    """Returns the union of all graphs.
+
+    The graphs must be disjoint, otherwise an exception is raised.
+
+    Parameters
+    ----------
+    graphs : iterable
+        Iterable of NetworkX graphs
+
+    rename : iterable , optional
+        Node names of graphs can be changed by specifying the tuple
+        rename=('G-','H-') (for example). Node "u" in G is then renamed
+        "G-u" and "v" in H is renamed "H-v". Infinite generators (like itertools.count)
+        are also supported.
+
+    Returns
+    -------
+    U : a graph with the same type as the first graph in the list
+
+    Raises
+    ------
+    ValueError
+        If `graphs` is an empty list.
+
+    NetworkXError
+        In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs.
+
+    Notes
+    -----
+    For operating on mixed type graphs, they should be converted to the same type.
+    >>> G = nx.Graph()
+    >>> H = nx.DiGraph()
+    >>> GH = union_all([nx.DiGraph(G), H])
+
+    To force a disjoint union with node relabeling, use
+    disjoint_union_all([G, H]) or convert_node_labels_to_integers().
+
+    Graph, edge, and node attributes are propagated to the union graph.
+    If a graph attribute is present in multiple graphs, then the value
+    from the last graph in the list with that attribute is used.
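+
+    Overlapping node names can be made disjoint with `rename`; a small
+    sketch of the relabeling described above:
+
+    >>> G1 = nx.Graph([(1, 2)])
+    >>> G2 = nx.Graph([(1, 2)])
+    >>> U = nx.union_all([G1, G2], rename=("a-", "b-"))
+    >>> sorted(U.nodes())
+    ['a-1', 'a-2', 'b-1', 'b-2']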
+ + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(4, 5), (5, 6)]) + >>> result_graph = nx.union_all([G1, G2]) + >>> result_graph.nodes() + NodeView((1, 2, 3, 4, 5, 6)) + >>> result_graph.edges() + EdgeView([(1, 2), (2, 3), (4, 5), (5, 6)]) + + See Also + -------- + union + disjoint_union_all + """ + R = None + seen_nodes = set() + + # rename graph to obtain disjoint node labels + def add_prefix(graph, prefix): + if prefix is None: + return graph + + def label(x): + return f"{prefix}{x}" + + return nx.relabel_nodes(graph, label) + + rename = chain(rename, repeat(None)) + graphs = (add_prefix(G, name) for G, name in zip(graphs, rename)) + + for i, G in enumerate(graphs): + G_nodes_set = set(G.nodes) + if i == 0: + # Union is the same type as first graph + R = G.__class__() + elif G.is_directed() != R.is_directed(): + raise nx.NetworkXError("All graphs must be directed or undirected.") + elif G.is_multigraph() != R.is_multigraph(): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + elif not seen_nodes.isdisjoint(G_nodes_set): + raise nx.NetworkXError( + "The node sets of the graphs are not disjoint.", + "Use appropriate rename" + "=(G1prefix,G2prefix,...,GNprefix)" + "or use disjoint_union(G1,G2,...,GN).", + ) + + seen_nodes |= G_nodes_set + R.graph.update(G.graph) + R.add_nodes_from(G.nodes(data=True)) + R.add_edges_from( + G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True) + ) + + if R is None: + raise ValueError("cannot apply union_all to an empty list") + + return R + + +@nx._dispatch(graphs="[graphs]", preserve_all_attrs=True) +def disjoint_union_all(graphs): + """Returns the disjoint union of all graphs. + + This operation forces distinct integer node labels starting with 0 + for the first graph in the list and numbering consecutively. + + Parameters + ---------- + graphs : iterable + Iterable of NetworkX graphs + + Returns + ------- + U : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. + + NetworkXError + In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs. + + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(4, 5), (5, 6)]) + >>> U = nx.disjoint_union_all([G1, G2]) + >>> list(U.nodes()) + [0, 1, 2, 3, 4, 5] + >>> list(U.edges()) + [(0, 1), (1, 2), (3, 4), (4, 5)] + + Notes + ----- + For operating on mixed type graphs, they should be converted to the same type. + + Graph, edge, and node attributes are propagated to the union graph. + If a graph attribute is present in multiple graphs, then the value + from the last graph in the list with that attribute is used. + """ + + def yield_relabeled(graphs): + first_label = 0 + for G in graphs: + yield nx.convert_node_labels_to_integers(G, first_label=first_label) + first_label += len(G) + + R = union_all(yield_relabeled(graphs)) + + return R + + +@nx._dispatch(graphs="[graphs]", preserve_all_attrs=True) +def compose_all(graphs): + """Returns the composition of all graphs. + + Composition is the simple union of the node sets and edge sets. + The node sets of the supplied graphs need not be disjoint. + + Parameters + ---------- + graphs : iterable + Iterable of NetworkX graphs + + Returns + ------- + C : A graph with the same type as the first graph in list + + Raises + ------ + ValueError + If `graphs` is an empty list. 
+
+    NetworkXError
+        In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs.
+
+    Examples
+    --------
+    >>> G1 = nx.Graph([(1, 2), (2, 3)])
+    >>> G2 = nx.Graph([(3, 4), (5, 6)])
+    >>> C = nx.compose_all([G1, G2])
+    >>> list(C.nodes())
+    [1, 2, 3, 4, 5, 6]
+    >>> list(C.edges())
+    [(1, 2), (2, 3), (3, 4), (5, 6)]
+
+    Notes
+    -----
+    For operating on mixed type graphs, they should be converted to the same type.
+
+    Graph, edge, and node attributes are propagated to the union graph.
+    If a graph attribute is present in multiple graphs, then the value
+    from the last graph in the list with that attribute is used.
+    """
+    R = None
+
+    # add graph attributes, H attributes take precedence over G attributes
+    for i, G in enumerate(graphs):
+        if i == 0:
+            # create new graph
+            R = G.__class__()
+        elif G.is_directed() != R.is_directed():
+            raise nx.NetworkXError("All graphs must be directed or undirected.")
+        elif G.is_multigraph() != R.is_multigraph():
+            raise nx.NetworkXError("All graphs must be graphs or multigraphs.")
+
+        R.graph.update(G.graph)
+        R.add_nodes_from(G.nodes(data=True))
+        R.add_edges_from(
+            G.edges(keys=True, data=True) if G.is_multigraph() else G.edges(data=True)
+        )
+
+    if R is None:
+        raise ValueError("cannot apply compose_all to an empty list")
+
+    return R
+
+
+@nx._dispatch(graphs="[graphs]")
+def intersection_all(graphs):
+    """Returns a new graph that contains only the nodes and the edges that exist in
+    all graphs.
+
+    Parameters
+    ----------
+    graphs : iterable
+        Iterable of NetworkX graphs
+
+    Returns
+    -------
+    R : A new graph with the same type as the first graph in the list
+
+    Raises
+    ------
+    ValueError
+        If `graphs` is an empty list.
+
+    NetworkXError
+        In case of mixed type graphs, like MultiGraph and Graph, or directed and undirected graphs.
+
+    Notes
+    -----
+    For operating on mixed type graphs, they should be converted to the same type.
+
+    Attributes from the graph, nodes, and edges are not copied to the new
+    graph.
+
+    The resulting graph can be updated with attributes if desired. For
+    example, the code below adds, for each node, the minimum value of the
+    ``capacity`` attribute across all graphs as a new attribute.
+ >>> g = nx.Graph() + >>> g.add_node(0, capacity=4) + >>> g.add_node(1, capacity=3) + >>> g.add_edge(0, 1) + + >>> h = g.copy() + >>> h.nodes[0]["capacity"] = 2 + + >>> gh = nx.intersection_all([g, h]) + + >>> new_node_attr = {n: min(*(anyG.nodes[n].get('capacity', float('inf')) for anyG in [g, h])) for n in gh} + >>> nx.set_node_attributes(gh, new_node_attr, 'new_capacity') + >>> gh.nodes(data=True) + NodeDataView({0: {'new_capacity': 2}, 1: {'new_capacity': 3}}) + + Examples + -------- + >>> G1 = nx.Graph([(1, 2), (2, 3)]) + >>> G2 = nx.Graph([(2, 3), (3, 4)]) + >>> R = nx.intersection_all([G1, G2]) + >>> list(R.nodes()) + [2, 3] + >>> list(R.edges()) + [(2, 3)] + + """ + R = None + + for i, G in enumerate(graphs): + G_nodes_set = set(G.nodes) + G_edges_set = set(G.edges) + if not G.is_directed(): + if G.is_multigraph(): + G_edges_set.update((v, u, k) for u, v, k in list(G_edges_set)) + else: + G_edges_set.update((v, u) for u, v in list(G_edges_set)) + if i == 0: + # create new graph + R = G.__class__() + node_intersection = G_nodes_set + edge_intersection = G_edges_set + elif G.is_directed() != R.is_directed(): + raise nx.NetworkXError("All graphs must be directed or undirected.") + elif G.is_multigraph() != R.is_multigraph(): + raise nx.NetworkXError("All graphs must be graphs or multigraphs.") + else: + node_intersection &= G_nodes_set + edge_intersection &= G_edges_set + + if R is None: + raise ValueError("cannot apply intersection_all to an empty list") + + R.add_nodes_from(node_intersection) + R.add_edges_from(edge_intersection) + + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/binary.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..1979e80b68aa246d919688146a91dcbe5ab84b90 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/binary.py @@ -0,0 +1,444 @@ +""" +Operations on graphs including union, intersection, difference. +""" +import networkx as nx + +__all__ = [ + "union", + "compose", + "disjoint_union", + "intersection", + "difference", + "symmetric_difference", + "full_join", +] +_G_H = {"G": 0, "H": 1} + + +@nx._dispatch(graphs=_G_H, preserve_all_attrs=True) +def union(G, H, rename=()): + """Combine graphs G and H. The names of nodes must be unique. + + A name collision between the graphs will raise an exception. + + A renaming facility is provided to avoid name collisions. + + + Parameters + ---------- + G, H : graph + A NetworkX graph + + rename : iterable , optional + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : A union graph with the same type as G. + + See Also + -------- + compose + :func:`~networkx.Graph.update` + disjoint_union + + Notes + ----- + To combine graphs that have common nodes, consider compose(G, H) + or the method, Graph.update(). + + disjoint_union() is similar to union() except that it avoids name clashes + by relabeling the nodes with sequential integers. + + Edge and node attributes are propagated from G and H to the union graph. + Graph attributes are also propagated, but if they are present in both G and H, + then the value from H is used. 
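+
+    Graphs that share node names raise an exception unless `rename` is
+    given; sketched below (exact message abbreviated):
+
+    >>> nx.union(nx.path_graph(2), nx.path_graph(3))  # doctest: +SKIP
+    Traceback (most recent call last):
+    ...
+    networkx.exception.NetworkXError: The node sets of the graphs are not disjoint. ...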
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 1), (0, 3), (1, 3), (1, 2)]) + >>> U = nx.union(G, H, rename=("G", "H")) + >>> U.nodes + NodeView(('G0', 'G1', 'G2', 'H0', 'H1', 'H3', 'H2')) + >>> U.edges + EdgeView([('G0', 'G1'), ('G0', 'G2'), ('G1', 'G2'), ('H0', 'H1'), ('H0', 'H3'), ('H1', 'H3'), ('H1', 'H2')]) + + + """ + return nx.union_all([G, H], rename) + + +@nx._dispatch(graphs=_G_H, preserve_all_attrs=True) +def disjoint_union(G, H): + """Combine graphs G and H. The nodes are assumed to be unique (disjoint). + + This algorithm automatically relabels nodes to avoid name collisions. + + Parameters + ---------- + G,H : graph + A NetworkX graph + + Returns + ------- + U : A union graph with the same type as G. + + See Also + -------- + union + compose + :func:`~networkx.Graph.update` + + Notes + ----- + A new graph is created, of the same class as G. It is recommended + that G and H be either both directed or both undirected. + + The nodes of G are relabeled 0 to len(G)-1, and the nodes of H are + relabeled len(G) to len(G)+len(H)-1. + + Renumbering forces G and H to be disjoint, so no exception is ever raised for a name collision. + To preserve the check for common nodes, use union(). + + Edge and node attributes are propagated from G and H to the union graph. + Graph attributes are also propagated, but if they are present in both G and H, + then the value from H is used. + + To combine graphs that have common nodes, consider compose(G, H) + or the method, Graph.update(). + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> G.nodes[0]["key1"] = 5 + >>> H.nodes[0]["key2"] = 10 + >>> U = nx.disjoint_union(G, H) + >>> U.nodes(data=True) + NodeDataView({0: {'key1': 5}, 1: {}, 2: {}, 3: {'key2': 10}, 4: {}, 5: {}, 6: {}}) + >>> U.edges + EdgeView([(0, 1), (0, 2), (1, 2), (3, 4), (4, 6), (5, 6)]) + """ + return nx.disjoint_union_all([G, H]) + + +@nx._dispatch(graphs=_G_H) +def intersection(G, H): + """Returns a new graph that contains only the nodes and the edges that exist in + both G and H. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H can have different node sets but must be both graphs or both multigraphs. + + Raises + ------ + NetworkXError + If one is a MultiGraph and the other one is a graph. + + Returns + ------- + GH : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. If you want a new graph of the intersection of G and H + with the attributes (including edge data) from G use remove_nodes_from() + as follows + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n not in H) + >>> R.remove_edges_from(e for e in G.edges if e not in H.edges) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2)]) + >>> H = nx.Graph([(0, 3), (1, 2), (2, 3)]) + >>> R = nx.intersection(G, H) + >>> R.nodes + NodeView((0, 1, 2)) + >>> R.edges + EdgeView([(1, 2)]) + """ + return nx.intersection_all([G, H]) + + +@nx._dispatch(graphs=_G_H) +def difference(G, H): + """Returns a new graph that contains the edges that exist in G but not in H. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. 
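+
+    Raises
+    ------
+    NetworkXError
+        If `G` and `H` are not both graphs or both multigraphs, or if
+        their node sets are not equal.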
+ + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. If you want a new graph of the difference of G and H with + the attributes (including edge data) from G use remove_nodes_from() + as follows: + + >>> G = nx.path_graph(3) + >>> H = nx.path_graph(5) + >>> R = G.copy() + >>> R.remove_nodes_from(n for n in G if n in H) + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)]) + >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)]) + >>> R = nx.difference(G, H) + >>> R.nodes + NodeView((0, 1, 2, 3)) + >>> R.edges + EdgeView([(0, 2), (1, 3)]) + """ + # create new graph + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError("G and H must both be graphs or multigraphs.") + R = nx.create_empty_copy(G) + + if set(G) != set(H): + raise nx.NetworkXError("Node sets of graphs not equal") + + if G.is_multigraph(): + edges = G.edges(keys=True) + else: + edges = G.edges() + for e in edges: + if not H.has_edge(*e): + R.add_edge(*e) + return R + + +@nx._dispatch(graphs=_G_H) +def symmetric_difference(G, H): + """Returns new graph with edges that exist in either G or H but not both. + + The node sets of H and G must be the same. + + Parameters + ---------- + G,H : graph + A NetworkX graph. G and H must have the same node sets. + + Returns + ------- + D : A new graph with the same type as G. + + Notes + ----- + Attributes from the graph, nodes, and edges are not copied to the new + graph. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3)]) + >>> H = nx.Graph([(0, 1), (1, 2), (0, 3)]) + >>> R = nx.symmetric_difference(G, H) + >>> R.nodes + NodeView((0, 1, 2, 3)) + >>> R.edges + EdgeView([(0, 2), (0, 3), (1, 3)]) + """ + # create new graph + if not G.is_multigraph() == H.is_multigraph(): + raise nx.NetworkXError("G and H must both be graphs or multigraphs.") + R = nx.create_empty_copy(G) + + if set(G) != set(H): + raise nx.NetworkXError("Node sets of graphs not equal") + + gnodes = set(G) # set of nodes in G + hnodes = set(H) # set of nodes in H + nodes = gnodes.symmetric_difference(hnodes) + R.add_nodes_from(nodes) + + if G.is_multigraph(): + edges = G.edges(keys=True) + else: + edges = G.edges() + # we could copy the data here but then this function doesn't + # match intersection and difference + for e in edges: + if not H.has_edge(*e): + R.add_edge(*e) + + if H.is_multigraph(): + edges = H.edges(keys=True) + else: + edges = H.edges() + for e in edges: + if not G.has_edge(*e): + R.add_edge(*e) + return R + + +@nx._dispatch(graphs=_G_H, preserve_all_attrs=True) +def compose(G, H): + """Compose graph G with H by combining nodes and edges into a single graph. + + The node sets and edges sets do not need to be disjoint. + + Composing preserves the attributes of nodes and edges. + Attribute values from H take precedent over attribute values from G. + + Parameters + ---------- + G, H : graph + A NetworkX graph + + Returns + ------- + C: A new graph with the same type as G + + See Also + -------- + :func:`~networkx.Graph.update` + union + disjoint_union + + Notes + ----- + It is recommended that G and H be either both directed or both undirected. + + For MultiGraphs, the edges are identified by incident nodes AND edge-key. + This can cause surprises (i.e., edge `(1, 2)` may or may not be the same + in two graphs) if you use MultiGraph without keeping track of edge keys. + + If combining the attributes of common nodes is not desired, consider union(), + which raises an exception for name collisions. 
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> H = nx.Graph([(0, 1), (1, 2)]) + >>> R = nx.compose(G, H) + >>> R.nodes + NodeView((0, 1, 2)) + >>> R.edges + EdgeView([(0, 1), (0, 2), (1, 2)]) + + By default, the attributes from `H` take precedent over attributes from `G`. + If you prefer another way of combining attributes, you can update them after the compose operation: + + >>> G = nx.Graph([(0, 1, {'weight': 2.0}), (3, 0, {'weight': 100.0})]) + >>> H = nx.Graph([(0, 1, {'weight': 10.0}), (1, 2, {'weight': -1.0})]) + >>> nx.set_node_attributes(G, {0: 'dark', 1: 'light', 3: 'black'}, name='color') + >>> nx.set_node_attributes(H, {0: 'green', 1: 'orange', 2: 'yellow'}, name='color') + >>> GcomposeH = nx.compose(G, H) + + Normally, color attribute values of nodes of GcomposeH come from H. We can workaround this as follows: + + >>> node_data = {n: G.nodes[n]['color'] + " " + H.nodes[n]['color'] for n in G.nodes & H.nodes} + >>> nx.set_node_attributes(GcomposeH, node_data, 'color') + >>> print(GcomposeH.nodes[0]['color']) + dark green + + >>> print(GcomposeH.nodes[3]['color']) + black + + Similarly, we can update edge attributes after the compose operation in a way we prefer: + + >>> edge_data = {e: G.edges[e]['weight'] * H.edges[e]['weight'] for e in G.edges & H.edges} + >>> nx.set_edge_attributes(GcomposeH, edge_data, 'weight') + >>> print(GcomposeH.edges[(0, 1)]['weight']) + 20.0 + + >>> print(GcomposeH.edges[(3, 0)]['weight']) + 100.0 + """ + return nx.compose_all([G, H]) + + +@nx._dispatch(graphs=_G_H, preserve_all_attrs=True) +def full_join(G, H, rename=(None, None)): + """Returns the full join of graphs G and H. + + Full join is the union of G and H in which all edges between + G and H are added. + The node sets of G and H must be disjoint, + otherwise an exception is raised. + + Parameters + ---------- + G, H : graph + A NetworkX graph + + rename : tuple , default=(None, None) + Node names of G and H can be changed by specifying the tuple + rename=('G-','H-') (for example). Node "u" in G is then renamed + "G-u" and "v" in H is renamed "H-v". + + Returns + ------- + U : The full join graph with the same type as G. + + Notes + ----- + It is recommended that G and H be either both directed or both undirected. + + If G is directed, then edges from G to H are added as well as from H to G. + + Note that full_join() does not produce parallel edges for MultiGraphs. + + The full join operation of graphs G and H is the same as getting + their complement, performing a disjoint union, and finally getting + the complement of the resulting graph. + + Graph, edge, and node attributes are propagated from G and H + to the union graph. If a graph attribute is present in both + G and H the value from H is used. 
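+
+    This identity can be checked on a small example:
+
+    >>> G = nx.Graph([(0, 1)])
+    >>> H = nx.Graph([(2, 3)])
+    >>> F = nx.full_join(G, H)
+    >>> C = nx.complement(nx.disjoint_union(nx.complement(G), nx.complement(H)))
+    >>> nx.is_isomorphic(F, C)
+    True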
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> H = nx.Graph([(3, 4)]) + >>> R = nx.full_join(G, H, rename=("G", "H")) + >>> R.nodes + NodeView(('G0', 'G1', 'G2', 'H3', 'H4')) + >>> R.edges + EdgeView([('G0', 'G1'), ('G0', 'G2'), ('G0', 'H3'), ('G0', 'H4'), ('G1', 'H3'), ('G1', 'H4'), ('G2', 'H3'), ('G2', 'H4'), ('H3', 'H4')]) + + See Also + -------- + union + disjoint_union + """ + R = union(G, H, rename) + + def add_prefix(graph, prefix): + if prefix is None: + return graph + + def label(x): + return f"{prefix}{x}" + + return nx.relabel_nodes(graph, label) + + G = add_prefix(G, rename[0]) + H = add_prefix(H, rename[1]) + + for i in G: + for j in H: + R.add_edge(i, j) + if R.is_directed(): + for i in H: + for j in G: + R.add_edge(i, j) + + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/product.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/product.py new file mode 100644 index 0000000000000000000000000000000000000000..639cec3908560cbdf7993541025d0567e16e48ae --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/product.py @@ -0,0 +1,534 @@ +""" +Graph products. +""" +from itertools import product + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "tensor_product", + "cartesian_product", + "lexicographic_product", + "strong_product", + "power", + "rooted_product", + "corona_product", +] +_G_H = {"G": 0, "H": 1} + + +def _dict_product(d1, d2): + return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)} + + +# Generators for producing graph products +def _node_product(G, H): + for u, v in product(G, H): + yield ((u, v), _dict_product(G.nodes[u], H.nodes[v])) + + +def _directed_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (u, x), (v, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (u, x), (v, y), (j, k), _dict_product(c, d) + + +def _undirected_edges_cross_edges(G, H): + if not G.is_multigraph() and not H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), _dict_product(c, d) + if not G.is_multigraph() and H.is_multigraph(): + for u, v, c in G.edges(data=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and not H.is_multigraph(): + for u, v, k, c in G.edges(data=True, keys=True): + for x, y, d in H.edges(data=True): + yield (v, x), (u, y), k, _dict_product(c, d) + if G.is_multigraph() and H.is_multigraph(): + for u, v, j, c in G.edges(data=True, keys=True): + for x, y, k, d in H.edges(data=True, keys=True): + yield (v, x), (u, y), (j, k), _dict_product(c, d) + + +def _edges_cross_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + yield (u, x), (v, x), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + if H.is_multigraph(): + yield (u, x), (v, x), None, d + else: + 
yield (u, x), (v, x), d + + +def _nodes_cross_edges(G, H): + if H.is_multigraph(): + for x in G: + for u, v, k, d in H.edges(data=True, keys=True): + yield (x, u), (x, v), k, d + else: + for x in G: + for u, v, d in H.edges(data=True): + if G.is_multigraph(): + yield (x, u), (x, v), None, d + else: + yield (x, u), (x, v), d + + +def _edges_cross_nodes_and_nodes(G, H): + if G.is_multigraph(): + for u, v, k, d in G.edges(data=True, keys=True): + for x in H: + for y in H: + yield (u, x), (v, y), k, d + else: + for u, v, d in G.edges(data=True): + for x in H: + for y in H: + if H.is_multigraph(): + yield (u, x), (v, y), None, d + else: + yield (u, x), (v, y), d + + +def _init_product_graph(G, H): + if G.is_directed() != H.is_directed(): + msg = "G and H must be both directed or both undirected" + raise nx.NetworkXError(msg) + if G.is_multigraph() or H.is_multigraph(): + GH = nx.MultiGraph() + else: + GH = nx.Graph() + if G.is_directed(): + GH = GH.to_directed() + return GH + + +@nx._dispatch(graphs=_G_H) +def tensor_product(G, H): + r"""Returns the tensor product of G and H. + + The tensor product $P$ of the graphs $G$ and $H$ has a node set that + is the tensor product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if $(u,x)$ is an edge in $G$ + and $(v,y)$ is an edge in $H$. + + Tensor product is sometimes also referred to as the categorical product, + direct product, cardinal product or conjunction. + + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The tensor product of G and H. P will be a multi-graph if either G + or H is a multi-graph, will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. + + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.tensor_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_directed_edges_cross_edges(G, H)) + if not GH.is_directed(): + GH.add_edges_from(_undirected_edges_cross_edges(G, H)) + return GH + + +@nx._dispatch(graphs=_G_H) +def cartesian_product(G, H): + r"""Returns the Cartesian product of G and H. + + The Cartesian product $P$ of the graphs $G$ and $H$ has a node set that + is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v),(x,y))$ if and only if either $u$ is equal to $x$ + and both $v$ and $y$ are adjacent in $H$ or if $v$ is equal to $y$ and + both $u$ and $x$ are adjacent in $G$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. 
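+
+    For a less trivial case than the example below, the Cartesian product
+    of two 2-node path graphs is a 4-cycle:
+
+    >>> P2 = nx.path_graph(2)
+    >>> nx.is_isomorphic(nx.cartesian_product(P2, P2), nx.cycle_graph(4))
+    True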
+ + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.cartesian_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_edges_cross_nodes(G, H)) + GH.add_edges_from(_nodes_cross_edges(G, H)) + return GH + + +@nx._dispatch(graphs=_G_H) +def lexicographic_product(G, H): + r"""Returns the lexicographic product of G and H. + + The lexicographical product $P$ of the graphs $G$ and $H$ has a node set + that is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if $(u,v)$ is an edge in $G$ + or $u==v$ and $(x,y)$ is an edge in $H$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. + + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.lexicographic_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + # Edges in G regardless of H designation + GH.add_edges_from(_edges_cross_nodes_and_nodes(G, H)) + # For each x in G, only if there is an edge in H + GH.add_edges_from(_nodes_cross_edges(G, H)) + return GH + + +@nx._dispatch(graphs=_G_H) +def strong_product(G, H): + r"""Returns the strong product of G and H. + + The strong product $P$ of the graphs $G$ and $H$ has a node set that + is the Cartesian product of the node sets, $V(P)=V(G) \times V(H)$. + $P$ has an edge $((u,v), (x,y))$ if and only if + $u==v$ and $(x,y)$ is an edge in $H$, or + $x==y$ and $(u,v)$ is an edge in $G$, or + $(u,v)$ is an edge in $G$ and $(x,y)$ is an edge in $H$. + + Parameters + ---------- + G, H: graphs + Networkx graphs. + + Returns + ------- + P: NetworkX graph + The Cartesian product of G and H. P will be a multi-graph if either G + or H is a multi-graph. Will be a directed if G and H are directed, + and undirected if G and H are undirected. + + Raises + ------ + NetworkXError + If G and H are not both directed or both undirected. + + Notes + ----- + Node attributes in P are two-tuple of the G and H node attributes. + Missing attributes are assigned None. 
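+
+    Similarly to the note in `cartesian_product`, the strong product of two
+    2-node path graphs is the complete graph on four nodes:
+
+    >>> P2 = nx.path_graph(2)
+    >>> nx.is_isomorphic(nx.strong_product(P2, P2), nx.complete_graph(4))
+    True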
+ + Examples + -------- + >>> G = nx.Graph() + >>> H = nx.Graph() + >>> G.add_node(0, a1=True) + >>> H.add_node("a", a2="Spam") + >>> P = nx.strong_product(G, H) + >>> list(P) + [(0, 'a')] + + Edge attributes and edge keys (for multigraphs) are also copied to the + new product graph + """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(_node_product(G, H)) + GH.add_edges_from(_nodes_cross_edges(G, H)) + GH.add_edges_from(_edges_cross_nodes(G, H)) + GH.add_edges_from(_directed_edges_cross_edges(G, H)) + if not GH.is_directed(): + GH.add_edges_from(_undirected_edges_cross_edges(G, H)) + return GH + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def power(G, k): + """Returns the specified power of a graph. + + The $k$th power of a simple graph $G$, denoted $G^k$, is a + graph on the same set of nodes in which two distinct nodes $u$ and + $v$ are adjacent in $G^k$ if and only if the shortest path + distance between $u$ and $v$ in $G$ is at most $k$. + + Parameters + ---------- + G : graph + A NetworkX simple graph object. + + k : positive integer + The power to which to raise the graph `G`. + + Returns + ------- + NetworkX simple graph + `G` to the power `k`. + + Raises + ------ + ValueError + If the exponent `k` is not positive. + + NetworkXNotImplemented + If `G` is not a simple graph. + + Examples + -------- + The number of edges will never decrease when taking successive + powers: + + >>> G = nx.path_graph(4) + >>> list(nx.power(G, 2).edges) + [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)] + >>> list(nx.power(G, 3).edges) + [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)] + + The `k` th power of a cycle graph on *n* nodes is the complete graph + on *n* nodes, if `k` is at least ``n // 2``: + + >>> G = nx.cycle_graph(5) + >>> H = nx.complete_graph(5) + >>> nx.is_isomorphic(nx.power(G, 2), H) + True + >>> G = nx.cycle_graph(8) + >>> H = nx.complete_graph(8) + >>> nx.is_isomorphic(nx.power(G, 4), H) + True + + References + ---------- + .. [1] J. A. Bondy, U. S. R. Murty, *Graph Theory*. Springer, 2008. + + Notes + ----- + This definition of "power graph" comes from Exercise 3.1.6 of + *Graph Theory* by Bondy and Murty [1]_. + + """ + if k <= 0: + raise ValueError("k must be a positive integer") + H = nx.Graph() + H.add_nodes_from(G) + # update BFS code to ignore self loops. + for n in G: + seen = {} # level (number of hops) when seen in BFS + level = 1 # the current level + nextlevel = G[n] + while nextlevel: + thislevel = nextlevel # advance to next level + nextlevel = {} # and start a new list (fringe) + for v in thislevel: + if v == n: # avoid self loop + continue + if v not in seen: + seen[v] = level # set the level of vertex v + nextlevel.update(G[v]) # add neighbors of v + if k <= level: + break + level += 1 + H.add_edges_from((n, nbr) for nbr in seen) + return H + + +@not_implemented_for("multigraph") +@nx._dispatch(graphs=_G_H) +def rooted_product(G, H, root): + """Return the rooted product of graphs G and H rooted at root in H. + + A new graph is constructed representing the rooted product of + the inputted graphs, G and H, with a root in H. + A rooted product duplicates H for each nodes in G with the root + of H corresponding to the node in G. Nodes are renamed as the direct + product of G and H. The result is a subgraph of the cartesian product. 
+
+    Parameters
+    ----------
+    G,H : graph
+        A NetworkX graph
+    root : node
+        A node in H
+
+    Returns
+    -------
+    R : The rooted product of G and H with a specified root in H
+
+    Notes
+    -----
+    The nodes of R are the Cartesian Product of the nodes of G and H.
+    The nodes of G and H are not relabeled.
+    """
+    if root not in H:
+        raise nx.NetworkXError("root must be a vertex in H")
+
+    R = nx.Graph()
+    R.add_nodes_from(product(G, H))
+
+    R.add_edges_from(((e[0], root), (e[1], root)) for e in G.edges())
+    R.add_edges_from(((g, e[0]), (g, e[1])) for g in G for e in H.edges())
+
+    return R
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatch(graphs=_G_H)
+def corona_product(G, H):
+    r"""Returns the Corona product of G and H.
+
+    The corona product of $G$ and $H$ is the graph $C = G \circ H$ obtained by
+    taking one copy of $G$, called the center graph, $|V(G)|$ copies of $H$,
+    called the outer graph, and making the $i$-th vertex of $G$ adjacent to
+    every vertex of the $i$-th copy of $H$, where $1 \leq i \leq |V(G)|$.
+
+    Parameters
+    ----------
+    G, H: NetworkX graphs
+        The graphs to take the corona product of.
+        `G` is the center graph and `H` is the outer graph
+
+    Returns
+    -------
+    C: NetworkX graph
+        The Corona product of G and H.
+
+    Raises
+    ------
+    NetworkXError
+        If G and H are not both directed or both undirected.
+
+    Examples
+    --------
+    >>> G = nx.cycle_graph(4)
+    >>> H = nx.path_graph(2)
+    >>> C = nx.corona_product(G, H)
+    >>> list(C)
+    [0, 1, 2, 3, (0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
+    >>> print(C)
+    Graph with 12 nodes and 16 edges
+
+    References
+    ----------
+    [1] M. Tavakoli, F. Rahbarnia, and A. R. Ashrafi,
+        "Studying the corona product of graphs under some graph invariants,"
+        Transactions on Combinatorics, vol. 3, no. 3, pp. 43–49, Sep. 2014,
+        doi: 10.22108/toc.2014.5542.
+    [2] A. Faraji, "Corona Product in Graph Theory," Ali Faraji, May 11, 2021.
+        https://blog.alifaraji.ir/math/graph-theory/corona-product.html (accessed Dec. 07, 2021).
+ """ + GH = _init_product_graph(G, H) + GH.add_nodes_from(G) + GH.add_edges_from(G.edges) + + for G_node in G: + # copy nodes of H in GH, call it H_i + GH.add_nodes_from((G_node, v) for v in H) + + # copy edges of H_i based on H + GH.add_edges_from( + ((G_node, e0), (G_node, e1), d) for e0, e1, d in H.edges.data() + ) + + # creating new edges between H_i and a G's node + GH.add_edges_from((G_node, (G_node, H_node)) for H_node in H) + + return GH diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2295007d337375a1b99eeb50f82e8345fc2e83b6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eccde84e801b03d614c8dac9fa25023e9083d7d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_all.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05aa5880b0121ed80802dbfda404aa9f56f2738b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea03a522c1d78a04fe315ced46414a4fb3c8b0ee Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_product.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc76e792b69618db9748ffaa9a6622311b0eae5c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_all.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_all.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec29c150306080d536c1b1dc785209d4a113f6d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_all.py @@ -0,0 +1,328 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_union_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) 
+ g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + j = g.copy() + j.graph["name"] = "j" + j.graph["attr"] = "attr" + j.nodes[0]["x"] = 7 + + ghj = nx.union_all([g, h, j], rename=("g", "h", "j")) + assert set(ghj.nodes()) == {"h0", "h1", "g0", "g1", "j0", "j1"} + for n in ghj: + graph, node = n + assert ghj.nodes[n] == eval(graph).nodes[int(node)] + + assert ghj.graph["attr"] == "attr" + assert ghj.graph["name"] == "j" # j graph attributes take precedent + + +def test_intersection_all(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph(awesome=True) + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4]) + R.add_edge(2, 3) + R.add_edge(4, 1) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + assert I.graph == {} + + +def test_intersection_all_different_node_sets(): + G = nx.Graph() + H = nx.Graph() + R = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 6, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(6, 7) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + R.add_nodes_from([1, 2, 3, 4, 8, 9]) + R.add_edge(2, 3) + R.add_edge(4, 1) + R.add_edge(8, 9) + I = nx.intersection_all([G, H, R]) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_all_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_attributes_different_node_sets(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + g.add_node(2) + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_all_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_all_multigraph_attributes_different_node_sets(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(1, 2, key=1) + g.add_edge(1, 2, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=2) + h.add_edge(0, 1, key=3) + gh = nx.intersection_all([g, h]) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1), (0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0), (0, 1, 2)] + + +def test_intersection_all_digraph(): + g = nx.DiGraph() + g.add_edges_from([(1, 2), (2, 3)]) + h = nx.DiGraph() + h.add_edges_from([(2, 1), (2, 3)]) + gh = nx.intersection_all([g, h]) + assert sorted(gh.edges()) == [(2, 3)] + + +def test_union_all_and_compose_all(): + K3 = nx.complete_graph(3) + P3 = 
nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union_all([G1, G2]) + H = nx.compose_all([G1, G2]) + assert edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", "1") + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union_all([H, G1], rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union_all([H, G2], rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose_all([G, G]) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union_all([G2, G2], rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union_all([G, G]) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union_all([G1, G2]) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G2 = nx.DiGraph() + G2.add_edge(1, 2) + G3 = nx.DiGraph() + G3.add_edge(11, 22) + G4 = nx.union_all([G1, G2, G3], rename=("G1", "G2", "G3")) + assert sorted(G4.nodes()) == ["G1A", "G1B", "G21", "G22", "G311", "G322"] + + +def test_union_all_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union_all([G, H]) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_input_output(): + l = [nx.Graph([(1, 2)]), nx.Graph([(3, 4)], awesome=True)] + U = nx.disjoint_union_all(l) + assert len(l) == 2 + assert U.graph["awesome"] + C = nx.compose_all(l) + assert len(l) == 2 + l = [nx.Graph([(1, 2)]), nx.Graph([(1, 2)])] + R = nx.intersection_all(l) + assert len(l) == 2 + + +def test_mixed_type_union(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.union_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.union_all([X, Y]) + + +def test_mixed_type_disjoint_union(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.disjoint_union_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.disjoint_union_all([X, Y]) + + +def test_mixed_type_intersection(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.intersection_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.intersection_all([X, Y]) + + +def test_mixed_type_compose(): + with pytest.raises(nx.NetworkXError): + G = nx.Graph() + H = nx.MultiGraph() + I = nx.Graph() + U = nx.compose_all([G, H, I]) + with pytest.raises(nx.NetworkXError): + X = nx.Graph() + Y = nx.DiGraph() + XY = nx.compose_all([X, Y]) + + +def test_empty_union(): + with pytest.raises(ValueError): + nx.union_all([]) + + +def 
test_empty_disjoint_union():
+    with pytest.raises(ValueError):
+        nx.disjoint_union_all([])
+
+
+def test_empty_compose_all():
+    with pytest.raises(ValueError):
+        nx.compose_all([])
+
+
+def test_empty_intersection_all():
+    with pytest.raises(ValueError):
+        nx.intersection_all([])
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_binary.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_binary.py
new file mode 100644
index 0000000000000000000000000000000000000000..97743822dd83fc45ac7c6ec9cb283e0c4d5ba723
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_binary.py
@@ -0,0 +1,468 @@
+import os
+
+import pytest
+
+import networkx as nx
+from networkx.classes.tests import dispatch_interface
+from networkx.utils import edges_equal
+
+
+def test_union_attributes():
+    g = nx.Graph()
+    g.add_node(0, x=4)
+    g.add_node(1, x=5)
+    g.add_edge(0, 1, size=5)
+    g.graph["name"] = "g"
+
+    h = g.copy()
+    h.graph["name"] = "h"
+    h.graph["attr"] = "attr"
+    h.nodes[0]["x"] = 7
+
+    gh = nx.union(g, h, rename=("g", "h"))
+    assert set(gh.nodes()) == {"h0", "h1", "g0", "g1"}
+    for n in gh:
+        graph, node = n
+        assert gh.nodes[n] == eval(graph).nodes[int(node)]
+
+    assert gh.graph["attr"] == "attr"
+    assert gh.graph["name"] == "h"  # h graph attributes take precedence
+
+
+def test_intersection():
+    G = nx.Graph()
+    H = nx.Graph()
+    G.add_nodes_from([1, 2, 3, 4])
+    G.add_edge(1, 2)
+    G.add_edge(2, 3)
+    H.add_nodes_from([1, 2, 3, 4])
+    H.add_edge(2, 3)
+    H.add_edge(3, 4)
+    I = nx.intersection(G, H)
+    assert set(I.nodes()) == {1, 2, 3, 4}
+    assert sorted(I.edges()) == [(2, 3)]
+
+    ##################
+    # Tests for @nx._dispatch mechanism with multiple graph arguments
+    # nx.intersection is called as if it were a re-implementation
+    # from another package.
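+    # (dispatch_interface lives in networkx.classes.tests; its convert()
+    # helper wraps the graph in the test backend's graph type, so the
+    # calls below exercise the dispatcher's backend-conversion path.)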
+ ################### + G2 = dispatch_interface.convert(G) + H2 = dispatch_interface.convert(H) + I2 = nx.intersection(G2, H2) + assert set(I2.nodes()) == {1, 2, 3, 4} + assert sorted(I2.edges()) == [(2, 3)] + # Only test if not performing auto convert testing of backend implementations + if not nx.utils.backends._dispatch._automatic_backends: + with pytest.raises(TypeError): + nx.intersection(G2, H) + with pytest.raises(TypeError): + nx.intersection(G, H2) + + +def test_intersection_node_sets_different(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4, 7]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4, 5, 6]) + H.add_edge(2, 3) + H.add_edge(3, 4) + H.add_edge(5, 6) + I = nx.intersection(G, H) + assert set(I.nodes()) == {1, 2, 3, 4} + assert sorted(I.edges()) == [(2, 3)] + + +def test_intersection_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + gh = nx.intersection(g, h) + + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_attributes_node_sets_different(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_node(2, x=3) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + h.remove_node(2) + + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == sorted(g.edges()) + + +def test_intersection_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_intersection_multigraph_attributes_node_set_different(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + g.add_edge(0, 2, key=2) + g.add_edge(0, 2, key=1) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.intersection(g, h) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 0)] + + +def test_difference(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + G.add_edge(2, 3) + H.add_nodes_from([1, 2, 3, 4]) + H.add_edge(2, 3) + H.add_edge(3, 4) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(3, 4)] + D = nx.symmetric_difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(1, 2), (3, 4)] + + +def test_difference2(): + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([1, 2, 3, 4]) + H.add_nodes_from([1, 2, 3, 4]) + G.add_edge(1, 2) + H.add_edge(1, 2) + G.add_edge(2, 3) + D = nx.difference(G, H) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [(2, 3)] + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 3, 4} + assert sorted(D.edges()) == [] + H.add_edge(3, 4) + D = nx.difference(H, G) + assert set(D.nodes()) == {1, 2, 
3, 4} + assert sorted(D.edges()) == [(3, 4)] + + +def test_difference_attributes(): + g = nx.Graph() + g.add_node(0, x=4) + g.add_node(1, x=5) + g.add_edge(0, 1, size=5) + g.graph["name"] = "g" + + h = g.copy() + h.graph["name"] = "h" + h.graph["attr"] = "attr" + h.nodes[0]["x"] = 7 + + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [] + + +def test_difference_multigraph_attributes(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == [(0, 1), (0, 1)] + assert sorted(gh.edges(keys=True)) == [(0, 1, 1), (0, 1, 2)] + + +def test_difference_raise(): + G = nx.path_graph(4) + H = nx.path_graph(3) + pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + + +def test_symmetric_difference_multigraph(): + g = nx.MultiGraph() + g.add_edge(0, 1, key=0) + g.add_edge(0, 1, key=1) + g.add_edge(0, 1, key=2) + h = nx.MultiGraph() + h.add_edge(0, 1, key=0) + h.add_edge(0, 1, key=3) + gh = nx.symmetric_difference(g, h) + assert set(gh.nodes()) == set(g.nodes()) + assert set(gh.nodes()) == set(h.nodes()) + assert sorted(gh.edges()) == 3 * [(0, 1)] + assert sorted(sorted(e) for e in gh.edges(keys=True)) == [ + [0, 1, 1], + [0, 1, 2], + [0, 1, 3], + ] + + +def test_union_and_compose(): + K3 = nx.complete_graph(3) + P3 = nx.path_graph(3) + + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G2 = nx.DiGraph() + G2.add_edge("1", "2") + G2.add_edge("1", "3") + G2.add_edge("1", "4") + + G = nx.union(G1, G2) + H = nx.compose(G1, G2) + assert edges_equal(G.edges(), H.edges()) + assert not G.has_edge("A", 1) + pytest.raises(nx.NetworkXError, nx.union, K3, P3) + H1 = nx.union(H, G1, rename=("H", "G1")) + assert sorted(H1.nodes()) == [ + "G1A", + "G1B", + "G1C", + "G1D", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + H2 = nx.union(H, G2, rename=("H", "")) + assert sorted(H2.nodes()) == [ + "1", + "2", + "3", + "4", + "H1", + "H2", + "H3", + "H4", + "HA", + "HB", + "HC", + "HD", + ] + + assert not H1.has_edge("NB", "NA") + + G = nx.compose(G, G) + assert edges_equal(G.edges(), H.edges()) + + G2 = nx.union(G2, G2, rename=("", "copy")) + assert sorted(G2.nodes()) == [ + "1", + "2", + "3", + "4", + "copy1", + "copy2", + "copy3", + "copy4", + ] + + assert sorted(G2.neighbors("copy4")) == [] + assert sorted(G2.neighbors("copy1")) == ["copy2", "copy3", "copy4"] + assert len(G) == 8 + assert nx.number_of_edges(G) == 6 + + E = nx.disjoint_union(G, G) + assert len(E) == 16 + assert nx.number_of_edges(E) == 12 + + E = nx.disjoint_union(G1, G2) + assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + + G = nx.Graph() + H = nx.Graph() + G.add_nodes_from([(1, {"a1": 1})]) + H.add_nodes_from([(1, {"b1": 1})]) + R = nx.compose(G, H) + assert R.nodes == {1: {"a1": 1, "b1": 1}} + + +def test_union_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_disjoint_union_multigraph(): 
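+    # disjoint_union relabels nodes to consecutive integers starting at 0;
+    # the labels below are chosen so the relabeled names coincide with the
+    # originals, which keeps the set comparisons meaningful.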
+ G = nx.MultiGraph() + G.add_edge(0, 1, key=0) + G.add_edge(0, 1, key=1) + H = nx.MultiGraph() + H.add_edge(2, 3, key=0) + H.add_edge(2, 3, key=1) + GH = nx.disjoint_union(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_compose_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + H.add_edge(1, 2, key=2) + GH = nx.compose(G, H) + assert set(GH) == set(G) | set(H) + assert set(GH.edges(keys=True)) == set(G.edges(keys=True)) | set(H.edges(keys=True)) + + +def test_full_join_graph(): + # Simple Graphs + G = nx.Graph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.Graph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # Rename graphs with string-like nodes + G = nx.Graph() + G.add_node("a") + G.add_edge("b", "c") + H = nx.Graph() + H.add_edge("d", "e") + + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"ga", "gb", "gc", "hd", "he"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # DiGraphs + G = nx.DiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.DiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # DiGraphs Rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def test_full_join_multigraph(): + # MultiGraphs + G = nx.MultiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) + + # MultiDiGraphs + G = nx.MultiDiGraph() + G.add_node(0) + G.add_edge(1, 2) + H = nx.MultiDiGraph() + H.add_edge(3, 4) + + U = nx.full_join(G, H) + assert set(U) == set(G) | set(H) + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + # MultiDiGraphs rename + U = nx.full_join(G, H, rename=("g", "h")) + assert set(U) == {"g0", "g1", "g2", "h3", "h4"} + assert len(U) == len(G) + len(H) + assert len(U.edges()) == len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2 + + +def test_mixed_type_union(): + G = nx.Graph() + H = nx.MultiGraph() + pytest.raises(nx.NetworkXError, nx.union, G, H) + pytest.raises(nx.NetworkXError, nx.disjoint_union, G, H) + pytest.raises(nx.NetworkXError, nx.intersection, G, H) + 
pytest.raises(nx.NetworkXError, nx.difference, G, H) + pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H) + pytest.raises(nx.NetworkXError, nx.compose, G, H) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_product.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_product.py new file mode 100644 index 0000000000000000000000000000000000000000..50bc7b7e59e09b73e3091b99edfb5a9c06d115a2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_product.py @@ -0,0 +1,435 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +def test_tensor_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.tensor_product(nx.DiGraph(), nx.Graph()) + + +def test_tensor_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.tensor_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. + G = nx.tensor_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.tensor_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_tensor_product_size(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_tensor_product_combinations(): + # basic smoke test, more realistic tests would be useful + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.tensor_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.tensor_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + G = nx.tensor_product(nx.DiGraph(P5), nx.DiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + +def test_tensor_product_classic_result(): + K2 = nx.complete_graph(2) + G = nx.petersen_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.desargues_graph()) + + G = nx.cycle_graph(5) + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cycle_graph(10)) + + G = nx.tetrahedral_graph() + G = nx.tensor_product(G, K2) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + +def test_tensor_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.tensor_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if H.has_edge(u_H, v_H) and G.has_edge(u_G, v_G): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_cartesian_product_multigraph(): + G = nx.MultiGraph() + G.add_edge(1, 2, key=0) + G.add_edge(1, 2, 
key=1) + H = nx.MultiGraph() + H.add_edge(3, 4, key=0) + H.add_edge(3, 4, key=1) + GH = nx.cartesian_product(G, H) + assert set(GH) == {(1, 3), (2, 3), (2, 4), (1, 4)} + assert {(frozenset([u, v]), k) for u, v, k in GH.edges(keys=True)} == { + (frozenset([u, v]), k) + for u, v, k in [ + ((1, 3), (2, 3), 0), + ((1, 3), (2, 3), 1), + ((1, 3), (1, 4), 0), + ((1, 3), (1, 4), 1), + ((2, 3), (2, 4), 0), + ((2, 3), (2, 4), 1), + ((2, 4), (1, 4), 0), + ((2, 4), (1, 4), 1), + ] + } + + +def test_cartesian_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.cartesian_product(nx.DiGraph(), nx.Graph()) + + +def test_cartesian_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.cartesian_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. + G = nx.cartesian_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.cartesian_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_cartesian_product_size(): + # order(GXH)=order(G)*order(H) + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.cartesian_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + assert nx.number_of_edges(G) == nx.number_of_edges(P5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(P5) + G = nx.cartesian_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + assert nx.number_of_edges(G) == nx.number_of_edges(K5) * nx.number_of_nodes( + K3 + ) + nx.number_of_edges(K3) * nx.number_of_nodes(K5) + + +def test_cartesian_product_classic(): + # test some classic product graphs + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + # cube = 2-path X 2-path + G = nx.cartesian_product(P2, P2) + G = nx.cartesian_product(P2, G) + assert nx.is_isomorphic(G, nx.cubical_graph()) + + # 3x3 grid + G = nx.cartesian_product(P3, P3) + assert nx.is_isomorphic(G, nx.grid_2d_graph(3, 3)) + + +def test_cartesian_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.cartesian_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if (u_G == v_G and H.has_edge(u_H, v_H)) or ( + u_H == v_H and G.has_edge(u_G, v_G) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_lexicographic_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.lexicographic_product(nx.DiGraph(), nx.Graph()) + + +def test_lexicographic_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.lexicographic_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
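+    # ("v.v." reads vice versa: the product is null with the null graph as
+    # either operand.)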
+ G = nx.lexicographic_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.lexicographic_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_lexicographic_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_lexicographic_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.lexicographic_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.lexicographic_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for lexicographic product + + +def test_lexicographic_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.lexicographic_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if G.has_edge(u_G, v_G) or (u_G == v_G and H.has_edge(u_H, v_H)): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_strong_product_raises(): + with pytest.raises(nx.NetworkXError): + P = nx.strong_product(nx.DiGraph(), nx.Graph()) + + +def test_strong_product_null(): + null = nx.null_graph() + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K10 = nx.complete_graph(10) + P3 = nx.path_graph(3) + P10 = nx.path_graph(10) + # null graph + G = nx.strong_product(null, null) + assert nx.is_isomorphic(G, null) + # null_graph X anything = null_graph and v.v. 
+ G = nx.strong_product(null, empty10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, K10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P3) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(null, P10) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(empty10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(K10, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P3, null) + assert nx.is_isomorphic(G, null) + G = nx.strong_product(P10, null) + assert nx.is_isomorphic(G, null) + + +def test_strong_product_size(): + K5 = nx.complete_graph(5) + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(K3, K5) + assert nx.number_of_nodes(G) == 3 * 5 + + +def test_strong_product_combinations(): + P5 = nx.path_graph(5) + K3 = nx.complete_graph(3) + G = nx.strong_product(P5, K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), K3) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(P5, nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + G = nx.strong_product(nx.MultiGraph(P5), nx.MultiGraph(K3)) + assert nx.number_of_nodes(G) == 5 * 3 + + # No classic easily found classic results for strong product + + +def test_strong_product_random(): + G = nx.erdos_renyi_graph(10, 2 / 10.0) + H = nx.erdos_renyi_graph(10, 2 / 10.0) + GH = nx.strong_product(G, H) + + for u_G, u_H in GH.nodes(): + for v_G, v_H in GH.nodes(): + if ( + (u_G == v_G and H.has_edge(u_H, v_H)) + or (u_H == v_H and G.has_edge(u_G, v_G)) + or (G.has_edge(u_G, v_G) and H.has_edge(u_H, v_H)) + ): + assert GH.has_edge((u_G, u_H), (v_G, v_H)) + else: + assert not GH.has_edge((u_G, u_H), (v_G, v_H)) + + +def test_graph_power_raises(): + with pytest.raises(nx.NetworkXNotImplemented): + nx.power(nx.MultiDiGraph(), 2) + + +def test_graph_power(): + # wikipedia example for graph power + G = nx.cycle_graph(7) + G.add_edge(6, 7) + G.add_edge(7, 8) + G.add_edge(8, 9) + G.add_edge(9, 2) + H = nx.power(G, 2) + + assert edges_equal( + list(H.edges()), + [ + (0, 1), + (0, 2), + (0, 5), + (0, 6), + (0, 7), + (1, 9), + (1, 2), + (1, 3), + (1, 6), + (2, 3), + (2, 4), + (2, 8), + (2, 9), + (3, 4), + (3, 5), + (3, 9), + (4, 5), + (4, 6), + (5, 6), + (5, 7), + (6, 7), + (6, 8), + (7, 8), + (7, 9), + (8, 9), + ], + ) + + +def test_graph_power_negative(): + with pytest.raises(ValueError): + nx.power(nx.Graph(), -1) + + +def test_rooted_product_raises(): + with pytest.raises(nx.NetworkXError): + nx.rooted_product(nx.Graph(), nx.path_graph(2), 10) + + +def test_rooted_product(): + G = nx.cycle_graph(5) + H = nx.Graph() + H.add_edges_from([("a", "b"), ("b", "c"), ("b", "d")]) + R = nx.rooted_product(G, H, "a") + assert len(R) == len(G) * len(H) + assert R.size() == G.size() + len(G) * H.size() + + +def test_corona_product(): + G = nx.cycle_graph(3) + H = nx.path_graph(2) + C = nx.corona_product(G, H) + assert len(C) == (len(G) * len(H)) + len(G) + assert C.size() == G.size() + len(G) * H.size() + len(G) * len(H) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_unary.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..d68e55cd9c9fa37459b497c32a7a095576c306c3 --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/operators/tests/test_unary.py @@ -0,0 +1,55 @@ +import pytest + +import networkx as nx + + +def test_complement(): + null = nx.null_graph() + empty1 = nx.empty_graph(1) + empty10 = nx.empty_graph(10) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + K10 = nx.complete_graph(10) + P2 = nx.path_graph(2) + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + P10 = nx.path_graph(10) + # complement of the complete graph is empty + + G = nx.complement(K3) + assert nx.is_isomorphic(G, nx.empty_graph(3)) + G = nx.complement(K5) + assert nx.is_isomorphic(G, nx.empty_graph(5)) + # for any G, G=complement(complement(G)) + P3cc = nx.complement(nx.complement(P3)) + assert nx.is_isomorphic(P3, P3cc) + nullcc = nx.complement(nx.complement(null)) + assert nx.is_isomorphic(null, nullcc) + b = nx.bull_graph() + bcc = nx.complement(nx.complement(b)) + assert nx.is_isomorphic(b, bcc) + + +def test_complement_2(): + G1 = nx.DiGraph() + G1.add_edge("A", "B") + G1.add_edge("A", "C") + G1.add_edge("A", "D") + G1C = nx.complement(G1) + assert sorted(G1C.edges()) == [ + ("B", "A"), + ("B", "C"), + ("B", "D"), + ("C", "A"), + ("C", "B"), + ("C", "D"), + ("D", "A"), + ("D", "B"), + ("D", "C"), + ] + + +def test_reverse1(): + # Other tests for reverse are done by the DiGraph and MultiDigraph. + G1 = nx.Graph() + pytest.raises(nx.NetworkXError, nx.reverse, G1) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/operators/unary.py b/MLPY/Lib/site-packages/networkx/algorithms/operators/unary.py new file mode 100644 index 0000000000000000000000000000000000000000..ce6d9be90575c362e19167f0fb8394b0555a7050 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/operators/unary.py @@ -0,0 +1,76 @@ +"""Unary operations on graphs""" +import networkx as nx + +__all__ = ["complement", "reverse"] + + +@nx._dispatch +def complement(G): + """Returns the graph complement of G. + + Parameters + ---------- + G : graph + A NetworkX graph + + Returns + ------- + GC : A new graph. + + Notes + ----- + Note that `complement` does not create self-loops and also + does not produce parallel edges for MultiGraphs. + + Graph, node, and edge data are not propagated to the new graph. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_complement = nx.complement(G) + >>> G_complement.edges() # This shows the edges of the complemented graph + EdgeView([(1, 4), (1, 5), (2, 4), (2, 5), (4, 5)]) + + """ + R = G.__class__() + R.add_nodes_from(G) + R.add_edges_from( + ((n, n2) for n, nbrs in G.adjacency() for n2 in G if n2 not in nbrs if n != n2) + ) + return R + + +@nx._dispatch +def reverse(G, copy=True): + """Returns the reverse directed graph of G. + + Parameters + ---------- + G : directed graph + A NetworkX directed graph + copy : bool + If True, then a new graph is returned. If False, then the graph is + reversed in place. + + Returns + ------- + H : directed graph + The reversed G. + + Raises + ------ + NetworkXError + If graph is undirected. 
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 4), (3, 5)]) + >>> G_reversed = nx.reverse(G) + >>> G_reversed.edges() + OutEdgeView([(2, 1), (3, 1), (3, 2), (4, 3), (5, 3)]) + + """ + if not G.is_directed(): + raise nx.NetworkXError("Cannot reverse an undirected graph.") + else: + return G.reverse(copy=copy) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/planar_drawing.py b/MLPY/Lib/site-packages/networkx/algorithms/planar_drawing.py new file mode 100644 index 0000000000000000000000000000000000000000..47f94f172154bdb63753832b29b48d6be78f406a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/planar_drawing.py @@ -0,0 +1,464 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["combinatorial_embedding_to_pos"] + + +def combinatorial_embedding_to_pos(embedding, fully_triangulate=False): + """Assigns every node a (x, y) position based on the given embedding + + The algorithm iteratively inserts nodes of the input graph in a certain + order and rearranges previously inserted nodes so that the planar drawing + stays valid. This is done efficiently by only maintaining relative + positions during the node placements and calculating the absolute positions + at the end. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + This defines the order of the edges + + fully_triangulate : bool + If set to True the algorithm adds edges to a copy of the input + embedding and makes it chordal. + + Returns + ------- + pos : dict + Maps each node to a tuple that defines the (x, y) position + + References + ---------- + .. [1] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + if len(embedding.nodes()) < 4: + # Position the node in any triangle + default_positions = [(0, 0), (2, 0), (1, 1)] + pos = {} + for i, v in enumerate(embedding.nodes()): + pos[v] = default_positions[i] + return pos + + embedding, outer_face = triangulate_embedding(embedding, fully_triangulate) + + # The following dicts map a node to another node + # If a node is not in the key set it means that the node is not yet in G_k + # If a node maps to None then the corresponding subtree does not exist + left_t_child = {} + right_t_child = {} + + # The following dicts map a node to an integer + delta_x = {} + y_coordinate = {} + + node_list = get_canonical_ordering(embedding, outer_face) + + # 1. 
Phase: Compute relative positions + + # Initialization + v1, v2, v3 = node_list[0][0], node_list[1][0], node_list[2][0] + + delta_x[v1] = 0 + y_coordinate[v1] = 0 + right_t_child[v1] = v3 + left_t_child[v1] = None + + delta_x[v2] = 1 + y_coordinate[v2] = 0 + right_t_child[v2] = None + left_t_child[v2] = None + + delta_x[v3] = 1 + y_coordinate[v3] = 1 + right_t_child[v3] = v2 + left_t_child[v3] = None + + for k in range(3, len(node_list)): + vk, contour_neighbors = node_list[k] + wp = contour_neighbors[0] + wp1 = contour_neighbors[1] + wq = contour_neighbors[-1] + wq1 = contour_neighbors[-2] + adds_mult_tri = len(contour_neighbors) > 2 + + # Stretch gaps: + delta_x[wp1] += 1 + delta_x[wq] += 1 + + delta_x_wp_wq = sum(delta_x[x] for x in contour_neighbors[1:]) + + # Adjust offsets + delta_x[vk] = (-y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + y_coordinate[vk] = (y_coordinate[wp] + delta_x_wp_wq + y_coordinate[wq]) // 2 + delta_x[wq] = delta_x_wp_wq - delta_x[vk] + if adds_mult_tri: + delta_x[wp1] -= delta_x[vk] + + # Install v_k: + right_t_child[wp] = vk + right_t_child[vk] = wq + if adds_mult_tri: + left_t_child[vk] = wp1 + right_t_child[wq1] = None + else: + left_t_child[vk] = None + + # 2. Phase: Set absolute positions + pos = {} + pos[v1] = (0, y_coordinate[v1]) + remaining_nodes = [v1] + while remaining_nodes: + parent_node = remaining_nodes.pop() + + # Calculate position for left child + set_position( + parent_node, left_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + # Calculate position for right child + set_position( + parent_node, right_t_child, remaining_nodes, delta_x, y_coordinate, pos + ) + return pos + + +def set_position(parent, tree, remaining_nodes, delta_x, y_coordinate, pos): + """Helper method to calculate the absolute position of nodes.""" + child = tree[parent] + parent_node_x = pos[parent][0] + if child is not None: + # Calculate pos of child + child_x = parent_node_x + delta_x[child] + pos[child] = (child_x, y_coordinate[child]) + # Remember to calculate pos of its children + remaining_nodes.append(child) + + +def get_canonical_ordering(embedding, outer_face): + """Returns a canonical ordering of the nodes + + The canonical ordering of nodes (v1, ..., vn) must fulfill the following + conditions: + (See Lemma 1 in [2]_) + + - For the subgraph G_k of the input graph induced by v1, ..., vk it holds: + - 2-connected + - internally triangulated + - the edge (v1, v2) is part of the outer face + - For a node v(k+1) the following holds: + - The node v(k+1) is part of the outer face of G_k + - It has at least two neighbors in G_k + - All neighbors of v(k+1) in G_k lie consecutively on the outer face of + G_k (excluding the edge (v1, v2)). + + The algorithm used here starts with G_n (containing all nodes). It first + selects the nodes v1 and v2. And then tries to find the order of the other + nodes by checking which node can be removed in order to fulfill the + conditions mentioned above. This is done by calculating the number of + chords of nodes on the outer face. For more information see [1]_. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The embedding must be triangulated + outer_face : list + The nodes on the outer face of the graph + + Returns + ------- + ordering : list + A list of tuples `(vk, wp_wq)`. Here `vk` is the node at this position + in the canonical ordering. The element `wp_wq` is a list of nodes that + make up the outer face of G_k. + + References + ---------- + .. [1] Steven Chaplick. 
+ Canonical Orders of Planar Graphs and (some of) Their Applications 2015 + https://wuecampus2.uni-wuerzburg.de/moodle/pluginfile.php/545727/mod_resource/content/0/vg-ss15-vl03-canonical-orders-druckversion.pdf + .. [2] M. Chrobak and T.H. Payne: + A Linear-time Algorithm for Drawing a Planar Graph on a Grid 1989 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.51.6677 + + """ + v1 = outer_face[0] + v2 = outer_face[1] + chords = defaultdict(int) # Maps nodes to the number of their chords + marked_nodes = set() + ready_to_pick = set(outer_face) + + # Initialize outer_face_ccw_nbr (do not include v1 -> v2) + outer_face_ccw_nbr = {} + prev_nbr = v2 + for idx in range(2, len(outer_face)): + outer_face_ccw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + outer_face_ccw_nbr[prev_nbr] = v1 + + # Initialize outer_face_cw_nbr (do not include v2 -> v1) + outer_face_cw_nbr = {} + prev_nbr = v1 + for idx in range(len(outer_face) - 1, 0, -1): + outer_face_cw_nbr[prev_nbr] = outer_face[idx] + prev_nbr = outer_face[idx] + + def is_outer_face_nbr(x, y): + if x not in outer_face_ccw_nbr: + return outer_face_cw_nbr[x] == y + if x not in outer_face_cw_nbr: + return outer_face_ccw_nbr[x] == y + return outer_face_ccw_nbr[x] == y or outer_face_cw_nbr[x] == y + + def is_on_outer_face(x): + return x not in marked_nodes and (x in outer_face_ccw_nbr or x == v1) + + # Initialize number of chords + for v in outer_face: + for nbr in embedding.neighbors_cw_order(v): + if is_on_outer_face(nbr) and not is_outer_face_nbr(v, nbr): + chords[v] += 1 + ready_to_pick.discard(v) + + # Initialize canonical_ordering + canonical_ordering = [None] * len(embedding.nodes()) + canonical_ordering[0] = (v1, []) + canonical_ordering[1] = (v2, []) + ready_to_pick.discard(v1) + ready_to_pick.discard(v2) + + for k in range(len(embedding.nodes()) - 1, 1, -1): + # 1. 
Pick v from ready_to_pick + v = ready_to_pick.pop() + marked_nodes.add(v) + + # v has exactly two neighbors on the outer face (wp and wq) + wp = None + wq = None + # Iterate over neighbors of v to find wp and wq + nbr_iterator = iter(embedding.neighbors_cw_order(v)) + while True: + nbr = next(nbr_iterator) + if nbr in marked_nodes: + # Only consider nodes that are not yet removed + continue + if is_on_outer_face(nbr): + # nbr is either wp or wq + if nbr == v1: + wp = v1 + elif nbr == v2: + wq = v2 + else: + if outer_face_cw_nbr[nbr] == v: + # nbr is wp + wp = nbr + else: + # nbr is wq + wq = nbr + if wp is not None and wq is not None: + # We don't need to iterate any further + break + + # Obtain new nodes on outer face (neighbors of v from wp to wq) + wp_wq = [wp] + nbr = wp + while nbr != wq: + # Get next neighbor (clockwise on the outer face) + next_nbr = embedding[v][nbr]["ccw"] + wp_wq.append(next_nbr) + # Update outer face + outer_face_cw_nbr[nbr] = next_nbr + outer_face_ccw_nbr[next_nbr] = nbr + # Move to next neighbor of v + nbr = next_nbr + + if len(wp_wq) == 2: + # There was a chord between wp and wq, decrease number of chords + chords[wp] -= 1 + if chords[wp] == 0: + ready_to_pick.add(wp) + chords[wq] -= 1 + if chords[wq] == 0: + ready_to_pick.add(wq) + else: + # Update all chords involving w_(p+1) to w_(q-1) + new_face_nodes = set(wp_wq[1:-1]) + for w in new_face_nodes: + # If we do not find a chord for w later we can pick it next + ready_to_pick.add(w) + for nbr in embedding.neighbors_cw_order(w): + if is_on_outer_face(nbr) and not is_outer_face_nbr(w, nbr): + # There is a chord involving w + chords[w] += 1 + ready_to_pick.discard(w) + if nbr not in new_face_nodes: + # Also increase chord for the neighbor + # We only iterator over new_face_nodes + chords[nbr] += 1 + ready_to_pick.discard(nbr) + # Set the canonical ordering node and the list of contour neighbors + canonical_ordering[k] = (v, wp_wq) + + return canonical_ordering + + +def triangulate_face(embedding, v1, v2): + """Triangulates the face given by half edge (v, w) + + Parameters + ---------- + embedding : nx.PlanarEmbedding + v1 : node + The half-edge (v1, v2) belongs to the face that gets triangulated + v2 : node + """ + _, v3 = embedding.next_face_half_edge(v1, v2) + _, v4 = embedding.next_face_half_edge(v2, v3) + if v1 in (v2, v3): + # The component has less than 3 nodes + return + while v1 != v4: + # Add edge if not already present on other side + if embedding.has_edge(v1, v3): + # Cannot triangulate at this position + v1, v2, v3 = v2, v3, v4 + else: + # Add edge for triangulation + embedding.add_half_edge_cw(v1, v3, v2) + embedding.add_half_edge_ccw(v3, v1, v2) + v1, v2, v3 = v1, v3, v4 + # Get next node + _, v4 = embedding.next_face_half_edge(v2, v3) + + +def triangulate_embedding(embedding, fully_triangulate=True): + """Triangulates the embedding. + + Traverses faces of the embedding and adds edges to a copy of the + embedding to triangulate it. + The method also ensures that the resulting graph is 2-connected by adding + edges if the same vertex is contained twice on a path around a face. + + Parameters + ---------- + embedding : nx.PlanarEmbedding + The input graph must contain at least 3 nodes. + + fully_triangulate : bool + If set to False the face with the most nodes is chooses as outer face. + This outer face does not get triangulated. 
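+        If set to True every face, including the outer one, is
+        triangulated.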
+ + Returns + ------- + (embedding, outer_face) : (nx.PlanarEmbedding, list) tuple + The element `embedding` is a new embedding containing all edges from + the input embedding and the additional edges to triangulate the graph. + The element `outer_face` is a list of nodes that lie on the outer face. + If the graph is fully triangulated these are three arbitrary connected + nodes. + + """ + if len(embedding.nodes) <= 1: + return embedding, list(embedding.nodes) + embedding = nx.PlanarEmbedding(embedding) + + # Get a list with a node for each connected component + component_nodes = [next(iter(x)) for x in nx.connected_components(embedding)] + + # 1. Make graph a single component (add edge between components) + for i in range(len(component_nodes) - 1): + v1 = component_nodes[i] + v2 = component_nodes[i + 1] + embedding.connect_components(v1, v2) + + # 2. Calculate faces, ensure 2-connectedness and determine outer face + outer_face = [] # A face with the most number of nodes + face_list = [] + edges_visited = set() # Used to keep track of already visited faces + for v in embedding.nodes(): + for w in embedding.neighbors_cw_order(v): + new_face = make_bi_connected(embedding, v, w, edges_visited) + if new_face: + # Found a new face + face_list.append(new_face) + if len(new_face) > len(outer_face): + # The face is a candidate to be the outer face + outer_face = new_face + + # 3. Triangulate (internal) faces + for face in face_list: + if face is not outer_face or fully_triangulate: + # Triangulate this face + triangulate_face(embedding, face[0], face[1]) + + if fully_triangulate: + v1 = outer_face[0] + v2 = outer_face[1] + v3 = embedding[v2][v1]["ccw"] + outer_face = [v1, v2, v3] + + return embedding, outer_face + + +def make_bi_connected(embedding, starting_node, outgoing_node, edges_counted): + """Triangulate a face and make it 2-connected + + This method also adds all edges on the face to `edges_counted`. 
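+    When a node is seen twice while walking the face boundary, an edge is
+    added between its predecessor and successor on the walk, splitting the
+    face so that no cut vertex remains on it.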
+ + Parameters + ---------- + embedding: nx.PlanarEmbedding + The embedding that defines the faces + starting_node : node + A node on the face + outgoing_node : node + A node such that the half edge (starting_node, outgoing_node) belongs + to the face + edges_counted: set + Set of all half-edges that belong to a face that have been visited + + Returns + ------- + face_nodes: list + A list of all nodes at the border of this face + """ + + # Check if the face has already been calculated + if (starting_node, outgoing_node) in edges_counted: + # This face was already counted + return [] + edges_counted.add((starting_node, outgoing_node)) + + # Add all edges to edges_counted which have this face to their left + v1 = starting_node + v2 = outgoing_node + face_list = [starting_node] # List of nodes around the face + face_set = set(face_list) # Set for faster queries + _, v3 = embedding.next_face_half_edge(v1, v2) + + # Move the nodes v1, v2, v3 around the face: + while v2 != starting_node or v3 != outgoing_node: + if v1 == v2: + raise nx.NetworkXException("Invalid half-edge") + # cycle is not completed yet + if v2 in face_set: + # v2 encountered twice: Add edge to ensure 2-connectedness + embedding.add_half_edge_cw(v1, v3, v2) + embedding.add_half_edge_ccw(v3, v1, v2) + edges_counted.add((v2, v3)) + edges_counted.add((v3, v1)) + v2 = v1 + else: + face_set.add(v2) + face_list.append(v2) + + # set next edge + v1 = v2 + v2, v3 = embedding.next_face_half_edge(v2, v3) + + # remember that this edge has been counted + edges_counted.add((v1, v2)) + + return face_list diff --git a/MLPY/Lib/site-packages/networkx/algorithms/planarity.py b/MLPY/Lib/site-packages/networkx/algorithms/planarity.py new file mode 100644 index 0000000000000000000000000000000000000000..ad46f4739e5162082f7df05a1b696846d97fc85b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/planarity.py @@ -0,0 +1,1179 @@ +from collections import defaultdict + +import networkx as nx + +__all__ = ["check_planarity", "is_planar", "PlanarEmbedding"] + + +@nx._dispatch +def is_planar(G): + """Returns True if and only if `G` is planar. + + A graph is *planar* iff it can be drawn in a plane without + any edge intersections. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the graph is planar. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> nx.is_planar(G) + True + >>> nx.is_planar(nx.complete_graph(5)) + False + + See Also + -------- + check_planarity : + Check if graph is planar *and* return a `PlanarEmbedding` instance if True. + """ + + return check_planarity(G, counterexample=False)[0] + + +@nx._dispatch +def check_planarity(G, counterexample=False): + """Check if a graph is planar and return a counterexample or an embedding. + + A graph is planar iff it can be drawn in a plane without + any edge intersections. + + Parameters + ---------- + G : NetworkX graph + counterexample : bool + A Kuratowski subgraph (to proof non planarity) is only returned if set + to true. + + Returns + ------- + (is_planar, certificate) : (bool, NetworkX graph) tuple + is_planar is true if the graph is planar. + If the graph is planar `certificate` is a PlanarEmbedding + otherwise it is a Kuratowski subgraph. 
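+        (A Kuratowski subgraph is a subdivision of K5 or K3,3; by
+        Kuratowski's theorem its existence certifies non-planarity.)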
+ + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2)]) + >>> is_planar, P = nx.check_planarity(G) + >>> print(is_planar) + True + + When `G` is planar, a `PlanarEmbedding` instance is returned: + + >>> P.get_data() + {0: [1, 2], 1: [0], 2: [0]} + + Notes + ----- + A (combinatorial) embedding consists of cyclic orderings of the incident + edges at each vertex. Given such an embedding there are multiple approaches + discussed in literature to drawing the graph (subject to various + constraints, e.g. integer coordinates), see e.g. [2]. + + The planarity check algorithm and extraction of the combinatorial embedding + is based on the Left-Right Planarity Test [1]. + + A counterexample is only generated if the corresponding parameter is set, + because the complexity of the counterexample generation is higher. + + See also + -------- + is_planar : + Check for planarity without creating a `PlanarEmbedding` or counterexample. + + References + ---------- + .. [1] Ulrik Brandes: + The Left-Right Planarity Test + 2009 + http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208 + .. [2] Takao Nishizeki, Md Saidur Rahman: + Planar graph drawing + Lecture Notes Series on Computing: Volume 12 + 2004 + """ + + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatch +def check_planarity_recursive(G, counterexample=False): + """Recursive version of :meth:`check_planarity`.""" + planarity_state = LRPlanarity(G) + embedding = planarity_state.lr_planarity_recursive() + if embedding is None: + # graph is not planar + if counterexample: + return False, get_counterexample_recursive(G) + else: + return False, None + else: + # graph is planar + return True, embedding + + +@nx._dispatch +def get_counterexample(G): + """Obtains a Kuratowski subgraph. + + Raises nx.NetworkXException if G is planar. + + The function removes edges such that the graph is still not planar. + At some point the removal of any edge would make the graph planar. + This subgraph must be a Kuratowski subgraph. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + subgraph : NetworkX graph + A Kuratowski subgraph that proves that G is not planar. + + """ + # copy graph + G = nx.Graph(G) + + if check_planarity(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +@nx._dispatch +def get_counterexample_recursive(G): + """Recursive version of :meth:`get_counterexample`.""" + + # copy graph + G = nx.Graph(G) + + if check_planarity_recursive(G)[0]: + raise nx.NetworkXException("G is planar - no counter example.") + + # find Kuratowski subgraph + subgraph = nx.Graph() + for u in G: + nbrs = list(G[u]) + for v in nbrs: + G.remove_edge(u, v) + if check_planarity_recursive(G)[0]: + G.add_edge(u, v) + subgraph.add_edge(u, v) + + return subgraph + + +class Interval: + """Represents a set of return edges. + + All return edges in an interval induce a same constraint on the contained + edges, which means that all edges must either have a left orientation or + all edges must have a right orientation. 
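+    `low` and `high` are the lowest and highest return edges bounding the
+    interval.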
+ """ + + def __init__(self, low=None, high=None): + self.low = low + self.high = high + + def empty(self): + """Check if the interval is empty""" + return self.low is None and self.high is None + + def copy(self): + """Returns a copy of this interval""" + return Interval(self.low, self.high) + + def conflicting(self, b, planarity_state): + """Returns True if interval I conflicts with edge b""" + return ( + not self.empty() + and planarity_state.lowpt[self.high] > planarity_state.lowpt[b] + ) + + +class ConflictPair: + """Represents a different constraint between two intervals. + + The edges in the left interval must have a different orientation than + the one in the right interval. + """ + + def __init__(self, left=Interval(), right=Interval()): + self.left = left + self.right = right + + def swap(self): + """Swap left and right intervals""" + temp = self.left + self.left = self.right + self.right = temp + + def lowest(self, planarity_state): + """Returns the lowest lowpoint of a conflict pair""" + if self.left.empty(): + return planarity_state.lowpt[self.right.low] + if self.right.empty(): + return planarity_state.lowpt[self.left.low] + return min( + planarity_state.lowpt[self.left.low], planarity_state.lowpt[self.right.low] + ) + + +def top_of_stack(l): + """Returns the element on top of the stack.""" + if not l: + return None + return l[-1] + + +class LRPlanarity: + """A class to maintain the state during planarity check.""" + + __slots__ = [ + "G", + "roots", + "height", + "lowpt", + "lowpt2", + "nesting_depth", + "parent_edge", + "DG", + "adjs", + "ordered_adjs", + "ref", + "side", + "S", + "stack_bottom", + "lowpt_edge", + "left_ref", + "right_ref", + "embedding", + ] + + def __init__(self, G): + # copy G without adding self-loops + self.G = nx.Graph() + self.G.add_nodes_from(G.nodes) + for e in G.edges: + if e[0] != e[1]: + self.G.add_edge(e[0], e[1]) + + self.roots = [] + + # distance from tree root + self.height = defaultdict(lambda: None) + + self.lowpt = {} # height of lowest return point of an edge + self.lowpt2 = {} # height of second lowest return point + self.nesting_depth = {} # for nesting order + + # None -> missing edge + self.parent_edge = defaultdict(lambda: None) + + # oriented DFS graph + self.DG = nx.DiGraph() + self.DG.add_nodes_from(G.nodes) + + self.adjs = {} + self.ordered_adjs = {} + + self.ref = defaultdict(lambda: None) + self.side = defaultdict(lambda: 1) + + # stack of conflict pairs + self.S = [] + self.stack_bottom = {} + self.lowpt_edge = {} + + self.left_ref = {} + self.right_ref = {} + + self.embedding = PlanarEmbedding() + + def lr_planarity(self): + """Execute the LR planarity test. + + Returns + ------- + embedding : dict + If the graph is planar an embedding is returned. Otherwise None. 
+ """ + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # make adjacency lists for dfs + for v in self.G: + self.adjs[v] = list(self.G[v]) + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation(v) + + # Free no longer used variables + self.G = None + self.lowpt2 = None + self.adjs = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing(v): + return None + + # Free no longer used variables + self.height = None + self.lowpt = None + self.S = None + self.stack_bottom = None + self.lowpt_edge = None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # Free no longer used variables + self.DG = None + self.nesting_depth = None + self.ref = None + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding(v) + + # Free no longer used variables + self.roots = None + self.parent_edge = None + self.ordered_adjs = None + self.left_ref = None + self.right_ref = None + self.side = None + + return self.embedding + + def lr_planarity_recursive(self): + """Recursive version of :meth:`lr_planarity`.""" + if self.G.order() > 2 and self.G.size() > 3 * self.G.order() - 6: + # graph is not planar + return None + + # orientation of the graph by depth first search traversal + for v in self.G: + if self.height[v] is None: + self.height[v] = 0 + self.roots.append(v) + self.dfs_orientation_recursive(v) + + # Free no longer used variable + self.G = None + + # testing + for v in self.DG: # sort the adjacency lists by nesting depth + # note: this sorting leads to non linear time + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + for v in self.roots: + if not self.dfs_testing_recursive(v): + return None + + for e in self.DG.edges: + self.nesting_depth[e] = self.sign_recursive(e) * self.nesting_depth[e] + + self.embedding.add_nodes_from(self.DG.nodes) + for v in self.DG: + # sort the adjacency lists again + self.ordered_adjs[v] = sorted( + self.DG[v], key=lambda x: self.nesting_depth[(v, x)] + ) + # initialize the embedding + previous_node = None + for w in self.ordered_adjs[v]: + self.embedding.add_half_edge_cw(v, w, previous_node) + previous_node = w + + # compute the complete embedding + for v in self.roots: + self.dfs_embedding_recursive(v) + + return self.embedding + + def dfs_orientation(self, v): + """Orient the graph by DFS, compute lowpoints and nesting order.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + + for w in self.adjs[v][ind[v] :]: + vw = (v, w) + + if not skip_init[vw]: + if (v, w) in 
self.DG.edges or (w, v) in self.DG.edges: + ind[v] += 1 + continue # the edge was already oriented + + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[vw] = True # don't redo this block + break # handle next node in dfs_stack (i.e. w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + ind[v] += 1 + + def dfs_orientation_recursive(self, v): + """Recursive version of :meth:`dfs_orientation`.""" + e = self.parent_edge[v] + for w in self.G[v]: + if (v, w) in self.DG.edges or (w, v) in self.DG.edges: + continue # the edge was already oriented + vw = (v, w) + self.DG.add_edge(v, w) # orient the edge + + self.lowpt[vw] = self.height[v] + self.lowpt2[vw] = self.height[v] + if self.height[w] is None: # (v, w) is a tree edge + self.parent_edge[w] = vw + self.height[w] = self.height[v] + 1 + self.dfs_orientation_recursive(w) + else: # (v, w) is a back edge + self.lowpt[vw] = self.height[w] + + # determine nesting graph + self.nesting_depth[vw] = 2 * self.lowpt[vw] + if self.lowpt2[vw] < self.height[v]: # chordal + self.nesting_depth[vw] += 1 + + # update lowpoints of parent edge e + if e is not None: + if self.lowpt[vw] < self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt[e], self.lowpt2[vw]) + self.lowpt[e] = self.lowpt[vw] + elif self.lowpt[vw] > self.lowpt[e]: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt[vw]) + else: + self.lowpt2[e] = min(self.lowpt2[e], self.lowpt2[vw]) + + def dfs_testing(self, v): + """Test for LR partition.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + # boolean to indicate whether to skip the initial work for an edge + skip_init = defaultdict(lambda: False) + + while dfs_stack: + v = dfs_stack.pop() + e = self.parent_edge[v] + # to indicate whether to skip the final block after the for loop + skip_final = False + + for w in self.ordered_adjs[v][ind[v] :]: + ei = (v, w) + + if not skip_init[ei]: + self.stack_bottom[ei] = top_of_stack(self.S) + + if ei == self.parent_edge[w]: # tree edge + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + skip_init[ei] = True # don't redo this block + skip_final = True # skip final work after breaking + break # handle next node in dfs_stack (i.e. 
w) + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + ind[v] += 1 + + if not skip_final: + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + + return True + + def dfs_testing_recursive(self, v): + """Recursive version of :meth:`dfs_testing`.""" + e = self.parent_edge[v] + for w in self.ordered_adjs[v]: + ei = (v, w) + self.stack_bottom[ei] = top_of_stack(self.S) + if ei == self.parent_edge[w]: # tree edge + if not self.dfs_testing_recursive(w): + return False + else: # back edge + self.lowpt_edge[ei] = ei + self.S.append(ConflictPair(right=Interval(ei, ei))) + + # integrate new return edges + if self.lowpt[ei] < self.height[v]: + if w == self.ordered_adjs[v][0]: # e_i has return edge + self.lowpt_edge[e] = self.lowpt_edge[ei] + else: # add constraints of e_i + if not self.add_constraints(ei, e): + # graph is not planar + return False + + # remove back edges returning to parent + if e is not None: # v isn't root + self.remove_back_edges(e) + return True + + def add_constraints(self, ei, e): + P = ConflictPair() + # merge return edges of e_i into P.right + while True: + Q = self.S.pop() + if not Q.left.empty(): + Q.swap() + if not Q.left.empty(): # not planar + return False + if self.lowpt[Q.right.low] > self.lowpt[e]: + # merge intervals + if P.right.empty(): # topmost interval + P.right = Q.right.copy() + else: + self.ref[P.right.low] = Q.right.high + P.right.low = Q.right.low + else: # align + self.ref[Q.right.low] = self.lowpt_edge[e] + if top_of_stack(self.S) == self.stack_bottom[ei]: + break + # merge conflicting return edges of e_1,...,e_i-1 into P.L + while top_of_stack(self.S).left.conflicting(ei, self) or top_of_stack( + self.S + ).right.conflicting(ei, self): + Q = self.S.pop() + if Q.right.conflicting(ei, self): + Q.swap() + if Q.right.conflicting(ei, self): # not planar + return False + # merge interval below lowpt(e_i) into P.R + self.ref[P.right.low] = Q.right.high + if Q.right.low is not None: + P.right.low = Q.right.low + + if P.left.empty(): # topmost interval + P.left = Q.left.copy() + else: + self.ref[P.left.low] = Q.left.high + P.left.low = Q.left.low + + if not (P.left.empty() and P.right.empty()): + self.S.append(P) + return True + + def remove_back_edges(self, e): + u = e[0] + # trim back edges ending at parent u + # drop entire conflict pairs + while self.S and top_of_stack(self.S).lowest(self) == self.height[u]: + P = self.S.pop() + if P.left.low is not None: + self.side[P.left.low] = -1 + + if self.S: # one more conflict pair to consider + P = self.S.pop() + # trim left interval + while P.left.high is not None and P.left.high[1] == u: + P.left.high = self.ref[P.left.high] + if P.left.high is None and P.left.low is not None: + # just emptied + self.ref[P.left.low] = P.right.low + self.side[P.left.low] = -1 + P.left.low = None + # trim right interval + while P.right.high is not None and P.right.high[1] == u: + P.right.high = self.ref[P.right.high] + if P.right.high is None and P.right.low is not None: + # just emptied + self.ref[P.right.low] = P.left.low + self.side[P.right.low] = -1 + P.right.low = None + self.S.append(P) + + # side of e is side of a highest return edge + if 
self.lowpt[e] < self.height[u]: # e has return edge + hl = top_of_stack(self.S).left.high + hr = top_of_stack(self.S).right.high + + if hl is not None and (hr is None or self.lowpt[hl] > self.lowpt[hr]): + self.ref[e] = hl + else: + self.ref[e] = hr + + def dfs_embedding(self, v): + """Completes the embedding.""" + # the recursion stack + dfs_stack = [v] + # index of next edge to handle in adjacency list of each node + ind = defaultdict(lambda: 0) + + while dfs_stack: + v = dfs_stack.pop() + + for w in self.ordered_adjs[v][ind[v] :]: + ind[v] += 1 + ei = (v, w) + + if ei == self.parent_edge[w]: # tree edge + self.embedding.add_half_edge_first(w, v) + self.left_ref[v] = w + self.right_ref[v] = w + + dfs_stack.append(v) # revisit v after finishing w + dfs_stack.append(w) # visit w next + break # handle next node in dfs_stack (i.e. w) + else: # back edge + if self.side[ei] == 1: + self.embedding.add_half_edge_cw(w, v, self.right_ref[w]) + else: + self.embedding.add_half_edge_ccw(w, v, self.left_ref[w]) + self.left_ref[w] = v + + def dfs_embedding_recursive(self, v): + """Recursive version of :meth:`dfs_embedding`.""" + for w in self.ordered_adjs[v]: + ei = (v, w) + if ei == self.parent_edge[w]: # tree edge + self.embedding.add_half_edge_first(w, v) + self.left_ref[v] = w + self.right_ref[v] = w + self.dfs_embedding_recursive(w) + else: # back edge + if self.side[ei] == 1: + # place v directly after right_ref[w] in embed. list of w + self.embedding.add_half_edge_cw(w, v, self.right_ref[w]) + else: + # place v directly before left_ref[w] in embed. list of w + self.embedding.add_half_edge_ccw(w, v, self.left_ref[w]) + self.left_ref[w] = v + + def sign(self, e): + """Resolve the relative side of an edge to the absolute side.""" + # the recursion stack + dfs_stack = [e] + # dict to remember reference edges + old_ref = defaultdict(lambda: None) + + while dfs_stack: + e = dfs_stack.pop() + + if self.ref[e] is not None: + dfs_stack.append(e) # revisit e after finishing self.ref[e] + dfs_stack.append(self.ref[e]) # visit self.ref[e] next + old_ref[e] = self.ref[e] # remember value of self.ref[e] + self.ref[e] = None + else: + self.side[e] *= self.side[old_ref[e]] + + return self.side[e] + + def sign_recursive(self, e): + """Recursive version of :meth:`sign`.""" + if self.ref[e] is not None: + self.side[e] = self.side[e] * self.sign_recursive(self.ref[e]) + self.ref[e] = None + return self.side[e] + + +class PlanarEmbedding(nx.DiGraph): + """Represents a planar graph with its planar embedding. + + The planar embedding is given by a `combinatorial embedding + <https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_. + + .. note:: `check_planarity` is the preferred way to check if a graph is planar. + + **Neighbor ordering:** + + In comparison to a usual graph structure, the embedding also stores the + order of all neighbors for every vertex. + The order of the neighbors can be given in clockwise (cw) direction or + counterclockwise (ccw) direction. This order is stored as edge attributes + in the underlying directed graph. For the edge (u, v) the edge attribute + 'cw' is set to the neighbor of u that follows immediately after v in + clockwise direction. + + In order for a PlanarEmbedding to be valid it must fulfill multiple + conditions. It is possible to check if these conditions are fulfilled with + the method :meth:`check_structure`. + The conditions are: + + * Edges must go in both directions (because the edge attributes differ) + * Every edge must have a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding.
+ * A node with nonzero degree must have a node attribute 'first_nbr'. + + As long as a PlanarEmbedding is invalid only the following methods should + be called: + + * :meth:`add_half_edge_ccw` + * :meth:`add_half_edge_cw` + * :meth:`connect_components` + * :meth:`add_half_edge_first` + + Even though the graph is a subclass of nx.DiGraph, it can still be used + for algorithms that require undirected graphs, because the method + :meth:`is_directed` is overridden. This is possible, because a valid + PlanarEmbedding must have edges in both directions. + + **Half edges:** + + In methods like `add_half_edge_ccw` the term "half-edge" is used, which is + a term used in `doubly connected edge lists + <https://en.wikipedia.org/wiki/Doubly_connected_edge_list>`_. It is used + to emphasize that the edge is only in one direction and there exists + another half-edge in the opposite direction. + While conventional edges always have two faces (including outer face) next + to them, it is possible to assign each half-edge *exactly one* face. + For a half-edge (u, v) that is oriented such that u is below v, the + face that belongs to (u, v) is to the right of this half-edge. + + See Also + -------- + is_planar : + Preferred way to check if an existing graph is planar. + + check_planarity : + A convenient way to create a `PlanarEmbedding`. If not planar, + it returns a subgraph that shows this. + + Examples + -------- + + Create an embedding of a star graph (compare `nx.star_graph(3)`): + + >>> G = nx.PlanarEmbedding() + >>> G.add_half_edge_cw(0, 1, None) + >>> G.add_half_edge_cw(0, 2, 1) + >>> G.add_half_edge_cw(0, 3, 2) + >>> G.add_half_edge_cw(1, 0, None) + >>> G.add_half_edge_cw(2, 0, None) + >>> G.add_half_edge_cw(3, 0, None) + + Alternatively the same embedding can also be defined in counterclockwise + orientation. The following results in exactly the same PlanarEmbedding: + + >>> G = nx.PlanarEmbedding() + >>> G.add_half_edge_ccw(0, 1, None) + >>> G.add_half_edge_ccw(0, 3, 1) + >>> G.add_half_edge_ccw(0, 2, 3) + >>> G.add_half_edge_ccw(1, 0, None) + >>> G.add_half_edge_ccw(2, 0, None) + >>> G.add_half_edge_ccw(3, 0, None) + + After creating a graph, it is possible to validate that the PlanarEmbedding + object is correct: + + >>> G.check_structure() + + """ + + def get_data(self): + """Converts the adjacency structure into a more readable structure. + + Returns + ------- + embedding : dict + A dict mapping all nodes to a list of neighbors sorted in + clockwise order. + + See Also + -------- + set_data + + """ + embedding = {} + for v in self: + embedding[v] = list(self.neighbors_cw_order(v)) + return embedding + + def set_data(self, data): + """Inserts edges according to the given sorted neighbor list. + + The input format is the same as the output format of get_data(). + + Parameters + ---------- + data : dict + A dict mapping all nodes to a list of neighbors sorted in + clockwise order. + + See Also + -------- + get_data + + """ + for v in data: + for w in reversed(data[v]): + self.add_half_edge_first(v, w) + + def neighbors_cw_order(self, v): + """Generator for the neighbors of v in clockwise order. + + Parameters + ---------- + v : node + + Yields + ------ + node + + """ + if len(self[v]) == 0: + # v has no neighbors + return + start_node = self.nodes[v]["first_nbr"] + yield start_node + current_node = self[v][start_node]["cw"] + while start_node != current_node: + yield current_node + current_node = self[v][current_node]["cw"] + + def check_structure(self): + """Runs without exceptions if this object is valid.
+ + Checks that the following properties are fulfilled: + + * Edges go in both directions (because the edge attributes differ). + * Every edge has a 'cw' and 'ccw' attribute which corresponds to a + correct planar embedding. + * A node with a degree larger than 0 has a node attribute 'first_nbr'. + + If this method runs without raising an exception, the underlying + graph is guaranteed to be planar. + + Raises + ------ + NetworkXException + This exception is raised with a short explanation if the + PlanarEmbedding is invalid. + """ + # Check fundamental structure + for v in self: + try: + sorted_nbrs = set(self.neighbors_cw_order(v)) + except KeyError as err: + msg = f"Bad embedding. Missing orientation for a neighbor of {v}" + raise nx.NetworkXException(msg) from err + + unsorted_nbrs = set(self[v]) + if sorted_nbrs != unsorted_nbrs: + msg = "Bad embedding. Edge orientations not set correctly." + raise nx.NetworkXException(msg) + for w in self[v]: + # Check if opposite half-edge exists + if not self.has_edge(w, v): + msg = "Bad embedding. Opposite half-edge is missing." + raise nx.NetworkXException(msg) + + # Check planarity + counted_half_edges = set() + for component in nx.connected_components(self): + if len(component) == 1: + # Don't need to check single node component + continue + num_nodes = len(component) + num_half_edges = 0 + num_faces = 0 + for v in component: + for w in self.neighbors_cw_order(v): + num_half_edges += 1 + if (v, w) not in counted_half_edges: + # We encountered a new face + num_faces += 1 + # Mark all half-edges belonging to this face + self.traverse_face(v, w, counted_half_edges) + num_edges = num_half_edges // 2 # num_half_edges is even + if num_nodes - num_edges + num_faces != 2: + # The result does not match Euler's formula + msg = "Bad embedding. The graph does not match Euler's formula" + raise nx.NetworkXException(msg) + + def add_half_edge_ccw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added counterclockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist. + + See Also + -------- + add_half_edge_cw + connect_components + add_half_edge_first + + """ + if reference_neighbor is None: + # The start node has no neighbors + self.add_edge(start_node, end_node) # Add edge to graph + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + else: + ccw_reference = self[start_node][reference_neighbor]["ccw"] + self.add_half_edge_cw(start_node, end_node, ccw_reference) + + if reference_neighbor == self.nodes[start_node].get("first_nbr", None): + # Update first neighbor + self.nodes[start_node]["first_nbr"] = end_node + + def add_half_edge_cw(self, start_node, end_node, reference_neighbor): + """Adds a half-edge from start_node to end_node. + + The half-edge is added clockwise next to the existing half-edge + (start_node, reference_neighbor). + + Parameters + ---------- + start_node : node + Start node of inserted edge. + end_node : node + End node of inserted edge. + reference_neighbor: node + End node of reference edge. + + Raises + ------ + NetworkXException + If the reference_neighbor does not exist.
+ + See Also + -------- + add_half_edge_ccw + connect_components + add_half_edge_first + """ + self.add_edge(start_node, end_node) # Add edge to graph + + if reference_neighbor is None: + # The start node has no neighbors + self[start_node][end_node]["cw"] = end_node + self[start_node][end_node]["ccw"] = end_node + self.nodes[start_node]["first_nbr"] = end_node + return + + if reference_neighbor not in self[start_node]: + raise nx.NetworkXException( + "Cannot add edge. Reference neighbor does not exist" + ) + + # Get half-edge at the other side + cw_reference = self[start_node][reference_neighbor]["cw"] + # Alter half-edge data structures + self[start_node][reference_neighbor]["cw"] = end_node + self[start_node][end_node]["cw"] = cw_reference + self[start_node][cw_reference]["ccw"] = end_node + self[start_node][end_node]["ccw"] = reference_neighbor + + def connect_components(self, v, w): + """Adds half-edges for (v, w) and (w, v) at some position. + + This method should only be called if v and w are in different + components, or it might break the embedding. + This especially means that if `connect_components(v, w)` + is called it is not allowed to call `connect_components(w, v)` + afterwards. The neighbor orientations in both directions are + all set correctly after the first call. + + Parameters + ---------- + v : node + w : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + add_half_edge_first + """ + self.add_half_edge_first(v, w) + self.add_half_edge_first(w, v) + + def add_half_edge_first(self, start_node, end_node): + """The added half-edge is inserted at the first position in the order. + + Parameters + ---------- + start_node : node + end_node : node + + See Also + -------- + add_half_edge_ccw + add_half_edge_cw + connect_components + """ + if start_node in self and "first_nbr" in self.nodes[start_node]: + reference = self.nodes[start_node]["first_nbr"] + else: + reference = None + self.add_half_edge_ccw(start_node, end_node, reference) + + def next_face_half_edge(self, v, w): + """Returns the following half-edge left of a face. + + Parameters + ---------- + v : node + w : node + + Returns + ------- + half-edge : tuple + """ + new_node = self[w][v]["ccw"] + return w, new_node + + def traverse_face(self, v, w, mark_half_edges=None): + """Returns nodes on the face that belong to the half-edge (v, w). + + The face that is traversed lies to the right of the half-edge (in an + orientation where v is below w). + + Optionally it is possible to pass a set to which all encountered half + edges are added. Before calling this method, this set must not include + any half-edges that belong to the face. + + Parameters + ---------- + v : node + Start node of half-edge. + w : node + End node of half-edge. + mark_half_edges: set, optional + Set to which all encountered half-edges are added. + + Returns + ------- + face : list + A list of nodes that lie on this face. + """ + if mark_half_edges is None: + mark_half_edges = set() + + face_nodes = [v] + mark_half_edges.add((v, w)) + prev_node = v + cur_node = w + # Last half-edge is (incoming_node, v) + incoming_node = self[v][w]["cw"] + + while cur_node != v or prev_node != incoming_node: + face_nodes.append(cur_node) + prev_node, cur_node = self.next_face_half_edge(prev_node, cur_node) + if (prev_node, cur_node) in mark_half_edges: + raise nx.NetworkXException("Bad planar embedding. 
Impossible face.") + mark_half_edges.add((prev_node, cur_node)) + + return face_nodes + + def is_directed(self): + """A valid PlanarEmbedding is undirected. + + All reverse edges are contained, i.e. for every existing + half-edge (v, w) the half-edge in the opposite direction (w, v) is also + contained. + """ + return False diff --git a/MLPY/Lib/site-packages/networkx/algorithms/polynomials.py b/MLPY/Lib/site-packages/networkx/algorithms/polynomials.py new file mode 100644 index 0000000000000000000000000000000000000000..57ecf0d09a976df4cc909d1ac9f0fe8780f66f32 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/polynomials.py @@ -0,0 +1,305 @@ +"""Provides algorithms supporting the computation of graph polynomials. + +Graph polynomials are polynomial-valued graph invariants that encode a wide +variety of structural information. Examples include the Tutte polynomial, +chromatic polynomial, characteristic polynomial, and matching polynomial. An +extensive treatment is provided in [1]_. + +For a simple example, the `~sympy.matrices.matrices.MatrixDeterminant.charpoly` +method can be used to compute the characteristic polynomial from the adjacency +matrix of a graph. Consider the complete graph ``K_4``: + +>>> import sympy +>>> x = sympy.Symbol("x") +>>> G = nx.complete_graph(4) +>>> A = nx.adjacency_matrix(G) +>>> M = sympy.SparseMatrix(A.todense()) +>>> M.charpoly(x).as_expr() +x**4 - 6*x**2 - 8*x - 3 + + +.. [1] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials" +""" +from collections import deque + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["tutte_polynomial", "chromatic_polynomial"] + + +@not_implemented_for("directed") +@nx._dispatch +def tutte_polynomial(G): + r"""Returns the Tutte polynomial of `G` + + This function computes the Tutte polynomial via an iterative version of + the deletion-contraction algorithm. + + The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in + two variables. It encodes a wide array of information related to the + edge-connectivity of a graph; "Many problems about graphs can be reduced to + problems of finding and evaluating the Tutte polynomial at certain values" [1]_. + In fact, every deletion-contraction-expressible feature of a graph is a + specialization of the Tutte polynomial [2]_ (see Notes for examples). + + There are several equivalent definitions; here are three: + + Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the + number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of + `G`, and `c(A)` the number of connected components of the graph with vertex + set `V` and edge set `A` [3]_: + + .. math:: + + T_G(x, y) = \sum_{A \in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)} + + Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning + tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict + linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of + $E \setminus T \cup {e}$. An edge `e` is internally active with respect to + `T` and `L` if `e` is the least edge in `B_e` according to the linear order + `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges + in $E \setminus T$ that are internally active with respect to `T` and `L`. + Let `P_e` be the unique path in $T \cup {e}$ whose source and target vertex + are the same. An edge `e` is externally active with respect to `T` and `L` + if `e` is the least edge in `P_e` according to the linear order `L`. 
The + external activity of `T` (denoted `e(T)`) is the number of edges in + $E \setminus T$ that are externally active with respect to `T` and `L`. + Then [4]_ [5]_: + + .. math:: + + T_G(x, y) = \sum_{T \text{ a spanning tree of } G} x^{i(T)} y^{e(T)} + + Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e` + the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained + from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`, + and `l(G)` the number of self-loops of `G`: + + .. math:: + T_G(x, y) = \begin{cases} + x^{k(G)} y^{l(G)}, & \text{if all edges are cut-edges or self-loops} \\ + T_{G-e}(x, y) + T_{G/e}(x, y), & \text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop} + \end{cases} + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + instance of `sympy.core.add.Add` + A Sympy expression representing the Tutte polynomial for `G`. + + Examples + -------- + >>> C = nx.cycle_graph(5) + >>> nx.tutte_polynomial(C) + x**4 + x**3 + x**2 + x + y + + >>> D = nx.diamond_graph() + >>> nx.tutte_polynomial(D) + x**3 + 2*x**2 + 2*x*y + x + y**2 + y + + Notes + ----- + Some specializations of the Tutte polynomial: + + - `T_G(1, 1)` counts the number of spanning trees of `G` + - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G` + - `T_G(2, 1)` counts the number of spanning forests in `G` + - `T_G(0, 2)` counts the number of strong orientations of `G` + - `T_G(2, 0)` counts the number of acyclic orientations of `G` + + Edge contraction is defined and deletion-contraction is introduced in [6]_. + Combinatorial meaning of the coefficients is introduced in [7]_. + Universality, properties, and applications are discussed in [8]_. + + Practically, up-front computation of the Tutte polynomial may be useful when + users wish to repeatedly calculate edge-connectivity-related information + about one or more graphs. + + References + ---------- + .. [1] M. Brandt, + "The Tutte Polynomial." + Talking About Combinatorial Objects Seminar, 2015 + https://math.berkeley.edu/~brandtm/talks/tutte.pdf + .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto, + "Computing the Tutte polynomial in vertex-exponential time" + 49th Annual IEEE Symposium on Foundations of Computer Science, 2008 + https://ieeexplore.ieee.org/abstract/document/4691000 + .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials," p. 14 + .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman, + "Graph Polynomials," p. 46 + .. [5] A. Nešetril, J. Goodall, + "Graph invariants, homomorphisms, and the Tutte polynomial" + https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf + .. [6] D. B. West, + "Introduction to Graph Theory," p. 84 + .. [7] G. Coutinho, + "A brief introduction to the Tutte polynomial" + Structural Analysis of Complex Networks, 2011 + https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf + .. [8] J. A. Ellis-Monaghan, C. 
Merino, + "Graph polynomials and their applications I: The Tutte polynomial" + Structural Analysis of Complex Networks, 2011 + https://arxiv.org/pdf/0803.3079.pdf + """ + import sympy + + x = sympy.Symbol("x") + y = sympy.Symbol("y") + stack = deque() + stack.append(nx.MultiGraph(G)) + + polynomial = 0 + while stack: + G = stack.pop() + bridges = set(nx.bridges(G)) + + e = None + for i in G.edges: + if (i[0], i[1]) not in bridges and i[0] != i[1]: + e = i + break + if not e: + loops = list(nx.selfloop_edges(G, keys=True)) + polynomial += x ** len(bridges) * y ** len(loops) + else: + # deletion-contraction + C = nx.contracted_edge(G, e, self_loops=True) + C.remove_edge(e[0], e[0]) + G.remove_edge(*e) + stack.append(G) + stack.append(C) + return sympy.simplify(polynomial) + + +@not_implemented_for("directed") +@nx._dispatch +def chromatic_polynomial(G): + r"""Returns the chromatic polynomial of `G` + + This function computes the chromatic polynomial via an iterative version of + the deletion-contraction algorithm. + + The chromatic polynomial `X_G(x)` is a fundamental graph polynomial + invariant in one variable. Evaluating `X_G(k)` for a natural number `k` + enumerates the proper k-colorings of `G`. + + There are several equivalent definitions; here are three: + + Def 1 (explicit formula): + For `G` an undirected graph, `c(G)` the number of connected components of + `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with + edge set `S` [1]_: + + .. math:: + + X_G(x) = \sum_{S \subseteq E} (-1)^{|S|} x^{c(G(S))} + + + Def 2 (interpolating polynomial): + For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`, + and `k_i` the number of distinct ways to color the vertices of `G` with `i` + unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the + unique Lagrange interpolating polynomial of degree `n(G)` through the points + `(0, k_0), (1, k_1), \dots, (n(G), k_{n(G)})` [2]_. + + + Def 3 (chromatic recurrence): + For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting + edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)` + the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_: + + .. math:: + X_G(x) = \begin{cases} + x^{n(G)}, & \text{if $e(G)=0$} \\ + X_{G-e}(x) - X_{G/e}(x), & \text{otherwise, for an arbitrary edge $e$} + \end{cases} + + This formulation is also known as the Fundamental Reduction Theorem [4]_. + + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + instance of `sympy.core.add.Add` + A Sympy expression representing the chromatic polynomial for `G`. + + Examples + -------- + >>> C = nx.cycle_graph(5) + >>> nx.chromatic_polynomial(C) + x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x + + >>> G = nx.complete_graph(4) + >>> nx.chromatic_polynomial(G) + x**4 - 6*x**3 + 11*x**2 - 6*x + + Notes + ----- + Interpretation of the coefficients is discussed in [5]_. Several special + cases are listed in [2]_. + + The chromatic polynomial is a specialization of the Tutte polynomial; in + particular, ``X_G(x) = (-1)^{n(G) - c(G)} x^{c(G)} T_G(1 - x, 0)`` [6]_. + + The chromatic polynomial may take negative arguments, though evaluations + may not have chromatic interpretations. For instance, ``X_G(-1)`` enumerates + the acyclic orientations of `G` up to sign [7]_. + + References + ---------- + .. [1] D. B. West, + "Introduction to Graph Theory," p. 222 + .. [2] E. W. Weisstein + "Chromatic Polynomial" + MathWorld--A Wolfram Web Resource + https://mathworld.wolfram.com/ChromaticPolynomial.html + .. [3] D.
B. West, + "Introduction to Graph Theory," p. 221 + .. [4] J. Zhang, J. Goodall, + "An Introduction to Chromatic Polynomials" + https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf + .. [5] R. C. Read, + "An Introduction to Chromatic Polynomials" + Journal of Combinatorial Theory, 1968 + https://math.berkeley.edu/~mrklug/ReadChromatic.pdf + .. [6] W. T. Tutte, + "Graph-polynomials" + Advances in Applied Mathematics, 2004 + https://www.sciencedirect.com/science/article/pii/S0196885803000411 + .. [7] R. P. Stanley, + "Acyclic orientations of graphs" + Discrete Mathematics, 2006 + https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf + """ + import sympy + + x = sympy.Symbol("x") + stack = deque() + stack.append(nx.MultiGraph(G, contraction_idx=0)) + + polynomial = 0 + while stack: + G = stack.pop() + edges = list(G.edges) + if not edges: + polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G) + else: + e = edges[0] + C = nx.contracted_edge(G, e, self_loops=True) + C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1 + C.remove_edge(e[0], e[0]) + G.remove_edge(*e) + stack.append(G) + stack.append(C) + return polynomial diff --git a/MLPY/Lib/site-packages/networkx/algorithms/reciprocity.py b/MLPY/Lib/site-packages/networkx/algorithms/reciprocity.py new file mode 100644 index 0000000000000000000000000000000000000000..cb36ae9d55127ba69f7cc9e163c69004620d8722 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/reciprocity.py @@ -0,0 +1,97 @@ +"""Algorithms to calculate reciprocity in a directed graph.""" +import networkx as nx +from networkx import NetworkXError + +from ..utils import not_implemented_for + +__all__ = ["reciprocity", "overall_reciprocity"] + + +@not_implemented_for("undirected", "multigraph") +@nx._dispatch +def reciprocity(G, nodes=None): + r"""Compute the reciprocity in a directed graph. + + The reciprocity of a directed graph is defined as the ratio + of the number of edges pointing in both directions to the total + number of edges in the graph. + Formally, $r = |\{(u,v) \in G \mid (v,u) \in G\}| / |\{(u,v) \in G\}|$. + + The reciprocity of a single node u is defined similarly: it is the + ratio of the number of edges in both directions to + the total number of edges attached to node u. + + Parameters + ---------- + G : graph + A networkx directed graph + nodes : container of nodes, optional (default=whole graph) + Compute reciprocity for nodes in this container. + + Returns + ------- + out : dictionary + Reciprocity keyed by node label. + + Notes + ----- + The reciprocity is not defined for isolated nodes. + In such cases this function will return None. + + """ + # If `nodes` is not specified, calculate the reciprocity of the graph. + if nodes is None: + return overall_reciprocity(G) + + # If `nodes` represents a single node in the graph, return only its + # reciprocity. + if nodes in G: + reciprocity = next(_reciprocity_iter(G, nodes))[1] + if reciprocity is None: + raise NetworkXError("Not defined for isolated nodes.") + else: + return reciprocity + + # Otherwise, `nodes` represents an iterable of nodes, so return a + # dictionary mapping node to its reciprocity. + return dict(_reciprocity_iter(G, nodes)) + + +def _reciprocity_iter(G, nodes): + """Return an iterator of (node, reciprocity).""" + n = G.nbunch_iter(nodes) + for node in n: + pred = set(G.predecessors(node)) + succ = set(G.successors(node)) + overlap = pred & succ + n_total = len(pred) + len(succ) + + # Reciprocity is not defined for isolated nodes. + # Return None.
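+ # Illustrative worked example (editor's note, not part of the library + # source): with edges u->v, v->u and u->w, node u has pred = {v}, + # succ = {v, w} and overlap = {v}, so its reciprocity is 2 * 1 / 3; + # an isolated node falls through to the None branch below.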
+ if n_total == 0: + yield (node, None) + else: + reciprocity = 2 * len(overlap) / n_total + yield (node, reciprocity) + + +@not_implemented_for("undirected", "multigraph") +@nx._dispatch +def overall_reciprocity(G): + """Compute the reciprocity for the whole graph. + + See the doc of reciprocity for the definition. + + Parameters + ---------- + G : graph + A networkx directed graph + + """ + n_all_edge = G.number_of_edges() + n_overlap_edge = (n_all_edge - G.to_undirected().number_of_edges()) * 2 + + if n_all_edge == 0: + raise NetworkXError("Not defined for empty graphs") + + return n_overlap_edge / n_all_edge diff --git a/MLPY/Lib/site-packages/networkx/algorithms/regular.py b/MLPY/Lib/site-packages/networkx/algorithms/regular.py new file mode 100644 index 0000000000000000000000000000000000000000..f9397fab2fd97c347204f11f158ecc7fd7f4be63 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/regular.py @@ -0,0 +1,212 @@ +"""Functions for computing and verifying regular graphs.""" +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["is_regular", "is_k_regular", "k_factor"] + + +@nx._dispatch +def is_regular(G): + """Determines whether the graph ``G`` is a regular graph. + + A regular graph is a graph where each vertex has the same degree. A + regular digraph is a graph where the indegree and outdegree of each + vertex are equal. + + Parameters + ---------- + G : NetworkX graph + + Returns + ------- + bool + Whether the given graph or digraph is regular. + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> nx.is_regular(G) + True + + """ + n1 = nx.utils.arbitrary_element(G) + if not G.is_directed(): + d1 = G.degree(n1) + return all(d1 == d for _, d in G.degree) + else: + d_in = G.in_degree(n1) + in_regular = all(d_in == d for _, d in G.in_degree) + d_out = G.out_degree(n1) + out_regular = all(d_out == d for _, d in G.out_degree) + return in_regular and out_regular + + +@not_implemented_for("directed") +@nx._dispatch +def is_k_regular(G, k): + """Determines whether the graph ``G`` is a k-regular graph. + + A k-regular graph is a graph where each vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + + k : int + The degree to check for. + + Returns + ------- + bool + Whether the given graph is k-regular. + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> nx.is_k_regular(G, k=3) + False + + """ + return all(d == k for n, d in G.degree) + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="matching_weight") +def k_factor(G, k, matching_weight="weight"): + """Compute a k-factor of G + + A k-factor of a graph is a spanning k-regular subgraph. + A spanning k-regular subgraph of G is a subgraph that contains + each vertex of G and a subset of the edges of G such that each + vertex has degree k. + + Parameters + ---------- + G : NetworkX graph + Undirected graph + + k : int + The degree of each vertex in the returned k-factor. + + matching_weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + Used for finding the max-weighted perfect matching. + If key not found, uses 1 as weight. + + Returns + ------- + G2 : NetworkX graph + A k-factor of G + + Examples + -------- + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4), (4, 1)]) + >>> G2 = nx.k_factor(G, k=1) + >>> G2.edges() + EdgeView([(1, 2), (3, 4)]) + + References + ---------- + .. [1] "An algorithm for computing simple k-factors.", + Meijer, Henk, Yurai Núñez-Rodríguez, and David Rappaport, + Information processing letters, 2009.
+ """ + + from networkx.algorithms.matching import is_perfect_matching, max_weight_matching + + class LargeKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.g = g + self.k = k + self.degree = degree + + self.outer_vertices = [(node, x) for x in range(degree)] + self.core_vertices = [(node, x + degree) for x in range(degree - k)] + + def replace_node(self): + adj_view = self.g[self.original] + neighbors = list(adj_view.keys()) + edge_attrs = list(adj_view.values()) + for outer, neighbor, edge_attrs in zip( + self.outer_vertices, neighbors, edge_attrs + ): + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for outer in self.outer_vertices: + self.g.add_edge(core, outer) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in list(adj_view.items()): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + g.remove_nodes_from(self.outer_vertices) + g.remove_nodes_from(self.core_vertices) + + class SmallKGadget: + def __init__(self, k, degree, node, g): + self.original = node + self.k = k + self.degree = degree + self.g = g + + self.outer_vertices = [(node, x) for x in range(degree)] + self.inner_vertices = [(node, x + degree) for x in range(degree)] + self.core_vertices = [(node, x + 2 * degree) for x in range(k)] + + def replace_node(self): + adj_view = self.g[self.original] + for outer, inner, (neighbor, edge_attrs) in zip( + self.outer_vertices, self.inner_vertices, list(adj_view.items()) + ): + self.g.add_edge(outer, inner) + self.g.add_edge(outer, neighbor, **edge_attrs) + for core in self.core_vertices: + for inner in self.inner_vertices: + self.g.add_edge(core, inner) + self.g.remove_node(self.original) + + def restore_node(self): + self.g.add_node(self.original) + for outer in self.outer_vertices: + adj_view = self.g[outer] + for neighbor, edge_attrs in adj_view.items(): + if neighbor not in self.core_vertices: + self.g.add_edge(self.original, neighbor, **edge_attrs) + break + self.g.remove_nodes_from(self.outer_vertices) + self.g.remove_nodes_from(self.inner_vertices) + self.g.remove_nodes_from(self.core_vertices) + + # Step 1 + if any(d < k for _, d in G.degree): + raise nx.NetworkXUnfeasible("Graph contains a vertex with degree less than k") + g = G.copy() + + # Step 2 + gadgets = [] + for node, degree in list(g.degree): + if k < degree / 2.0: + gadget = SmallKGadget(k, degree, node, g) + else: + gadget = LargeKGadget(k, degree, node, g) + gadget.replace_node() + gadgets.append(gadget) + + # Step 3 + matching = max_weight_matching(g, maxcardinality=True, weight=matching_weight) + + # Step 4 + if not is_perfect_matching(g, matching): + raise nx.NetworkXUnfeasible( + "Cannot find k-factor because no perfect matching exists" + ) + + for edge in g.edges(): + if edge not in matching and (edge[1], edge[0]) not in matching: + g.remove_edge(edge[0], edge[1]) + + for gadget in gadgets: + gadget.restore_node() + + return g diff --git a/MLPY/Lib/site-packages/networkx/algorithms/richclub.py b/MLPY/Lib/site-packages/networkx/algorithms/richclub.py new file mode 100644 index 0000000000000000000000000000000000000000..e9980f0347b144d4d3b3386e15ab89305aa3ea5c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/richclub.py @@ -0,0 +1,121 @@ +"""Functions for computing rich-club coefficients.""" + +from itertools import accumulate + +import 
networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["rich_club_coefficient"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@nx._dispatch +def rich_club_coefficient(G, normalized=True, Q=100, seed=None): + r"""Returns the rich-club coefficient of the graph `G`. + + For each degree *k*, the *rich-club coefficient* is the ratio of the + number of actual to the number of potential edges for nodes with + degree greater than *k*: + + .. math:: + + \phi(k) = \frac{2 E_k}{N_k (N_k - 1)} + + where `N_k` is the number of nodes with degree larger than *k*, and + `E_k` is the number of edges among those nodes. + + Parameters + ---------- + G : NetworkX graph + Undirected graph with neither parallel edges nor self-loops. + normalized : bool (optional) + Normalize using randomized network as in [1]_ + Q : float (optional, default=100) + If `normalized` is True, perform `Q * m` double-edge + swaps, where `m` is the number of edges in `G`, to use as a + null-model for normalization. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + rc : dictionary + A dictionary, keyed by degree, with rich-club coefficient values. + + Examples + -------- + >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + >>> rc = nx.rich_club_coefficient(G, normalized=False, seed=42) + >>> rc[0] + 0.4 + + Notes + ----- + The rich club definition and algorithm are found in [1]_. This + algorithm ignores any edge weights and is not defined for directed + graphs or graphs with parallel edges or self loops. + + Estimates for appropriate values of `Q` are found in [2]_. + + References + ---------- + .. [1] Julian J. McAuley, Luciano da Fontoura Costa, + and Tibério S. Caetano, + "The rich-club phenomenon across complex network hierarchies", + Applied Physics Letters Vol 91 Issue 8, August 2007. + https://arxiv.org/abs/physics/0701290 + .. [2] R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon, + "Uniform generation of random graphs with arbitrary degree + sequences", 2006. https://arxiv.org/abs/cond-mat/0312028 + """ + if nx.number_of_selfloops(G) > 0: + raise Exception( + "rich_club_coefficient is not implemented for " "graphs with self loops." + ) + rc = _compute_rc(G) + if normalized: + # make R a copy of G, randomize with Q*|E| double edge swaps + # and use rich_club coefficient of R to normalize + R = G.copy() + E = R.number_of_edges() + nx.double_edge_swap(R, Q * E, max_tries=Q * E * 10, seed=seed) + rcran = _compute_rc(R) + rc = {k: v / rcran[k] for k, v in rc.items()} + return rc + + +def _compute_rc(G): + """Returns the rich-club coefficient for each degree in the graph + `G`. + + `G` is an undirected graph without multiedges. + + Returns a dictionary mapping degree to rich-club coefficient for + that degree. + + """ + deghist = nx.degree_histogram(G) + total = sum(deghist) + # Compute the number of nodes with degree greater than `k`, for each + # degree `k` (omitting the last entry, which is zero). + nks = (total - cs for cs in accumulate(deghist) if total - cs > 1) + # Create a sorted list of pairs of edge endpoint degrees. + # + # The list is sorted in reverse order so that we can pop from the + # right side of the list later, instead of popping from the left + # side of the list, which would have a linear time cost. 
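+ # Illustrative example (editor's note, not part of the library source): + # for the path graph 0-1-2, the edges (0, 1) and (1, 2) have endpoint + # degrees (1, 2) and (2, 1); each inner sort yields [1, 2], and the + # reverse sort keeps the smallest pairs at the right end of the list, + # where pop() removes them first.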
+ edge_degrees = sorted((sorted(map(G.degree, e)) for e in G.edges()), reverse=True) + ek = G.number_of_edges() + k1, k2 = edge_degrees.pop() + rc = {} + for d, nk in enumerate(nks): + while k1 <= d: + if len(edge_degrees) == 0: + ek = 0 + break + k1, k2 = edge_degrees.pop() + ek -= 1 + rc[d] = 2 * ek / (nk * (nk - 1)) + return rc diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb0d91cecc902f6390cb8309c017cb1558f7753f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__init__.py @@ -0,0 +1,5 @@ +from networkx.algorithms.shortest_paths.generic import * +from networkx.algorithms.shortest_paths.unweighted import * +from networkx.algorithms.shortest_paths.weighted import * +from networkx.algorithms.shortest_paths.astar import * +from networkx.algorithms.shortest_paths.dense import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..885f8e7f0c89c2edbbe94ae3a416e3c6b3047285 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a0c86934ad13aefa9b37b52efcbb0292f1489bd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/astar.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78282cd18145eb9cd3e77eb42dc91ded977856d3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/dense.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e00292ea8a243d7c2438cd7e6d711de03ab4f77b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/generic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52138136da04977aba0be4690c26deea79d9955b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7d116ec35b23662676a940383fd7a09dea09fc1 Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/astar.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/astar.py new file mode 100644 index 0000000000000000000000000000000000000000..8f17a0c2a74b36f23baaac9310d14595dc353509 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/astar.py @@ -0,0 +1,214 @@ +"""Shortest paths and path lengths using the A* ("A star") algorithm. +""" +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function + +__all__ = ["astar_path", "astar_path_length"] + + +@nx._dispatch(edge_attrs="weight", preserve_node_attrs="heuristic") +def astar_path(G, source, target, heuristic=None, weight="weight"): + """Returns a list of nodes in a shortest path between source and target + using the A* ("A-star") algorithm. + + There may be more than one shortest path. This returns only one. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + heuristic : function + A function to evaluate the estimate of the distance + from a node to the target. The function takes + two node arguments and must return a number. + If the heuristic is inadmissible (if it might + overestimate the cost of reaching the goal from a node), + the result may not be a shortest path. + The algorithm does not support updating heuristic + values for the same node due to caching the first + heuristic calculation per node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.astar_path(G, 0, 4)) + [0, 1, 2, 3, 4] + >>> G = nx.grid_graph(dim=[3, 3])  # nodes are two-tuples (x,y) + >>> nx.set_edge_attributes(G, {e: e[1][0] * 2 for e in G.edges()}, "cost") + >>> def dist(a, b): + ...     (x1, y1) = a + ...     (x2, y2) = b + ...     return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 + >>> print(nx.astar_path(G, (0, 0), (2, 2), heuristic=dist, weight="cost")) + [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path.
+ + See Also + -------- + shortest_path, dijkstra_path + + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + if heuristic is None: + # The default heuristic is h=0, the same as Dijkstra's algorithm + def heuristic(u, v): + return 0 + + push = heappush + pop = heappop + weight = _weight_function(G, weight) + + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + + # The queue stores priority, node, cost to reach, and parent. + # Uses Python heapq to keep in priority order. + # Add a counter to the queue to prevent the underlying heap from + # attempting to compare the nodes themselves. The counter breaks ties in the + # priority and is guaranteed unique for all nodes in the graph. + c = count() + queue = [(0, next(c), source, 0, None)] + + # Maps enqueued nodes to distance of discovered paths and the + # computed heuristics to target. We avoid computing the heuristics + # more than once and inserting the node into the queue too many times. + enqueued = {} + # Maps explored nodes to parent closest to the source. + explored = {} + + while queue: + # Pop the smallest item from queue. + _, __, curnode, dist, parent = pop(queue) + + if curnode == target: + path = [curnode] + node = parent + while node is not None: + path.append(node) + node = explored[node] + path.reverse() + return path + + if curnode in explored: + # Do not override the parent of starting node + if explored[curnode] is None: + continue + + # Skip bad paths that were enqueued before finding a better one + qcost, h = enqueued[curnode] + if qcost < dist: + continue + + explored[curnode] = parent + + for neighbor, w in G_succ[curnode].items(): + cost = weight(curnode, neighbor, w) + if cost is None: + continue + ncost = dist + cost + if neighbor in enqueued: + qcost, h = enqueued[neighbor] + # if qcost <= ncost, a less costly path from the + # neighbor to the source was already determined. + # Therefore, we won't attempt to push this neighbor + # to the queue + if qcost <= ncost: + continue + else: + h = heuristic(neighbor, target) + enqueued[neighbor] = ncost, h + push(queue, (ncost + h, next(c), neighbor, ncost, curnode)) + + raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") + + +@nx._dispatch(edge_attrs="weight", preserve_node_attrs="heuristic") +def astar_path_length(G, source, target, heuristic=None, weight="weight"): + """Returns the length of the shortest path between source and target using + the A* ("A-star") algorithm. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + heuristic : function + A function to evaluate the estimate of the distance + from a node to the target. The function takes + two node arguments and must return a number. + If the heuristic is inadmissible (if it might + overestimate the cost of reaching the goal from a node), + the result may not be a shortest path. + The algorithm does not support updating heuristic + values for the same node due to caching the first + heuristic calculation per node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one.
+ If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + See Also + -------- + astar_path + + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + weight = _weight_function(G, weight) + path = astar_path(G, source, target, heuristic, weight) + return sum(weight(u, v, G[u][v]) for u, v in zip(path[:-1], path[1:])) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/dense.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/dense.py new file mode 100644 index 0000000000000000000000000000000000000000..08339b189dd68b5c7ae93af9308a4465558fc040 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/dense.py @@ -0,0 +1,256 @@ +"""Floyd-Warshall algorithm for shortest paths. +""" +import networkx as nx + +__all__ = [ + "floyd_warshall", + "floyd_warshall_predecessor_and_distance", + "reconstruct_path", + "floyd_warshall_numpy", +] + + +@nx._dispatch(edge_attrs="weight") +def floyd_warshall_numpy(G, nodelist=None, weight="weight"): + """Find all-pairs shortest path lengths using Floyd's algorithm. + + This algorithm for finding shortest paths takes advantage of + matrix representations of a graph and works well for dense + graphs where all-pairs shortest path lengths are desired. + The results are returned as a NumPy array, distance[i, j], + where i and j are the indexes of two nodes in nodelist. + The entry distance[i, j] is the distance along a shortest + path from i to j. If no path exists the distance is Inf. + + Parameters + ---------- + G : NetworkX graph + + nodelist : list, optional (default=G.nodes) + The rows and columns are ordered by the nodes in nodelist. + If nodelist is None then the ordering is produced by G.nodes. + Nodelist should include all nodes in G. + + weight: string, optional (default='weight') + Edge data key corresponding to the edge weight. + + Returns + ------- + distance : 2D numpy.ndarray + A numpy array of shortest path distances between nodes. + If there is no path between two nodes the value is Inf. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from([(0, 1, 5), (1, 2, 2), (2, 3, -3), (1, 3, 10), (3, 2, 8)]) + >>> nx.floyd_warshall_numpy(G) + array([[ 0., 5., 7., 4.], + [inf, 0., 2., -1.], + [inf, inf, 0., -3.], + [inf, inf, 8., 0.]]) + + Notes + ----- + Floyd's algorithm is appropriate for finding shortest paths in + dense graphs or graphs with negative weights when Dijkstra's + algorithm fails. This algorithm can still fail if there are negative + cycles. It has running time $O(n^3)$ with running space of $O(n^2)$. + + Raises + ------ + NetworkXError + If nodelist is not a list of the nodes in G. + """ + import numpy as np + + if nodelist is not None: + if not (len(nodelist) == len(G) == len(set(nodelist))): + raise nx.NetworkXError( + "nodelist must contain every node in G with no repeats. " + "If you wanted a subgraph of G use G.subgraph(nodelist)" + ) + + # To handle cases when an edge has weight=0, we must make sure that + # nonedges are not given the value 0 as well.
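+ # (Editor's note: in the call below, multigraph_weight=min collapses + # parallel edges to their lightest weight, and nonedge=np.inf keeps + # genuine non-edges distinct from zero-weight edges.)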
+ A = nx.to_numpy_array( + G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf + ) + n, m = A.shape + np.fill_diagonal(A, 0) # diagonal elements should be zero + for i in range(n): + # The second term has the same shape as A due to broadcasting + A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis]) + return A + + +@nx._dispatch(edge_attrs="weight") +def floyd_warshall_predecessor_and_distance(G, weight="weight"): + """Find all-pairs shortest path lengths using Floyd's algorithm. + + Parameters + ---------- + G : NetworkX graph + + weight: string, optional (default= 'weight') + Edge data key corresponding to the edge weight. + + Returns + ------- + predecessor, distance : dictionaries + Dictionaries, keyed by source and target, of predecessors and distances + in the shortest path. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_weighted_edges_from( + ... [ + ... ("s", "u", 10), + ... ("s", "x", 5), + ... ("u", "v", 1), + ... ("u", "x", 2), + ... ("v", "y", 1), + ... ("x", "u", 3), + ... ("x", "v", 5), + ... ("x", "y", 2), + ... ("y", "s", 7), + ... ("y", "v", 6), + ... ] + ... ) + >>> predecessors, _ = nx.floyd_warshall_predecessor_and_distance(G) + >>> print(nx.reconstruct_path("s", "v", predecessors)) + ['s', 'x', 'u', 'v'] + + Notes + ----- + Floyd's algorithm is appropriate for finding shortest paths + in dense graphs or graphs with negative weights when Dijkstra's algorithm + fails. This algorithm can still fail if there are negative cycles. + It has running time $O(n^3)$ with running space of $O(n^2)$. + + See Also + -------- + floyd_warshall + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + """ + from collections import defaultdict + + # dictionary-of-dictionaries representation for dist and pred + # use some defaultdict magic here + # for dist the default is the floating point inf value + dist = defaultdict(lambda: defaultdict(lambda: float("inf"))) + for u in G: + dist[u][u] = 0 + pred = defaultdict(dict) + # initialize path distance dictionary to be the adjacency matrix + # also set the distance to self to 0 (zero diagonal) + undirected = not G.is_directed() + for u, v, d in G.edges(data=True): + e_weight = d.get(weight, 1.0) + dist[u][v] = min(e_weight, dist[u][v]) + pred[u][v] = u + if undirected: + dist[v][u] = min(e_weight, dist[v][u]) + pred[v][u] = v + for w in G: + dist_w = dist[w] # save recomputation + for u in G: + dist_u = dist[u] # save recomputation + for v in G: + d = dist_u[w] + dist_w[v] + if dist_u[v] > d: + dist_u[v] = d + pred[u][v] = pred[w][v] + return dict(pred), dict(dist) + + +@nx._dispatch(graphs=None) +def reconstruct_path(source, target, predecessors): + """Reconstruct a path from source to target using the predecessors + dict as returned by floyd_warshall_predecessor_and_distance + + Parameters + ---------- + source : node + Starting node for path + + target : node + Ending node for path + + predecessors: dictionary + Dictionary, keyed by source and target, of predecessors in the + shortest path, as returned by floyd_warshall_predecessor_and_distance + + Returns + ------- + path : list + A list of nodes containing the shortest path from source to target + + If source and target are the same, an empty list is returned + + Notes + ----- + This function is meant to give more applicability to the + floyd_warshall_predecessor_and_distance function + + See Also + -------- + floyd_warshall_predecessor_and_distance + """ + if source == target: + return [] + prev = predecessors[source] +
+    curr = prev[target]
+    path = [target, curr]
+    while curr != source:
+        curr = prev[curr]
+        path.append(curr)
+    return list(reversed(path))
+
+
+@nx._dispatch(edge_attrs="weight")
+def floyd_warshall(G, weight="weight"):
+    """Find all-pairs shortest path lengths using Floyd's algorithm.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    weight: string, optional (default= 'weight')
+        Edge data key corresponding to the edge weight.
+
+
+    Returns
+    -------
+    distance : dict
+        A dictionary, keyed by source and target, of shortest path distances
+        between nodes.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from([(0, 1, 5), (1, 2, 2), (2, 3, -3), (1, 3, 10), (3, 2, 8)])
+    >>> fw = nx.floyd_warshall(G, weight='weight')
+    >>> results = {a: dict(b) for a, b in fw.items()}
+    >>> print(results)
+    {0: {0: 0, 1: 5, 2: 7, 3: 4}, 1: {1: 0, 2: 2, 3: -1, 0: inf}, 2: {2: 0, 3: -3, 0: inf, 1: inf}, 3: {3: 0, 2: 8, 0: inf, 1: inf}}
+
+    Notes
+    -----
+    Floyd's algorithm is appropriate for finding shortest paths
+    in dense graphs or graphs with negative weights when Dijkstra's algorithm
+    fails. This algorithm can still fail if there are negative cycles.
+    It has running time $O(n^3)$ with running space of $O(n^2)$.
+
+    See Also
+    --------
+    floyd_warshall_predecessor_and_distance
+    floyd_warshall_numpy
+    all_pairs_shortest_path
+    all_pairs_shortest_path_length
+    """
+    # could make this its own function to reduce memory costs
+    return floyd_warshall_predecessor_and_distance(G, weight=weight)[1]
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/generic.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/generic.py
new file mode 100644
index 0000000000000000000000000000000000000000..e47c4b4f5a1fbc0811fbf67274b36f87c466ff00
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/generic.py
@@ -0,0 +1,719 @@
+"""
+Compute the shortest paths and path lengths between nodes in the graph.
+
+These algorithms work with undirected and directed graphs.
+
+"""
+import warnings
+
+import networkx as nx
+
+__all__ = [
+    "shortest_path",
+    "all_shortest_paths",
+    "single_source_all_shortest_paths",
+    "all_pairs_all_shortest_paths",
+    "shortest_path_length",
+    "average_shortest_path_length",
+    "has_path",
+]
+
+
+@nx._dispatch
+def has_path(G, source, target):
+    """Returns *True* if *G* has a path from *source* to *target*.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Starting node for path
+
+    target : node
+        Ending node for path
+    """
+    try:
+        nx.shortest_path(G, source, target)
+    except nx.NetworkXNoPath:
+        return False
+    return True
+
+
+@nx._dispatch(edge_attrs="weight")
+def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"):
+    """Compute shortest paths in the graph.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node, optional
+        Starting node for path. If not specified, compute shortest
+        paths for each possible starting node.
+
+    target : node, optional
+        Ending node for path. If not specified, compute shortest
+        paths to all possible nodes.
+
+    weight : None, string or function, optional (default = None)
+        If None, every edge has weight/distance/cost 1.
+        If a string, use this edge attribute as the edge weight.
+        Any edge attribute not present defaults to 1.
+        If this is a function, the weight of an edge is the value
+        returned by the function.
The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + path: list or dictionary + All returned paths include both the source and target in the path. + + If the source and target are both specified, return a single list + of nodes in a shortest path from the source to the target. + + If only the source is specified, return a dictionary keyed by + targets with a list of nodes in a shortest path from the source + to one of the targets. + + If only the target is specified, return a dictionary keyed by + sources with a list of nodes in a shortest path from one of the + sources to the target. + + If neither the source nor target are specified return a dictionary + of dictionaries with path[source][target]=[list of nodes in path]. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.shortest_path(G, source=0, target=4)) + [0, 1, 2, 3, 4] + >>> p = nx.shortest_path(G, source=0) # target not specified + >>> p[3] # shortest path from source=0 to target=3 + [0, 1, 2, 3] + >>> p = nx.shortest_path(G, target=4) # source not specified + >>> p[1] # shortest path from source=1 to target=4 + [1, 2, 3, 4] + >>> p = nx.shortest_path(G) # source, target not specified + >>> p[2][4] # shortest path from source=2 to target=4 + [2, 3, 4] + + Notes + ----- + There may be more than one shortest path between a source and target. + This returns only one of them. + + See Also + -------- + all_pairs_shortest_path + all_pairs_dijkstra_path + all_pairs_bellman_ford_path + single_source_shortest_path + single_source_dijkstra_path + single_source_bellman_ford_path + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + msg = "shortest_path for all_pairs will return an iterator in v3.3" + warnings.warn(msg, DeprecationWarning) + + # Find paths between all pairs. + if method == "unweighted": + paths = dict(nx.all_pairs_shortest_path(G)) + elif method == "dijkstra": + paths = dict(nx.all_pairs_dijkstra_path(G, weight=weight)) + else: # method == 'bellman-ford': + paths = dict(nx.all_pairs_bellman_ford_path(G, weight=weight)) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + paths = nx.single_source_shortest_path(G, target) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, target, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, target, weight=weight) + # Now flip the paths so they go from a source to the target. + for target in paths: + paths[target] = list(reversed(paths[target])) + else: + if target is None: + # Find paths to all nodes accessible from the source. 
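+            # (mirrors the target-only branch above; traversal already
+            # starts at the source, so the paths need no reversal)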
+ if method == "unweighted": + paths = nx.single_source_shortest_path(G, source) + elif method == "dijkstra": + paths = nx.single_source_dijkstra_path(G, source, weight=weight) + else: # method == 'bellman-ford': + paths = nx.single_source_bellman_ford_path(G, source, weight=weight) + else: + # Find shortest source-target path. + if method == "unweighted": + paths = nx.bidirectional_shortest_path(G, source, target) + elif method == "dijkstra": + _, paths = nx.bidirectional_dijkstra(G, source, target, weight) + else: # method == 'bellman-ford': + paths = nx.bellman_ford_path(G, source, target, weight) + return paths + + +@nx._dispatch(edge_attrs="weight") +def shortest_path_length(G, source=None, target=None, weight=None, method="dijkstra"): + """Compute shortest path lengths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Starting node for path. + If not specified, compute shortest path lengths using all nodes as + source nodes. + + target : node, optional + Ending node for path. + If not specified, compute shortest path lengths using all nodes as + target nodes. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path length. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + length: int or iterator + If the source and target are both specified, return the length of + the shortest path from the source to the target. + + If only the source is specified, return a dict keyed by target + to the shortest path length from the source to that target. + + If only the target is specified, return a dict keyed by source + to the shortest path length from that source to the target. + + If neither the source nor target are specified, return an iterator + over (source, dictionary) where dictionary is keyed by target to + shortest path length from source to that target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.shortest_path_length(G, source=0, target=4) + 4 + >>> p = nx.shortest_path_length(G, source=0) # target not specified + >>> p[4] + 4 + >>> p = nx.shortest_path_length(G, target=4) # source not specified + >>> p[0] + 4 + >>> p = dict(nx.shortest_path_length(G)) # source,target not specified + >>> p[0][4] + 4 + + Notes + ----- + The length of the path is always 1 less than the number of nodes involved + in the path since the length measures the number of edges followed. + + For digraphs this returns the shortest directed path length. To find path + lengths in the reverse direction use G.reverse(copy=False) first to flip + the edge orientation. 
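+
+    For example, on a directed path, distances are defined only along
+    the edge direction; reversing the view measures the opposite
+    orientation:
+
+    >>> DG = nx.path_graph(4, create_using=nx.DiGraph)
+    >>> nx.shortest_path_length(DG, 0, 3)
+    3
+    >>> nx.shortest_path_length(DG.reverse(copy=False), 3, 0)
+    3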
+ + See Also + -------- + all_pairs_shortest_path_length + all_pairs_dijkstra_path_length + all_pairs_bellman_ford_path_length + single_source_shortest_path_length + single_source_dijkstra_path_length + single_source_bellman_ford_path_length + """ + if method not in ("dijkstra", "bellman-ford"): + # so we don't need to check in each branch later + raise ValueError(f"method not supported: {method}") + method = "unweighted" if weight is None else method + if source is None: + if target is None: + # Find paths between all pairs. + if method == "unweighted": + paths = nx.all_pairs_shortest_path_length(G) + elif method == "dijkstra": + paths = nx.all_pairs_dijkstra_path_length(G, weight=weight) + else: # method == 'bellman-ford': + paths = nx.all_pairs_bellman_ford_path_length(G, weight=weight) + else: + # Find paths from all nodes co-accessible to the target. + if G.is_directed(): + G = G.reverse(copy=False) + if method == "unweighted": + path_length = nx.single_source_shortest_path_length + paths = path_length(G, target) + elif method == "dijkstra": + path_length = nx.single_source_dijkstra_path_length + paths = path_length(G, target, weight=weight) + else: # method == 'bellman-ford': + path_length = nx.single_source_bellman_ford_path_length + paths = path_length(G, target, weight=weight) + else: + if target is None: + # Find paths to all nodes accessible from the source. + if method == "unweighted": + paths = nx.single_source_shortest_path_length(G, source) + elif method == "dijkstra": + path_length = nx.single_source_dijkstra_path_length + paths = path_length(G, source, weight=weight) + else: # method == 'bellman-ford': + path_length = nx.single_source_bellman_ford_path_length + paths = path_length(G, source, weight=weight) + else: + # Find shortest source-target path. + if method == "unweighted": + p = nx.bidirectional_shortest_path(G, source, target) + paths = len(p) - 1 + elif method == "dijkstra": + paths = nx.dijkstra_path_length(G, source, target, weight) + else: # method == 'bellman-ford': + paths = nx.bellman_ford_path_length(G, source, target, weight) + return paths + + +@nx._dispatch(edge_attrs="weight") +def average_shortest_path_length(G, weight=None, method=None): + r"""Returns the average shortest path length. + + The average shortest path length is + + .. math:: + + a =\sum_{\substack{s,t \in V \\ s\neq t}} \frac{d(s, t)}{n(n-1)} + + where `V` is the set of nodes in `G`, + `d(s, t)` is the shortest path from `s` to `t`, + and `n` is the number of nodes in `G`. + + .. versionchanged:: 3.0 + An exception is raised for directed graphs that are not strongly + connected. + + Parameters + ---------- + G : NetworkX graph + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'unweighted' or 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options are 'unweighted', 'dijkstra', 'bellman-ford', + 'floyd-warshall' and 'floyd-warshall-numpy'. + Other method values produce a ValueError. + The default method is 'unweighted' if `weight` is None, + otherwise the default method is 'dijkstra'. 
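+
+    As a quick check of the averaging formula: the path graph on five
+    nodes has n(n-1) = 20 ordered pairs of distinct nodes, and the
+    ordered distances sum to 2 * (4*1 + 3*2 + 2*3 + 1*4) = 40, giving an
+    average of 40/20 = 2.0, as in the example below.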
+ + Raises + ------ + NetworkXPointlessConcept + If `G` is the null graph (that is, the graph on zero nodes). + + NetworkXError + If `G` is not connected (or not strongly connected, in the case + of a directed graph). + + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.average_shortest_path_length(G) + 2.0 + + For disconnected graphs, you can compute the average shortest path + length for each component + + >>> G = nx.Graph([(1, 2), (3, 4)]) + >>> for C in (G.subgraph(c).copy() for c in nx.connected_components(G)): + ... print(nx.average_shortest_path_length(C)) + 1.0 + 1.0 + + """ + single_source_methods = ["unweighted", "dijkstra", "bellman-ford"] + all_pairs_methods = ["floyd-warshall", "floyd-warshall-numpy"] + supported_methods = single_source_methods + all_pairs_methods + + if method is None: + method = "unweighted" if weight is None else "dijkstra" + if method not in supported_methods: + raise ValueError(f"method not supported: {method}") + + n = len(G) + # For the special case of the null graph, raise an exception, since + # there are no paths in the null graph. + if n == 0: + msg = ( + "the null graph has no paths, thus there is no average " + "shortest path length" + ) + raise nx.NetworkXPointlessConcept(msg) + # For the special case of the trivial graph, return zero immediately. + if n == 1: + return 0 + # Shortest path length is undefined if the graph is not strongly connected. + if G.is_directed() and not nx.is_strongly_connected(G): + raise nx.NetworkXError("Graph is not strongly connected.") + # Shortest path length is undefined if the graph is not connected. + if not G.is_directed() and not nx.is_connected(G): + raise nx.NetworkXError("Graph is not connected.") + + # Compute all-pairs shortest paths. + def path_length(v): + if method == "unweighted": + return nx.single_source_shortest_path_length(G, v) + elif method == "dijkstra": + return nx.single_source_dijkstra_path_length(G, v, weight=weight) + elif method == "bellman-ford": + return nx.single_source_bellman_ford_path_length(G, v, weight=weight) + + if method in single_source_methods: + # Sum the distances for each (ordered) pair of source and target node. + s = sum(l for u in G for l in path_length(u).values()) + else: + if method == "floyd-warshall": + all_pairs = nx.floyd_warshall(G, weight=weight) + s = sum(sum(t.values()) for t in all_pairs.values()) + elif method == "floyd-warshall-numpy": + s = nx.floyd_warshall_numpy(G, weight=weight).sum() + return s / (n * (n - 1)) + + +@nx._dispatch(edge_attrs="weight") +def all_shortest_paths(G, source, target, weight=None, method="dijkstra"): + """Compute all shortest simple paths in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + target : node + Ending node for path. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. 
+ If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + NetworkXNoPath + If `target` cannot be reached from `source`. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2]) + >>> nx.add_path(G, [0, 10, 2]) + >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)]) + [[0, 1, 2], [0, 10, 2]] + + Notes + ----- + There may be many shortest paths between the source and target. If G + contains zero-weight cycles, this function will not produce all shortest + paths because doing so would produce infinitely many paths of unbounded + length -- instead, we only produce the shortest simple paths. + + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + """ + method = "unweighted" if weight is None else method + if method == "unweighted": + pred = nx.predecessor(G, source) + elif method == "dijkstra": + pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight) + elif method == "bellman-ford": + pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight) + else: + raise ValueError(f"method not supported: {method}") + + return _build_paths_from_predecessors({source}, target, pred) + + +@nx._dispatch(edge_attrs="weight") +def single_source_all_shortest_paths(G, source, weight=None, method="dijkstra"): + """Compute all shortest simple paths from the given source in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of dictionary + A generator of all paths between source and all nodes in the graph. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 0]) + >>> dict(nx.single_source_all_shortest_paths(G, source=0)) + {0: [[0]], 1: [[0, 1]], 2: [[0, 1, 2], [0, 3, 2]], 3: [[0, 3]]} + + Notes + ----- + There may be many shortest paths between the source and target. If G + contains zero-weight cycles, this function will not produce all shortest + paths because doing so would produce infinitely many paths of unbounded + length -- instead, we only produce the shortest simple paths. 
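+
+    As a small weighted illustration, both routes below have total
+    weight 2, so both are reported:
+
+    >>> G = nx.Graph()
+    >>> G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (0, 2, 2)])
+    >>> sorted(dict(nx.single_source_all_shortest_paths(G, 0, weight="weight"))[2])
+    [[0, 1, 2], [0, 2]]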
+ + See Also + -------- + shortest_path + all_shortest_paths + single_source_shortest_path + all_pairs_shortest_path + all_pairs_all_shortest_paths + """ + method = "unweighted" if weight is None else method + if method == "unweighted": + pred = nx.predecessor(G, source) + elif method == "dijkstra": + pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight) + elif method == "bellman-ford": + pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight) + else: + raise ValueError(f"method not supported: {method}") + for n in G: + try: + yield n, list(_build_paths_from_predecessors({source}, n, pred)) + except nx.NetworkXNoPath: + pass + + +@nx._dispatch(edge_attrs="weight") +def all_pairs_all_shortest_paths(G, weight=None, method="dijkstra"): + """Compute all shortest paths between all nodes. + + Parameters + ---------- + G : NetworkX graph + + weight : None, string or function, optional (default = None) + If None, every edge has weight/distance/cost 1. + If a string, use this edge attribute as the edge weight. + Any edge attribute not present defaults to 1. + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly + three positional arguments: the two endpoints of an edge and + the dictionary of edge attributes for that edge. + The function must return a number. + + method : string, optional (default = 'dijkstra') + The algorithm to use to compute the path lengths. + Supported options: 'dijkstra', 'bellman-ford'. + Other inputs produce a ValueError. + If `weight` is None, unweighted graph methods are used, and this + suggestion is ignored. + + Returns + ------- + paths : generator of dictionary + Dictionary of arrays, keyed by source and target, of all shortest paths. + + Raises + ------ + ValueError + If `method` is not among the supported options. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> dict(nx.all_pairs_all_shortest_paths(G))[0][2] + [[0, 1, 2], [0, 3, 2]] + >>> dict(nx.all_pairs_all_shortest_paths(G))[0][3] + [[0, 3]] + + Notes + ----- + There may be multiple shortest paths with equal lengths. Unlike + all_pairs_shortest_path, this method returns all shortest paths. + + See Also + -------- + all_pairs_shortest_path + single_source_all_shortest_paths + """ + for n in G: + yield n, dict( + single_source_all_shortest_paths(G, n, weight=weight, method=method) + ) + + +def _build_paths_from_predecessors(sources, target, pred): + """Compute all simple paths to target, given the predecessors found in + pred, terminating when any source in sources is found. + + Parameters + ---------- + sources : set + Starting nodes for path. + + target : node + Ending node for path. + + pred : dict + A dictionary of predecessor lists, keyed by node + + Returns + ------- + paths : generator of lists + A generator of all paths between source and target. + + Raises + ------ + NetworkXNoPath + If `target` cannot be reached from `source`. + + Notes + ----- + There may be many paths between the sources and target. If there are + cycles among the predecessors, this function will not produce all + possible paths because doing so would produce infinitely many paths + of unbounded length -- instead, we only produce simple paths. 
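+
+    The implementation below walks the predecessor lists with an explicit
+    stack (an iterative depth-first traversal), reusing stack slots so
+    that enumerating many paths avoids recursion and repeated allocation.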
+ + See Also + -------- + shortest_path + single_source_shortest_path + all_pairs_shortest_path + all_shortest_paths + bellman_ford_path + """ + if target not in pred: + raise nx.NetworkXNoPath(f"Target {target} cannot be reached from given sources") + + seen = {target} + stack = [[target, 0]] + top = 0 + while top >= 0: + node, i = stack[top] + if node in sources: + yield [p for p, n in reversed(stack[: top + 1])] + if len(pred[node]) > i: + stack[top][1] = i + 1 + next = pred[node][i] + if next in seen: + continue + else: + seen.add(next) + top += 1 + if top == len(stack): + stack.append([next, 0]) + else: + stack[top][:] = [next, 0] + else: + seen.discard(node) + top -= 1 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a11c0b6f16b159945e5e5614571968245891e09c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e0259cd21220090040514c923cb45455ad1fa0d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8aca9c16eb3b3da61afee6f1b1977bb9959ce2b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4f6cfae493f8b4eec51c3a5025553ebf36afea6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..107ac4b615809b630bf20622317c78584039527a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-39.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..0727f2d934994bf6c3ca3055861600e1c21b3e8f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ca0a527dd918f9203ebfec573548c0a8be7747b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py new file mode 100644 index 0000000000000000000000000000000000000000..680f76efbf936505644419b294db5136e0fab983 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_astar.py @@ -0,0 +1,210 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +class TestAStar: + @classmethod + def setup_class(cls): + edges = [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + cls.XG = nx.DiGraph() + cls.XG.add_weighted_edges_from(edges) + + def test_multiple_optimal_paths(self): + """Tests that A* algorithm finds any of multiple optimal paths""" + heuristic_values = {"a": 1.35, "b": 1.18, "c": 0.67, "d": 0} + + def h(u, v): + return heuristic_values[u] + + graph = nx.Graph() + points = ["a", "b", "c", "d"] + edges = [("a", "b", 0.18), ("a", "c", 0.68), ("b", "c", 0.50), ("c", "d", 0.67)] + + graph.add_nodes_from(points) + graph.add_weighted_edges_from(edges) + + path1 = ["a", "c", "d"] + path2 = ["a", "b", "c", "d"] + assert nx.astar_path(graph, "a", "d", h) in (path1, path2) + + def test_astar_directed(self): + assert nx.astar_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v") == 9 + + def test_astar_directed_weight_function(self): + w1 = lambda u, v, d: d["weight"] + assert nx.astar_path(self.XG, "x", "u", weight=w1) == ["x", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w1) == 3 + assert nx.astar_path(self.XG, "s", "v", weight=w1) == ["s", "x", "u", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w1) == 9 + + w2 = lambda u, v, d: None if (u, v) == ("x", "u") else d["weight"] + assert nx.astar_path(self.XG, "x", "u", weight=w2) == ["x", "y", "s", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w2) == 19 + assert nx.astar_path(self.XG, "s", "v", weight=w2) == ["s", "x", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w2) == 10 + + w3 = lambda u, v, d: d["weight"] + 10 + assert nx.astar_path(self.XG, "x", "u", weight=w3) == ["x", "u"] + assert nx.astar_path_length(self.XG, "x", "u", weight=w3) == 13 + assert nx.astar_path(self.XG, "s", "v", weight=w3) == ["s", "x", "v"] + assert nx.astar_path_length(self.XG, "s", "v", weight=w3) == 30 + + def test_astar_multigraph(self): + G = nx.MultiDiGraph(self.XG) + G.add_weighted_edges_from((u, v, 1000) for (u, v) in list(G.edges())) + assert nx.astar_path(G, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 9 + + def test_astar_undirected(self): + GG = self.XG.to_undirected() + 
# make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + GG["y"]["v"]["weight"] = 2 + assert nx.astar_path(GG, "s", "v") == ["s", "x", "u", "v"] + assert nx.astar_path_length(GG, "s", "v") == 8 + + def test_astar_directed2(self): + XG2 = nx.DiGraph() + edges = [ + (1, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 3, 1), + (1, 3, 50), + (1, 2, 100), + (2, 3, 100), + ] + XG2.add_weighted_edges_from(edges) + assert nx.astar_path(XG2, 1, 3) == [1, 4, 5, 6, 3] + + def test_astar_undirected2(self): + XG3 = nx.Graph() + edges = [(0, 1, 2), (1, 2, 12), (2, 3, 1), (3, 4, 5), (4, 5, 1), (5, 0, 10)] + XG3.add_weighted_edges_from(edges) + assert nx.astar_path(XG3, 0, 3) == [0, 1, 2, 3] + assert nx.astar_path_length(XG3, 0, 3) == 15 + + def test_astar_undirected3(self): + XG4 = nx.Graph() + edges = [ + (0, 1, 2), + (1, 2, 2), + (2, 3, 1), + (3, 4, 1), + (4, 5, 1), + (5, 6, 1), + (6, 7, 1), + (7, 0, 1), + ] + XG4.add_weighted_edges_from(edges) + assert nx.astar_path(XG4, 0, 2) == [0, 1, 2] + assert nx.astar_path_length(XG4, 0, 2) == 4 + + """ Tests that A* finds correct path when multiple paths exist + and the best one is not expanded first (GH issue #3464) + """ + + def test_astar_directed3(self): + heuristic_values = {"n5": 36, "n2": 4, "n1": 0, "n0": 0} + + def h(u, v): + return heuristic_values[u] + + edges = [("n5", "n1", 11), ("n5", "n2", 9), ("n2", "n1", 1), ("n1", "n0", 32)] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + answer = ["n5", "n2", "n1", "n0"] + assert nx.astar_path(graph, "n5", "n0", h) == answer + + """ Tests that parent is not wrongly overridden when a node + is re-explored multiple times. + """ + + def test_astar_directed4(self): + edges = [ + ("a", "b", 1), + ("a", "c", 1), + ("b", "d", 2), + ("c", "d", 1), + ("d", "e", 1), + ] + graph = nx.DiGraph() + graph.add_weighted_edges_from(edges) + assert nx.astar_path(graph, "a", "e") == ["a", "c", "d", "e"] + + # >>> MXG4=NX.MultiGraph(XG4) + # >>> MXG4.add_edge(0,1,3) + # >>> NX.dijkstra_path(MXG4,0,2) + # [0, 1, 2] + + def test_astar_w1(self): + G = nx.DiGraph() + G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "w"), + ("w", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + assert nx.astar_path(G, "s", "v") == ["s", "u", "v"] + assert nx.astar_path_length(G, "s", "v") == 2 + + def test_astar_nopath(self): + with pytest.raises(nx.NodeNotFound): + nx.astar_path(self.XG, "s", "moon") + + def test_cycle(self): + C = nx.cycle_graph(7) + assert nx.astar_path(C, 0, 3) == [0, 1, 2, 3] + assert nx.dijkstra_path(C, 0, 4) == [0, 6, 5, 4] + + def test_unorderable_nodes(self): + """Tests that A* accommodates nodes that are not orderable. + + For more information, see issue #554. + + """ + # Create the cycle graph on four nodes, with nodes represented + # as (unorderable) Python objects. 
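+        # object() instances are hashable and comparable for equality, but
+        # they do not support ordering (<), so any tie-breaking that
+        # compared nodes directly would raise a TypeError.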
+        nodes = [object() for n in range(4)]
+        G = nx.Graph()
+        G.add_edges_from(pairwise(nodes, cyclic=True))
+        path = nx.astar_path(G, nodes[0], nodes[2])
+        assert len(path) == 3
+
+    def test_astar_NetworkXNoPath(self):
+        """Tests that exception is raised when there exists no
+        path between source and target"""
+        G = nx.gnp_random_graph(10, 0.2, seed=10)
+        with pytest.raises(nx.NetworkXNoPath):
+            nx.astar_path(G, 4, 9)
+
+    def test_astar_NodeNotFound(self):
+        """Tests that exception is raised when either
+        source or target is not in graph"""
+        G = nx.gnp_random_graph(10, 0.2, seed=10)
+        with pytest.raises(nx.NodeNotFound):
+            nx.astar_path_length(G, 11, 9)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py
new file mode 100644
index 0000000000000000000000000000000000000000..6923bfef856c83bd3e65573b97fe96ff16cdbc71
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense.py
@@ -0,0 +1,212 @@
+import pytest
+
+import networkx as nx
+
+
+class TestFloyd:
+    @classmethod
+    def setup_class(cls):
+        pass
+
+    def test_floyd_warshall_predecessor_and_distance(self):
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [
+                ("s", "u", 10),
+                ("s", "x", 5),
+                ("u", "v", 1),
+                ("u", "x", 2),
+                ("v", "y", 1),
+                ("x", "u", 3),
+                ("x", "v", 5),
+                ("x", "y", 2),
+                ("y", "s", 7),
+                ("y", "v", 6),
+            ]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
+        assert dist["s"]["v"] == 9
+        assert path["s"]["v"] == "u"
+        assert dist == {
+            "y": {"y": 0, "x": 12, "s": 7, "u": 15, "v": 6},
+            "x": {"y": 2, "x": 0, "s": 9, "u": 3, "v": 4},
+            "s": {"y": 7, "x": 5, "s": 0, "u": 8, "v": 9},
+            "u": {"y": 2, "x": 2, "s": 9, "u": 0, "v": 1},
+            "v": {"y": 1, "x": 13, "s": 8, "u": 16, "v": 0},
+        }
+
+        GG = XG.to_undirected()
+        # make sure we get lower weight
+        # to_undirected might choose either edge with weight 2 or weight 3
+        GG["u"]["x"]["weight"] = 2
+        path, dist = nx.floyd_warshall_predecessor_and_distance(GG)
+        assert dist["s"]["v"] == 8
+        # skip this test, could be alternate path s-u-v
+        # assert_equal(path['s']['v'],'y')
+
+        G = nx.DiGraph()  # no weights
+        G.add_edges_from(
+            [
+                ("s", "u"),
+                ("s", "x"),
+                ("u", "v"),
+                ("u", "x"),
+                ("v", "y"),
+                ("x", "u"),
+                ("x", "v"),
+                ("x", "y"),
+                ("y", "s"),
+                ("y", "v"),
+            ]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(G)
+        assert dist["s"]["v"] == 2
+        # skip this test, could be alternate path s-u-v
+        # assert_equal(path['s']['v'],'x')
+
+        # alternate interface
+        dist = nx.floyd_warshall(G)
+        assert dist["s"]["v"] == 2
+
+        # floyd_warshall_predecessor_and_distance returns
+        # dicts-of-defaultdicts
+        # make sure we don't get empty dictionary
+        XG = nx.DiGraph()
+        XG.add_weighted_edges_from(
+            [("v", "x", 5.0), ("y", "x", 5.0), ("v", "y", 6.0), ("x", "u", 2.0)]
+        )
+        path, dist = nx.floyd_warshall_predecessor_and_distance(XG)
+        inf = float("inf")
+        assert dist == {
+            "v": {"v": 0, "x": 5.0, "y": 6.0, "u": 7.0},
+            "x": {"x": 0, "u": 2.0, "v": inf, "y": inf},
+            "y": {"y": 0, "x": 5.0, "v": inf, "u": 7.0},
+            "u": {"u": 0, "v": inf, "x": inf, "y": inf},
+        }
+        assert path == {
+            "v": {"x": "v", "y": "v", "u": "x"},
+            "x": {"u": "x"},
+            "y": {"x": "y", "u": "x"},
+        }
+
+    def test_reconstruct_path(self):
+        with pytest.raises(KeyError):
+            XG = nx.DiGraph()
+            XG.add_weighted_edges_from(
+                [
+                    ("s", "u", 10),
+                    ("s", "x", 5),
+                    ("u", "v", 1),
+                    ("u", "x", 2),
+                    ("v", "y", 1),
+                    ("x", "u", 3),
+                    ("x", "v", 5),
("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + predecessors, _ = nx.floyd_warshall_predecessor_and_distance(XG) + + path = nx.reconstruct_path("s", "v", predecessors) + assert path == ["s", "x", "u", "v"] + + path = nx.reconstruct_path("s", "s", predecessors) + assert path == [] + + # this part raises the keyError + nx.reconstruct_path("1", "2", predecessors) + + def test_cycle(self): + path, dist = nx.floyd_warshall_predecessor_and_distance(nx.cycle_graph(7)) + assert dist[0][3] == 3 + assert path[0][3] == 2 + assert dist[0][4] == 3 + + def test_weighted(self): + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG3) + assert dist[0][3] == 15 + assert path[0][3] == 2 + + def test_weighted2(self): + XG4 = nx.Graph() + XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG4) + assert dist[0][2] == 4 + assert path[0][2] == 1 + + def test_weight_parameter(self): + XG4 = nx.Graph() + XG4.add_edges_from( + [ + (0, 1, {"heavy": 2}), + (1, 2, {"heavy": 2}), + (2, 3, {"heavy": 1}), + (3, 4, {"heavy": 1}), + (4, 5, {"heavy": 1}), + (5, 6, {"heavy": 1}), + (6, 7, {"heavy": 1}), + (7, 0, {"heavy": 1}), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG4, weight="heavy") + assert dist[0][2] == 4 + assert path[0][2] == 1 + + def test_zero_distance(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + path, dist = nx.floyd_warshall_predecessor_and_distance(XG) + + for u in XG: + assert dist[u][u] == 0 + + GG = XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + path, dist = nx.floyd_warshall_predecessor_and_distance(GG) + + for u in GG: + dist[u][u] = 0 + + def test_zero_weight(self): + G = nx.DiGraph() + edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)] + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall(G) + assert dist[1][3] == -14 + + G = nx.MultiDiGraph() + edges.append((2, 5, -7)) + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall(G) + assert dist[1][3] == -14 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..1316e23e654a775e949cdbd34a86a474597f993a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_dense_numpy.py @@ -0,0 +1,89 @@ +import pytest + +np = pytest.importorskip("numpy") + + +import networkx as nx + + +def test_cycle_numpy(): + dist = nx.floyd_warshall_numpy(nx.cycle_graph(7)) + assert dist[0, 3] == 3 + assert dist[0, 4] == 3 + + +def test_weighted_numpy_three_edges(): + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + dist = nx.floyd_warshall_numpy(XG3) + assert dist[0, 3] == 15 + + +def test_weighted_numpy_two_edges(): + XG4 = nx.Graph() + XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], 
+ [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + dist = nx.floyd_warshall_numpy(XG4) + assert dist[0, 2] == 4 + + +def test_weight_parameter_numpy(): + XG4 = nx.Graph() + XG4.add_edges_from( + [ + (0, 1, {"heavy": 2}), + (1, 2, {"heavy": 2}), + (2, 3, {"heavy": 1}), + (3, 4, {"heavy": 1}), + (4, 5, {"heavy": 1}), + (5, 6, {"heavy": 1}), + (6, 7, {"heavy": 1}), + (7, 0, {"heavy": 1}), + ] + ) + dist = nx.floyd_warshall_numpy(XG4, weight="heavy") + assert dist[0, 2] == 4 + + +def test_directed_cycle_numpy(): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + pred, dist = nx.floyd_warshall_predecessor_and_distance(G) + D = nx.utils.dict_to_numpy_array(dist) + np.testing.assert_equal(nx.floyd_warshall_numpy(G), D) + + +def test_zero_weight(): + G = nx.DiGraph() + edges = [(1, 2, -2), (2, 3, -4), (1, 5, 1), (5, 4, 0), (4, 3, -5), (2, 5, -7)] + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + G = nx.MultiDiGraph() + edges.append((2, 5, -7)) + G.add_weighted_edges_from(edges) + dist = nx.floyd_warshall_numpy(G) + assert int(np.min(dist)) == -14 + + +def test_nodelist(): + G = nx.path_graph(7) + dist = nx.floyd_warshall_numpy(G, nodelist=[3, 5, 4, 6, 2, 1, 0]) + assert dist[0, 3] == 3 + assert dist[0, 1] == 2 + assert dist[6, 2] == 4 + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, [1, 3]) + pytest.raises(nx.NetworkXError, nx.floyd_warshall_numpy, G, list(range(9))) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py new file mode 100644 index 0000000000000000000000000000000000000000..9cf7e7a50c4ea80f7a37c5a82291671e003f3efe --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_generic.py @@ -0,0 +1,444 @@ +import pytest + +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestGenericPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + cls.neg_weights = nx.DiGraph() + cls.neg_weights.add_edge(0, 1, weight=1) + cls.neg_weights.add_edge(0, 2, weight=3) + cls.neg_weights.add_edge(1, 3, weight=1) + cls.neg_weights.add_edge(2, 3, weight=-2) + + def test_shortest_path(self): + assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12)) + assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + # now with weights + assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3] + assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12, weight="weight") + ) + assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == [ + 0, + 1, + 2, + 3, + ] + # weights and method specified + 
assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="dijkstra" + ) == [0, 1, 2, 3] + assert nx.shortest_path( + self.directed_cycle, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 1, 2, 3] + # when Dijkstra's will probably (depending on precise implementation) + # incorrectly return [0, 1, 3] instead + assert nx.shortest_path( + self.neg_weights, 0, 3, weight="weight", method="bellman-ford" + ) == [0, 2, 3] + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM") + # confirm absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8) + + def test_shortest_path_target(self): + answer = {0: [0, 1], 1: [1], 2: [2, 1]} + sp = nx.shortest_path(nx.path_graph(3), target=1) + assert sp == answer + # with weights + sp = nx.shortest_path(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4 + # weights and method specified + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="dijkstra" + ) + == 3 + ) + assert ( + nx.shortest_path_length( + self.cycle, 0, 3, weight="weight", method="bellman-ford" + ) + == 3 + ) + # confirm bad method rejection + pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM") + # confirm absent source rejection + pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8) + + def test_shortest_path_length_target(self): + answer = {0: 1, 1: 0, 2: 1} + sp = dict(nx.shortest_path_length(nx.path_graph(3), target=1)) + assert sp == answer + # with weights + sp = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight") + assert sp == answer + # weights and method specified + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="dijkstra" + ) + assert sp == answer + sp = nx.shortest_path_length( + nx.path_graph(3), target=1, weight="weight", method="bellman-ford" + ) + assert sp == answer + + def test_single_source_shortest_path(self): + p = nx.shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1) + validate_grid_path(4, 4, 1, 12, p[12]) + # now with weights + p = nx.shortest_path(self.cycle, 0, weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_dijkstra_path(self.cycle, 0) + p = nx.shortest_path(self.grid, 1, weight="weight") + validate_grid_path(4, 4, 1, 12, p[12]) + # weights and method specified + p = nx.shortest_path(self.cycle, 0, method="dijkstra", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == nx.single_source_shortest_path(self.cycle, 0) + p = nx.shortest_path(self.cycle, 0, method="bellman-ford", weight="weight") + assert p[3] == [0, 1, 2, 3] + assert p == 
nx.single_source_shortest_path(self.cycle, 0) + + def test_single_source_shortest_path_length(self): + ans = dict(nx.shortest_path_length(self.cycle, 0)) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_shortest_path_length(self.cycle, 0)) + ans = dict(nx.shortest_path_length(self.grid, 1)) + assert ans[16] == 6 + # now with weights + ans = dict(nx.shortest_path_length(self.cycle, 0, weight="weight")) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0)) + ans = dict(nx.shortest_path_length(self.grid, 1, weight="weight")) + assert ans[16] == 6 + # weights and method specified + ans = dict( + nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra") + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0)) + ans = dict( + nx.shortest_path_length( + self.cycle, 0, weight="weight", method="bellman-ford" + ) + ) + assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.single_source_bellman_ford_path_length(self.cycle, 0)) + + def test_single_source_all_shortest_paths(self): + cycle_ans = {0: [[0]], 1: [[0, 1]], 2: [[0, 1, 2], [0, 3, 2]], 3: [[0, 3]]} + ans = dict(nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0)) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict(nx.single_source_all_shortest_paths(self.grid, 1)) + grid_ans = [ + [1, 2, 3, 7, 11], + [1, 2, 6, 7, 11], + [1, 2, 6, 10, 11], + [1, 5, 6, 7, 11], + [1, 5, 6, 10, 11], + [1, 5, 9, 10, 11], + ] + assert sorted(ans[11]) == grid_ans + ans = dict( + nx.single_source_all_shortest_paths(nx.cycle_graph(4), 0, weight="weight") + ) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict( + nx.single_source_all_shortest_paths( + nx.cycle_graph(4), 0, method="bellman-ford", weight="weight" + ) + ) + assert sorted(ans[2]) == cycle_ans[2] + ans = dict(nx.single_source_all_shortest_paths(self.grid, 1, weight="weight")) + assert sorted(ans[11]) == grid_ans + ans = dict( + nx.single_source_all_shortest_paths( + self.grid, 1, method="bellman-ford", weight="weight" + ) + ) + assert sorted(ans[11]) == grid_ans + G = nx.cycle_graph(4) + G.add_node(4) + ans = dict(nx.single_source_all_shortest_paths(G, 0)) + assert sorted(ans[2]) == [[0, 1, 2], [0, 3, 2]] + ans = dict(nx.single_source_all_shortest_paths(G, 4)) + assert sorted(ans[4]) == [[4]] + + def test_all_pairs_shortest_path(self): + p = nx.shortest_path(self.cycle) + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_shortest_path(self.cycle)) + p = nx.shortest_path(self.grid) + validate_grid_path(4, 4, 1, 12, p[1][12]) + # now with weights + p = nx.shortest_path(self.cycle, weight="weight") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_dijkstra_path(self.cycle)) + p = nx.shortest_path(self.grid, weight="weight") + validate_grid_path(4, 4, 1, 12, p[1][12]) + # weights and method specified + p = nx.shortest_path(self.cycle, weight="weight", method="dijkstra") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_dijkstra_path(self.cycle)) + p = nx.shortest_path(self.cycle, weight="weight", method="bellman-ford") + assert p[0][3] == [0, 1, 2, 3] + assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle)) + + def test_all_pairs_shortest_path_length(self): + ans = dict(nx.shortest_path_length(self.cycle)) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == 
dict(nx.all_pairs_shortest_path_length(self.cycle)) + ans = dict(nx.shortest_path_length(self.grid)) + assert ans[1][16] == 6 + # now with weights + ans = dict(nx.shortest_path_length(self.cycle, weight="weight")) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle)) + ans = dict(nx.shortest_path_length(self.grid, weight="weight")) + assert ans[1][16] == 6 + # weights and method specified + ans = dict( + nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra") + ) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle)) + ans = dict( + nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford") + ) + assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle)) + + def test_all_pairs_all_shortest_paths(self): + ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4))) + assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]] + ans = dict(nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)), weight="weight") + assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]] + ans = dict( + nx.all_pairs_all_shortest_paths(nx.cycle_graph(4)), + method="bellman-ford", + weight="weight", + ) + assert sorted(ans[1][3]) == [[1, 0, 3], [1, 2, 3]] + G = nx.cycle_graph(4) + G.add_node(4) + ans = dict(nx.all_pairs_all_shortest_paths(G)) + assert sorted(ans[4][4]) == [[4]] + + def test_has_path(self): + G = nx.Graph() + nx.add_path(G, range(3)) + nx.add_path(G, range(3, 5)) + assert nx.has_path(G, 0, 2) + assert not nx.has_path(G, 0, 4) + + def test_all_shortest_paths(self): + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(nx.all_shortest_paths(G, 0, 3)) + # with weights + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight") + ) + # weights and method specified + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight", method="dijkstra") + ) + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3]) + nx.add_path(G, [0, 10, 20, 3]) + assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted( + nx.all_shortest_paths(G, 0, 3, weight="weight", method="bellman-ford") + ) + + def test_all_shortest_paths_raise(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.path_graph(4) + G.add_node(4) + list(nx.all_shortest_paths(G, 0, 4)) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + list(nx.all_shortest_paths(G, 0, 1, weight="weight", method="SPAM")) + + def test_single_source_all_shortest_paths_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + dict( + nx.single_source_all_shortest_paths( + G, 0, weight="weight", method="SPAM" + ) + ) + + def test_all_shortest_paths_zero_weight_edge(self): + g = nx.Graph() + nx.add_path(g, [0, 1, 3]) + nx.add_path(g, [0, 1, 2, 3]) + g.edges[1, 2]["weight"] = 0 + paths30d = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra") + ) + paths03d = list( + nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra") + ) + paths30b = list( + nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford") + ) + paths03b = list( + 
nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford") + ) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30d) + assert sorted(paths03d) == sorted(p[::-1] for p in paths30b) + assert sorted(paths03b) == sorted(p[::-1] for p in paths30b) + + +class TestAverageShortestPathLength: + def test_cycle_graph(self): + ans = nx.average_shortest_path_length(nx.cycle_graph(7)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_path_graph(self): + ans = nx.average_shortest_path_length(nx.path_graph(5)) + assert ans == pytest.approx(2, abs=1e-7) + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight") + assert ans == pytest.approx(4, abs=1e-7) + + def test_specified_methods(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") + assert ans == pytest.approx(4, abs=1e-7) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall" + ) + assert ans == pytest.approx(4, abs=1e-7) + + def test_directed_not_strongly_connected(self): + G = nx.DiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"): + nx.average_shortest_path_length(G) + + def test_undirected_not_connected(self): + g = nx.Graph() + g.add_nodes_from(range(3)) + g.add_edge(0, 1) + pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) + + def test_trivial_graph(self): + """Tests that the trivial graph has average path length zero, + since there is exactly one path of length zero in the trivial + graph. + + For more information, see issue #1960. 
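+
+        With a single node there are no ordered pairs of distinct nodes,
+        so the implementation returns 0 immediately rather than evaluating
+        the n*(n-1) denominator.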
+ + """ + G = nx.trivial_graph() + assert nx.average_shortest_path_length(G) == 0 + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.average_shortest_path_length(nx.null_graph()) + + def test_bad_method(self): + with pytest.raises(ValueError): + G = nx.path_graph(2) + nx.average_shortest_path_length(G, weight="weight", method="SPAM") + + +class TestAverageShortestPathLengthNumpy: + @classmethod + def setup_class(cls): + global np + import pytest + + np = pytest.importorskip("numpy") + + def test_specified_methods_numpy(self): + G = nx.Graph() + nx.add_cycle(G, range(7), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) + + G = nx.Graph() + nx.add_path(G, range(5), weight=2) + ans = nx.average_shortest_path_length( + G, weight="weight", method="floyd-warshall-numpy" + ) + np.testing.assert_almost_equal(ans, 4) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d999518a5573db03493bd572441c3faa706d6b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_unweighted.py @@ -0,0 +1,149 @@ +import pytest + +import networkx as nx + + +def validate_grid_path(r, c, s, t, p): + assert isinstance(p, list) + assert p[0] == s + assert p[-1] == t + s = ((s - 1) // c, (s - 1) % c) + t = ((t - 1) // c, (t - 1) % c) + assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1 + p = [((u - 1) // c, (u - 1) % c) for u in p] + for u in p: + assert 0 <= u[0] < r + assert 0 <= u[1] < c + for u, v in zip(p[:-1], p[1:]): + assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)] + + +class TestUnweightedPath: + @classmethod + def setup_class(cls): + from networkx import convert_node_labels_to_integers as cnlti + + cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + cls.cycle = nx.cycle_graph(7) + cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + + def test_bidirectional_shortest_path(self): + assert nx.bidirectional_shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3] + assert nx.bidirectional_shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4] + validate_grid_path( + 4, 4, 1, 12, nx.bidirectional_shortest_path(self.grid, 1, 12) + ) + assert nx.bidirectional_shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3] + # test source = target + assert nx.bidirectional_shortest_path(self.cycle, 3, 3) == [3] + + @pytest.mark.parametrize( + ("src", "tgt"), + ( + (8, 3), # source not in graph + (3, 8), # target not in graph + (8, 10), # neither source nor target in graph + (8, 8), # src == tgt, neither in graph - tests order of input checks + ), + ) + def test_bidirectional_shortest_path_src_tgt_not_in_graph(self, src, tgt): + with pytest.raises( + nx.NodeNotFound, + match=f"Either source {src} or target {tgt} is not in G", + ): + nx.bidirectional_shortest_path(self.cycle, src, tgt) + + def test_shortest_path_length(self): + assert nx.shortest_path_length(self.cycle, 0, 3) == 3 + assert nx.shortest_path_length(self.grid, 1, 12) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4 + # now with weights + assert nx.shortest_path_length(self.cycle, 0, 3, weight=True) == 3 + assert nx.shortest_path_length(self.grid, 1, 12, weight=True) == 5 + assert nx.shortest_path_length(self.directed_cycle, 0, 4, 
weight=True) == 4 + + def test_single_source_shortest_path(self): + p = nx.single_source_shortest_path(self.directed_cycle, 3) + assert p[0] == [3, 4, 5, 6, 0] + p = nx.single_source_shortest_path(self.cycle, 0) + assert p[3] == [0, 1, 2, 3] + p = nx.single_source_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + + def test_single_source_shortest_path_length(self): + pl = nx.single_source_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.cycle, 0)) == lengths + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6} + assert dict(pl(self.directed_cycle, 0)) == lengths + + def test_single_target_shortest_path(self): + p = nx.single_target_shortest_path(self.directed_cycle, 0) + assert p[3] == [3, 4, 5, 6, 0] + p = nx.single_target_shortest_path(self.cycle, 0) + assert p[3] == [3, 2, 1, 0] + p = nx.single_target_shortest_path(self.cycle, 0, cutoff=0) + assert p == {0: [0]} + # test missing targets + target = 8 + with pytest.raises(nx.NodeNotFound, match=f"Target {target} not in G"): + nx.single_target_shortest_path(self.cycle, target) + + def test_single_target_shortest_path_length(self): + pl = nx.single_target_shortest_path_length + lengths = {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.cycle, 0)) == lengths + lengths = {0: 0, 1: 6, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert dict(pl(self.directed_cycle, 0)) == lengths + # test missing targets + target = 8 + with pytest.raises(nx.NodeNotFound, match=f"Target {target} is not in G"): + nx.single_target_shortest_path_length(self.cycle, target) + + def test_all_pairs_shortest_path(self): + p = dict(nx.all_pairs_shortest_path(self.cycle)) + assert p[0][3] == [0, 1, 2, 3] + p = dict(nx.all_pairs_shortest_path(self.grid)) + validate_grid_path(4, 4, 1, 12, p[1][12]) + + def test_all_pairs_shortest_path_length(self): + l = dict(nx.all_pairs_shortest_path_length(self.cycle)) + assert l[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + l = dict(nx.all_pairs_shortest_path_length(self.grid)) + assert l[1][16] == 6 + + def test_predecessor_path(self): + G = nx.path_graph(4) + assert nx.predecessor(G, 0) == {0: [], 1: [0], 2: [1], 3: [2]} + assert nx.predecessor(G, 0, 3) == [2] + + def test_predecessor_cycle(self): + G = nx.cycle_graph(4) + pred = nx.predecessor(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + + def test_predecessor_cutoff(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert 4 not in p + + def test_predecessor_target(self): + G = nx.path_graph(4) + p = nx.predecessor(G, 0, 3) + assert p == [2] + p = nx.predecessor(G, 0, 3, cutoff=2) + assert p == [] + p, s = nx.predecessor(G, 0, 3, return_seen=True) + assert p == [2] + assert s == 3 + p, s = nx.predecessor(G, 0, 3, cutoff=2, return_seen=True) + assert p == [] + assert s == -1 + + def test_predecessor_missing_source(self): + source = 8 + with pytest.raises(nx.NodeNotFound, match=f"Source {source} not in G"): + nx.predecessor(self.cycle, source) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py new file mode 100644 index 0000000000000000000000000000000000000000..dc88572d3571f7d94015b12d9ab870aee316516f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/tests/test_weighted.py @@ -0,0 +1,972 @@ +import pytest + +import networkx as nx +from networkx.utils import pairwise + + +def 
validate_path(G, s, t, soln_len, path, weight="weight"): + assert path[0] == s + assert path[-1] == t + + if callable(weight): + weight_f = weight + else: + if G.is_multigraph(): + + def weight_f(u, v, d): + return min(e.get(weight, 1) for e in d.values()) + + else: + + def weight_f(u, v, d): + return d.get(weight, 1) + + computed = sum(weight_f(u, v, G[u][v]) for u, v in pairwise(path)) + assert soln_len == computed + + +def validate_length_path(G, s, t, soln_len, length, path, weight="weight"): + assert soln_len == length + validate_path(G, s, t, length, path, weight=weight) + + +class WeightedTestBase: + """Base class for test classes that test functions for computing + shortest paths in weighted graphs. + + """ + + def setup_method(self): + """Creates some graphs for use in the unit tests.""" + cnlti = nx.convert_node_labels_to_integers + self.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + self.cycle = nx.cycle_graph(7) + self.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + self.XG = nx.DiGraph() + self.XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + self.MXG = nx.MultiDiGraph(self.XG) + self.MXG.add_edge("s", "u", weight=15) + self.XG2 = nx.DiGraph() + self.XG2.add_weighted_edges_from( + [ + [1, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 3, 1], + [1, 3, 50], + [1, 2, 100], + [2, 3, 100], + ] + ) + + self.XG3 = nx.Graph() + self.XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + + self.XG4 = nx.Graph() + self.XG4.add_weighted_edges_from( + [ + [0, 1, 2], + [1, 2, 2], + [2, 3, 1], + [3, 4, 1], + [4, 5, 1], + [5, 6, 1], + [6, 7, 1], + [7, 0, 1], + ] + ) + self.MXG4 = nx.MultiGraph(self.XG4) + self.MXG4.add_edge(0, 1, weight=3) + self.G = nx.DiGraph() # no weights + self.G.add_edges_from( + [ + ("s", "u"), + ("s", "x"), + ("u", "v"), + ("u", "x"), + ("v", "y"), + ("x", "u"), + ("x", "v"), + ("x", "y"), + ("y", "s"), + ("y", "v"), + ] + ) + + +class TestWeightedPath(WeightedTestBase): + def test_dijkstra(self): + (D, P) = nx.single_source_dijkstra(self.XG, "s") + validate_path(self.XG, "s", "v", 9, P["v"]) + assert D["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra_path(self.XG, "s")["v"] + ) + assert dict(nx.single_source_dijkstra_path_length(self.XG, "s"))["v"] == 9 + + validate_path( + self.XG, "s", "v", 9, nx.single_source_dijkstra(self.XG, "s")[1]["v"] + ) + validate_path( + self.MXG, "s", "v", 9, nx.single_source_dijkstra_path(self.MXG, "s")["v"] + ) + + GG = self.XG.to_undirected() + # make sure we get lower weight + # to_undirected might choose either edge with weight 2 or weight 3 + GG["u"]["x"]["weight"] = 2 + (D, P) = nx.single_source_dijkstra(GG, "s") + validate_path(GG, "s", "v", 8, P["v"]) + assert D["v"] == 8 # uses lower weight of 2 on u<->x edge + validate_path(GG, "s", "v", 8, nx.dijkstra_path(GG, "s", "v")) + assert nx.dijkstra_path_length(GG, "s", "v") == 8 + + validate_path(self.XG2, 1, 3, 4, nx.dijkstra_path(self.XG2, 1, 3)) + validate_path(self.XG3, 0, 3, 15, nx.dijkstra_path(self.XG3, 0, 3)) + assert nx.dijkstra_path_length(self.XG3, 0, 3) == 15 + validate_path(self.XG4, 0, 2, 4, nx.dijkstra_path(self.XG4, 0, 2)) + assert nx.dijkstra_path_length(self.XG4, 0, 2) == 4 + validate_path(self.MXG4, 0, 2, 4, nx.dijkstra_path(self.MXG4, 0, 2)) + validate_path( + self.G, "s", "v", 2, 
nx.single_source_dijkstra(self.G, "s", "v")[1] + ) + validate_path( + self.G, "s", "v", 2, nx.single_source_dijkstra(self.G, "s")[1]["v"] + ) + + validate_path(self.G, "s", "v", 2, nx.dijkstra_path(self.G, "s", "v")) + assert nx.dijkstra_path_length(self.G, "s", "v") == 2 + + # NetworkXError: node s not reachable from moon + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path, self.G, "s", "moon") + pytest.raises(nx.NetworkXNoPath, nx.dijkstra_path_length, self.G, "s", "moon") + + validate_path(self.cycle, 0, 3, 3, nx.dijkstra_path(self.cycle, 0, 3)) + validate_path(self.cycle, 0, 4, 3, nx.dijkstra_path(self.cycle, 0, 4)) + + assert nx.single_source_dijkstra(self.cycle, 0, 0) == (0, [0]) + + def test_bidirectional_dijkstra(self): + validate_length_path( + self.XG, "s", "v", 9, *nx.bidirectional_dijkstra(self.XG, "s", "v") + ) + validate_length_path( + self.G, "s", "v", 2, *nx.bidirectional_dijkstra(self.G, "s", "v") + ) + validate_length_path( + self.cycle, 0, 3, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 3) + ) + validate_length_path( + self.cycle, 0, 4, 3, *nx.bidirectional_dijkstra(self.cycle, 0, 4) + ) + validate_length_path( + self.XG3, 0, 3, 15, *nx.bidirectional_dijkstra(self.XG3, 0, 3) + ) + validate_length_path( + self.XG4, 0, 2, 4, *nx.bidirectional_dijkstra(self.XG4, 0, 2) + ) + + # need more tests here + P = nx.single_source_dijkstra_path(self.XG, "s")["v"] + validate_path( + self.XG, + "s", + "v", + sum(self.XG[u][v]["weight"] for u, v in zip(P[:-1], P[1:])), + nx.dijkstra_path(self.XG, "s", "v"), + ) + + # check absent source + G = nx.path_graph(2) + pytest.raises(nx.NodeNotFound, nx.bidirectional_dijkstra, G, 3, 0) + + def test_weight_functions(self): + def heuristic(*z): + return sum(val**2 for val in z) + + def getpath(pred, v, s): + return [v] if v == s else getpath(pred, pred[v], s) + [v] + + def goldberg_radzik(g, s, t, weight="weight"): + pred, dist = nx.goldberg_radzik(g, s, weight=weight) + dist = dist[t] + return dist, getpath(pred, t, s) + + def astar(g, s, t, weight="weight"): + path = nx.astar_path(g, s, t, heuristic, weight=weight) + dist = nx.astar_path_length(g, s, t, heuristic, weight=weight) + return dist, path + + def vlp(G, s, t, l, F, w): + res = F(G, s, t, weight=w) + validate_length_path(G, s, t, l, *res, weight=w) + + G = self.cycle + s = 6 + t = 4 + path = [6] + list(range(t + 1)) + + def weight(u, v, _): + return 1 + v**2 + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def weight(u, v, _): + return 2 ** (u * v) + + length = sum(weight(u, v, None) for u, v in pairwise(path)) + vlp(G, s, t, length, nx.bidirectional_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_dijkstra, weight) + vlp(G, s, t, length, nx.single_source_bellman_ford, weight) + vlp(G, s, t, length, goldberg_radzik, weight) + vlp(G, s, t, length, astar, weight) + + def test_bidirectional_dijkstra_no_path(self): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + path = nx.bidirectional_dijkstra(G, 1, 6) + + @pytest.mark.parametrize( + "fn", + ( + nx.dijkstra_path, + nx.dijkstra_path_length, + nx.single_source_dijkstra_path, + nx.single_source_dijkstra_path_length, + nx.single_source_dijkstra, + nx.dijkstra_predecessor_and_distance, + ), 
+ ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with pytest.raises(nx.NodeNotFound): + fn(G, 3, 0) + # Test when source == target, which is handled specially by some functions + with pytest.raises(nx.NodeNotFound): + fn(G, 3, 3) + + def test_dijkstra_predecessor1(self): + G = nx.path_graph(4) + assert nx.dijkstra_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + + def test_dijkstra_predecessor2(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + pred, dist = nx.dijkstra_predecessor_and_distance(G, (0)) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_dijkstra_predecessor3(self): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.dijkstra_predecessor_and_distance(XG, "s", cutoff=8) + assert "v" not in D + + def test_single_source_dijkstra_path_length(self): + pl = nx.single_source_dijkstra_path_length + assert dict(pl(self.MXG4, 0))[2] == 4 + spl = pl(self.MXG4, 0, cutoff=2) + assert 2 not in spl + + def test_bidirectional_dijkstra_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", weight=10) + G.add_edge("a", "b", weight=100) + dp = nx.bidirectional_dijkstra(G, "a", "b") + assert dp == (10, ["a", "b"]) + + def test_dijkstra_pred_distance_multigraph(self): + G = nx.MultiGraph() + G.add_edge("a", "b", key="short", foo=5, weight=100) + G.add_edge("a", "b", key="long", bar=1, weight=110) + p, d = nx.dijkstra_predecessor_and_distance(G, "a") + assert p == {"a": [], "b": ["a"]} + assert d == {"a": 0, "b": 100} + + def test_negative_edge_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert not nx.negative_edge_cycle(G) + G.add_edge(8, 9, weight=-7) + G.add_edge(9, 8, weight=3) + graph_size = len(G) + assert nx.negative_edge_cycle(G) + assert graph_size == len(G) + pytest.raises(ValueError, nx.single_source_dijkstra_path_length, G, 8) + pytest.raises(ValueError, nx.single_source_dijkstra, G, 8) + pytest.raises(ValueError, nx.dijkstra_predecessor_and_distance, G, 8) + G.add_edge(9, 10) + pytest.raises(ValueError, nx.bidirectional_dijkstra, G, 8, 10) + G = nx.MultiDiGraph() + G.add_edge(2, 2, weight=-1) + assert nx.negative_edge_cycle(G) + + def test_negative_edge_cycle_empty(self): + G = nx.DiGraph() + assert not nx.negative_edge_cycle(G) + + def test_negative_edge_cycle_custom_weight_key(self): + d = nx.DiGraph() + d.add_edge("a", "b", w=-2) + d.add_edge("b", "a", w=-1) + assert nx.negative_edge_cycle(d, weight="w") + + def test_weight_function(self): + """Tests that a callable weight is interpreted as a weight + function instead of an edge attribute. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. 
+ + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. + distance, path = nx.single_source_dijkstra(G, 0, 2) + assert distance == 2 + assert path == [0, 1, 2] + # However, with the above weight function, the shortest path + # should be [0, 2], since that has a very small weight. + distance, path = nx.single_source_dijkstra(G, 0, 2, weight=weight) + assert distance == 1 / 10 + assert path == [0, 2] + + def test_all_pairs_dijkstra_path(self): + cycle = nx.cycle_graph(7) + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + p = dict(nx.all_pairs_dijkstra_path(cycle)) + assert p[0][3] == [0, 6, 5, 4, 3] + + def test_all_pairs_dijkstra_path_length(self): + cycle = nx.cycle_graph(7) + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + + cycle[1][2]["weight"] = 10 + pl = dict(nx.all_pairs_dijkstra_path_length(cycle)) + assert pl[0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + + def test_all_pairs_dijkstra(self): + cycle = nx.cycle_graph(7) + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 1, 2, 3] + + cycle[1][2]["weight"] = 10 + out = dict(nx.all_pairs_dijkstra(cycle)) + assert out[0][0] == {0: 0, 1: 1, 2: 5, 3: 4, 4: 3, 5: 2, 6: 1} + assert out[0][1][3] == [0, 6, 5, 4, 3] + + +class TestDijkstraPathLength: + """Unit tests for the :func:`networkx.dijkstra_path_length` + function. + + """ + + def test_weight_function(self): + """Tests for computing the length of the shortest path using + Dijkstra's algorithm with a user-defined weight function. + + """ + # Create a triangle in which the edge from node 0 to node 2 has + # a large weight and the other two edges have a small weight. + G = nx.complete_graph(3) + G.adj[0][2]["weight"] = 10 + G.adj[0][1]["weight"] = 1 + G.adj[1][2]["weight"] = 1 + + # The weight function will take the multiplicative inverse of + # the weights on the edges. This way, weights that were large + # before now become small and vice versa. + + def weight(u, v, d): + return 1 / d["weight"] + + # The shortest path from 0 to 2 using the actual weights on the + # edges should be [0, 1, 2]. However, with the above weight + # function, the shortest path should be [0, 2], since that has a + # very small weight. + length = nx.dijkstra_path_length(G, 0, 2, weight=weight) + assert length == 1 / 10 + + +class TestMultiSourceDijkstra: + """Unit tests for the multi-source dialect of Dijkstra's shortest + path algorithms. 
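The multi-source dialect seeds Dijkstra's algorithm with a whole set of nodes at distance zero, so one traversal yields, for every node, the distance to whichever source is nearest. A minimal sketch of the usage pattern these tests exercise, reusing the edge data from `test_two_sources` below:

```python
import networkx as nx

# Weighted path 0 -1- 1 -1- 2 -10- 3 -1- 4 with two seed nodes, 0 and 4.
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)])

# Every node is labeled with the distance to the closer seed;
# node 3 prefers seed 4 because the 2-3 edge is expensive.
dist = nx.multi_source_dijkstra_path_length(G, {0, 4})
assert dist == {0: 0, 1: 1, 2: 2, 3: 1, 4: 0}
```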
+ + """ + + def test_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra(nx.Graph(), {}) + + def test_path_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path(nx.Graph(), {}) + + def test_path_length_no_sources(self): + with pytest.raises(ValueError): + nx.multi_source_dijkstra_path_length(nx.Graph(), {}) + + @pytest.mark.parametrize( + "fn", + ( + nx.multi_source_dijkstra_path, + nx.multi_source_dijkstra_path_length, + nx.multi_source_dijkstra, + ), + ) + def test_absent_source(self, fn): + G = nx.path_graph(2) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 0) + with pytest.raises(nx.NodeNotFound): + fn(G, [3], 3) + + def test_two_sources(self): + edges = [(0, 1, 1), (1, 2, 1), (2, 3, 10), (3, 4, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + sources = {0, 4} + distances, paths = nx.multi_source_dijkstra(G, sources) + expected_distances = {0: 0, 1: 1, 2: 2, 3: 1, 4: 0} + expected_paths = {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [4, 3], 4: [4]} + assert distances == expected_distances + assert paths == expected_paths + + def test_simple_paths(self): + G = nx.path_graph(4) + lengths = nx.multi_source_dijkstra_path_length(G, [0]) + assert lengths == {n: n for n in G} + paths = nx.multi_source_dijkstra_path(G, [0]) + assert paths == {n: list(range(n + 1)) for n in G} + + +class TestBellmanFordAndGoldbergRadzik(WeightedTestBase): + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]} + assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0} + assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]}) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0}) + assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0}) + + def test_absent_source_bellman_ford(self): + # the check is in _bellman_ford; this provides regression testing + # against later changes to "client" Bellman-Ford functions + G = nx.path_graph(2) + for fn in ( + nx.bellman_ford_predecessor_and_distance, + nx.bellman_ford_path, + nx.bellman_ford_path_length, + nx.single_source_bellman_ford_path, + nx.single_source_bellman_ford_path_length, + nx.single_source_bellman_ford, + ): + pytest.raises(nx.NodeNotFound, fn, G, 3, 0) + pytest.raises(nx.NodeNotFound, fn, G, 3, 3) + + def test_absent_source_goldberg_radzik(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.goldberg_radzik(G, 3, 0) + + def test_negative_cycle_heuristic(self): + G = nx.DiGraph() + G.add_edge(0, 1, weight=-1) + G.add_edge(1, 2, weight=-1) + G.add_edge(2, 3, weight=-1) + G.add_edge(3, 0, weight=3) + assert not nx.negative_edge_cycle(G, heuristic=True) + G.add_edge(2, 0, weight=1.999) + assert nx.negative_edge_cycle(G, heuristic=True) + G.edges[2, 0]["weight"] = 2 + assert not nx.negative_edge_cycle(G, heuristic=True) + + def test_negative_cycle_consistency(self): + import random + + unif = random.uniform + for random_seed in range(2): # range(20): + random.seed(random_seed) + for density in [0.1, 0.9]: # .3, .7, .9]: + for N in [1, 10, 20]: # range(1, 60 - int(30 * density)): + for max_cost in [1, 90]: # [1, 10, 40, 90]: + G = nx.binomial_graph(N, density, seed=4, directed=True) + edges = ((u, v, unif(-1, max_cost)) for u, v in G.edges) + G.add_weighted_edges_from(edges) + + no_heuristic = nx.negative_edge_cycle(G, heuristic=False) + with_heuristic = nx.negative_edge_cycle(G, heuristic=True) + assert no_heuristic == with_heuristic + + def test_negative_cycle(self): 
+ G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-7) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.cycle_graph(5) # undirected Graph + G.add_edge(1, 2, weight=-3) + for i in range(5): + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i + ) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i) + G = nx.DiGraph([(1, 1, {"weight": -1})]) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + G = nx.MultiDiGraph([(1, 1, {"weight": -1})]) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1) + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + + def test_zero_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(2, 3, weight=-4) + # check that zero cycle doesn't raise + nx.goldberg_radzik(G, 1) + nx.bellman_ford_predecessor_and_distance(G, 1) + + G.add_edge(2, 3, weight=-4.0001) + # check that negative cycle does raise + pytest.raises( + nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1 + ) + pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1) + + def test_find_negative_cycle_longer_cycle(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + nx.add_cycle(G, [3, 5, 6, 7, 8, 9]) + G.add_edge(1, 2, weight=-30) + assert nx.find_negative_cycle(G, 1) == [0, 1, 2, 3, 4, 0] + assert nx.find_negative_cycle(G, 7) == [2, 3, 4, 0, 1, 2] + + def test_find_negative_cycle_no_cycle(self): + G = nx.path_graph(5, create_using=nx.DiGraph()) + pytest.raises(nx.NetworkXError, nx.find_negative_cycle, G, 3) + + def test_find_negative_cycle_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1, weight=-1) + assert nx.find_negative_cycle(G, 1) == [1, 0, 1] + + def test_negative_weight(self): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + G.add_edge(1, 2, weight=-3) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + 4: [0, 1, 2, 3, 4], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: -2, + 3: -1, + 4: 0, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]}, + ) + assert 
nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2], 4: [3]}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 1, 3: 2, 4: 3}, + {0: 0, 1: 1, 2: -2, 3: -1, 4: 0}, + ) + + def test_not_connected(self): + G = nx.complete_graph(6) + G.add_edge(10, 11) + G.add_edge(10, 12) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + # not connected, with a component not containing the source that + # contains a negative cycle. + G = nx.complete_graph(6) + G.add_edges_from( + [ + ("A", "B", {"load": 3}), + ("B", "C", {"load": -10}), + ("C", "A", {"load": 2}), + ] + ) + assert nx.single_source_bellman_ford_path(G, 0, weight="load") == { + 0: [0], + 1: [0, 1], + 2: [0, 2], + 3: [0, 3], + 4: [0, 4], + 5: [0, 5], + } + assert nx.single_source_bellman_ford_path_length(G, 0, weight="load") == { + 0: 0, + 1: 1, + 2: 1, + 3: 1, + 4: 1, + 5: 1, + } + assert nx.single_source_bellman_ford(G, 0, weight="load") == ( + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + {0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0, weight="load") == ( + {0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + assert nx.goldberg_radzik(G, 0, weight="load") == ( + {0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, + ) + + def test_multigraph(self): + assert nx.bellman_ford_path(self.MXG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.MXG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.MXG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert nx.single_source_bellman_ford_path_length(self.MXG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.MXG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + P, D = nx.goldberg_radzik(self.MXG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2] + assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4 + assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2] + assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4 + D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2) + assert D == 4 + assert P == [0, 1, 2] + P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0) + assert P[2] == [1] + assert D[2] == 4 + P, D = nx.goldberg_radzik(self.MXG4, 0) + assert P[2] == 1 + assert D[2] == 4 + + def test_others(self): + assert nx.bellman_ford_path(self.XG, "s", "v") == ["s", "x", "u", "v"] + assert nx.bellman_ford_path_length(self.XG, "s", "v") == 9 + assert nx.single_source_bellman_ford_path(self.XG, "s")["v"] == [ + "s", + "x", + "u", + "v", + ] + assert 
nx.single_source_bellman_ford_path_length(self.XG, "s")["v"] == 9 + D, P = nx.single_source_bellman_ford(self.XG, "s", target="v") + assert D == 9 + assert P == ["s", "x", "u", "v"] + (P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, "s") + assert P["v"] == ["u"] + assert D["v"] == 9 + (P, D) = nx.goldberg_radzik(self.XG, "s") + assert P["v"] == "u" + assert D["v"] == 9 + + def test_path_graph(self): + G = nx.path_graph(4) + assert nx.single_source_bellman_ford_path(G, 0) == { + 0: [0], + 1: [0, 1], + 2: [0, 1, 2], + 3: [0, 1, 2, 3], + } + assert nx.single_source_bellman_ford_path_length(G, 0) == { + 0: 0, + 1: 1, + 2: 2, + 3: 3, + } + assert nx.single_source_bellman_ford(G, 0) == ( + {0: 0, 1: 1, 2: 2, 3: 3}, + {0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 0) == ( + {0: [], 1: [0], 2: [1], 3: [2]}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.goldberg_radzik(G, 0) == ( + {0: None, 1: 0, 2: 1, 3: 2}, + {0: 0, 1: 1, 2: 2, 3: 3}, + ) + assert nx.single_source_bellman_ford_path(G, 3) == { + 0: [3, 2, 1, 0], + 1: [3, 2, 1], + 2: [3, 2], + 3: [3], + } + assert nx.single_source_bellman_ford_path_length(G, 3) == { + 0: 3, + 1: 2, + 2: 1, + 3: 0, + } + assert nx.single_source_bellman_ford(G, 3) == ( + {0: 3, 1: 2, 2: 1, 3: 0}, + {0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]}, + ) + assert nx.bellman_ford_predecessor_and_distance(G, 3) == ( + {0: [1], 1: [2], 2: [3], 3: []}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + assert nx.goldberg_radzik(G, 3) == ( + {0: 1, 1: 2, 2: 3, 3: None}, + {0: 3, 1: 2, 2: 1, 3: 0}, + ) + + def test_4_cycle(self): + # 4-cycle + G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)]) + dist, path = nx.single_source_bellman_ford(G, 0) + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + assert path[0] == [0] + assert path[1] == [0, 1] + assert path[2] in [[0, 1, 2], [0, 3, 2]] + assert path[3] == [0, 3] + + pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0) + assert pred[0] == [] + assert pred[1] == [0] + assert pred[2] in [[1, 3], [3, 1]] + assert pred[3] == [0] + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + pred, dist = nx.goldberg_radzik(G, 0) + assert pred[0] is None + assert pred[1] == 0 + assert pred[2] in [1, 3] + assert pred[3] == 0 + assert dist == {0: 0, 1: 1, 2: 2, 3: 1} + + def test_negative_weight_bf_path(self): + G = nx.DiGraph() + G.add_nodes_from("abcd") + G.add_edge("a", "d", weight=0) + G.add_edge("a", "b", weight=1) + G.add_edge("b", "c", weight=-3) + G.add_edge("c", "d", weight=1) + + assert nx.bellman_ford_path(G, "a", "d") == ["a", "b", "c", "d"] + assert nx.bellman_ford_path_length(G, "a", "d") == -1 + + def test_zero_cycle_smoke(self): + D = nx.DiGraph() + D.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 1, -2)]) + + nx.bellman_ford_path(D, 1, 3) + nx.dijkstra_path(D, 1, 3) + nx.bidirectional_dijkstra(D, 1, 3) + # FIXME nx.goldberg_radzik(D, 1) + + +class TestJohnsonAlgorithm(WeightedTestBase): + def test_single_node_graph(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.johnson(G) == {0: {0: [0]}} + + def test_negative_cycle(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + G = nx.Graph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + pytest.raises(nx.NetworkXUnbounded, nx.johnson, G) + + def 
test_negative_weights(self): + G = nx.DiGraph() + G.add_weighted_edges_from( + [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ) + paths = nx.johnson(G) + assert paths == { + "1": {"1": ["1"], "3": ["1", "2", "3"], "2": ["1", "2"]}, + "0": { + "1": ["0", "1"], + "0": ["0"], + "3": ["0", "1", "2", "3"], + "2": ["0", "1", "2"], + }, + "3": {"3": ["3"]}, + "2": {"3": ["2", "3"], "2": ["2"]}, + } + + def test_unweighted_graph(self): + G = nx.Graph() + G.add_edges_from([(1, 0), (2, 1)]) + H = G.copy() + nx.set_edge_attributes(H, values=1, name="weight") + assert nx.johnson(G) == nx.johnson(H) + + def test_partially_weighted_graph_with_negative_edges(self): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (1, 0)]) + G[1][0]["weight"] = -2 + G[0][1]["weight"] = 3 + G[1][2]["weight"] = -4 + + H = G.copy() + H[2][0]["weight"] = 1 + + I = G.copy() + I[2][0]["weight"] = 8 + + assert nx.johnson(G) == nx.johnson(H) + assert nx.johnson(G) != nx.johnson(I) + + def test_graphs(self): + validate_path(self.XG, "s", "v", 9, nx.johnson(self.XG)["s"]["v"]) + validate_path(self.MXG, "s", "v", 9, nx.johnson(self.MXG)["s"]["v"]) + validate_path(self.XG2, 1, 3, 4, nx.johnson(self.XG2)[1][3]) + validate_path(self.XG3, 0, 3, 15, nx.johnson(self.XG3)[0][3]) + validate_path(self.XG4, 0, 2, 4, nx.johnson(self.XG4)[0][2]) + validate_path(self.MXG4, 0, 2, 4, nx.johnson(self.MXG4)[0][2]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/unweighted.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/unweighted.py new file mode 100644 index 0000000000000000000000000000000000000000..1503ba7478446ae1b8fb28c2722883946712cfd8 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/unweighted.py @@ -0,0 +1,570 @@ +""" +Shortest path algorithms for unweighted graphs. +""" +import warnings + +import networkx as nx + +__all__ = [ + "bidirectional_shortest_path", + "single_source_shortest_path", + "single_source_shortest_path_length", + "single_target_shortest_path", + "single_target_shortest_path_length", + "all_pairs_shortest_path", + "all_pairs_shortest_path_length", + "predecessor", +] + + +@nx._dispatch +def single_source_shortest_path_length(G, source, cutoff=None): + """Compute the shortest path lengths from source to all reachable nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : dict + Dict keyed by node to shortest path length to source. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.single_source_shortest_path_length(G, 0) + >>> length[4] + 4 + >>> for node in length: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + + See Also + -------- + shortest_path_length + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} is not in G") + if cutoff is None: + cutoff = float("inf") + nextlevel = [source] + return dict(_single_shortest_path_length(G._adj, nextlevel, cutoff)) + + +def _single_shortest_path_length(adj, firstlevel, cutoff): + """Yields (node, level) in a breadth first search + + Shortest Path Length helper function + Parameters + ---------- + adj : dict + Adjacency dict or view + firstlevel : list + starting nodes, e.g. 
[source] or [target] + cutoff : int or float + level at which we stop the process + """ + seen = set(firstlevel) + nextlevel = firstlevel + level = 0 + n = len(adj) + for v in nextlevel: + yield (v, level) + while nextlevel and cutoff > level: + level += 1 + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in adj[v]: + if w not in seen: + seen.add(w) + nextlevel.append(w) + yield (w, level) + if len(seen) == n: + return + + +@nx._dispatch +def single_target_shortest_path_length(G, target, cutoff=None): + """Compute the shortest path lengths to target from all reachable nodes. + + Parameters + ---------- + G : NetworkX graph + + target : node + Target node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + lengths : iterator + (source, shortest path length) iterator + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> length = dict(nx.single_target_shortest_path_length(G, 4)) + >>> length[0] + 4 + >>> for node in range(5): + ... print(f"{node}: {length[node]}") + 0: 4 + 1: 3 + 2: 2 + 3: 1 + 4: 0 + + See Also + -------- + single_source_shortest_path_length, shortest_path_length + """ + if target not in G: + raise nx.NodeNotFound(f"Target {target} is not in G") + + msg = "single_target_shortest_path_length will return a dict starting in v3.3" + warnings.warn(msg, DeprecationWarning) + + if cutoff is None: + cutoff = float("inf") + # handle either directed or undirected + adj = G._pred if G.is_directed() else G._adj + nextlevel = [target] + # for version 3.3 we will return a dict like this: + # return dict(_single_shortest_path_length(adj, nextlevel, cutoff)) + return _single_shortest_path_length(adj, nextlevel, cutoff) + + +@nx._dispatch +def all_pairs_shortest_path_length(G, cutoff=None): + """Computes the shortest path lengths between all nodes in `G`. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer, optional + Depth at which to stop the search. Only paths of length at most + `cutoff` are returned. + + Returns + ------- + lengths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Notes + ----- + The iterator returned only has reachable node pairs. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_shortest_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + """ + length = single_source_shortest_path_length + # TODO This can be trivially parallelized. + for n in G: + yield (n, length(G, n, cutoff=cutoff)) + + +@nx._dispatch +def bidirectional_shortest_path(G, source, target): + """Returns a list of nodes in a shortest path between source and target. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + Returns + ------- + path: list + List of nodes in a path from source to target. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 0, 4, 5, 6, 7, 4]) + >>> nx.bidirectional_shortest_path(G, 2, 6) + [2, 1, 0, 4, 5, 6] + + See Also + -------- + shortest_path + + Notes + ----- + This algorithm is used by shortest_path(G, source, target). 
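The payoff of searching from both ends is that two BFS balls of radius d/2 replace one ball of radius d; with branching factor b that is roughly O(b^(d/2)) visited nodes instead of O(b^d). A small sketch of the observable behavior; on an even cycle the two directions tie, so either orientation is a valid shortest path:

```python
import networkx as nx

# On an 8-cycle the endpoints are distance 4 apart in both directions,
# so the meet-in-the-middle search may return either orientation.
G = nx.cycle_graph(8)
path = nx.bidirectional_shortest_path(G, 0, 4)
assert path in ([0, 1, 2, 3, 4], [0, 7, 6, 5, 4])
assert len(path) - 1 == nx.shortest_path_length(G, 0, 4) == 4
```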
+ """ + + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from source to w + while w is not None: + path.append(w) + w = pred[w] + path.reverse() + # from w to target + w = succ[path[-1]] + while w is not None: + path.append(w) + w = succ[w] + + return path + + +def _bidirectional_pred_succ(G, source, target): + """Bidirectional shortest path helper. + + Returns (pred, succ, w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. + """ + # does BFS from both source and target and meets in the middle + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.pred + Gsucc = G.succ + else: + Gpred = G.adj + Gsucc = G.adj + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc[v]: + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: # path found + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred[v]: + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +@nx._dispatch +def single_source_shortest_path(G, source, cutoff=None): + """Compute shortest path between source + and all other nodes reachable from source. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + paths : dictionary + Dictionary, keyed by target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.single_source_shortest_path(G, 0) + >>> path[4] + [0, 1, 2, 3, 4] + + Notes + ----- + The shortest path is not necessarily unique. So there can be multiple + paths between the source and each target node, all of which have the + same 'shortest' length. For each target node, this function returns + only one of those paths. + + See Also + -------- + shortest_path + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} not in G") + + def join(p1, p2): + return p1 + p2 + + if cutoff is None: + cutoff = float("inf") + nextlevel = {source: 1} # list of nodes to check at next level + paths = {source: [source]} # paths dictionary (paths to key from source) + return dict(_single_shortest_path(G.adj, nextlevel, paths, cutoff, join)) + + +def _single_shortest_path(adj, firstlevel, paths, cutoff, join): + """Returns shortest paths + + Shortest Path helper function + Parameters + ---------- + adj : dict + Adjacency dict or view + firstlevel : dict + starting nodes, e.g. {source: 1} or {target: 1} + paths : dict + paths for starting nodes, e.g. 
{source: [source]} + cutoff : int or float + level at which we stop the process + join : function + function to construct a path from two partial paths. Requires two + list inputs `p1` and `p2`, and returns a list. Usually returns + `p1 + p2` (forward from source) or `p2 + p1` (backward from target) + """ + level = 0 # the current level + nextlevel = firstlevel + while nextlevel and cutoff > level: + thislevel = nextlevel + nextlevel = {} + for v in thislevel: + for w in adj[v]: + if w not in paths: + paths[w] = join(paths[v], [w]) + nextlevel[w] = 1 + level += 1 + return paths + + +@nx._dispatch +def single_target_shortest_path(G, target, cutoff=None): + """Compute shortest path to target from all nodes that reach target. + + Parameters + ---------- + G : NetworkX graph + + target : node label + Target node for path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + paths : dictionary + Dictionary, keyed by target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> path = nx.single_target_shortest_path(G, 4) + >>> path[0] + [0, 1, 2, 3, 4] + + Notes + ----- + The shortest path is not necessarily unique. So there can be multiple + paths between the source and each target node, all of which have the + same 'shortest' length. For each target node, this function returns + only one of those paths. + + See Also + -------- + shortest_path, single_source_shortest_path + """ + if target not in G: + raise nx.NodeNotFound(f"Target {target} not in G") + + def join(p1, p2): + return p2 + p1 + + # handle undirected graphs + adj = G.pred if G.is_directed() else G.adj + if cutoff is None: + cutoff = float("inf") + nextlevel = {target: 1} # list of nodes to check at next level + paths = {target: [target]} # paths dictionary (paths to key from source) + return dict(_single_shortest_path(adj, nextlevel, paths, cutoff, join)) + + +@nx._dispatch +def all_pairs_shortest_path(G, cutoff=None): + """Compute shortest paths between all nodes. + + Parameters + ---------- + G : NetworkX graph + + cutoff : integer, optional + Depth at which to stop the search. Only paths of length at most + `cutoff` are returned. + + Returns + ------- + paths : iterator + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_shortest_path(G)) + >>> print(path[0][4]) + [0, 1, 2, 3, 4] + + Notes + ----- + There may be multiple shortest paths with the same length between + two nodes. For each pair, this function returns only one of those paths. + + See Also + -------- + floyd_warshall + all_pairs_all_shortest_paths + + """ + # TODO This can be trivially parallelized. + for n in G: + yield (n, single_source_shortest_path(G, n, cutoff=cutoff)) + + +@nx._dispatch +def predecessor(G, source, target=None, cutoff=None, return_seen=None): + """Returns dict of predecessors for the path from source to all nodes in G. + + Parameters + ---------- + G : NetworkX graph + + source : node label + Starting node for path + + target : node label, optional + Ending node for path. If provided only predecessors between + source and target are returned + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + return_seen : bool, optional (default=None) + Whether to return a dictionary, keyed by node, of the level (number of + hops) to reach the node (as seen during breadth-first-search). 
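Because the predecessor dict keeps every predecessor that lies on some shortest path, all shortest paths to a node can be rebuilt from it. A sketch of that expansion; the helper name `paths_from_pred` is ours for illustration, not part of the library:

```python
import networkx as nx

def paths_from_pred(pred, target):
    # A node with an empty predecessor list is the source itself.
    if not pred[target]:
        return [[target]]
    # Otherwise extend every shortest path to each predecessor by `target`.
    return [p + [target] for u in pred[target] for p in paths_from_pred(pred, u)]

G = nx.cycle_graph(4)
pred = nx.predecessor(G, 0)
# Node 2 sits opposite the source, reachable through either neighbor.
assert sorted(paths_from_pred(pred, 2)) == [[0, 1, 2], [0, 3, 2]]
```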
+ + Returns + ------- + pred : dictionary + Dictionary, keyed by node, of predecessors in the shortest path. + + + (pred, seen): tuple of dictionaries + If `return_seen` argument is set to `True`, then a tuple of dictionaries + is returned. The first element is the dictionary, keyed by node, of + predecessors in the shortest path. The second element is the dictionary, + keyed by node, of the level (number of hops) to reach the node (as seen + during breadth-first-search). + + Examples + -------- + >>> G = nx.path_graph(4) + >>> list(G) + [0, 1, 2, 3] + >>> nx.predecessor(G, 0) + {0: [], 1: [0], 2: [1], 3: [2]} + >>> nx.predecessor(G, 0, return_seen=True) + ({0: [], 1: [0], 2: [1], 3: [2]}, {0: 0, 1: 1, 2: 2, 3: 3}) + + + """ + if source not in G: + raise nx.NodeNotFound(f"Source {source} not in G") + + level = 0 # the current level + nextlevel = [source] # list of nodes to check at next level + seen = {source: level} # level (number of hops) when seen in BFS + pred = {source: []} # predecessor dictionary + while nextlevel: + level = level + 1 + thislevel = nextlevel + nextlevel = [] + for v in thislevel: + for w in G[v]: + if w not in seen: + pred[w] = [v] + seen[w] = level + nextlevel.append(w) + elif seen[w] == level: # add v to predecessor list if it + pred[w].append(v) # is at the correct level + if cutoff and cutoff <= level: + break + + if target is not None: + if return_seen: + if target not in pred: + return ([], -1) # No predecessor + return (pred[target], seen[target]) + else: + if target not in pred: + return [] # No predecessor + return pred[target] + else: + if return_seen: + return (pred, seen) + else: + return pred diff --git a/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py new file mode 100644 index 0000000000000000000000000000000000000000..bbbb03d9eee83543133af4c7a31df143a46429f3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py @@ -0,0 +1,2514 @@ +""" +Shortest path algorithms for weighted graphs. +""" + +from collections import deque +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors + +__all__ = [ + "dijkstra_path", + "dijkstra_path_length", + "bidirectional_dijkstra", + "single_source_dijkstra", + "single_source_dijkstra_path", + "single_source_dijkstra_path_length", + "multi_source_dijkstra", + "multi_source_dijkstra_path", + "multi_source_dijkstra_path_length", + "all_pairs_dijkstra", + "all_pairs_dijkstra_path", + "all_pairs_dijkstra_path_length", + "dijkstra_predecessor_and_distance", + "bellman_ford_path", + "bellman_ford_path_length", + "single_source_bellman_ford", + "single_source_bellman_ford_path", + "single_source_bellman_ford_path_length", + "all_pairs_bellman_ford_path", + "all_pairs_bellman_ford_path_length", + "bellman_ford_predecessor_and_distance", + "negative_edge_cycle", + "find_negative_cycle", + "goldberg_radzik", + "johnson", +] + + +def _weight_function(G, weight): + """Returns a function that returns the weight of an edge. + + The returned function is specifically suitable for input to + functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`. + + Parameters + ---------- + G : NetworkX graph. + + weight : string or function + If it is callable, `weight` itself is returned. 
If it is a string, + it is assumed to be the name of the edge attribute that represents + the weight of an edge. In that case, a function is returned that + gets the edge weight according to the specified edge attribute. + + Returns + ------- + function + This function returns a callable that accepts exactly three inputs: + a node, a node adjacent to the first one, and the edge attribute + dictionary for the edge joining those nodes. That function returns + a number representing the weight of an edge. + + If `G` is a multigraph, and `weight` is not callable, the + minimum edge weight over all parallel edges is returned. If any edge + does not have an attribute with key `weight`, it is assumed to + have weight one. + + """ + if callable(weight): + return weight + # If the weight keyword argument is not callable, we assume it is a + # string representing the edge attribute containing the weight of + # the edge. + if G.is_multigraph(): + return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values()) + return lambda u, v, data: data.get(weight, 1) + + + @nx._dispatch(edge_attrs="weight") + def dijkstra_path(G, source, target, weight="weight"): + """Returns the shortest weighted path from source to target in G. + + Uses Dijkstra's method to compute the shortest weighted path + between two nodes in a graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + path : list + List of nodes in a shortest path. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> print(nx.dijkstra_path(G, 0, 4)) + [0, 1, 2, 3, 4] + + Find edges of shortest path in Multigraph + + >>> G = nx.MultiDiGraph() + >>> G.add_weighted_edges_from([(1, 2, 0.75), (1, 2, 0.5), (2, 3, 0.5), (1, 3, 1.5)]) + >>> nodes = nx.dijkstra_path(G, 1, 3) + >>> edges = nx.utils.pairwise(nodes) + >>> list((u, v, min(G[u][v], key=lambda k: G[u][v][k].get('weight', 1))) for u, v in edges) + [(1, 2, 1), (2, 3, 0)] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + The weight function can be used to include node weights. + + >>> def func(u, v, d): + ... node_u_wt = G.nodes[u].get("node_weight", 1) + ... node_v_wt = G.nodes[v].get("node_weight", 1) + ... edge_wt = d.get("weight", 1) + ... return node_u_wt / 2 + node_v_wt / 2 + edge_wt + + In this example we take the average of start and end node + weights of an edge and add it to the weight of the edge. + + The function :func:`single_source_dijkstra` computes both + path and length-of-path; if you need both, use that function.
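The hidden-edge idiom from the Notes is worth seeing end to end: returning None from the weight callable removes an edge from consideration, here confining the search to "red" edges. A sketch with invented edge colors:

```python
import networkx as nx

# A triangle whose direct 0-2 edge is blue; hiding non-red edges forces
# the longer all-red route.
G = nx.Graph()
G.add_edge(0, 1, color="red")
G.add_edge(1, 2, color="red")
G.add_edge(0, 2, color="blue")

def red_only(u, v, d):
    return 1 if d["color"] == "red" else None  # None hides the edge

assert nx.dijkstra_path(G, 0, 2) == [0, 2]                      # blue shortcut used
assert nx.dijkstra_path(G, 0, 2, weight=red_only) == [0, 1, 2]  # shortcut hidden
```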
+ + See Also + -------- + bidirectional_dijkstra + bellman_ford_path + single_source_dijkstra + """ + (length, path) = single_source_dijkstra(G, source, target=target, weight=weight) + return path + + + @nx._dispatch(edge_attrs="weight") + def dijkstra_path_length(G, source, target, weight="weight"): + """Returns the shortest weighted path length in G from source to target. + + Uses Dijkstra's method to compute the shortest weighted path length + between two nodes in a graph. + + Parameters + ---------- + G : NetworkX graph + + source : node label + starting node for path + + target : node label + ending node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + length : number + Shortest path length. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dijkstra_path_length(G, 0, 4) + 4 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + The function :func:`single_source_dijkstra` computes both + path and length-of-path; if you need both, use that function. + + See Also + -------- + bidirectional_dijkstra + bellman_ford_path_length + single_source_dijkstra + + """ + if source not in G: + raise nx.NodeNotFound(f"Node {source} not found in graph") + if source == target: + return 0 + weight = _weight_function(G, weight) + length = _dijkstra(G, source, weight, target=target) + try: + return length[target] + except KeyError as err: + raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from err + + + @nx._dispatch(edge_attrs="weight") + def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"): + """Find shortest weighted paths in G from a source node. + + Compute shortest path between source and all other reachable + nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge.
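One note on `cutoff` worth making concrete: the bound applies to the summed edge weight, not the hop count, and nodes beyond it are simply absent from the returned dict rather than mapped to partial paths. A sketch:

```python
import networkx as nx

# Unit-weight path 0-1-2-3; a cutoff of 2 keeps nodes at distance <= 2.
G = nx.path_graph(4)
near = nx.single_source_dijkstra_path(G, 0, cutoff=2)
assert sorted(near) == [0, 1, 2]   # node 3 is absent, not truncated
assert near[2] == [0, 1, 2]
```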
+ 
+ Returns
+ -------
+ paths : dictionary
+ Dictionary of shortest paths keyed by target.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> path = nx.single_source_dijkstra_path(G, 0)
+ >>> path[4]
+ [0, 1, 2, 3, 4]
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The weight function can be used to hide edges by returning None.
+ So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+ will find the shortest red path.
+ 
+ See Also
+ --------
+ single_source_dijkstra, single_source_bellman_ford
+ 
+ """
+ return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight)
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
+ """Find shortest weighted path lengths in G from a source node.
+ 
+ Compute the shortest path length between source and all other
+ reachable nodes for a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ Starting node for path
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number or None to indicate a hidden edge.
+ 
+ Returns
+ -------
+ length : dict
+ Dict keyed by node to shortest path length from source.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> length = nx.single_source_dijkstra_path_length(G, 0)
+ >>> length[4]
+ 4
+ >>> for node in [0, 1, 2, 3, 4]:
+ ... print(f"{node}: {length[node]}")
+ 0: 0
+ 1: 1
+ 2: 2
+ 3: 3
+ 4: 4
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The weight function can be used to hide edges by returning None.
+ So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
+ will find the shortest red path.
+ 
+ See Also
+ --------
+ single_source_dijkstra, single_source_bellman_ford_path_length
+ 
+ """
+ return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"):
+ """Find shortest weighted paths and lengths from a source node.
+ 
+ Uses Dijkstra's algorithm to compute shortest paths and lengths
+ between a source and all other reachable nodes in a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ Starting node for path
+ 
+ target : node label, optional
+ Ending node for path
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff. + + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list. + If target is None, paths and lengths to all nodes are computed. + The return value is a tuple of two dictionaries keyed by target nodes. + The first dictionary stores distance to each target node. + The second stores the path to each target node. + If target is not None, returns a tuple (distance, path), where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.single_source_dijkstra(G, 0) + >>> length[4] + 4 + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 3 + 4: 4 + >>> path[4] + [0, 1, 2, 3, 4] + >>> length, path = nx.single_source_dijkstra(G, 0, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + See Also + -------- + single_source_dijkstra_path + single_source_dijkstra_path_length + single_source_bellman_ford + """ + return multi_source_dijkstra( + G, {source}, cutoff=cutoff, target=target, weight=weight + ) + + +@nx._dispatch(edge_attrs="weight") +def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted paths in G from a given set of source + nodes. + + Compute shortest path between any of the source nodes and all other + reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. 
+ + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + paths : dictionary + Dictionary of shortest paths keyed by target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = nx.multi_source_dijkstra_path(G, {0, 4}) + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. + + See Also + -------- + multi_source_dijkstra, multi_source_bellman_ford + + """ + length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight) + return path + + +@nx._dispatch(edge_attrs="weight") +def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"): + """Find shortest weighted path lengths in G from a given set of + source nodes. + + Compute the shortest path length between any of the source nodes and + all other reachable nodes for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + length : dict + Dict keyed by node to shortest path length to nearest source. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = nx.multi_source_dijkstra_path_length(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. 
+ + See Also + -------- + multi_source_dijkstra + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + weight = _weight_function(G, weight) + return _dijkstra_multisource(G, sources, weight, cutoff=cutoff) + + +@nx._dispatch(edge_attrs="weight") +def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"): + """Find shortest weighted paths and lengths from a given set of + source nodes. + + Uses Dijkstra's algorithm to compute the shortest paths and lengths + between one of the source nodes and the given `target`, or all other + reachable nodes if not specified, for a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty set of nodes + Starting nodes for paths. If this is just a set containing a + single node, then all paths computed by this function will start + from that node. If there are two or more nodes in the set, the + computed paths may begin from any one of the start nodes. + + target : node label, optional + Ending node for path + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + distance, path : pair of dictionaries, or numeric and list + If target is None, returns a tuple of two dictionaries keyed by node. + The first dictionary stores distance from one of the source nodes. + The second stores the path from one of the sources to that node. + If target is not None, returns a tuple of (distance, path) where + distance is the distance from source to target and path is a list + representing the path from source to target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"{node}: {length[node]}") + 0: 0 + 1: 1 + 2: 2 + 3: 1 + 4: 0 + >>> path[1] + [0, 1] + >>> path[3] + [4, 3] + + >>> length, path = nx.multi_source_dijkstra(G, {0, 4}, 1) + >>> length + 1 + >>> path + [0, 1] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + Based on the Python cookbook recipe (119466) at + https://code.activestate.com/recipes/119466/ + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). + + Raises + ------ + ValueError + If `sources` is empty. + NodeNotFound + If any of `sources` is not in `G`. 
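+ 
+ If `target` is itself one of the sources, the distance is zero and
+ the path is the single node (a small illustration of the
+ short-circuit in the code below):
+ 
+ >>> nx.multi_source_dijkstra(nx.path_graph(5), {0, 4}, 4)
+ (0, [4])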
+ + See Also + -------- + multi_source_dijkstra_path + multi_source_dijkstra_path_length + + """ + if not sources: + raise ValueError("sources must not be empty") + for s in sources: + if s not in G: + raise nx.NodeNotFound(f"Node {s} not found in graph") + if target in sources: + return (0, [target]) + weight = _weight_function(G, weight) + paths = {source: [source] for source in sources} # dictionary of paths + dist = _dijkstra_multisource( + G, sources, weight, paths=paths, cutoff=cutoff, target=target + ) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + raise nx.NetworkXNoPath(f"No path to {target}.") from err + + +def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None): + """Uses Dijkstra's algorithm to find shortest weighted paths from a + single source. + + This is a convenience function for :func:`_dijkstra_multisource` + with all the arguments the same, except the keyword argument + `sources` set to ``[source]``. + + """ + return _dijkstra_multisource( + G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target + ) + + +def _dijkstra_multisource( + G, sources, weight, pred=None, paths=None, cutoff=None, target=None +): + """Uses Dijkstra's algorithm to find shortest weighted paths + + Parameters + ---------- + G : NetworkX graph + + sources : non-empty iterable of nodes + Starting nodes for paths. If this is just an iterable containing + a single node, then all paths computed by this function will + start from that node. If there are two or more nodes in this + iterable, the computed paths may begin from any one of the start + nodes. + + weight: function + Function with (u, v, data) input that returns that edge's weight + or None to indicate a hidden edge + + pred: dict of lists, optional(default=None) + dict to store a list of predecessors keyed by that node + If None, predecessors are not stored. + + paths: dict, optional (default=None) + dict to store the path list from source to each node, keyed by node. + If None, paths are not stored. + + target : node label, optional + Ending node for path. Search is halted when target is found. + + cutoff : integer or float, optional + Length (sum of edge weights) at which the search is stopped. + If cutoff is provided, only return paths with summed weight <= cutoff. + + Returns + ------- + distance : dictionary + A mapping from node to shortest distance to that node from one + of the source nodes. + + Raises + ------ + NodeNotFound + If any of `sources` is not in `G`. + + Notes + ----- + The optional predecessor and path dictionaries can be accessed by + the caller through the original pred and paths objects passed + as arguments. No need to explicitly return pred or paths. + + """ + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + + push = heappush + pop = heappop + dist = {} # dictionary of final distances + seen = {} + # fringe is heapq with 3-tuples (distance,c,node) + # use the count c to avoid comparing nodes (may not be able to) + c = count() + fringe = [] + for source in sources: + seen[source] = 0 + push(fringe, (0, next(c), source)) + while fringe: + (d, _, v) = pop(fringe) + if v in dist: + continue # already searched this node. 
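+ # The first pop of v finalizes its distance: with nonnegative weights
+ # no shorter route to v can appear later, so any stale (larger-distance)
+ # heap entries for v are discarded by the check above (lazy deletion).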
dist[v] = d
+ if v == target:
+ break
+ for u, e in G_succ[v].items():
+ cost = weight(v, u, e)
+ if cost is None:
+ continue
+ vu_dist = dist[v] + cost
+ if cutoff is not None:
+ if vu_dist > cutoff:
+ continue
+ if u in dist:
+ u_dist = dist[u]
+ if vu_dist < u_dist:
+ raise ValueError("Contradictory paths found: negative weights?")
+ elif pred is not None and vu_dist == u_dist:
+ pred[u].append(v)
+ elif u not in seen or vu_dist < seen[u]:
+ seen[u] = vu_dist
+ push(fringe, (vu_dist, next(c), u))
+ if paths is not None:
+ paths[u] = paths[v] + [u]
+ if pred is not None:
+ pred[u] = [v]
+ elif vu_dist == seen[u]:
+ if pred is not None:
+ pred[u].append(v)
+ 
+ # The optional predecessor and path dictionaries can be accessed
+ # by the caller via the pred and paths objects passed as arguments.
+ return dist
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
+ """Compute weighted shortest path length and predecessors.
+ 
+ Uses Dijkstra's method to obtain the shortest weighted paths
+ and return dictionaries of predecessors for each node and
+ distance for each node from the `source`.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ Starting node for path
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number or None to indicate a hidden edge.
+ 
+ Returns
+ -------
+ pred, distance : dictionaries
+ Returns two dictionaries representing a list of predecessors
+ of a node and the distance to each node.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The list of predecessors contains more than one element only when
+ there is more than one shortest path to the key node.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5, create_using=nx.DiGraph())
+ >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)
+ >>> sorted(pred.items())
+ [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
+ >>> sorted(dist.items())
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+ 
+ >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)
+ >>> sorted(pred.items())
+ [(0, []), (1, [0])]
+ >>> sorted(dist.items())
+ [(0, 0), (1, 1)]
+ """
+ if source not in G:
+ raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+ weight = _weight_function(G, weight)
+ pred = {source: []} # dictionary of predecessors
+ return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
+ """Find shortest weighted paths and lengths between all nodes.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number or None to indicate a hidden edge.
+ 
+ Yields
+ ------
+ (node, (distance, path)) : (node obj, (dict, dict))
+ Each source node has two associated dicts. The first holds distance
+ keyed by target and the second holds paths keyed by target.
+ (See single_source_dijkstra for the source/target node terminology.)
+ If desired, apply `dict()` to this iterator to create a dict keyed
+ by source node whose value is the pair of dicts.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> len_path = dict(nx.all_pairs_dijkstra(G))
+ >>> len_path[3][0][1]
+ 2
+ >>> for node in [0, 1, 2, 3, 4]:
+ ... print(f"3 - {node}: {len_path[3][0][node]}")
+ 3 - 0: 3
+ 3 - 1: 2
+ 3 - 2: 1
+ 3 - 3: 0
+ 3 - 4: 1
+ >>> len_path[3][1][1]
+ [3, 2, 1]
+ >>> for n, (dist, path) in nx.all_pairs_dijkstra(G):
+ ... print(path[1])
+ [0, 1]
+ [1]
+ [2, 1]
+ [3, 2, 1]
+ [4, 3, 2, 1]
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The yielded dicts only have keys for reachable nodes.
+ """
+ for n in G:
+ dist, path = single_source_dijkstra(G, n, cutoff=cutoff, weight=weight)
+ yield (n, (dist, path))
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
+ """Compute shortest path lengths between all nodes in a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number or None to indicate a hidden edge.
+ 
+ Returns
+ -------
+ distance : iterator
+ (source, dictionary) iterator with dictionary keyed by target and
+ shortest path length as the value.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> length = dict(nx.all_pairs_dijkstra_path_length(G))
+ >>> for node in [0, 1, 2, 3, 4]:
+ ... print(f"1 - {node}: {length[1][node]}")
+ 1 - 0: 1
+ 1 - 1: 0
+ 1 - 2: 1
+ 1 - 3: 2
+ 1 - 4: 3
+ >>> length[3][2]
+ 1
+ >>> length[2][2]
+ 0
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The dictionary returned only has keys for reachable node pairs.
+ """
+ length = single_source_dijkstra_path_length
+ for n in G:
+ yield (n, length(G, n, cutoff=cutoff, weight=weight))
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"):
+ """Compute shortest paths between all nodes in a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ cutoff : integer or float, optional
+ Length (sum of edge weights) at which the search is stopped.
+ If cutoff is provided, only return paths with summed weight <= cutoff.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number or None to indicate a hidden edge.
+ 
+ Returns
+ -------
+ paths : iterator
+ (source, dictionary) iterator with dictionary keyed by target and
+ shortest path as the value.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> path = dict(nx.all_pairs_dijkstra_path(G))
+ >>> path[0][4]
+ [0, 1, 2, 3, 4]
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ See Also
+ --------
+ floyd_warshall, all_pairs_bellman_ford_path
+ 
+ """
+ path = single_source_dijkstra_path
+ # TODO This can be trivially parallelized.
+ for n in G:
+ yield (n, path(G, n, cutoff=cutoff, weight=weight))
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def bellman_ford_predecessor_and_distance(
+ G, source, target=None, weight="weight", heuristic=False
+ ):
+ """Compute shortest path lengths and predecessors on shortest paths
+ in weighted graphs.
+ 
+ The algorithm has a running time of $O(mn)$ where $n$ is the number of
+ nodes and $m$ is the number of edges. It is slower than Dijkstra's
+ algorithm but can handle negative edge weights.
+ 
+ If a negative cycle is detected, you can use :func:`find_negative_cycle`
+ to return the cycle and examine it. Shortest paths are not defined when
+ a negative cycle exists because once reached, the path can cycle forever
+ to build up arbitrarily low weights.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ The algorithm works for all types of graphs, including directed
+ graphs and multigraphs.
+ 
+ source: node label
+ Starting node for path
+ 
+ target : node label, optional
+ Ending node for path
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ heuristic : bool
+ Determines whether to use a heuristic to detect negative
+ cycles early, at a hopefully negligible cost.
+ 
+ Returns
+ -------
+ pred, dist : dictionaries
+ Returns two dictionaries keyed by node to predecessor in the
+ path and to the distance from the source respectively.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ NetworkXUnbounded
+ If the (di)graph contains a negative (di)cycle, the
+ algorithm raises an exception to indicate the presence of the
+ negative (di)cycle. Note: any negative weight edge in an
+ undirected graph is a negative cycle.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5, create_using=nx.DiGraph())
+ >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
+ >>> sorted(pred.items())
+ [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
+ >>> sorted(dist.items())
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+ 
+ >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1)
+ >>> sorted(pred.items())
+ [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
+ >>> sorted(dist.items())
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+ 
+ >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
+ >>> G[1][2]["weight"] = -7
+ >>> nx.bellman_ford_predecessor_and_distance(G, 0)
+ Traceback (most recent call last):
+ ...
+ networkx.exception.NetworkXUnbounded: Negative cycle detected.
+ 
+ See Also
+ --------
+ find_negative_cycle
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ The dictionaries returned only have keys for nodes reachable from
+ the source.
+ 
+ In the case where the (di)graph is not connected, if a component
+ not containing the source contains a negative (di)cycle, it
+ will not be detected.
+ 
+ In NetworkX v2.1 and prior, the source node had predecessor `[None]`.
+ In NetworkX v2.2 this changed to the source node having predecessor `[]`.
+ """
+ if source not in G:
+ raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+ weight = _weight_function(G, weight)
+ if G.is_multigraph():
+ if any(
+ weight(u, v, {k: d}) < 0
+ for u, v, k, d in nx.selfloop_edges(G, keys=True, data=True)
+ ):
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ else:
+ if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ 
+ dist = {source: 0}
+ pred = {source: []}
+ 
+ if len(G) == 1:
+ return pred, dist
+ 
+ dist = _bellman_ford(
+ G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
+ )
+ return (pred, dist)
+ 
+ 
+ def _bellman_ford(
+ G,
+ source,
+ weight,
+ pred=None,
+ paths=None,
+ dist=None,
+ target=None,
+ heuristic=True,
+ ):
+ """Calls the relaxation loop for the Bellman–Ford algorithm and builds paths.
+ 
+ This is an implementation of the SPFA variant.
+ See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source: list
+ List of source nodes. The shortest path from any of the source
+ nodes will be found if multiple sources are provided.
+ 
+ weight : function
+ The weight of an edge is the value returned by the function. The
+ function must accept exactly three positional arguments: the two
+ endpoints of an edge and the dictionary of edge attributes for
+ that edge. The function must return a number.
+ 
+ pred: dict of lists, optional (default=None)
+ dict to store a list of predecessors keyed by that node.
+ If None, predecessors are not stored.
+ 
+ paths: dict, optional (default=None)
+ dict to store the path list from source to each node, keyed by node.
+ If None, paths are not stored.
+ 
+ dist: dict, optional (default=None)
+ dict to store distance from source to the keyed node.
+ If None, returned dist dict contents default to 0 for every node in the
+ source list.
+ 
+ target: node label, optional
+ Ending node for path. Path lengths to other destinations may (and
+ probably will) be incorrect.
+ 
+ heuristic : bool
+ Determines whether to use a heuristic to detect negative
+ cycles early, at a hopefully negligible cost.
+ 
+ Returns
+ -------
+ dist : dict
+ Returns a dict keyed by node to the distance from the source.
+ The `paths` and `pred` dicts passed in are mutated in place and
+ hold the path and predecessor results.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If any of `source` is not in `G`.
+ 
+ NetworkXUnbounded
+ If the (di)graph contains a negative (di)cycle, the
+ algorithm raises an exception to indicate the presence of the
+ negative (di)cycle. Note: any negative weight edge in an
+ undirected graph is a negative cycle.
+ """
+ if pred is None:
+ pred = {v: [] for v in source}
+ 
+ if dist is None:
+ dist = {v: 0 for v in source}
+ 
+ negative_cycle_found = _inner_bellman_ford(
+ G,
+ source,
+ weight,
+ pred,
+ dist,
+ heuristic,
+ )
+ if negative_cycle_found is not None:
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ 
+ if paths is not None:
+ sources = set(source)
+ dsts = [target] if target is not None else pred
+ for dst in dsts:
+ gen = _build_paths_from_predecessors(sources, dst, pred)
+ paths[dst] = next(gen)
+ 
+ return dist
+ 
+ 
+ def _inner_bellman_ford(
+ G,
+ sources,
+ weight,
+ pred,
+ dist=None,
+ heuristic=True,
+ ):
+ """Inner relaxation loop for the Bellman–Ford algorithm.
+ 
+ This is an implementation of the SPFA variant.
+ See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ sources : list
+ List of source nodes. The shortest path from any of the source
+ nodes will be found if multiple sources are provided.
+ 
+ weight : function
+ The weight of an edge is the value returned by the function. The
+ function must accept exactly three positional arguments: the two
+ endpoints of an edge and the dictionary of edge attributes for
+ that edge. The function must return a number.
+ 
+ pred: dict of lists
+ dict to store a list of predecessors keyed by that node.
+ 
+ dist: dict, optional (default=None)
+ dict to store distance from source to the keyed node.
+ If None, returned dist dict contents default to 0 for every node in the
+ source list.
+ 
+ heuristic : bool
+ Determines whether to use a heuristic to detect negative
+ cycles early, at a hopefully negligible cost.
+ 
+ Returns
+ -------
+ node or None
+ Return a node `v` where processing discovered a negative cycle.
+ If no negative cycle is found, return None.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If any of `sources` is not in `G`.
+ """
+ for s in sources:
+ if s not in G:
+ raise nx.NodeNotFound(f"Source {s} not in G")
+ 
+ if pred is None:
+ pred = {v: [] for v in sources}
+ 
+ if dist is None:
+ dist = {v: 0 for v in sources}
+ 
+ # Heuristic Storage setup. 
Note: use None because nodes cannot be None + nonexistent_edge = (None, None) + pred_edge = {v: None for v in sources} + recent_update = {v: nonexistent_edge for v in sources} + + G_succ = G._adj # For speed-up (and works for both directed and undirected graphs) + inf = float("inf") + n = len(G) + + count = {} + q = deque(sources) + in_q = set(sources) + while q: + u = q.popleft() + in_q.remove(u) + + # Skip relaxations if any of the predecessors of u is in the queue. + if all(pred_u not in in_q for pred_u in pred[u]): + dist_u = dist[u] + for v, e in G_succ[u].items(): + dist_v = dist_u + weight(u, v, e) + + if dist_v < dist.get(v, inf): + # In this conditional branch we are updating the path with v. + # If it happens that some earlier update also added node v + # that implies the existence of a negative cycle since + # after the update node v would lie on the update path twice. + # The update path is stored up to one of the source nodes, + # therefore u is always in the dict recent_update + if heuristic: + if v in recent_update[u]: + # Negative cycle found! + pred[v].append(u) + return v + + # Transfer the recent update info from u to v if the + # same source node is the head of the update path. + # If the source node is responsible for the cost update, + # then clear the history and use it instead. + if v in pred_edge and pred_edge[v] == u: + recent_update[v] = recent_update[u] + else: + recent_update[v] = (u, v) + + if v not in in_q: + q.append(v) + in_q.add(v) + count_v = count.get(v, 0) + 1 + if count_v == n: + # Negative cycle found! + return v + + count[v] = count_v + dist[v] = dist_v + pred[v] = [u] + pred_edge[v] = u + + elif dist.get(v) is not None and dist_v == dist.get(v): + pred[v].append(u) + + # successfully found shortest_path. No negative cycles found. + return None + + +@nx._dispatch(edge_attrs="weight") +def bellman_ford_path(G, source, target, weight="weight"): + """Returns the shortest path from source to target in a weighted graph G. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node + + target : node + Ending node + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + path : list + List of nodes in a shortest path. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.bellman_ford_path(G, 0, 4) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + See Also + -------- + dijkstra_path, bellman_ford_path_length + """ + length, path = single_source_bellman_ford(G, source, target=target, weight=weight) + return path + + +@nx._dispatch(edge_attrs="weight") +def bellman_ford_path_length(G, source, target, weight="weight"): + """Returns the shortest path length from source to target + in a weighted graph. 
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ starting node for path
+ 
+ target : node label
+ ending node for path
+ 
+ weight : string or function (default="weight")
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ Returns
+ -------
+ length : number
+ Shortest path length.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ NetworkXNoPath
+ If no path exists between source and target.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> nx.bellman_ford_path_length(G, 0, 4)
+ 4
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ See Also
+ --------
+ dijkstra_path_length, bellman_ford_path
+ """
+ if source == target:
+ if source not in G:
+ raise nx.NodeNotFound(f"Node {source} not found in graph")
+ return 0
+ 
+ weight = _weight_function(G, weight)
+ 
+ length = _bellman_ford(G, [source], weight, target=target)
+ 
+ try:
+ return length[target]
+ except KeyError as err:
+ raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from err
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def single_source_bellman_ford_path(G, source, weight="weight"):
+ """Compute shortest path between source and all other reachable
+ nodes for a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node
+ Starting node for path.
+ 
+ weight : string or function (default="weight")
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ Returns
+ -------
+ paths : dictionary
+ Dictionary of shortest paths keyed by target.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> path = nx.single_source_bellman_ford_path(G, 0)
+ >>> path[4]
+ [0, 1, 2, 3, 4]
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ See Also
+ --------
+ single_source_dijkstra, single_source_bellman_ford
+ 
+ """
+ (length, path) = single_source_bellman_ford(G, source, weight=weight)
+ return path
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def single_source_bellman_ford_path_length(G, source, weight="weight"):
+ """Compute the shortest path length between source and all other
+ reachable nodes for a weighted graph.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ Starting node for path
+ 
+ weight : string or function (default="weight")
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ Returns
+ -------
+ length : dictionary
+ Dictionary of shortest path lengths keyed by target.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> length = nx.single_source_bellman_ford_path_length(G, 0)
+ >>> length[4]
+ 4
+ >>> for node in [0, 1, 2, 3, 4]:
+ ... print(f"{node}: {length[node]}")
+ 0: 0
+ 1: 1
+ 2: 2
+ 3: 3
+ 4: 4
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ See Also
+ --------
+ single_source_dijkstra, single_source_bellman_ford
+ 
+ """
+ weight = _weight_function(G, weight)
+ return _bellman_ford(G, [source], weight)
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def single_source_bellman_ford(G, source, target=None, weight="weight"):
+ """Compute shortest paths and lengths in a weighted graph G.
+ 
+ Uses the Bellman-Ford algorithm for shortest paths.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source : node label
+ Starting node for path
+ 
+ target : node label, optional
+ Ending node for path
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ Returns
+ -------
+ distance, path : pair of dictionaries, or numeric and list
+ If target is None, returns a tuple of two dictionaries keyed by node.
+ The first dictionary stores the distance from the source to each
+ reachable node. The second stores the path from the source to that
+ node.
+ If target is not None, returns a tuple of (distance, path) where
+ distance is the distance from source to target and path is a list
+ representing the path from source to target.
+ 
+ Raises
+ ------
+ NodeNotFound
+ If `source` is not in `G`.
+ 
+ NetworkXNoPath
+ If `target` is given but not reachable from `source`.
+ 
+ Examples
+ --------
+ >>> G = nx.path_graph(5)
+ >>> length, path = nx.single_source_bellman_ford(G, 0)
+ >>> length[4]
+ 4
+ >>> for node in [0, 1, 2, 3, 4]:
+ ... print(f"{node}: {length[node]}")
+ 0: 0
+ 1: 1
+ 2: 2
+ 3: 3
+ 4: 4
+ >>> path[4]
+ [0, 1, 2, 3, 4]
+ >>> length, path = nx.single_source_bellman_ford(G, 0, 1)
+ >>> length
+ 1
+ >>> path
+ [0, 1]
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
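+ 
+ Unlike Dijkstra's algorithm, negative edge weights are handled,
+ as long as no negative cycle is reachable from the source:
+ 
+ >>> H = nx.DiGraph()
+ >>> H.add_weighted_edges_from([(0, 1, 2), (1, 2, -1)])
+ >>> nx.single_source_bellman_ford(H, 0, 2)
+ (1, [0, 1, 2])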
+ + See Also + -------- + single_source_dijkstra + single_source_bellman_ford_path + single_source_bellman_ford_path_length + """ + if source == target: + if source not in G: + raise nx.NodeNotFound(f"Node {source} is not found in the graph") + return (0, [source]) + + weight = _weight_function(G, weight) + + paths = {source: [source]} # dictionary of paths + dist = _bellman_ford(G, [source], weight, paths=paths, target=target) + if target is None: + return (dist, paths) + try: + return (dist[target], paths[target]) + except KeyError as err: + msg = f"Node {target} not reachable from {source}" + raise nx.NetworkXNoPath(msg) from err + + +@nx._dispatch(edge_attrs="weight") +def all_pairs_bellman_ford_path_length(G, weight="weight"): + """Compute shortest path lengths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path length as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length = dict(nx.all_pairs_bellman_ford_path_length(G)) + >>> for node in [0, 1, 2, 3, 4]: + ... print(f"1 - {node}: {length[1][node]}") + 1 - 0: 1 + 1 - 1: 0 + 1 - 2: 1 + 1 - 3: 2 + 1 - 4: 3 + >>> length[3][2] + 1 + >>> length[2][2] + 0 + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionary returned only has keys for reachable node pairs. + """ + length = single_source_bellman_ford_path_length + for n in G: + yield (n, dict(length(G, n, weight=weight))) + + +@nx._dispatch(edge_attrs="weight") +def all_pairs_bellman_ford_path(G, weight="weight"): + """Compute shortest paths between all nodes in a weighted graph. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function (default="weight") + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + paths : iterator + (source, dictionary) iterator with dictionary keyed by target and + shortest path as the key value. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> path = dict(nx.all_pairs_bellman_ford_path(G)) + >>> path[0][4] + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. 
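+ 
+ Negative edge weights are supported, provided the graph contains no
+ negative cycle:
+ 
+ >>> H = nx.DiGraph()
+ >>> H.add_weighted_edges_from([(0, 1, 3), (1, 2, -2)])
+ >>> dict(nx.all_pairs_bellman_ford_path(H))[0][2]
+ [0, 1, 2]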
+ + See Also + -------- + floyd_warshall, all_pairs_dijkstra_path + + """ + path = single_source_bellman_ford_path + # TODO This can be trivially parallelized. + for n in G: + yield (n, path(G, n, weight=weight)) + + +@nx._dispatch(edge_attrs="weight") +def goldberg_radzik(G, source, weight="weight"): + """Compute shortest path lengths and predecessors on shortest paths + in weighted graphs. + + The algorithm has a running time of $O(mn)$ where $n$ is the number of + nodes and $m$ is the number of edges. It is slower than Dijkstra but + can handle negative edge weights. + + Parameters + ---------- + G : NetworkX graph + The algorithm works for all types of graphs, including directed + graphs and multigraphs. + + source: node label + Starting node for path + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + pred, dist : dictionaries + Returns two dictionaries keyed by node to predecessor in the + path and to the distance from the source respectively. + + Raises + ------ + NodeNotFound + If `source` is not in `G`. + + NetworkXUnbounded + If the (di)graph contains a negative (di)cycle, the + algorithm raises an exception to indicate the presence of the + negative (di)cycle. Note: any negative weight edge in an + undirected graph is a negative cycle. + + As of NetworkX v3.2, a zero weight cycle is no longer + incorrectly reported as a negative weight cycle. + + + Examples + -------- + >>> G = nx.path_graph(5, create_using=nx.DiGraph()) + >>> pred, dist = nx.goldberg_radzik(G, 0) + >>> sorted(pred.items()) + [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)] + >>> sorted(dist.items()) + [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] + + >>> G = nx.cycle_graph(5, create_using=nx.DiGraph()) + >>> G[1][2]["weight"] = -7 + >>> nx.goldberg_radzik(G, 0) + Traceback (most recent call last): + ... + networkx.exception.NetworkXUnbounded: Negative cycle detected. + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The dictionaries returned only have keys for nodes reachable from + the source. + + In the case where the (di)graph is not connected, if a component + not containing the source contains a negative (di)cycle, it + will not be detected. 
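+ 
+ The implementation works with reduced costs: given the current
+ distance estimates ``d``, the reduced cost of an edge ``(u, v)`` is
+ ``d[u] + w(u, v) - d[v]``. Each round topologically sorts the nodes
+ reachable through edges of nonpositive reduced cost and then relaxes
+ their out-edges; a cycle containing an edge of negative reduced cost
+ certifies a negative cycle in the graph.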
+ 
+ """
+ if source not in G:
+ raise nx.NodeNotFound(f"Node {source} is not found in the graph")
+ weight = _weight_function(G, weight)
+ if G.is_multigraph():
+ if any(
+ weight(u, v, {k: d}) < 0
+ for u, v, k, d in nx.selfloop_edges(G, keys=True, data=True)
+ ):
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ else:
+ if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ 
+ if len(G) == 1:
+ return {source: None}, {source: 0}
+ 
+ G_succ = G._adj # For speed-up (and works for both directed and undirected graphs)
+ 
+ inf = float("inf")
+ d = {u: inf for u in G}
+ d[source] = 0
+ pred = {source: None}
+ 
+ def topo_sort(relabeled):
+ """Topologically sort nodes relabeled in the previous round and detect
+ negative cycles.
+ """
+ # List of nodes to scan in this round. Denoted by A in Goldberg and
+ # Radzik's paper.
+ to_scan = []
+ # In the DFS in the loop below, neg_count records for each node the
+ # number of edges of negative reduced costs on the path from a DFS root
+ # to the node in the DFS forest. The reduced cost of an edge (u, v) is
+ # defined as d[u] + w(u, v) - d[v].
+ #
+ # neg_count also doubles as the DFS visit marker array.
+ neg_count = {}
+ for u in relabeled:
+ # Skip visited nodes.
+ if u in neg_count:
+ continue
+ d_u = d[u]
+ # Skip nodes without out-edges of negative reduced costs.
+ if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
+ continue
+ # Nonrecursive DFS that inserts nodes reachable from u via edges of
+ # nonpositive reduced costs into to_scan in (reverse) topological
+ # order.
+ stack = [(u, iter(G_succ[u].items()))]
+ in_stack = {u}
+ neg_count[u] = 0
+ while stack:
+ u, it = stack[-1]
+ try:
+ v, e = next(it)
+ except StopIteration:
+ to_scan.append(u)
+ stack.pop()
+ in_stack.remove(u)
+ continue
+ t = d[u] + weight(u, v, e)
+ d_v = d[v]
+ # Follow edges of nonpositive reduced cost; is_neg marks the
+ # strictly negative ones.
+ if t <= d_v:
+ is_neg = t < d_v
+ d[v] = t
+ pred[v] = u
+ if v not in neg_count:
+ neg_count[v] = neg_count[u] + int(is_neg)
+ stack.append((v, iter(G_succ[v].items())))
+ in_stack.add(v)
+ elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
+ # (u, v) is a back edge, and the cycle formed by the
+ # path v to u and (u, v) contains at least one edge of
+ # negative reduced cost. The cycle must be of negative
+ # cost.
+ raise nx.NetworkXUnbounded("Negative cycle detected.")
+ to_scan.reverse()
+ return to_scan
+ 
+ def relax(to_scan):
+ """Relax out-edges of relabeled nodes."""
+ relabeled = set()
+ # Scan nodes in to_scan in topological order and relax incident
+ # out-edges. Add the relabeled nodes to the set `relabeled`.
+ for u in to_scan:
+ d_u = d[u]
+ for v, e in G_succ[u].items():
+ w_e = weight(u, v, e)
+ if d_u + w_e < d[v]:
+ d[v] = d_u + w_e
+ pred[v] = u
+ relabeled.add(v)
+ return relabeled
+ 
+ # Set of nodes relabeled in the last round of scan operations. Denoted by B
+ # in Goldberg and Radzik's paper.
+ relabeled = {source}
+ 
+ while relabeled:
+ to_scan = topo_sort(relabeled)
+ relabeled = relax(to_scan)
+ 
+ d = {u: d[u] for u in pred}
+ return pred, d
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def negative_edge_cycle(G, weight="weight", heuristic=True):
+ """Returns True if there exists a negative edge cycle anywhere in G.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). 
If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ heuristic : bool
+ Determines whether to use a heuristic to detect negative cycles
+ early, at a hopefully negligible cost. For graphs that contain a
+ negative cycle, the heuristic speeds up detection by at least an
+ order of magnitude.
+ 
+ Returns
+ -------
+ negative_cycle : bool
+ True if a negative edge cycle exists, otherwise False.
+ 
+ Examples
+ --------
+ >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
+ >>> print(nx.negative_edge_cycle(G))
+ False
+ >>> G[1][2]["weight"] = -7
+ >>> print(nx.negative_edge_cycle(G))
+ True
+ 
+ Notes
+ -----
+ Edge weight attributes must be numerical.
+ Distances are calculated as sums of weighted edges traversed.
+ 
+ This algorithm uses bellman_ford_predecessor_and_distance() but finds
+ negative cycles on any component by first adding a new node connected to
+ every node, and starting bellman_ford_predecessor_and_distance on that
+ node. It then removes that extra node.
+ """
+ if G.size() == 0:
+ return False
+ 
+ # find unused node to use temporarily
+ newnode = -1
+ while newnode in G:
+ newnode -= 1
+ # connect it to all nodes
+ G.add_edges_from([(newnode, n) for n in G])
+ 
+ try:
+ bellman_ford_predecessor_and_distance(
+ G, newnode, weight=weight, heuristic=heuristic
+ )
+ except nx.NetworkXUnbounded:
+ return True
+ finally:
+ G.remove_node(newnode)
+ return False
+ 
+ 
+ @nx._dispatch(edge_attrs="weight")
+ def find_negative_cycle(G, source, weight="weight"):
+ """Returns a cycle with negative total weight if it exists.
+ 
+ Bellman-Ford is used to find shortest paths. That algorithm
+ stops if there exists a negative cycle. This algorithm
+ picks up from there and returns the found negative cycle.
+ 
+ The cycle consists of a list of nodes in the cycle order. The last
+ node equals the first to make it a cycle.
+ You can look up the edge weights in the original graph. In the case
+ of multigraphs the relevant edge is the minimal weight edge between
+ the nodes in the 2-tuple.
+ 
+ If the graph has no negative cycle, a NetworkXError is raised.
+ 
+ Parameters
+ ----------
+ G : NetworkX graph
+ 
+ source: node label
+ The search for the negative cycle will start from this node.
+ 
+ weight : string or function
+ If this is a string, then edge weights will be accessed via the
+ edge attribute with this key (that is, the weight of the edge
+ joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
+ such edge attribute exists, the weight of the edge is assumed to
+ be one.
+ 
+ If this is a function, the weight of an edge is the value
+ returned by the function. The function must accept exactly three
+ positional arguments: the two endpoints of an edge and the
+ dictionary of edge attributes for that edge. The function must
+ return a number.
+ 
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 2), (2, 0, 1), (1, 4, 2), (4, 0, -5)])
+ >>> nx.find_negative_cycle(G, 0)
+ [4, 0, 1, 4]
+ 
+ Returns
+ -------
+ cycle : list
+ A list of nodes in the order of the cycle found. The last node
+ equals the first to indicate a cycle.
+ 
+ Raises
+ ------
+ NetworkXError
+ If no negative cycle is found.
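+ 
+ See Also
+ --------
+ negative_edge_cycle
+ bellman_ford_predecessor_and_distance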
+ """ + weight = _weight_function(G, weight) + pred = {source: []} + + v = _inner_bellman_ford(G, [source], weight, pred=pred) + if v is None: + raise nx.NetworkXError("No negative cycles detected.") + + # negative cycle detected... find it + neg_cycle = [] + stack = [(v, list(pred[v]))] + seen = {v} + while stack: + node, preds = stack[-1] + if v in preds: + # found the cycle + neg_cycle.extend([node, v]) + neg_cycle = list(reversed(neg_cycle)) + return neg_cycle + + if preds: + nbr = preds.pop() + if nbr not in seen: + stack.append((nbr, list(pred[nbr]))) + neg_cycle.append(node) + seen.add(nbr) + else: + stack.pop() + if neg_cycle: + neg_cycle.pop() + else: + if v in G[v] and weight(G, v, v) < 0: + return [v, v] + # should not reach here + raise nx.NetworkXError("Negative cycle is detected but not found") + # should not get here... + msg = "negative cycle detected but not identified" + raise nx.NetworkXUnbounded(msg) + + +@nx._dispatch(edge_attrs="weight") +def bidirectional_dijkstra(G, source, target, weight="weight"): + r"""Dijkstra's algorithm for shortest paths using bidirectional search. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number or None to indicate a hidden edge. + + Returns + ------- + length, path : number and list + length is the distance from source to target. + path is a list of nodes on a path from source to target. + + Raises + ------ + NodeNotFound + If either `source` or `target` is not in `G`. + + NetworkXNoPath + If no path exists between source and target. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> length, path = nx.bidirectional_dijkstra(G, 0, 4) + >>> print(length) + 4 + >>> print(path) + [0, 1, 2, 3, 4] + + Notes + ----- + Edge weight attributes must be numerical. + Distances are calculated as sums of weighted edges traversed. + + The weight function can be used to hide edges by returning None. + So ``weight = lambda u, v, d: 1 if d['color']=="red" else None`` + will find the shortest red path. + + In practice bidirectional Dijkstra is much more than twice as fast as + ordinary Dijkstra. + + Ordinary Dijkstra expands nodes in a sphere-like manner from the + source. The radius of this sphere will eventually be the length + of the shortest path. Bidirectional Dijkstra will expand nodes + from both the source and the target, making two spheres of half + this radius. Volume of the first sphere is `\pi*r*r` while the + others are `2*\pi*r/2*r/2`, making up half the volume. + + This algorithm is not guaranteed to work if edge weights + are negative or are floating point numbers + (overflows and roundoff errors can cause problems). 
+ + See Also + -------- + shortest_path + shortest_path_length + """ + if source not in G or target not in G: + msg = f"Either source {source} or target {target} is not in G" + raise nx.NodeNotFound(msg) + + if source == target: + return (0, [source]) + + weight = _weight_function(G, weight) + push = heappush + pop = heappop + # Init: [Forward, Backward] + dists = [{}, {}] # dictionary of final distances + paths = [{source: [source]}, {target: [target]}] # dictionary of paths + fringe = [[], []] # heap of (distance, node) for choosing node to expand + seen = [{source: 0}, {target: 0}] # dict of distances to seen nodes + c = count() + # initialize fringe heap + push(fringe[0], (0, next(c), source)) + push(fringe[1], (0, next(c), target)) + # neighs for extracting correct neighbor information + if G.is_directed(): + neighs = [G._succ, G._pred] + else: + neighs = [G._adj, G._adj] + # variables to hold shortest discovered path + # finaldist = 1e30000 + finalpath = [] + dir = 1 + while fringe[0] and fringe[1]: + # choose direction + # dir == 0 is forward direction and dir == 1 is back + dir = 1 - dir + # extract closest to expand + (dist, _, v) = pop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + return (finaldist, finalpath) + + for w, d in neighs[dir][v].items(): + # weight(v, w, d) for forward and weight(w, v, d) for back direction + cost = weight(v, w, d) if dir == 0 else weight(w, v, d) + if cost is None: + continue + vwLength = dists[dir][v] + cost + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + push(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +@nx._dispatch(edge_attrs="weight") +def johnson(G, weight="weight"): + r"""Uses Johnson's Algorithm to compute shortest paths. + + Johnson's Algorithm finds a shortest path between each pair of + nodes in a weighted graph even if negative weights are present. + + Parameters + ---------- + G : NetworkX graph + + weight : string or function + If this is a string, then edge weights will be accessed via the + edge attribute with this key (that is, the weight of the edge + joining `u` to `v` will be ``G.edges[u, v][weight]``). If no + such edge attribute exists, the weight of the edge is assumed to + be one. + + If this is a function, the weight of an edge is the value + returned by the function. The function must accept exactly three + positional arguments: the two endpoints of an edge and the + dictionary of edge attributes for that edge. The function must + return a number. + + Returns + ------- + distance : dictionary + Dictionary, keyed by source and target, of shortest paths. + + Examples + -------- + >>> graph = nx.DiGraph() + >>> graph.add_weighted_edges_from( + ... 
[("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)] + ... ) + >>> paths = nx.johnson(graph, weight="weight") + >>> paths["0"]["2"] + ['0', '1', '2'] + + Notes + ----- + Johnson's algorithm is suitable even for graphs with negative weights. It + works by using the Bellman–Ford algorithm to compute a transformation of + the input graph that removes all negative weights, allowing Dijkstra's + algorithm to be used on the transformed graph. + + The time complexity of this algorithm is $O(n^2 \log n + n m)$, + where $n$ is the number of nodes and $m$ the number of edges in the + graph. For dense graphs, this may be faster than the Floyd–Warshall + algorithm. + + See Also + -------- + floyd_warshall_predecessor_and_distance + floyd_warshall_numpy + all_pairs_shortest_path + all_pairs_shortest_path_length + all_pairs_dijkstra_path + bellman_ford_predecessor_and_distance + all_pairs_bellman_ford_path + all_pairs_bellman_ford_path_length + + """ + dist = {v: 0 for v in G} + pred = {v: [] for v in G} + weight = _weight_function(G, weight) + + # Calculate distance of shortest paths + dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist) + + # Update the weight function to take into account the Bellman--Ford + # relaxation distances. + def new_weight(u, v, d): + return weight(u, v, d) + dist_bellman[u] - dist_bellman[v] + + def dist_path(v): + paths = {v: [v]} + _dijkstra(G, v, new_weight, paths=paths) + return paths + + return {v: dist_path(v) for v in G} diff --git a/MLPY/Lib/site-packages/networkx/algorithms/similarity.py b/MLPY/Lib/site-packages/networkx/algorithms/similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..3d943c20d7e2a1a5e01b9f825802cb6d6a99047e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/similarity.py @@ -0,0 +1,1710 @@ +""" Functions measuring similarity using graph edit distance. + +The graph edit distance is the number of edge/node changes needed +to make two graphs isomorphic. + +The default algorithm/implementation is sub-optimal for some graphs. +The problem of finding the exact Graph Edit Distance (GED) is NP-hard +so it is often slow. If the simple interface `graph_edit_distance` +takes too long for your graph, try `optimize_graph_edit_distance` +and/or `optimize_edit_paths`. + +At the same time, I encourage capable people to investigate +alternative GED algorithms, in order to improve the choices available. +""" + +import math +import time +import warnings +from dataclasses import dataclass +from itertools import product + +import networkx as nx + +__all__ = [ + "graph_edit_distance", + "optimal_edit_paths", + "optimize_graph_edit_distance", + "optimize_edit_paths", + "simrank_similarity", + "panther_similarity", + "generate_random_paths", +] + + +def debug_print(*args, **kwargs): + print(*args, **kwargs) + + +@nx._dispatch( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + roots=None, + upper_bound=None, + timeout=None, +): + """Returns GED (graph edit distance) between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. 
+ + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + upper_bound : numeric + Maximum edit distance to consider. Return None if no edit + distance under or equal to upper_bound exists. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. 
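+
+    Returns
+    -------
+    GED : numeric
+        The graph edit distance between `G1` and `G2`. None is returned
+        if no edit distance of at most `upper_bound` is found.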
+ + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> nx.graph_edit_distance(G1, G2) + 7.0 + + >>> G1 = nx.star_graph(5) + >>> G2 = nx.star_graph(5) + >>> nx.graph_edit_distance(G1, G2, roots=(0, 0)) + 0.0 + >>> nx.graph_edit_distance(G1, G2, roots=(1, 0)) + 8.0 + + See Also + -------- + optimal_edit_paths, optimize_graph_edit_distance, + + is_isomorphic: test for graph edit distance of 0 + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + bestcost = None + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + roots, + timeout, + ): + # assert bestcost is None or cost < bestcost + bestcost = cost + return bestcost + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def optimal_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns all minimum-cost edit paths transforming G1 to G2. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. 
If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + edit_paths : list of tuples (node_edit_path, edge_edit_path) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + + cost : numeric + Optimal edit path cost (graph edit distance). + + Examples + -------- + >>> G1 = nx.cycle_graph(4) + >>> G2 = nx.wheel_graph(5) + >>> paths, cost = nx.optimal_edit_paths(G1, G2) + >>> len(paths) + 40 + >>> cost + 5.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + paths = [] + bestcost = None + for vertex_path, edge_path, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + False, + ): + # assert bestcost is None or cost <= bestcost + if bestcost is not None and cost < bestcost: + paths = [] + paths.append((vertex_path, edge_path)) + bestcost = cost + return paths, bestcost + + +@nx._dispatch(graphs={"G1": 0, "G2": 1}) +def optimize_graph_edit_distance( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, +): + """Returns consecutive approximations of GED (graph edit distance) + between graphs G1 and G2. + + Graph edit distance is a graph similarity measure analogous to + Levenshtein distance for strings. It is defined as minimum cost + of edit path (sequence of node and edge edit operations) + transforming graph G1 to graph isomorphic to G2. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. 
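+
+        For example, ``node_match=lambda n1, n2: n1.get("color") == n2.get("color")``
+        would treat two nodes as equal exactly when their (here hypothetical)
+        ``color`` attributes agree.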
+ + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. + If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + Returns + ------- + Generator of consecutive approximations of graph edit distance. + + Examples + -------- + >>> G1 = nx.cycle_graph(6) + >>> G2 = nx.wheel_graph(7) + >>> for v in nx.optimize_graph_edit_distance(G1, G2): + ... minv = v + >>> minv + 7.0 + + See Also + -------- + graph_edit_distance, optimize_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. 
+ https://hal.archives-ouvertes.fr/hal-01168816 + """ + for _, _, cost in optimize_edit_paths( + G1, + G2, + node_match, + edge_match, + node_subst_cost, + node_del_cost, + node_ins_cost, + edge_subst_cost, + edge_del_cost, + edge_ins_cost, + upper_bound, + True, + ): + yield cost + + +@nx._dispatch( + graphs={"G1": 0, "G2": 1}, preserve_edge_attrs=True, preserve_node_attrs=True +) +def optimize_edit_paths( + G1, + G2, + node_match=None, + edge_match=None, + node_subst_cost=None, + node_del_cost=None, + node_ins_cost=None, + edge_subst_cost=None, + edge_del_cost=None, + edge_ins_cost=None, + upper_bound=None, + strictly_decreasing=True, + roots=None, + timeout=None, +): + """GED (graph edit distance) calculation: advanced interface. + + Graph edit path is a sequence of node and edge edit operations + transforming graph G1 to graph isomorphic to G2. Edit operations + include substitutions, deletions, and insertions. + + Graph edit distance is defined as minimum cost of edit path. + + Parameters + ---------- + G1, G2: graphs + The two graphs G1 and G2 must be of the same type. + + node_match : callable + A function that returns True if node n1 in G1 and n2 in G2 + should be considered equal during matching. + + The function will be called like + + node_match(G1.nodes[n1], G2.nodes[n2]). + + That is, the function will receive the node attribute + dictionaries for n1 and n2 as inputs. + + Ignored if node_subst_cost is specified. If neither + node_match nor node_subst_cost are specified then node + attributes are not considered. + + edge_match : callable + A function that returns True if the edge attribute dictionaries + for the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should + be considered equal during matching. + + The function will be called like + + edge_match(G1[u1][v1], G2[u2][v2]). + + That is, the function will receive the edge attribute + dictionaries of the edges under consideration. + + Ignored if edge_subst_cost is specified. If neither + edge_match nor edge_subst_cost are specified then edge + attributes are not considered. + + node_subst_cost, node_del_cost, node_ins_cost : callable + Functions that return the costs of node substitution, node + deletion, and node insertion, respectively. + + The functions will be called like + + node_subst_cost(G1.nodes[n1], G2.nodes[n2]), + node_del_cost(G1.nodes[n1]), + node_ins_cost(G2.nodes[n2]). + + That is, the functions will receive the node attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function node_subst_cost overrides node_match if specified. + If neither node_match nor node_subst_cost are specified then + default node substitution cost of 0 is used (node attributes + are not considered during matching). + + If node_del_cost is not specified then default node deletion + cost of 1 is used. If node_ins_cost is not specified then + default node insertion cost of 1 is used. + + edge_subst_cost, edge_del_cost, edge_ins_cost : callable + Functions that return the costs of edge substitution, edge + deletion, and edge insertion, respectively. + + The functions will be called like + + edge_subst_cost(G1[u1][v1], G2[u2][v2]), + edge_del_cost(G1[u1][v1]), + edge_ins_cost(G2[u2][v2]). + + That is, the functions will receive the edge attribute + dictionaries as inputs. The functions are expected to return + positive numeric values. + + Function edge_subst_cost overrides edge_match if specified. 
+ If neither edge_match nor edge_subst_cost are specified then + default edge substitution cost of 0 is used (edge attributes + are not considered during matching). + + If edge_del_cost is not specified then default edge deletion + cost of 1 is used. If edge_ins_cost is not specified then + default edge insertion cost of 1 is used. + + upper_bound : numeric + Maximum edit distance to consider. + + strictly_decreasing : bool + If True, return consecutive approximations of strictly + decreasing cost. Otherwise, return all edit paths of cost + less than or equal to the previous minimum cost. + + roots : 2-tuple + Tuple where first element is a node in G1 and the second + is a node in G2. + These nodes are forced to be matched in the comparison to + allow comparison between rooted graphs. + + timeout : numeric + Maximum number of seconds to execute. + After timeout is met, the current best GED is returned. + + Returns + ------- + Generator of tuples (node_edit_path, edge_edit_path, cost) + node_edit_path : list of tuples (u, v) + edge_edit_path : list of tuples ((u1, v1), (u2, v2)) + cost : numeric + + See Also + -------- + graph_edit_distance, optimize_graph_edit_distance, optimal_edit_paths + + References + ---------- + .. [1] Zeina Abu-Aisheh, Romain Raveaux, Jean-Yves Ramel, Patrick + Martineau. An Exact Graph Edit Distance Algorithm for Solving + Pattern Recognition Problems. 4th International Conference on + Pattern Recognition Applications and Methods 2015, Jan 2015, + Lisbon, Portugal. 2015, + <10.5220/0005209202710278>. + https://hal.archives-ouvertes.fr/hal-01168816 + + """ + # TODO: support DiGraph + + import numpy as np + import scipy as sp + + @dataclass + class CostMatrix: + C: ... + lsa_row_ind: ... + lsa_col_ind: ... + ls: ... + + def make_CostMatrix(C, m, n): + # assert(C.shape == (m + n, m + n)) + lsa_row_ind, lsa_col_ind = sp.optimize.linear_sum_assignment(C) + + # Fixup dummy assignments: + # each substitution i<->j should have dummy assignment m+j<->n+i + # NOTE: fast reduce of Cv relies on it + # assert len(lsa_row_ind) == len(lsa_col_ind) + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + subst_ind = [k for k, i, j in indexes if i < m and j < n] + indexes = zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind) + dummy_ind = [k for k, i, j in indexes if i >= m and j >= n] + # assert len(subst_ind) == len(dummy_ind) + lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m + lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n + + return CostMatrix( + C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum() + ) + + def extract_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k in i or k - m in j for k in range(m + n)] + col_ind = [k in j or k - n in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_C(C, i, j, m, n): + # assert(C.shape == (m + n, m + n)) + row_ind = [k not in i and k - m not in j for k in range(m + n)] + col_ind = [k not in j and k - n not in i for k in range(m + n)] + return C[row_ind, :][:, col_ind] + + def reduce_ind(ind, i): + # assert set(ind) == set(range(len(ind))) + rind = ind[[k not in i for k in ind]] + for k in set(i): + rind[rind >= k] -= 1 + return rind + + def match_edges(u, v, pending_g, pending_h, Ce, matched_uv=None): + """ + Parameters: + u, v: matched vertices, u=None or v=None for + deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_uv: partial vertex edit path + list of tuples (u, v) of previously 
matched vertex + mappings u<->v, u=None or v=None for + deletion/insertion + + Returns: + list of (i, j): indices of edge mappings g<->h + localCe: local CostMatrix of edge mappings + (basically submatrix of Ce at cross of rows i, cols j) + """ + M = len(pending_g) + N = len(pending_h) + # assert Ce.C.shape == (M + N, M + N) + + # only attempt to match edges after one node match has been made + # this will stop self-edges on the first node being automatically deleted + # even when a substitution is the better option + if matched_uv is None or len(matched_uv) == 0: + g_ind = [] + h_ind = [] + else: + g_ind = [ + i + for i in range(M) + if pending_g[i][:2] == (u, u) + or any( + pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv + ) + ] + h_ind = [ + j + for j in range(N) + if pending_h[j][:2] == (v, v) + or any( + pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv + ) + ] + + m = len(g_ind) + n = len(h_ind) + + if m or n: + C = extract_C(Ce.C, g_ind, h_ind, M, N) + # assert C.shape == (m + n, m + n) + + # Forbid structurally invalid matches + # NOTE: inf remembered from Ce construction + for k, i in enumerate(g_ind): + g = pending_g[i][:2] + for l, j in enumerate(h_ind): + h = pending_h[j][:2] + if nx.is_directed(G1) or nx.is_directed(G2): + if any( + g == (p, u) and h == (q, v) or g == (u, p) and h == (v, q) + for p, q in matched_uv + ): + continue + else: + if any( + g in ((p, u), (u, p)) and h in ((q, v), (v, q)) + for p, q in matched_uv + ): + continue + if g == (u, u) or any(g == (p, p) for p, q in matched_uv): + continue + if h == (v, v) or any(h == (q, q) for p, q in matched_uv): + continue + C[k, l] = inf + + localCe = make_CostMatrix(C, m, n) + ij = [ + ( + g_ind[k] if k < m else M + h_ind[l], + h_ind[l] if l < n else N + g_ind[k], + ) + for k, l in zip(localCe.lsa_row_ind, localCe.lsa_col_ind) + if k < m or l < n + ] + + else: + ij = [] + localCe = CostMatrix(np.empty((0, 0)), [], [], 0) + + return ij, localCe + + def reduce_Ce(Ce, ij, m, n): + if len(ij): + i, j = zip(*ij) + m_i = m - sum(1 for t in i if t < m) + n_j = n - sum(1 for t in j if t < n) + return make_CostMatrix(reduce_C(Ce.C, i, j, m, n), m_i, n_j) + return Ce + + def get_edit_ops( + matched_uv, pending_u, pending_v, Cv, pending_g, pending_h, Ce, matched_cost + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of + (i, j): indices of vertex mapping u<->v + Cv_ij: reduced CostMatrix of pending vertex mappings + (basically Cv with row i, col j removed) + list of (x, y): indices of edge mappings g<->h + Ce_xy: reduced CostMatrix of pending edge mappings + (basically Ce with rows x, cols y removed) + cost: total cost of edit operation + NOTE: most promising ops first + """ + m = len(pending_u) + n = len(pending_v) + # assert Cv.C.shape == (m + n, m + n) + + # 1) a vertex mapping from optimal linear sum assignment + i, j = min( + (k, l) for k, l in zip(Cv.lsa_row_ind, Cv.lsa_col_ind) if k < m or l < n + ) + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= 
localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.ls + localCe.ls + Ce_xy.ls): + pass + else: + # get reduced Cv efficiently + Cv_ij = CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + reduce_ind(Cv.lsa_row_ind, (i, m + j)), + reduce_ind(Cv.lsa_col_ind, (j, n + i)), + Cv.ls - Cv.C[i, j], + ) + yield (i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls + + # 2) other candidates, sorted by lower-bound cost estimate + other = [] + fixed_i, fixed_j = i, j + if m <= n: + candidates = ( + (t, fixed_j) + for t in range(m + n) + if t != fixed_i and (t < m or t == m + fixed_j) + ) + else: + candidates = ( + (fixed_i, t) + for t in range(m + n) + if t != fixed_j and (t < n or t == n + fixed_i) + ) + for i, j in candidates: + if prune(matched_cost + Cv.C[i, j] + Ce.ls): + continue + Cv_ij = make_CostMatrix( + reduce_C(Cv.C, (i,), (j,), m, n), + m - 1 if i < m else m, + n - 1 if j < n else n, + ) + # assert Cv.ls <= Cv.C[i, j] + Cv_ij.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + Ce.ls): + continue + xy, localCe = match_edges( + pending_u[i] if i < m else None, + pending_v[j] if j < n else None, + pending_g, + pending_h, + Ce, + matched_uv, + ) + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls): + continue + Ce_xy = reduce_Ce(Ce, xy, len(pending_g), len(pending_h)) + # assert Ce.ls <= localCe.ls + Ce_xy.ls + if prune(matched_cost + Cv.C[i, j] + Cv_ij.ls + localCe.ls + Ce_xy.ls): + continue + other.append(((i, j), Cv_ij, xy, Ce_xy, Cv.C[i, j] + localCe.ls)) + + yield from sorted(other, key=lambda t: t[4] + t[1].ls + t[3].ls) + + def get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv, + matched_gh, + pending_g, + pending_h, + Ce, + matched_cost, + ): + """ + Parameters: + matched_uv: partial vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + pending_u, pending_v: lists of vertices not yet mapped + Cv: CostMatrix of pending vertex mappings + matched_gh: partial edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + pending_g, pending_h: lists of edges not yet mapped + Ce: CostMatrix of pending edge mappings + matched_cost: cost of partial edit path + + Returns: + sequence of (vertex_path, edge_path, cost) + vertex_path: complete vertex edit path + list of tuples (u, v) of vertex mappings u<->v, + u=None or v=None for deletion/insertion + edge_path: complete edge edit path + list of tuples (g, h) of edge mappings g<->h, + g=None or h=None for deletion/insertion + cost: total cost of edit path + NOTE: path costs are non-increasing + """ + # debug_print('matched-uv:', matched_uv) + # debug_print('matched-gh:', matched_gh) + # debug_print('matched-cost:', matched_cost) + # debug_print('pending-u:', pending_u) + # debug_print('pending-v:', pending_v) + # debug_print(Cv.C) + # assert list(sorted(G1.nodes)) == list(sorted(list(u for u, v in matched_uv if u is not None) + pending_u)) + # assert list(sorted(G2.nodes)) == list(sorted(list(v for u, v in matched_uv if v is not None) + pending_v)) + # debug_print('pending-g:', pending_g) + # debug_print('pending-h:', pending_h) + # debug_print(Ce.C) + # assert list(sorted(G1.edges)) == list(sorted(list(g for g, h in matched_gh if g is not None) + pending_g)) + # assert list(sorted(G2.edges)) == list(sorted(list(h for g, h in matched_gh if h is not None) + pending_h)) + # debug_print() + + if prune(matched_cost + Cv.ls + Ce.ls): + return + + if not max(len(pending_u), len(pending_v)): + # assert not len(pending_g) + # assert not 
len(pending_h) + # path completed! + # assert matched_cost <= maxcost_value + nonlocal maxcost_value + maxcost_value = min(maxcost_value, matched_cost) + yield matched_uv, matched_gh, matched_cost + + else: + edit_ops = get_edit_ops( + matched_uv, + pending_u, + pending_v, + Cv, + pending_g, + pending_h, + Ce, + matched_cost, + ) + for ij, Cv_ij, xy, Ce_xy, edit_cost in edit_ops: + i, j = ij + # assert Cv.C[i, j] + sum(Ce.C[t] for t in xy) == edit_cost + if prune(matched_cost + edit_cost + Cv_ij.ls + Ce_xy.ls): + continue + + # dive deeper + u = pending_u.pop(i) if i < len(pending_u) else None + v = pending_v.pop(j) if j < len(pending_v) else None + matched_uv.append((u, v)) + for x, y in xy: + len_g = len(pending_g) + len_h = len(pending_h) + matched_gh.append( + ( + pending_g[x] if x < len_g else None, + pending_h[y] if y < len_h else None, + ) + ) + sortedx = sorted(x for x, y in xy) + sortedy = sorted(y for x, y in xy) + G = [ + (pending_g.pop(x) if x < len(pending_g) else None) + for x in reversed(sortedx) + ] + H = [ + (pending_h.pop(y) if y < len(pending_h) else None) + for y in reversed(sortedy) + ] + + yield from get_edit_paths( + matched_uv, + pending_u, + pending_v, + Cv_ij, + matched_gh, + pending_g, + pending_h, + Ce_xy, + matched_cost + edit_cost, + ) + + # backtrack + if u is not None: + pending_u.insert(i, u) + if v is not None: + pending_v.insert(j, v) + matched_uv.pop() + for x, g in zip(sortedx, reversed(G)): + if g is not None: + pending_g.insert(x, g) + for y, h in zip(sortedy, reversed(H)): + if h is not None: + pending_h.insert(y, h) + for _ in xy: + matched_gh.pop() + + # Initialization + + pending_u = list(G1.nodes) + pending_v = list(G2.nodes) + + initial_cost = 0 + if roots: + root_u, root_v = roots + if root_u not in pending_u or root_v not in pending_v: + raise nx.NodeNotFound("Root node not in graph.") + + # remove roots from pending + pending_u.remove(root_u) + pending_v.remove(root_v) + + # cost matrix of vertex mappings + m = len(pending_u) + n = len(pending_v) + C = np.zeros((m + n, m + n)) + if node_subst_cost: + C[0:m, 0:n] = np.array( + [ + node_subst_cost(G1.nodes[u], G2.nodes[v]) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = node_subst_cost(G1.nodes[root_u], G2.nodes[root_v]) + elif node_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(node_match(G1.nodes[u], G2.nodes[v])) + for u in pending_u + for v in pending_v + ] + ).reshape(m, n) + if roots: + initial_cost = 1 - node_match(G1.nodes[root_u], G2.nodes[root_v]) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if node_del_cost: + del_costs = [node_del_cost(G1.nodes[u]) for u in pending_u] + else: + del_costs = [1] * len(pending_u) + # assert not m or min(del_costs) >= 0 + if node_ins_cost: + ins_costs = [node_ins_cost(G2.nodes[v]) for v in pending_v] + else: + ins_costs = [1] * len(pending_v) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Cv = make_CostMatrix(C, m, n) + # debug_print(f"Cv: {m} x {n}") + # debug_print(Cv.C) + + pending_g = list(G1.edges) + pending_h = list(G2.edges) + + # cost matrix of edge mappings + m = len(pending_g) + n = len(pending_h) + C = np.zeros((m + n, m + n)) + if edge_subst_cost: + C[0:m, 0:n] = 
np.array( + [ + edge_subst_cost(G1.edges[g], G2.edges[h]) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + elif edge_match: + C[0:m, 0:n] = np.array( + [ + 1 - int(edge_match(G1.edges[g], G2.edges[h])) + for g in pending_g + for h in pending_h + ] + ).reshape(m, n) + else: + # all zeroes + pass + # assert not min(m, n) or C[0:m, 0:n].min() >= 0 + if edge_del_cost: + del_costs = [edge_del_cost(G1.edges[g]) for g in pending_g] + else: + del_costs = [1] * len(pending_g) + # assert not m or min(del_costs) >= 0 + if edge_ins_cost: + ins_costs = [edge_ins_cost(G2.edges[h]) for h in pending_h] + else: + ins_costs = [1] * len(pending_h) + # assert not n or min(ins_costs) >= 0 + inf = C[0:m, 0:n].sum() + sum(del_costs) + sum(ins_costs) + 1 + C[0:m, n : n + m] = np.array( + [del_costs[i] if i == j else inf for i in range(m) for j in range(m)] + ).reshape(m, m) + C[m : m + n, 0:n] = np.array( + [ins_costs[i] if i == j else inf for i in range(n) for j in range(n)] + ).reshape(n, n) + Ce = make_CostMatrix(C, m, n) + # debug_print(f'Ce: {m} x {n}') + # debug_print(Ce.C) + # debug_print() + + maxcost_value = Cv.C.sum() + Ce.C.sum() + 1 + + if timeout is not None: + if timeout <= 0: + raise nx.NetworkXError("Timeout value must be greater than 0") + start = time.perf_counter() + + def prune(cost): + if timeout is not None: + if time.perf_counter() - start > timeout: + return True + if upper_bound is not None: + if cost > upper_bound: + return True + if cost > maxcost_value: + return True + if strictly_decreasing and cost >= maxcost_value: + return True + return False + + # Now go! + + done_uv = [] if roots is None else [roots] + + for vertex_path, edge_path, cost in get_edit_paths( + done_uv, pending_u, pending_v, Cv, [], pending_g, pending_h, Ce, initial_cost + ): + # assert sorted(G1.nodes) == sorted(u for u, v in vertex_path if u is not None) + # assert sorted(G2.nodes) == sorted(v for u, v in vertex_path if v is not None) + # assert sorted(G1.edges) == sorted(g for g, h in edge_path if g is not None) + # assert sorted(G2.edges) == sorted(h for g, h in edge_path if h is not None) + # print(vertex_path, edge_path, cost, file = sys.stderr) + # assert cost == maxcost_value + yield list(vertex_path), list(edge_path), cost + + +@nx._dispatch +def simrank_similarity( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + SimRank is a similarity metric that says "two objects are considered + to be similar if they are referenced by similar objects." [1]_. + + The pseudo-code definition from the paper is:: + + def simrank(G, u, v): + in_neighbors_u = G.predecessors(u) + in_neighbors_v = G.predecessors(v) + scale = C / (len(in_neighbors_u) * len(in_neighbors_v)) + return scale * sum(simrank(G, w, x) + for w, x in product(in_neighbors_u, + in_neighbors_v)) + + where ``G`` is the graph, ``u`` is the source, ``v`` is the target, + and ``C`` is a float decay or importance factor between 0 and 1. + + The SimRank algorithm for determining node similarity is defined in + [2]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. 
If + ``target`` is specified but ``source`` is not, this argument is + ignored. + + importance_factor : float + The relative importance of indirect neighbors with respect to + direct neighbors. + + max_iterations : integer + Maximum number of iterations. + + tolerance : float + Error tolerance used to check convergence. When an iteration of + the algorithm finds that no similarity value changes more than + this amount, the algorithm halts. + + Returns + ------- + similarity : dictionary or float + If ``source`` and ``target`` are both ``None``, this returns a + dictionary of dictionaries, where keys are node pairs and value + are similarity of the pair of nodes. + + If ``source`` is not ``None`` but ``target`` is, this returns a + dictionary mapping node to the similarity of ``source`` and that + node. + + If neither ``source`` nor ``target`` is ``None``, this returns + the similarity value for the given pair of nodes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.simrank_similarity(G) + {0: {0: 1.0, 1: 0.0}, 1: {0: 0.0, 1: 1.0}} + >>> nx.simrank_similarity(G, source=0) + {0: 1.0, 1: 0.0} + >>> nx.simrank_similarity(G, source=0, target=0) + 1.0 + + The result of this function can be converted to a numpy array + representing the SimRank matrix by using the node order of the + graph to determine which row and column represent each node. + Other ordering of nodes is also possible. + + >>> import numpy as np + >>> sim = nx.simrank_similarity(G) + >>> np.array([[sim[u][v] for v in G] for u in G]) + array([[1., 0.], + [0., 1.]]) + >>> sim_1d = nx.simrank_similarity(G, source=0) + >>> np.array([sim[0][v] for v in G]) + array([1., 0.]) + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/SimRank + .. [2] G. Jeh and J. Widom. + "SimRank: a measure of structural-context similarity", + In KDD'02: Proceedings of the Eighth ACM SIGKDD + International Conference on Knowledge Discovery and Data Mining, + pp. 538--543. ACM Press, 2002. + """ + import numpy as np + + nodelist = list(G) + s_indx = None if source is None else nodelist.index(source) + t_indx = None if target is None else nodelist.index(target) + + x = _simrank_similarity_numpy( + G, s_indx, t_indx, importance_factor, max_iterations, tolerance + ) + + if isinstance(x, np.ndarray): + if x.ndim == 1: + return dict(zip(G, x)) + # else x.ndim == 2 + return {u: dict(zip(G, row)) for u, row in zip(G, x)} + return x + + +def _simrank_similarity_python( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Returns the SimRank similarity of nodes in the graph ``G``. + + This pure Python version is provided for pedagogical purposes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.similarity._simrank_similarity_python(G) + {0: {0: 1, 1: 0.0}, 1: {0: 0.0, 1: 1}} + >>> nx.similarity._simrank_similarity_python(G, source=0) + {0: 1, 1: 0.0} + >>> nx.similarity._simrank_similarity_python(G, source=0, target=0) + 1 + """ + # build up our similarity adjacency dictionary output + newsim = {u: {v: 1 if u == v else 0 for v in G} for u in G} + + # These functions compute the update to the similarity value of the nodes + # `u` and `v` with respect to the previous similarity values. 
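+    # Concretely, the recurrence iterated below is
+    #
+    #     sim(u, v) = C / (|I(u)| * |I(v)|) * sum(sim(w, x))
+    #
+    # summed over all pairs (w, x) in I(u) x I(v), with sim(u, u) fixed at 1,
+    # where I(u) denotes the (in-)neighbors of u and C is the
+    # ``importance_factor``.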
+ def avg_sim(s): + return sum(newsim[w][x] for (w, x) in s) / len(s) if s else 0.0 + + Gadj = G.pred if G.is_directed() else G.adj + + def sim(u, v): + return importance_factor * avg_sim(list(product(Gadj[u], Gadj[v]))) + + for its in range(max_iterations): + oldsim = newsim + newsim = {u: {v: sim(u, v) if u != v else 1 for v in G} for u in G} + is_close = all( + all( + abs(newsim[u][v] - old) <= tolerance * (1 + abs(old)) + for v, old in nbrs.items() + ) + for u, nbrs in oldsim.items() + ) + if is_close: + break + + if its + 1 == max_iterations: + raise nx.ExceededMaxIterations( + f"simrank did not converge after {max_iterations} iterations." + ) + + if source is not None and target is not None: + return newsim[source][target] + if source is not None: + return newsim[source] + return newsim + + +def _simrank_similarity_numpy( + G, + source=None, + target=None, + importance_factor=0.9, + max_iterations=1000, + tolerance=1e-4, +): + """Calculate SimRank of nodes in ``G`` using matrices with ``numpy``. + + The SimRank algorithm for determining node similarity is defined in + [1]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + + source : node + If this is specified, the returned dictionary maps each node + ``v`` in the graph to the similarity between ``source`` and + ``v``. + + target : node + If both ``source`` and ``target`` are specified, the similarity + value between ``source`` and ``target`` is returned. If + ``target`` is specified but ``source`` is not, this argument is + ignored. + + importance_factor : float + The relative importance of indirect neighbors with respect to + direct neighbors. + + max_iterations : integer + Maximum number of iterations. + + tolerance : float + Error tolerance used to check convergence. When an iteration of + the algorithm finds that no similarity value changes more than + this amount, the algorithm halts. + + Returns + ------- + similarity : numpy array or float + If ``source`` and ``target`` are both ``None``, this returns a + 2D array containing SimRank scores of the nodes. + + If ``source`` is not ``None`` but ``target`` is, this returns an + 1D array containing SimRank scores of ``source`` and that + node. + + If neither ``source`` nor ``target`` is ``None``, this returns + the similarity value for the given pair of nodes. + + Examples + -------- + >>> G = nx.cycle_graph(2) + >>> nx.similarity._simrank_similarity_numpy(G) + array([[1., 0.], + [0., 1.]]) + >>> nx.similarity._simrank_similarity_numpy(G, source=0) + array([1., 0.]) + >>> nx.similarity._simrank_similarity_numpy(G, source=0, target=0) + 1.0 + + References + ---------- + .. [1] G. Jeh and J. Widom. + "SimRank: a measure of structural-context similarity", + In KDD'02: Proceedings of the Eighth ACM SIGKDD + International Conference on Knowledge Discovery and Data Mining, + pp. 538--543. ACM Press, 2002. + """ + # This algorithm follows roughly + # + # S = max{C * (A.T * S * A), I} + # + # where C is the importance factor, A is the column normalized + # adjacency matrix, and I is the identity matrix. 
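+    #
+    # In the loop below this recurrence appears as
+    #
+    #     newsim = C * (A.T @ prevsim @ A)
+    #
+    # followed by ``np.fill_diagonal(newsim, 1.0)``, which implements the
+    # elementwise max with the identity matrix (only the diagonal is
+    # affected, since the off-diagonal entries are left unchanged).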
+ import numpy as np + + adjacency_matrix = nx.to_numpy_array(G) + + # column-normalize the ``adjacency_matrix`` + s = np.array(adjacency_matrix.sum(axis=0)) + s[s == 0] = 1 + adjacency_matrix /= s # adjacency_matrix.sum(axis=0) + + newsim = np.eye(len(G), dtype=np.float64) + for its in range(max_iterations): + prevsim = newsim.copy() + newsim = importance_factor * ((adjacency_matrix.T @ prevsim) @ adjacency_matrix) + np.fill_diagonal(newsim, 1.0) + + if np.allclose(prevsim, newsim, atol=tolerance): + break + + if its + 1 == max_iterations: + raise nx.ExceededMaxIterations( + f"simrank did not converge after {max_iterations} iterations." + ) + + if source is not None and target is not None: + return newsim[source, target] + if source is not None: + return newsim[source] + return newsim + + +@nx._dispatch(edge_attrs="weight") +def panther_similarity( + G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None, weight="weight" +): + r"""Returns the Panther similarity of nodes in the graph `G` to node ``v``. + + Panther is a similarity metric that says "two objects are considered + to be similar if they frequently appear on the same paths." [1]_. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + source : node + Source node for which to find the top `k` similar other nodes + k : int (default = 5) + The number of most similar nodes to return + path_length : int (default = 5) + How long the randomly generated paths should be (``T`` in [1]_) + c : float (default = 0.5) + A universal positive constant used to scale the number + of sample random paths to generate. + delta : float (default = 0.1) + The probability that the similarity $S$ is not an epsilon-approximation to (R, phi), + where $R$ is the number of random paths and $\phi$ is the probability + that an element sampled from a set $A \subseteq D$, where $D$ is the domain. + eps : float or None (default = None) + The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore, + if no value is provided, the recommended computed value will be used. + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + similarity : dictionary + Dictionary of nodes to similarity scores (as floats). Note: + the self-similarity (i.e., ``v``) will not be included in + the returned dictionary. + + Examples + -------- + >>> G = nx.star_graph(10) + >>> sim = nx.panther_similarity(G, 0) + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. + """ + import numpy as np + + num_nodes = G.number_of_nodes() + if num_nodes < k: + warnings.warn( + f"Number of nodes is {num_nodes}, but requested k is {k}. " + "Setting k to number of nodes." 
+ ) + k = num_nodes + # According to [1], they empirically determined + # a good value for ``eps`` to be sqrt( 1 / |E| ) + if eps is None: + eps = np.sqrt(1.0 / G.number_of_edges()) + + inv_node_map = {name: index for index, name in enumerate(G.nodes)} + node_map = np.array(G) + + # Calculate the sample size ``R`` for how many paths + # to randomly generate + t_choose_2 = math.comb(path_length, 2) + sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta))) + index_map = {} + _ = list( + generate_random_paths( + G, sample_size, path_length=path_length, index_map=index_map, weight=weight + ) + ) + S = np.zeros(num_nodes) + + inv_sample_size = 1 / sample_size + + source_paths = set(index_map[source]) + + # Calculate the path similarities + # between ``source`` (v) and ``node`` (v_j) + # using our inverted index mapping of + # vertices to paths + for node, paths in index_map.items(): + # Only consider paths where both + # ``node`` and ``source`` are present + common_paths = source_paths.intersection(paths) + S[inv_node_map[node]] = len(common_paths) * inv_sample_size + + # Retrieve top ``k`` similar + # Note: the below performed anywhere from 4-10x faster + # (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]`` + top_k_unsorted = np.argpartition(S, -k)[-k:] + top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1] + + # Add back the similarity scores + top_k_sorted_names = (node_map[n] for n in top_k_sorted) + top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted])) + + # Remove the self-similarity + top_k_with_val.pop(source, None) + return top_k_with_val + + +@nx._dispatch(edge_attrs="weight") +def generate_random_paths( + G, sample_size, path_length=5, index_map=None, weight="weight" +): + """Randomly generate `sample_size` paths of length `path_length`. + + Parameters + ---------- + G : NetworkX graph + A NetworkX graph + sample_size : integer + The number of paths to generate. This is ``R`` in [1]_. + path_length : integer (default = 5) + The maximum size of the path to randomly generate. + This is ``T`` in [1]_. According to the paper, ``T >= 5`` is + recommended. + index_map : dictionary, optional + If provided, this will be populated with the inverted + index of nodes mapped to the set of generated random path + indices within ``paths``. + weight : string or None, optional (default="weight") + The name of an edge attribute that holds the numerical value + used as a weight. If None then each edge has weight 1. + + Returns + ------- + paths : generator of lists + Generator of `sample_size` paths each with length `path_length`. + + Examples + -------- + Note that the return value is the list of paths: + + >>> G = nx.star_graph(3) + >>> random_path = nx.generate_random_paths(G, 2) + + By passing a dictionary into `index_map`, it will build an + inverted index mapping of nodes to the paths in which that node is present: + + >>> G = nx.star_graph(3) + >>> index_map = {} + >>> random_path = nx.generate_random_paths(G, 3, index_map=index_map) + >>> paths_containing_node_0 = [random_path[path_idx] for path_idx in index_map.get(0, [])] + + References + ---------- + .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J. + Panther: Fast top-k similarity search on large networks. + In Proceedings of the ACM SIGKDD International Conference + on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454). + Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267. 
+ """ + import numpy as np + + # Calculate transition probabilities between + # every pair of vertices according to Eq. (3) + adj_mat = nx.to_numpy_array(G, weight=weight) + inv_row_sums = np.reciprocal(adj_mat.sum(axis=1)).reshape(-1, 1) + transition_probabilities = adj_mat * inv_row_sums + + node_map = np.array(G) + num_nodes = G.number_of_nodes() + + for path_index in range(sample_size): + # Sample current vertex v = v_i uniformly at random + node_index = np.random.randint(0, high=num_nodes) + node = node_map[node_index] + + # Add v into p_r and add p_r into the path set + # of v, i.e., P_v + path = [node] + + # Build the inverted index (P_v) of vertices to paths + if index_map is not None: + if node in index_map: + index_map[node].add(path_index) + else: + index_map[node] = {path_index} + + starting_index = node_index + for _ in range(path_length): + # Randomly sample a neighbor (v_j) according + # to transition probabilities from ``node`` (v) to its neighbors + neighbor_index = np.random.choice( + num_nodes, p=transition_probabilities[starting_index] + ) + + # Set current vertex (v = v_j) + starting_index = neighbor_index + + # Add v into p_r + neighbor_node = node_map[neighbor_index] + path.append(neighbor_node) + + # Add p_r into P_v + if index_map is not None: + if neighbor_node in index_map: + index_map[neighbor_node].add(path_index) + else: + index_map[neighbor_node] = {path_index} + + yield path diff --git a/MLPY/Lib/site-packages/networkx/algorithms/simple_paths.py b/MLPY/Lib/site-packages/networkx/algorithms/simple_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..cb288a67c3bf2e073236cae78b02c0e9d266baae --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/simple_paths.py @@ -0,0 +1,978 @@ +from heapq import heappop, heappush +from itertools import count + +import networkx as nx +from networkx.algorithms.shortest_paths.weighted import _weight_function +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "all_simple_paths", + "is_simple_path", + "shortest_simple_paths", + "all_simple_edge_paths", +] + + +@nx._dispatch +def is_simple_path(G, nodes): + """Returns True if and only if `nodes` form a simple path in `G`. + + A *simple path* in a graph is a nonempty sequence of nodes in which + no node appears more than once in the sequence, and each adjacent + pair of nodes in the sequence is adjacent in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph. + nodes : list + A list of one or more nodes in the graph `G`. + + Returns + ------- + bool + Whether the given list of nodes represents a simple path in `G`. + + Notes + ----- + An empty list of nodes is not a path but a list of one node is a + path. Here's an explanation why. + + This function operates on *node paths*. One could also consider + *edge paths*. There is a bijection between node paths and edge + paths. + + The *length of a path* is the number of edges in the path, so a list + of nodes of length *n* corresponds to a path of length *n* - 1. + Thus the smallest edge path would be a list of zero edges, the empty + path. This corresponds to a list of one node. 
+ + To convert between a node path and an edge path, you can use code + like the following:: + + >>> from networkx.utils import pairwise + >>> nodes = [0, 1, 2, 3] + >>> edges = list(pairwise(nodes)) + >>> edges + [(0, 1), (1, 2), (2, 3)] + >>> nodes = [edges[0][0]] + [v for u, v in edges] + >>> nodes + [0, 1, 2, 3] + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> nx.is_simple_path(G, [2, 3, 0]) + True + >>> nx.is_simple_path(G, [0, 2]) + False + + """ + # The empty list is not a valid path. Could also return + # NetworkXPointlessConcept here. + if len(nodes) == 0: + return False + + # If the list is a single node, just check that the node is actually + # in the graph. + if len(nodes) == 1: + return nodes[0] in G + + # check that all nodes in the list are in the graph, if at least one + # is not in the graph, then this is not a simple path + if not all(n in G for n in nodes): + return False + + # If the list contains repeated nodes, then it's not a simple path + if len(set(nodes)) != len(nodes): + return False + + # Test that each adjacent pair of nodes is adjacent. + return all(v in G[u] for u, v in pairwise(nodes)) + + +@nx._dispatch +def all_simple_paths(G, source, target, cutoff=None): + """Generate all simple paths in the graph G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. If it is possible to traverse the same sequence of + nodes in multiple ways, namely through parallel edges, then it will be + returned multiple times (once for each viable edge combination). + + Examples + -------- + This iterator generates lists of nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=3): + ... print(path) + ... + [0, 1, 2, 3] + [0, 1, 3] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + + You can generate only those paths that are shorter than a certain + length by using the `cutoff` keyword argument:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2) + >>> print(list(paths)) + [[0, 1, 3], [0, 2, 3], [0, 3]] + + To get each path as the corresponding list of edges, you can use the + :func:`networkx.utils.pairwise` helper function:: + + >>> paths = nx.all_simple_paths(G, source=0, target=3) + >>> for path in map(nx.utils.pairwise, paths): + ... print(list(path)) + [(0, 1), (1, 2), (2, 3)] + [(0, 1), (1, 3)] + [(0, 2), (2, 1), (1, 3)] + [(0, 2), (2, 3)] + [(0, 3)] + + Pass an iterable of nodes as target to generate all paths ending in any of several nodes:: + + >>> G = nx.complete_graph(4) + >>> for path in nx.all_simple_paths(G, source=0, target=[3, 2]): + ... print(path) + ... 
+ [0, 1, 2] + [0, 1, 2, 3] + [0, 1, 3] + [0, 1, 3, 2] + [0, 2] + [0, 2, 1, 3] + [0, 2, 3] + [0, 3] + [0, 3, 1, 2] + [0, 3, 2] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph using a functional programming approach:: + + >>> from itertools import chain + >>> from itertools import product + >>> from itertools import starmap + >>> from functools import partial + >>> + >>> chaini = chain.from_iterable + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = partial(nx.all_simple_paths, G) + >>> list(chaini(starmap(all_paths, product(roots, leaves)))) + [[0, 1, 2], [0, 3, 2]] + + The same list computed using an iterative approach:: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (0, 3), (3, 2)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = (v for v, d in G.out_degree() if d == 0) + >>> all_paths = [] + >>> for root in roots: + ... for leaf in leaves: + ... paths = nx.all_simple_paths(G, root, leaf) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 2], [0, 3, 2]] + + Iterate over each path from the root nodes to the leaf nodes in a + directed acyclic graph passing all leaves together to avoid unnecessary + compute:: + + >>> G = nx.DiGraph([(0, 1), (2, 1), (1, 3), (1, 4)]) + >>> roots = (v for v, d in G.in_degree() if d == 0) + >>> leaves = [v for v, d in G.out_degree() if d == 0] + >>> all_paths = [] + >>> for root in roots: + ... paths = nx.all_simple_paths(G, root, leaves) + ... all_paths.extend(paths) + >>> all_paths + [[0, 1, 3], [0, 1, 4], [2, 1, 3], [2, 1, 4]] + + If parallel edges offer multiple ways to traverse a given sequence of + nodes, this sequence of nodes will be returned multiple times: + + >>> G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 2)]) + >>> list(nx.all_simple_paths(G, 0, 2)) + [[0, 1, 2], [0, 1, 2]] + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + This function does not check that a path exists between `source` and + `target`. For large graphs, this may result in very long runtimes. + Consider using `has_path` to check that a path exists between `source` and + `target` before calling this function on large graphs. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. 
+ + See Also + -------- + all_shortest_paths, shortest_path, has_path + + """ + if source not in G: + raise nx.NodeNotFound(f"source node {source} not in graph") + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError as err: + raise nx.NodeNotFound(f"target node {target} not in graph") from err + if source in targets: + return _empty_generator() + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return _empty_generator() + if G.is_multigraph(): + return _all_simple_paths_multigraph(G, source, targets, cutoff) + else: + return _all_simple_paths_graph(G, source, targets, cutoff) + + +def _empty_generator(): + yield from () + + +def _all_simple_paths_graph(G, source, targets, cutoff): + visited = {source: True} + stack = [iter(G[source])] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = True + if targets - set(visited.keys()): # expand stack until find all targets + stack.append(iter(G[child])) + else: + visited.popitem() # maybe other ways to child + else: # len(visited) == cutoff: + for target in (targets & (set(children) | {child})) - set(visited.keys()): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +def _all_simple_paths_multigraph(G, source, targets, cutoff): + visited = {source: True} + stack = [(v for u, v in G.edges(source))] + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.popitem() + elif len(visited) < cutoff: + if child in visited: + continue + if child in targets: + yield list(visited) + [child] + visited[child] = True + if targets - set(visited.keys()): + stack.append((v for u, v in G.edges(child))) + else: + visited.popitem() + else: # len(visited) == cutoff: + for target in targets - set(visited.keys()): + count = ([child] + list(children)).count(target) + for i in range(count): + yield list(visited) + [target] + stack.pop() + visited.popitem() + + +@nx._dispatch +def all_simple_edge_paths(G, source, target, cutoff=None): + """Generate lists of edges for all simple paths in G from source to target. + + A simple path is a path with no repeated nodes. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : nodes + Single node or iterable of nodes at which to end path + + cutoff : integer, optional + Depth to stop the search. Only paths of length <= cutoff are returned. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths. If there are no paths + between the source and target within the given cutoff the generator + produces no output. + For multigraphs, the list of edges have elements of the form `(u,v,k)`. + Where `k` corresponds to the edge key. + + Examples + -------- + + Print the simple path edges of a Graph:: + + >>> g = nx.Graph([(1, 2), (2, 4), (1, 3), (3, 4)]) + >>> for path in sorted(nx.all_simple_edge_paths(g, 1, 4)): + ... print(path) + [(1, 2), (2, 4)] + [(1, 3), (3, 4)] + + Print the simple path edges of a MultiGraph. Returned edges come with + their associated keys:: + + >>> mg = nx.MultiGraph() + >>> mg.add_edge(1, 2, key="k0") + 'k0' + >>> mg.add_edge(1, 2, key="k1") + 'k1' + >>> mg.add_edge(2, 3, key="k0") + 'k0' + >>> for path in sorted(nx.all_simple_edge_paths(mg, 1, 3)): + ... 
print(path) + [(1, 2, 'k0'), (2, 3, 'k0')] + [(1, 2, 'k1'), (2, 3, 'k0')] + + + Notes + ----- + This algorithm uses a modified depth-first search to generate the + paths [1]_. A single path can be found in $O(V+E)$ time but the + number of simple paths in a graph can be very large, e.g. $O(n!)$ in + the complete graph of order $n$. + + References + ---------- + .. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms", + Addison Wesley Professional, 3rd ed., 2001. + + See Also + -------- + all_shortest_paths, shortest_path, all_simple_paths + + """ + if source not in G: + raise nx.NodeNotFound("source node %s not in graph" % source) + if target in G: + targets = {target} + else: + try: + targets = set(target) + except TypeError: + raise nx.NodeNotFound("target node %s not in graph" % target) + if source in targets: + return [] + if cutoff is None: + cutoff = len(G) - 1 + if cutoff < 1: + return [] + if G.is_multigraph(): + for simp_path in _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + yield simp_path + else: + for simp_path in _all_simple_paths_graph(G, source, targets, cutoff): + yield list(zip(simp_path[:-1], simp_path[1:])) + + +def _all_simple_edge_paths_multigraph(G, source, targets, cutoff): + if not cutoff or cutoff < 1: + return [] + visited = [source] + stack = [iter(G.edges(source, keys=True))] + + while stack: + children = stack[-1] + child = next(children, None) + if child is None: + stack.pop() + visited.pop() + elif len(visited) < cutoff: + if child[1] in targets: + yield visited[1:] + [child] + elif child[1] not in [v[0] for v in visited[1:]]: + visited.append(child) + stack.append(iter(G.edges(child[1], keys=True))) + else: # len(visited) == cutoff: + for u, v, k in [child] + list(children): + if v in targets: + yield visited[1:] + [(u, v, k)] + stack.pop() + visited.pop() + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight") +def shortest_simple_paths(G, source, target, weight=None): + """Generate all simple paths in the graph G from source to target, + starting from shortest ones. + + A simple path is a path with no repeated nodes. + + If a weighted shortest path search is to be used, no negative weights + are allowed. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for path + + target : node + Ending node for path + + weight : string or function + If it is a string, it is the name of the edge attribute to be + used as a weight. + + If it is a function, the weight of an edge is the value returned + by the function. The function must accept exactly three positional + arguments: the two endpoints of an edge and the dictionary of edge + attributes for that edge. The function must return a number. + + If None all edges are considered to have unit weight. Default + value None. + + Returns + ------- + path_generator: generator + A generator that produces lists of simple paths, in order from + shortest to longest. + + Raises + ------ + NetworkXNoPath + If no path exists between source and target. + + NetworkXError + If source or target nodes are not in the input graph. + + NetworkXNotImplemented + If the input graph is a Multi[Di]Graph. + + Examples + -------- + + >>> G = nx.cycle_graph(7) + >>> paths = list(nx.shortest_simple_paths(G, 0, 3)) + >>> print(paths) + [[0, 1, 2, 3], [0, 6, 5, 4, 3]] + + You can use this function to efficiently compute the k shortest/best + paths between two nodes. + + >>> from itertools import islice + >>> def k_shortest_paths(G, source, target, k, weight=None): + ... 
return list(
+    ...         islice(nx.shortest_simple_paths(G, source, target, weight=weight), k)
+    ...     )
+    >>> for path in k_shortest_paths(G, 0, 3, 2):
+    ...     print(path)
+    [0, 1, 2, 3]
+    [0, 6, 5, 4, 3]
+
+    Notes
+    -----
+    This procedure is based on the algorithm by Jin Y. Yen [1]_. Finding
+    the first $K$ paths requires $O(KN^3)$ operations.
+
+    See Also
+    --------
+    all_shortest_paths
+    shortest_path
+    all_simple_paths
+
+    References
+    ----------
+    .. [1] Jin Y. Yen, "Finding the K Shortest Loopless Paths in a
+       Network", Management Science, Vol. 17, No. 11, Theory Series
+       (Jul., 1971), pp. 712-716.
+
+    """
+    if source not in G:
+        raise nx.NodeNotFound(f"source node {source} not in graph")
+
+    if target not in G:
+        raise nx.NodeNotFound(f"target node {target} not in graph")
+
+    if weight is None:
+        length_func = len
+        shortest_path_func = _bidirectional_shortest_path
+    else:
+        wt = _weight_function(G, weight)
+
+        def length_func(path):
+            return sum(
+                wt(u, v, G.get_edge_data(u, v)) for (u, v) in zip(path, path[1:])
+            )
+
+        shortest_path_func = _bidirectional_dijkstra
+
+    listA = []
+    listB = PathBuffer()
+    prev_path = None
+    while True:
+        if not prev_path:
+            length, path = shortest_path_func(G, source, target, weight=weight)
+            listB.push(length, path)
+        else:
+            ignore_nodes = set()
+            ignore_edges = set()
+            for i in range(1, len(prev_path)):
+                root = prev_path[:i]
+                root_length = length_func(root)
+                for path in listA:
+                    if path[:i] == root:
+                        ignore_edges.add((path[i - 1], path[i]))
+                try:
+                    length, spur = shortest_path_func(
+                        G,
+                        root[-1],
+                        target,
+                        ignore_nodes=ignore_nodes,
+                        ignore_edges=ignore_edges,
+                        weight=weight,
+                    )
+                    path = root[:-1] + spur
+                    listB.push(root_length + length, path)
+                except nx.NetworkXNoPath:
+                    pass
+                ignore_nodes.add(root[-1])
+
+        if listB:
+            path = listB.pop()
+            yield path
+            listA.append(path)
+            prev_path = path
+        else:
+            break
+
+
+class PathBuffer:
+    def __init__(self):
+        self.paths = set()
+        self.sortedpaths = []
+        self.counter = count()
+
+    def __len__(self):
+        return len(self.sortedpaths)
+
+    def push(self, cost, path):
+        hashable_path = tuple(path)
+        if hashable_path not in self.paths:
+            heappush(self.sortedpaths, (cost, next(self.counter), path))
+            self.paths.add(hashable_path)
+
+    def pop(self):
+        (cost, num, path) = heappop(self.sortedpaths)
+        hashable_path = tuple(path)
+        self.paths.remove(hashable_path)
+        return path
+
+
+def _bidirectional_shortest_path(
+    G, source, target, ignore_nodes=None, ignore_edges=None, weight=None
+):
+    """Returns the shortest path between source and target ignoring
+    nodes and edges in the containers ignore_nodes and ignore_edges.
+
+    This is a custom modification of the standard bidirectional shortest
+    path implementation at networkx.algorithms.unweighted
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        starting node for path
+
+    target : node
+        ending node for path
+
+    ignore_nodes : container of nodes
+        nodes to ignore, optional
+
+    ignore_edges : container of edges
+        edges to ignore, optional
+
+    weight : None
+        This function accepts a weight argument for convenience of
+        shortest_simple_paths function. It will be ignored.
+
+    Returns
+    -------
+    length : integer
+        The number of nodes in the path (the value used by
+        ``shortest_simple_paths`` as the unweighted path length).
+    path : list
+        List of nodes in a path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+ + See Also + -------- + shortest_path + + """ + # call helper to do the real work + results = _bidirectional_pred_succ(G, source, target, ignore_nodes, ignore_edges) + pred, succ, w = results + + # build path from pred+w+succ + path = [] + # from w to target + while w is not None: + path.append(w) + w = succ[w] + # from source to w + w = pred[path[0]] + while w is not None: + path.insert(0, w) + w = pred[w] + + return len(path), path + + +def _bidirectional_pred_succ(G, source, target, ignore_nodes=None, ignore_edges=None): + """Bidirectional shortest path helper. + Returns (pred,succ,w) where + pred is a dictionary of predecessors from w to the source, and + succ is a dictionary of successors from w to the target. + """ + # does BFS from both source and target and meets in the middle + if ignore_nodes and (source in ignore_nodes or target in ignore_nodes): + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + if target == source: + return ({target: None}, {source: None}, source) + + # handle either directed or undirected + if G.is_directed(): + Gpred = G.predecessors + Gsucc = G.successors + else: + Gpred = G.neighbors + Gsucc = G.neighbors + + # support optional nodes filter + if ignore_nodes: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if w not in ignore_nodes: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # support optional edges filter + if ignore_edges: + if G.is_directed(): + + def filter_pred_iter(pred_iter): + def iterate(v): + for w in pred_iter(v): + if (w, v) not in ignore_edges: + yield w + + return iterate + + def filter_succ_iter(succ_iter): + def iterate(v): + for w in succ_iter(v): + if (v, w) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_pred_iter(Gpred) + Gsucc = filter_succ_iter(Gsucc) + + else: + + def filter_iter(nodes): + def iterate(v): + for w in nodes(v): + if (v, w) not in ignore_edges and (w, v) not in ignore_edges: + yield w + + return iterate + + Gpred = filter_iter(Gpred) + Gsucc = filter_iter(Gsucc) + + # predecessor and successors in search + pred = {source: None} + succ = {target: None} + + # initialize fringes, start with forward + forward_fringe = [source] + reverse_fringe = [target] + + while forward_fringe and reverse_fringe: + if len(forward_fringe) <= len(reverse_fringe): + this_level = forward_fringe + forward_fringe = [] + for v in this_level: + for w in Gsucc(v): + if w not in pred: + forward_fringe.append(w) + pred[w] = v + if w in succ: + # found path + return pred, succ, w + else: + this_level = reverse_fringe + reverse_fringe = [] + for v in this_level: + for w in Gpred(v): + if w not in succ: + succ[w] = v + reverse_fringe.append(w) + if w in pred: + # found path + return pred, succ, w + + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") + + +def _bidirectional_dijkstra( + G, source, target, weight="weight", ignore_nodes=None, ignore_edges=None +): + """Dijkstra's algorithm for shortest paths using bidirectional search. + + This function returns the shortest path between source and target + ignoring nodes and edges in the containers ignore_nodes and + ignore_edges. + + This is a custom modification of the standard Dijkstra bidirectional + shortest path implementation at networkx.algorithms.weighted + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node. + + target : node + Ending node. 
+
+    weight: string, function, optional (default='weight')
+        Edge data key or weight function corresponding to the edge weight
+
+    ignore_nodes : container of nodes
+        nodes to ignore, optional
+
+    ignore_edges : container of edges
+        edges to ignore, optional
+
+    Returns
+    -------
+    length : number
+        Shortest path length.
+    path : list
+        List of nodes in a path from source to target.
+
+    Raises
+    ------
+    NetworkXNoPath
+        If no path exists between source and target.
+
+    Notes
+    -----
+    Edge weight attributes must be numerical.
+    Distances are calculated as sums of weighted edges traversed.
+
+    In practice bidirectional Dijkstra is much more than twice as fast as
+    ordinary Dijkstra.
+
+    Ordinary Dijkstra expands nodes in a sphere-like manner from the
+    source. The radius of this sphere will eventually be the length
+    of the shortest path. Bidirectional Dijkstra will expand nodes
+    from both the source and the target, making two spheres of half
+    this radius. The volume of the first sphere is proportional to
+    ``pi*r*r`` while the other two together cover ``2*pi*(r/2)*(r/2)``,
+    half that volume.
+
+    This algorithm is not guaranteed to work if edge weights
+    are negative or are floating point numbers
+    (overflows and roundoff errors can cause problems).
+
+    See Also
+    --------
+    shortest_path
+    shortest_path_length
+    """
+    if ignore_nodes and (source in ignore_nodes or target in ignore_nodes):
+        raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
+    if source == target:
+        if source not in G:
+            raise nx.NodeNotFound(f"Node {source} not in graph")
+        return (0, [source])
+
+    # handle either directed or undirected
+    if G.is_directed():
+        Gpred = G.predecessors
+        Gsucc = G.successors
+    else:
+        Gpred = G.neighbors
+        Gsucc = G.neighbors
+
+    # support optional nodes filter
+    if ignore_nodes:
+
+        def filter_iter(nodes):
+            def iterate(v):
+                for w in nodes(v):
+                    if w not in ignore_nodes:
+                        yield w
+
+            return iterate
+
+        Gpred = filter_iter(Gpred)
+        Gsucc = filter_iter(Gsucc)
+
+    # support optional edges filter
+    if ignore_edges:
+        if G.is_directed():
+
+            def filter_pred_iter(pred_iter):
+                def iterate(v):
+                    for w in pred_iter(v):
+                        if (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            def filter_succ_iter(succ_iter):
+                def iterate(v):
+                    for w in succ_iter(v):
+                        if (v, w) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_pred_iter(Gpred)
+            Gsucc = filter_succ_iter(Gsucc)
+
+        else:
+
+            def filter_iter(nodes):
+                def iterate(v):
+                    for w in nodes(v):
+                        if (v, w) not in ignore_edges and (w, v) not in ignore_edges:
+                            yield w
+
+                return iterate
+
+            Gpred = filter_iter(Gpred)
+            Gsucc = filter_iter(Gsucc)
+
+    push = heappush
+    pop = heappop
+    # Init:  Forward             Backward
+    dists = [{}, {}]  # dictionary of final distances
+    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
+    fringe = [[], []]  # heap of (distance, counter, node) triples for
+    # extracting next node to expand
+    seen = [{source: 0}, {target: 0}]  # dictionary of distances to
+    # nodes seen
+    c = count()
+    # initialize fringe heap
+    push(fringe[0], (0, next(c), source))
+    push(fringe[1], (0, next(c), target))
+    # neighs for extracting correct neighbor information
+    neighs = [Gsucc, Gpred]
+    # variables to hold shortest discovered path
+    # finaldist = 1e30000
+    finalpath = []
+    dir = 1
+    while fringe[0] and fringe[1]:
+        # choose direction
+        # dir == 0 is forward direction and dir == 1 is back
+        dir = 1 - dir
+        # extract closest to expand
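+        # (each fringe entry carries a monotonically increasing counter as a
+        # tiebreaker, so the heap never has to compare node objects directly)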
+ (dist, _, v) = pop(fringe[dir]) + if v in dists[dir]: + # Shortest path to v has already been found + continue + # update distance + dists[dir][v] = dist # equal to seen[dir][v] + if v in dists[1 - dir]: + # if we have scanned v in both directions we are done + # we have now discovered the shortest path + return (finaldist, finalpath) + + wt = _weight_function(G, weight) + for w in neighs[dir](v): + if dir == 0: # forward + minweight = wt(v, w, G.get_edge_data(v, w)) + vwLength = dists[dir][v] + minweight + else: # back, must remember to change v,w->w,v + minweight = wt(w, v, G.get_edge_data(w, v)) + vwLength = dists[dir][v] + minweight + + if w in dists[dir]: + if vwLength < dists[dir][w]: + raise ValueError("Contradictory paths found: negative weights?") + elif w not in seen[dir] or vwLength < seen[dir][w]: + # relaxing + seen[dir][w] = vwLength + push(fringe[dir], (vwLength, next(c), w)) + paths[dir][w] = paths[dir][v] + [w] + if w in seen[0] and w in seen[1]: + # see if this path is better than the already + # discovered shortest path + totaldist = seen[0][w] + seen[1][w] + if finalpath == [] or finaldist > totaldist: + finaldist = totaldist + revpath = paths[1][w][:] + revpath.reverse() + finalpath = paths[0][w] + revpath[1:] + raise nx.NetworkXNoPath(f"No path between {source} and {target}.") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/smallworld.py b/MLPY/Lib/site-packages/networkx/algorithms/smallworld.py new file mode 100644 index 0000000000000000000000000000000000000000..172c4f9a879a62b5657f8b347dd1d4d6c6cc295b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/smallworld.py @@ -0,0 +1,403 @@ +"""Functions for estimating the small-world-ness of graphs. + +A small world network is characterized by a small average shortest path length, +and a large clustering coefficient. + +Small-worldness is commonly measured with the coefficient sigma or omega. + +Both coefficients compare the average clustering coefficient and shortest path +length of a given graph against the same quantities for an equivalent random +or lattice graph. + +For more information, see the Wikipedia article on small-world network [1]_. + +.. [1] Small-world network:: https://en.wikipedia.org/wiki/Small-world_network + +""" +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["random_reference", "lattice_reference", "sigma", "omega"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def random_reference(G, niter=1, connectivity=True, seed=None): + """Compute a random graph by swapping edges of a given graph. + + Parameters + ---------- + G : graph + An undirected graph with 4 or more nodes. + + niter : integer (optional, default=1) + An edge is rewired approximately `niter` times. + + connectivity : boolean (optional, default=True) + When True, ensure connectivity for the randomized graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The randomized graph. + + Raises + ------ + NetworkXError + If there are fewer than 4 nodes or 2 edges in `G` + + Notes + ----- + The implementation is adapted from the algorithm by Maslov and Sneppen + (2002) [1]_. + + References + ---------- + .. [1] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. 
+ """ + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer that 2 edges") + + from networkx.utils import cumulative_distribution, discrete_sequence + + local_conn = nx.connectivity.local_edge_connectivity + + G = G.copy() + keys, degrees = zip(*G.degree()) # keys, degree + cdf = cumulative_distribution(degrees) # cdf of degree + nnodes = len(G) + nedges = nx.number_of_edges(G) + niter = niter * nedges + ntries = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) + swapcount = 0 + + for i in range(niter): + n = 0 + while n < ntries: + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ai == ci: + continue # same source, skip + a = keys[ai] # convert index to label + c = keys[ci] + # choose target uniformly from neighbors + b = seed.choice(list(G.neighbors(a))) + d = seed.choice(list(G.neighbors(c))) + if b in [a, c, d] or d in [a, b, c]: + continue # all vertices should be different + + # don't create parallel edges + if (d not in G[a]) and (b not in G[c]): + G.add_edge(a, d) + G.add_edge(c, b) + G.remove_edge(a, b) + G.remove_edge(c, d) + + # Check if the graph is still connected + if connectivity and local_conn(G, a, b) == 0: + # Not connected, revert the swap + G.remove_edge(a, d) + G.remove_edge(c, b) + G.add_edge(a, b) + G.add_edge(c, d) + else: + swapcount += 1 + break + n += 1 + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(4) +@nx._dispatch +def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None): + """Latticize the given graph by swapping edges. + + Parameters + ---------- + G : graph + An undirected graph. + + niter : integer (optional, default=1) + An edge is rewired approximately niter times. + + D : numpy.array (optional, default=None) + Distance to the diagonal matrix. + + connectivity : boolean (optional, default=True) + Ensure connectivity for the latticized graph when set to True. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : graph + The latticized graph. + + Raises + ------ + NetworkXError + If there are fewer than 4 nodes or 2 edges in `G` + + Notes + ----- + The implementation is adapted from the algorithm by Sporns et al. [1]_. + which is inspired from the original work by Maslov and Sneppen(2002) [2]_. + + References + ---------- + .. [1] Sporns, Olaf, and Jonathan D. Zwi. + "The small world of the cerebral cortex." + Neuroinformatics 2.2 (2004): 145-162. + .. [2] Maslov, Sergei, and Kim Sneppen. + "Specificity and stability in topology of protein networks." + Science 296.5569 (2002): 910-913. + """ + import numpy as np + + from networkx.utils import cumulative_distribution, discrete_sequence + + local_conn = nx.connectivity.local_edge_connectivity + + if len(G) < 4: + raise nx.NetworkXError("Graph has fewer than four nodes.") + if len(G.edges) < 2: + raise nx.NetworkXError("Graph has fewer that 2 edges") + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. 
+ G = G.copy() + keys, degrees = zip(*G.degree()) # keys, degree + cdf = cumulative_distribution(degrees) # cdf of degree + + nnodes = len(G) + nedges = nx.number_of_edges(G) + if D is None: + D = np.zeros((nnodes, nnodes)) + un = np.arange(1, nnodes) + um = np.arange(nnodes - 1, 0, -1) + u = np.append((0,), np.where(un < um, un, um)) + + for v in range(int(np.ceil(nnodes / 2))): + D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1]) + D[v, :] = D[nnodes - v - 1, :][::-1] + + niter = niter * nedges + # maximal number of rewiring attempts per 'niter' + max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2)) + + for _ in range(niter): + n = 0 + while n < max_attempts: + # pick two random edges without creating edge list + # choose source node indices from discrete distribution + (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed) + if ai == ci: + continue # same source, skip + a = keys[ai] # convert index to label + c = keys[ci] + # choose target uniformly from neighbors + b = seed.choice(list(G.neighbors(a))) + d = seed.choice(list(G.neighbors(c))) + bi = keys.index(b) + di = keys.index(d) + + if b in [a, c, d] or d in [a, b, c]: + continue # all vertices should be different + + # don't create parallel edges + if (d not in G[a]) and (b not in G[c]): + if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]: + # only swap if we get closer to the diagonal + G.add_edge(a, d) + G.add_edge(c, b) + G.remove_edge(a, b) + G.remove_edge(c, d) + + # Check if the graph is still connected + if connectivity and local_conn(G, a, b) == 0: + # Not connected, revert the swap + G.remove_edge(a, d) + G.remove_edge(c, b) + G.add_edge(a, b) + G.add_edge(c, d) + else: + break + n += 1 + + return G + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def sigma(G, niter=100, nrand=10, seed=None): + """Returns the small-world coefficient (sigma) of the given graph. + + The small-world coefficient is defined as: + sigma = C/Cr / L/Lr + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Cr and Lr are respectively the average + clustering coefficient and average shortest path length of an equivalent + random graph. + + A graph is commonly classified as small-world if sigma>1. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + niter : integer (optional, default=100) + Approximate number of rewiring per edge to compute the equivalent + random graph. + nrand : integer (optional, default=10) + Number of random graphs generated to compute the average clustering + coefficient (Cr) and average shortest path length (Lr). + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + sigma : float + The small-world coefficient of G. + + Notes + ----- + The implementation is adapted from Humphries et al. [1]_ [2]_. + + References + ---------- + .. [1] The brainstem reticular formation is a small-world, not scale-free, + network M. D. Humphries, K. Gurney and T. J. Prescott, + Proc. Roy. Soc. B 2006 273, 503-511, doi:10.1098/rspb.2005.3354. + .. [2] Humphries and Gurney (2008). + "Network 'Small-World-Ness': A Quantitative Method for Determining + Canonical Network Equivalence". + PLoS One. 3 (4). PMID 18446219. doi:10.1371/journal.pone.0002051. 
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + for i in range(nrand): + Gr = random_reference(G, niter=niter, seed=seed) + randMetrics["C"].append(nx.transitivity(Gr)) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + C = nx.transitivity(G) + L = nx.average_shortest_path_length(G) + Cr = np.mean(randMetrics["C"]) + Lr = np.mean(randMetrics["L"]) + + sigma = (C / Cr) / (L / Lr) + + return sigma + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch +def omega(G, niter=5, nrand=10, seed=None): + """Returns the small-world coefficient (omega) of a graph + + The small-world coefficient of a graph G is: + + omega = Lr/L - C/Cl + + where C and L are respectively the average clustering coefficient and + average shortest path length of G. Lr is the average shortest path length + of an equivalent random graph and Cl is the average clustering coefficient + of an equivalent lattice graph. + + The small-world coefficient (omega) measures how much G is like a lattice + or a random graph. Negative values mean G is similar to a lattice whereas + positive values mean G is a random graph. + Values close to 0 mean that G has small-world characteristics. + + Parameters + ---------- + G : NetworkX graph + An undirected graph. + + niter: integer (optional, default=5) + Approximate number of rewiring per edge to compute the equivalent + random graph. + + nrand: integer (optional, default=10) + Number of random graphs generated to compute the maximal clustering + coefficient (Cr) and average shortest path length (Lr). + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + + Returns + ------- + omega : float + The small-world coefficient (omega) + + Notes + ----- + The implementation is adapted from the algorithm by Telesford et al. [1]_. + + References + ---------- + .. [1] Telesford, Joyce, Hayasaka, Burdette, and Laurienti (2011). + "The Ubiquity of Small-World Networks". + Brain Connectivity. 1 (0038): 367-75. PMC 3604768. PMID 22432451. + doi:10.1089/brain.2011.0038. 
+ """ + import numpy as np + + # Compute the mean clustering coefficient and average shortest path length + # for an equivalent random graph + randMetrics = {"C": [], "L": []} + + # Calculate initial average clustering coefficient which potentially will + # get replaced by higher clustering coefficients from generated lattice + # reference graphs + Cl = nx.average_clustering(G) + + niter_lattice_reference = niter + niter_random_reference = niter * 2 + + for _ in range(nrand): + # Generate random graph + Gr = random_reference(G, niter=niter_random_reference, seed=seed) + randMetrics["L"].append(nx.average_shortest_path_length(Gr)) + + # Generate lattice graph + Gl = lattice_reference(G, niter=niter_lattice_reference, seed=seed) + + # Replace old clustering coefficient, if clustering is higher in + # generated lattice reference + Cl_temp = nx.average_clustering(Gl) + if Cl_temp > Cl: + Cl = Cl_temp + + C = nx.average_clustering(G) + L = nx.average_shortest_path_length(G) + Lr = np.mean(randMetrics["L"]) + + omega = (Lr / L) - (C / Cl) + + return omega diff --git a/MLPY/Lib/site-packages/networkx/algorithms/smetric.py b/MLPY/Lib/site-packages/networkx/algorithms/smetric.py new file mode 100644 index 0000000000000000000000000000000000000000..80ae314bbdda4c101b15f0ef20983c4f1e259cb0 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/smetric.py @@ -0,0 +1,60 @@ +import networkx as nx + +__all__ = ["s_metric"] + + +@nx._dispatch +def s_metric(G, **kwargs): + """Returns the s-metric [1]_ of graph. + + The s-metric is defined as the sum of the products ``deg(u) * deg(v)`` + for every edge ``(u, v)`` in `G`. + + Parameters + ---------- + G : graph + The graph used to compute the s-metric. + normalized : bool (optional) + Normalize the value. + + .. deprecated:: 3.2 + + The `normalized` keyword argument is deprecated and will be removed + in the future + + Returns + ------- + s : float + The s-metric of the graph. + + References + ---------- + .. [1] Lun Li, David Alderson, John C. Doyle, and Walter Willinger, + Towards a Theory of Scale-Free Graphs: + Definition, Properties, and Implications (Extended Version), 2005. + https://arxiv.org/abs/cond-mat/0501169 + """ + # NOTE: This entire code block + the **kwargs in the signature can all be + # removed when the deprecation expires. + # Normalized is always False, since all `normalized=True` did was raise + # a NotImplementedError + if kwargs: + # Warn for `normalize`, raise for any other kwarg + if "normalized" in kwargs: + import warnings + + warnings.warn( + "\n\nThe `normalized` keyword is deprecated and will be removed\n" + "in the future. 
To silence this warning, remove `normalized`\n" + "when calling `s_metric`.\n\n" + "The value of `normalized` is ignored.", + DeprecationWarning, + stacklevel=3, + ) + else: + # Typical raising behavior for Python when kwarg not recognized + raise TypeError( + f"s_metric got an unexpected keyword argument '{list(kwargs.keys())[0]}'" + ) + + return float(sum(G.degree(u) * G.degree(v) for (u, v) in G.edges())) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/sparsifiers.py b/MLPY/Lib/site-packages/networkx/algorithms/sparsifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..a94aee0d09d65edb9591037bb7b9e3b59972d0d2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/sparsifiers.py @@ -0,0 +1,295 @@ +"""Functions for computing sparsifiers of graphs.""" +import math + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = ["spanner"] + + +@not_implemented_for("directed") +@not_implemented_for("multigraph") +@py_random_state(3) +@nx._dispatch(edge_attrs="weight") +def spanner(G, stretch, weight=None, seed=None): + """Returns a spanner of the given graph with the given stretch. + + A spanner of a graph G = (V, E) with stretch t is a subgraph + H = (V, E_S) such that E_S is a subset of E and the distance between + any pair of nodes in H is at most t times the distance between the + nodes in G. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + stretch : float + The stretch of the spanner. + + weight : object + The edge attribute to use as distance. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + NetworkX graph + A spanner of the given graph with the given stretch. + + Raises + ------ + ValueError + If a stretch less than 1 is given. + + Notes + ----- + This function implements the spanner algorithm by Baswana and Sen, + see [1]. + + This algorithm is a randomized las vegas algorithm: The expected + running time is O(km) where k = (stretch + 1) // 2 and m is the + number of edges in G. The returned graph is always a spanner of the + given graph with the specified stretch. For weighted graphs the + number of edges in the spanner is O(k * n^(1 + 1 / k)) where k is + defined as above and n is the number of nodes in G. For unweighted + graphs the number of edges is O(n^(1 + 1 / k) + kn). + + References + ---------- + [1] S. Baswana, S. Sen. A Simple and Linear Time Randomized + Algorithm for Computing Sparse Spanners in Weighted Graphs. + Random Struct. Algorithms 30(4): 532-563 (2007). 
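+
+    Examples
+    --------
+    A minimal usage sketch (the chosen edges depend on ``seed``, but the
+    node set is always preserved):
+
+    >>> G = nx.complete_graph(10)
+    >>> H = nx.spanner(G, stretch=3, seed=7)
+    >>> set(H.nodes) == set(G.nodes)
+    True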
+ """ + if stretch < 1: + raise ValueError("stretch must be at least 1") + + k = (stretch + 1) // 2 + + # initialize spanner H with empty edge set + H = nx.empty_graph() + H.add_nodes_from(G.nodes) + + # phase 1: forming the clusters + # the residual graph has V' from the paper as its node set + # and E' from the paper as its edge set + residual_graph = _setup_residual_graph(G, weight) + # clustering is a dictionary that maps nodes in a cluster to the + # cluster center + clustering = {v: v for v in G.nodes} + sample_prob = math.pow(G.number_of_nodes(), -1 / k) + size_limit = 2 * math.pow(G.number_of_nodes(), 1 + 1 / k) + + i = 0 + while i < k - 1: + # step 1: sample centers + sampled_centers = set() + for center in set(clustering.values()): + if seed.random() < sample_prob: + sampled_centers.add(center) + + # combined loop for steps 2 and 3 + edges_to_add = set() + edges_to_remove = set() + new_clustering = {} + for v in residual_graph.nodes: + if clustering[v] in sampled_centers: + continue + + # step 2: find neighboring (sampled) clusters and + # lightest edges to them + lightest_edge_neighbor, lightest_edge_weight = _lightest_edge_dicts( + residual_graph, clustering, v + ) + neighboring_sampled_centers = ( + set(lightest_edge_weight.keys()) & sampled_centers + ) + + # step 3: add edges to spanner + if not neighboring_sampled_centers: + # connect to each neighboring center via lightest edge + for neighbor in lightest_edge_neighbor.values(): + edges_to_add.add((v, neighbor)) + # remove all incident edges + for neighbor in residual_graph.adj[v]: + edges_to_remove.add((v, neighbor)) + + else: # there is a neighboring sampled center + closest_center = min( + neighboring_sampled_centers, key=lightest_edge_weight.get + ) + closest_center_weight = lightest_edge_weight[closest_center] + closest_center_neighbor = lightest_edge_neighbor[closest_center] + + edges_to_add.add((v, closest_center_neighbor)) + new_clustering[v] = closest_center + + # connect to centers with edge weight less than + # closest_center_weight + for center, edge_weight in lightest_edge_weight.items(): + if edge_weight < closest_center_weight: + neighbor = lightest_edge_neighbor[center] + edges_to_add.add((v, neighbor)) + + # remove edges to centers with edge weight less than + # closest_center_weight + for neighbor in residual_graph.adj[v]: + neighbor_cluster = clustering[neighbor] + neighbor_weight = lightest_edge_weight[neighbor_cluster] + if ( + neighbor_cluster == closest_center + or neighbor_weight < closest_center_weight + ): + edges_to_remove.add((v, neighbor)) + + # check whether iteration added too many edges to spanner, + # if so repeat + if len(edges_to_add) > size_limit: + # an iteration is repeated O(1) times on expectation + continue + + # iteration succeeded + i = i + 1 + + # actually add edges to spanner + for u, v in edges_to_add: + _add_edge_to_spanner(H, residual_graph, u, v, weight) + + # actually delete edges from residual graph + residual_graph.remove_edges_from(edges_to_remove) + + # copy old clustering data to new_clustering + for node, center in clustering.items(): + if center in sampled_centers: + new_clustering[node] = center + clustering = new_clustering + + # step 4: remove intra-cluster edges + for u in residual_graph.nodes: + for v in list(residual_graph.adj[u]): + if clustering[u] == clustering[v]: + residual_graph.remove_edge(u, v) + + # update residual graph node set + for v in list(residual_graph.nodes): + if v not in clustering: + residual_graph.remove_node(v) + + # phase 2: vertex-cluster 
joining + for v in residual_graph.nodes: + lightest_edge_neighbor, _ = _lightest_edge_dicts(residual_graph, clustering, v) + for neighbor in lightest_edge_neighbor.values(): + _add_edge_to_spanner(H, residual_graph, v, neighbor, weight) + + return H + + +def _setup_residual_graph(G, weight): + """Setup residual graph as a copy of G with unique edges weights. + + The node set of the residual graph corresponds to the set V' from + the Baswana-Sen paper and the edge set corresponds to the set E' + from the paper. + + This function associates distinct weights to the edges of the + residual graph (even for unweighted input graphs), as required by + the algorithm. + + Parameters + ---------- + G : NetworkX graph + An undirected simple graph. + + weight : object + The edge attribute to use as distance. + + Returns + ------- + NetworkX graph + The residual graph used for the Baswana-Sen algorithm. + """ + residual_graph = G.copy() + + # establish unique edge weights, even for unweighted graphs + for u, v in G.edges(): + if not weight: + residual_graph[u][v]["weight"] = (id(u), id(v)) + else: + residual_graph[u][v]["weight"] = (G[u][v][weight], id(u), id(v)) + + return residual_graph + + +def _lightest_edge_dicts(residual_graph, clustering, node): + """Find the lightest edge to each cluster. + + Searches for the minimum-weight edge to each cluster adjacent to + the given node. + + Parameters + ---------- + residual_graph : NetworkX graph + The residual graph used by the Baswana-Sen algorithm. + + clustering : dictionary + The current clustering of the nodes. + + node : node + The node from which the search originates. + + Returns + ------- + lightest_edge_neighbor, lightest_edge_weight : dictionary, dictionary + lightest_edge_neighbor is a dictionary that maps a center C to + a node v in the corresponding cluster such that the edge from + the given node to v is the lightest edge from the given node to + any node in cluster. lightest_edge_weight maps a center C to the + weight of the aforementioned edge. + + Notes + ----- + If a cluster has no node that is adjacent to the given node in the + residual graph then the center of the cluster is not a key in the + returned dictionaries. + """ + lightest_edge_neighbor = {} + lightest_edge_weight = {} + for neighbor in residual_graph.adj[node]: + neighbor_center = clustering[neighbor] + weight = residual_graph[node][neighbor]["weight"] + if ( + neighbor_center not in lightest_edge_weight + or weight < lightest_edge_weight[neighbor_center] + ): + lightest_edge_neighbor[neighbor_center] = neighbor + lightest_edge_weight[neighbor_center] = weight + return lightest_edge_neighbor, lightest_edge_weight + + +def _add_edge_to_spanner(H, residual_graph, u, v, weight): + """Add the edge {u, v} to the spanner H and take weight from + the residual graph. + + Parameters + ---------- + H : NetworkX graph + The spanner under construction. + + residual_graph : NetworkX graph + The residual graph used by the Baswana-Sen algorithm. The weight + for the edge is taken from this graph. + + u : node + One endpoint of the edge. + + v : node + The other endpoint of the edge. + + weight : object + The edge attribute to use as distance. 
+ """ + H.add_edge(u, v) + if weight: + H[u][v][weight] = residual_graph[u][v]["weight"][0] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/structuralholes.py b/MLPY/Lib/site-packages/networkx/algorithms/structuralholes.py new file mode 100644 index 0000000000000000000000000000000000000000..c676177b38e584af178332cfdf65e0a8f0d3c7e4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/structuralholes.py @@ -0,0 +1,283 @@ +"""Functions for computing measures of structural holes.""" + +import networkx as nx + +__all__ = ["constraint", "local_constraint", "effective_size"] + + +@nx._dispatch(edge_attrs="weight") +def mutual_weight(G, u, v, weight=None): + """Returns the sum of the weights of the edge from `u` to `v` and + the edge from `v` to `u` in `G`. + + `weight` is the edge data key that represents the edge weight. If + the specified key is `None` or is not in the edge data for an edge, + that edge is assumed to have weight 1. + + Pre-conditions: `u` and `v` must both be in `G`. + + """ + try: + a_uv = G[u][v].get(weight, 1) + except KeyError: + a_uv = 0 + try: + a_vu = G[v][u].get(weight, 1) + except KeyError: + a_vu = 0 + return a_uv + a_vu + + +@nx._dispatch(edge_attrs="weight") +def normalized_mutual_weight(G, u, v, norm=sum, weight=None): + """Returns normalized mutual weight of the edges from `u` to `v` + with respect to the mutual weights of the neighbors of `u` in `G`. + + `norm` specifies how the normalization factor is computed. It must + be a function that takes a single argument and returns a number. + The argument will be an iterable of mutual weights + of pairs ``(u, w)``, where ``w`` ranges over each (in- and + out-)neighbor of ``u``. Commons values for `normalization` are + ``sum`` and ``max``. + + `weight` can be ``None`` or a string, if None, all edge weights + are considered equal. Otherwise holds the name of the edge + attribute used as weight. + + """ + scale = norm(mutual_weight(G, u, w, weight) for w in set(nx.all_neighbors(G, u))) + return 0 if scale == 0 else mutual_weight(G, u, v, weight) / scale + + +@nx._dispatch(edge_attrs="weight") +def effective_size(G, nodes=None, weight=None): + r"""Returns the effective size of all nodes in the graph ``G``. + + The *effective size* of a node's ego network is based on the concept + of redundancy. A person's ego network has redundancy to the extent + that her contacts are connected to each other as well. The + nonredundant part of a person's relationships is the effective + size of her ego network [1]_. Formally, the effective size of a + node $u$, denoted $e(u)$, is defined by + + .. math:: + + e(u) = \sum_{v \in N(u) \setminus \{u\}} + \left(1 - \sum_{w \in N(v)} p_{uw} m_{vw}\right) + + where $N(u)$ is the set of neighbors of $u$ and $p_{uw}$ is the + normalized mutual weight of the (directed or undirected) edges + joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. And $m_{vw}$ + is the mutual weight of $v$ and $w$ divided by $v$ highest mutual + weight with any of its neighbors. The *mutual weight* of $u$ and $v$ + is the sum of the weights of edges joining them (edge weights are + assumed to be one if the graph is unweighted). + + For the case of unweighted and undirected graphs, Borgatti proposed + a simplified formula to compute effective size [2]_ + + .. math:: + + e(u) = n - \frac{2t}{n} + + where `t` is the number of ties in the ego network (not including + ties to ego) and `n` is the number of nodes (excluding ego). 
+ + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. Directed graphs are treated like + undirected graphs when computing neighbors of ``v``. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the effective size. + If None, the effective size of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the effective size of the node as values. + + Notes + ----- + Burt also defined the related concept of *efficiency* of a node's ego + network, which is its effective size divided by the degree of that + node [1]_. So you can easily compute efficiency: + + >>> G = nx.DiGraph() + >>> G.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + >>> esize = nx.effective_size(G) + >>> efficiency = {n: v / G.degree(n) for n, v in esize.items()} + + See also + -------- + constraint + + References + ---------- + .. [1] Burt, Ronald S. + *Structural Holes: The Social Structure of Competition.* + Cambridge: Harvard University Press, 1995. + + .. [2] Borgatti, S. + "Structural Holes: Unpacking Burt's Redundancy Measures" + CONNECTIONS 20(1):35-38. + http://www.analytictech.com/connections/v20(1)/holes.htm + + """ + + def redundancy(G, u, v, weight=None): + nmw = normalized_mutual_weight + r = sum( + nmw(G, u, w, weight=weight) * nmw(G, v, w, norm=max, weight=weight) + for w in set(nx.all_neighbors(G, u)) + ) + return 1 - r + + effective_size = {} + if nodes is None: + nodes = G + # Use Borgatti's simplified formula for unweighted and undirected graphs + if not G.is_directed() and weight is None: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + E = nx.ego_graph(G, v, center=False, undirected=True) + effective_size[v] = len(E) - (2 * E.size()) / len(E) + else: + for v in nodes: + # Effective size is not defined for isolated nodes + if len(G[v]) == 0: + effective_size[v] = float("nan") + continue + effective_size[v] = sum( + redundancy(G, v, u, weight) for u in set(nx.all_neighbors(G, v)) + ) + return effective_size + + +@nx._dispatch(edge_attrs="weight") +def constraint(G, nodes=None, weight=None): + r"""Returns the constraint on all nodes in the graph ``G``. + + The *constraint* is a measure of the extent to which a node *v* is + invested in those nodes that are themselves invested in the + neighbors of *v*. Formally, the *constraint on v*, denoted `c(v)`, + is defined by + + .. math:: + + c(v) = \sum_{w \in N(v) \setminus \{v\}} \ell(v, w) + + where $N(v)$ is the subset of the neighbors of `v` that are either + predecessors or successors of `v` and $\ell(v, w)$ is the local + constraint on `v` with respect to `w` [1]_. For the definition of local + constraint, see :func:`local_constraint`. + + Parameters + ---------- + G : NetworkX graph + The graph containing ``v``. This can be either directed or undirected. + + nodes : container, optional + Container of nodes in the graph ``G`` to compute the constraint. If + None, the constraint of every node is computed. + + weight : None or string, optional + If None, all edge weights are considered equal. + Otherwise holds the name of the edge attribute used as weight. + + Returns + ------- + dict + Dictionary with nodes as keys and the constraint on the node as values. + + See also + -------- + local_constraint + + References + ---------- + .. 
[1] Burt, Ronald S.
+       "Structural holes and good ideas".
+       American Journal of Sociology (110): 349–399.
+
+    """
+    if nodes is None:
+        nodes = G
+    constraint = {}
+    for v in nodes:
+        # Constraint is not defined for isolated nodes
+        if len(G[v]) == 0:
+            constraint[v] = float("nan")
+            continue
+        constraint[v] = sum(
+            local_constraint(G, v, n, weight) for n in set(nx.all_neighbors(G, v))
+        )
+    return constraint
+
+
+@nx._dispatch(edge_attrs="weight")
+def local_constraint(G, u, v, weight=None):
+    r"""Returns the local constraint on the node ``u`` with respect to
+    the node ``v`` in the graph ``G``.
+
+    Formally, the *local constraint on u with respect to v*, denoted
+    $\ell(u, v)$, is defined by
+
+    .. math::
+
+       \ell(u, v) = \left(p_{uv} + \sum_{w \in N(v)} p_{uw} p_{wv}\right)^2,
+
+    where $N(v)$ is the set of neighbors of $v$ and $p_{uv}$ is the
+    normalized mutual weight of the (directed or undirected) edges
+    joining $u$ and $v$, for each vertex $u$ and $v$ [1]_. The *mutual
+    weight* of $u$ and $v$ is the sum of the weights of edges joining
+    them (edge weights are assumed to be one if the graph is
+    unweighted).
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        The graph containing ``u`` and ``v``. This can be either
+        directed or undirected.
+
+    u : node
+        A node in the graph ``G``.
+
+    v : node
+        A node in the graph ``G``.
+
+    weight : None or string, optional
+        If None, all edge weights are considered equal.
+        Otherwise holds the name of the edge attribute used as weight.
+
+    Returns
+    -------
+    float
+        The local constraint on ``u`` with respect to ``v`` in the graph ``G``.
+
+    See also
+    --------
+    constraint
+
+    References
+    ----------
+    .. [1] Burt, Ronald S.
+       "Structural holes and good ideas".
+       American Journal of Sociology (110): 349–399.
+
+    """
+    nmw = normalized_mutual_weight
+    direct = nmw(G, u, v, weight=weight)
+    indirect = sum(
+        nmw(G, u, w, weight=weight) * nmw(G, w, v, weight=weight)
+        for w in set(nx.all_neighbors(G, u))
+    )
+    return (direct + indirect) ** 2
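
To make the relationship between the three structural-holes measures concrete, here is a small usage sketch against the public entry points (`nx.effective_size`, `nx.constraint`, `nx.local_constraint`); the toy graph is arbitrary and not taken from the module:

```python
import networkx as nx

# A dense triangle {a, b, c} with a single bridge c-d.
G = nx.Graph([("a", "b"), ("a", "c"), ("b", "c"), ("c", "d")])

esize = nx.effective_size(G)  # redundancy-adjusted degree per node
cons = nx.constraint(G)       # Burt's constraint per node

# constraint(v) is, by definition above, the sum of the local
# constraints of v with respect to each of its neighbors.
total = sum(nx.local_constraint(G, "c", n) for n in G["c"])
assert abs(cons["c"] - total) < 1e-12

print(esize["c"], cons["c"])
```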
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/summarization.py b/MLPY/Lib/site-packages/networkx/algorithms/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..26665e09b1ab559a6982311d2ee3d93ea9cc6a00
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/summarization.py
@@ -0,0 +1,561 @@
+"""
+Graph summarization finds smaller representations of graphs, resulting in faster
+runtime of algorithms, reduced storage needs, and noise reduction.
+Summarization has applications in areas such as visualization, pattern mining,
+clustering and community detection, and more. Core graph summarization
+techniques are grouping/aggregation, bit-compression,
+simplification/sparsification, and influence based. Graph summarization
+algorithms often produce either summary graphs in the form of supergraphs or
+sparsified graphs, or a list of independent structures. Supergraphs are the
+most common product; they consist of supernodes and original nodes connected
+by edges and superedges, which represent aggregate edges between nodes and
+supernodes.
+
+Grouping/aggregation based techniques compress graphs by representing
+close/connected nodes and edges in a graph by a single node/edge in a
+supergraph. Nodes can be grouped together into supernodes based on their
+structural similarities or proximity within a graph to reduce the total number
+of nodes in a graph. Edge-grouping techniques group edges into lossy/lossless
+nodes called compressor or virtual nodes to reduce the total number of edges in
+a graph. Edge-grouping techniques can be lossless, meaning that they can be
+used to re-create the original graph, or lossy, requiring less space to store
+the summary graph but at the expense of lower reconstruction accuracy of the
+original graph.
+
+Bit-compression techniques minimize the amount of information needed to
+describe the original graph, while revealing structural patterns in the
+original graph. The two-part minimum description length (MDL) is often used to
+represent the model and the original graph in terms of the model. A key
+difference between graph compression and graph summarization is that graph
+summarization focuses on finding structural patterns within the original graph,
+whereas graph compression focuses on compressing the original graph to be as
+small as possible. **NOTE**: Some bit-compression methods exist solely to
+compress a graph without creating a summary graph or finding comprehensible
+structural patterns.
+
+Simplification/Sparsification techniques attempt to create a sparse
+representation of a graph by removing unimportant nodes and edges from the
+graph. Sparsified graphs differ from supergraphs created by
+grouping/aggregation in that they contain only a subset of the original nodes
+and edges of the original graph.
+
+Influence based techniques aim to find a high-level description of influence
+propagation in a large graph. These methods are scarce and have been mostly
+applied to social graphs.
+
+*dedensification* is a grouping/aggregation based technique to compress the
+neighborhoods around high-degree nodes in unweighted graphs by adding
+compressor nodes that summarize multiple edges of the same type to
+high-degree nodes (nodes with a degree greater than a given threshold).
+Dedensification was developed for the purpose of increasing performance of
+query processing around high-degree nodes in graph databases and enables direct
+operations on the compressed graph. The structural patterns surrounding
+high-degree nodes in the original graph are preserved while using fewer edges
+and adding a small number of compressor nodes. The degree of nodes present in
+the original graph is also preserved. The current implementation of
+dedensification supports graphs with one edge type.
+
+For more information on graph summarization, see `Graph Summarization Methods
+and Applications: A Survey <https://arxiv.org/abs/1612.04883>`_
+"""
+from collections import Counter, defaultdict
+
+import networkx as nx
+
+__all__ = ["dedensify", "snap_aggregation"]
+
+
+@nx._dispatch
+def dedensify(G, threshold, prefix=None, copy=True):
+    """Compresses neighborhoods around high-degree nodes
+
+    Reduces the number of edges to high-degree nodes by adding compressor nodes
+    that summarize multiple edges of the same type to high-degree nodes (nodes
+    with a degree greater than a given threshold). Dedensification also has
+    the added benefit of reducing the total number of edges in the graph.
+    The implementation currently supports graphs with a single edge type.
+
+    Parameters
+    ----------
+    G: graph
+        A networkx graph
+    threshold: int
+        Minimum degree threshold of a node to be considered a high degree node.
+        The threshold must be greater than or equal to 2.
+    prefix: str or None, optional (default: None)
+        An optional prefix for denoting compressor nodes
+    copy: bool, optional (default: True)
+        If True, the dedensified graph is built on a copy of ``G`` and the
+        original graph is left unmodified; if False, ``G`` is dedensified in
+        place.
+
+    Returns
+    -------
+    (graph, set)
+        2-tuple of the dedensified graph and the set of compressor nodes
+
+    Notes
+    -----
+    Following the algorithm in [1]_, edges are removed from the graph by
+    compressing the neighborhoods around high-degree nodes with compressor
+    nodes that summarize multiple edges of the same type to those nodes.
+    Dedensification will only add a compressor node when doing so will reduce
+    the total number of edges in the given graph. This implementation
+    currently supports graphs with a single edge type.
+
+    Examples
+    --------
+    Dedensification will only add compressor nodes when doing so would result
+    in fewer edges::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> original_graph.number_of_edges()
+        15
+        >>> c_graph.number_of_edges()
+        14
+
+    A dedensified, directed graph can be "densified" to reconstruct the
+    original graph::
+
+        >>> original_graph = nx.DiGraph()
+        >>> original_graph.add_nodes_from(
+        ...     ["1", "2", "3", "4", "5", "6", "A", "B", "C"]
+        ... )
+        >>> original_graph.add_edges_from(
+        ...     [
+        ...         ("1", "C"), ("1", "B"),
+        ...         ("2", "C"), ("2", "B"), ("2", "A"),
+        ...         ("3", "B"), ("3", "A"), ("3", "6"),
+        ...         ("4", "C"), ("4", "B"), ("4", "A"),
+        ...         ("5", "B"), ("5", "A"),
+        ...         ("6", "5"),
+        ...         ("A", "6")
+        ...     ]
+        ... )
+        >>> c_graph, c_nodes = nx.dedensify(original_graph, threshold=2)
+        >>> # re-densifies the compressed graph into the original graph
+        >>> for c_node in c_nodes:
+        ...     all_neighbors = set(nx.all_neighbors(c_graph, c_node))
+        ...     out_neighbors = set(c_graph.neighbors(c_node))
+        ...     for out_neighbor in out_neighbors:
+        ...         c_graph.remove_edge(c_node, out_neighbor)
+        ...     in_neighbors = all_neighbors - out_neighbors
+        ...     for in_neighbor in in_neighbors:
+        ...         c_graph.remove_edge(in_neighbor, c_node)
+        ...         for out_neighbor in out_neighbors:
+        ...             c_graph.add_edge(in_neighbor, out_neighbor)
+        ...     c_graph.remove_node(c_node)
+        ...
+        >>> nx.is_isomorphic(original_graph, c_graph)
+        True
+
+    References
+    ----------
+    .. [1] Maccioni, A., & Abadi, D. J. (2016, August).
+       Scalable pattern matching over compressed graphs via dedensification.
+       In Proceedings of the 22nd ACM SIGKDD International Conference on
+       Knowledge Discovery and Data Mining (pp. 1755-1764).
+       http://www.cs.umd.edu/~abadi/papers/graph-dedense.pdf
+    """
+    if threshold < 2:
+        raise nx.NetworkXError("The degree threshold must be >= 2")
+
+    degrees = G.in_degree if G.is_directed() else G.degree
+    # Group nodes based on degree threshold
+    high_degree_nodes = {n for n, d in degrees if d > threshold}
+    low_degree_nodes = G.nodes() - high_degree_nodes
+
+    # Map each set of high-degree neighbors to the nodes that share it.
+    auxiliary = {}
+    for node in G:
+        high_degree_neighbors = frozenset(high_degree_nodes & set(G[node]))
+        if high_degree_neighbors:
+            if high_degree_neighbors in auxiliary:
+                auxiliary[high_degree_neighbors].add(node)
+            else:
+                auxiliary[high_degree_neighbors] = {node}
+
+    if copy:
+        G = G.copy()
+
+    compressor_nodes = set()
+    for high_degree_nodes, low_degree_nodes in auxiliary.items():
+        low_degree_node_count = len(low_degree_nodes)
+        high_degree_node_count = len(high_degree_nodes)
+        old_edges = high_degree_node_count * low_degree_node_count
+        new_edges = high_degree_node_count + low_degree_node_count
+        # Only add a compressor node when it actually saves edges.
+        if old_edges <= new_edges:
+            continue
+        compression_node = "".join(str(node) for node in high_degree_nodes)
+        if prefix:
+            compression_node = str(prefix) + compression_node
+        for node in low_degree_nodes:
+            for high_node in high_degree_nodes:
+                if G.has_edge(node, high_node):
+                    G.remove_edge(node, high_node)
+
+            G.add_edge(node, compression_node)
+        for node in high_degree_nodes:
+            G.add_edge(compression_node, node)
+        compressor_nodes.add(compression_node)
+    return G, compressor_nodes
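
The `old_edges <= new_edges` guard above is just the inequality |H| · |L| > |H| + |L|, for a set H of shared high-degree neighbors and the set L of nodes that share them. A quick arithmetic sanity check (illustrative values only, not part of the module):

```python
# Two high-degree nodes shared by three low-degree nodes:
# 3 * 2 = 6 direct edges versus 3 + 2 = 5 edges via a compressor node,
# so a compressor node saves an edge and is created.
H, L = 2, 3
assert H * L > H + L

# With a single sharing node, the compressor would cost more edges
# (2 * 1 = 2 direct versus 2 + 1 = 3 via compressor), so none is created.
H, L = 2, 1
assert not (H * L > H + L)
```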
+
+
+def _snap_build_graph(
+    G,
+    groups,
+    node_attributes,
+    edge_attributes,
+    neighbor_info,
+    edge_types,
+    prefix,
+    supernode_attribute,
+    superedge_attribute,
+):
+    """
+    Build the summary graph from the data structures produced by the SNAP
+    aggregation algorithm.
+
+    Used in the SNAP aggregation algorithm to build the output summary graph
+    and supernode lookup dictionary. This process uses the original graph and
+    the data structures to create the supernodes with the correct node
+    attributes, and the superedges with the correct edge attributes.
+
+    Parameters
+    ----------
+    G: networkx.Graph
+        the original graph to be summarized
+    groups: dict
+        A dictionary of unique group IDs and their corresponding node groups
+    node_attributes: iterable
+        An iterable of the node attributes considered in the summarization process
+    edge_attributes: iterable
+        An iterable of the edge attributes considered in the summarization process
+    neighbor_info: dict
+        A data structure indicating the number of edges a node has with the
+        groups in the current summarization of each edge type
+    edge_types: dict
+        dictionary of edges in the graph and their corresponding attributes recognized
+        in the summarization
+    prefix: string
+        The prefix to be added to all supernodes
+    supernode_attribute: str
+        The node attribute for recording the supernode groupings of nodes
+    superedge_attribute: str
+        The edge attribute for recording the edge types represented by superedges
+
+    Returns
+    -------
+    networkx.Graph
+        The summary graph
+    """
+    output = G.__class__()
+    node_label_lookup = {}
+    for index, group_id in enumerate(groups):
+        group_set = groups[group_id]
+        supernode = f"{prefix}{index}"
+        node_label_lookup[group_id] = supernode
+        supernode_attributes = {
+            attr: G.nodes[next(iter(group_set))][attr] for attr in node_attributes
+        }
+        supernode_attributes[supernode_attribute] = group_set
+        output.add_node(supernode, **supernode_attributes)
+
+    for group_id in groups:
+        group_set = groups[group_id]
+        source_supernode = node_label_lookup[group_id]
+        for other_group, group_edge_types in neighbor_info[
+            next(iter(group_set))
+        ].items():
+            if group_edge_types:
+                target_supernode = node_label_lookup[other_group]
+                summary_graph_edge = (source_supernode, target_supernode)
+
+                edge_types = [
+                    dict(zip(edge_attributes, edge_type))
+                    for edge_type in group_edge_types
+                ]
+
+                has_edge = output.has_edge(*summary_graph_edge)
+                if output.is_multigraph():
+                    if not has_edge:
+                        for edge_type in edge_types:
+                            output.add_edge(*summary_graph_edge, **edge_type)
+                    elif not output.is_directed():
+                        existing_edge_data = output.get_edge_data(*summary_graph_edge)
+                        for edge_type in edge_types:
+                            if edge_type not in existing_edge_data.values():
+                                output.add_edge(*summary_graph_edge, **edge_type)
+                else:
+                    superedge_attributes = {superedge_attribute: edge_types}
+                    output.add_edge(*summary_graph_edge, **superedge_attributes)
+
+    return output
+
+
+def _snap_eligible_group(G, groups, group_lookup, edge_types):
+    """
+    Finds a group that is eligible to be split, if one exists.
+
+    A group is eligible to be split when its nodes do not all have the same
+    types of edges to nodes in the same other groups, i.e. when the group is
+    not yet homogeneous.
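+
+    For example, if every node in a group has at least one edge of type
+    ("friend",) to nodes in one other group and no inter-group edges of any
+    other kind, the group is homogeneous and is left alone; if only some of
+    its members also have edges into a second group, the group is eligible
+    and will be split.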
+
+    Parameters
+    ----------
+    G: graph
+        graph to be summarized
+    groups: dict
+        A dictionary of unique group IDs and their corresponding node groups
+    group_lookup: dict
+        dictionary of nodes and their current corresponding group ID
+    edge_types: dict
+        dictionary of edges in the graph and their corresponding attributes recognized
+        in the summarization
+
+    Returns
+    -------
+    tuple
+        The ID of a group eligible to be split (``None`` if no group needs
+        splitting), and the ``neighbor_info`` data structure recording, for
+        each processed node, the number of edges of each type it has to each
+        group.
+    """
+    neighbor_info = {node: {gid: Counter() for gid in groups} for node in group_lookup}
+    for group_id in groups:
+        current_group = groups[group_id]
+
+        # build neighbor_info for nodes in group
+        for node in current_group:
+            neighbor_info[node] = {gid: Counter() for gid in groups}
+            edges = G.edges(node, keys=True) if G.is_multigraph() else G.edges(node)
+            for edge in edges:
+                neighbor = edge[1]
+                edge_type = edge_types[edge]
+                neighbor_group_id = group_lookup[neighbor]
+                neighbor_info[node][neighbor_group_id][edge_type] += 1
+
+        # check if group_id is eligible to be split
+        group_size = len(current_group)
+        for other_group_id in groups:
+            edge_counts = Counter()
+            for node in current_group:
+                edge_counts.update(neighbor_info[node][other_group_id].keys())
+
+            if not all(count == group_size for count in edge_counts.values()):
+                # only the neighbor_info of the returned group_id is required
+                # for handling group splits
+                return group_id, neighbor_info
+
+    # if no eligible groups, complete neighbor_info is calculated
+    return None, neighbor_info
+
+
+def _snap_split(groups, neighbor_info, group_lookup, group_id):
+    """
+    Splits a group based on edge types and updates the groups accordingly.
+
+    Splits the group with the given group_id based on the edge types of its
+    nodes, so that all nodes in each new group have the same types of edges
+    to nodes in the same other groups.
+
+    Parameters
+    ----------
+    groups: dict
+        A dictionary of unique group IDs and their corresponding node groups
+    neighbor_info: dict
+        A data structure indicating the number of edges a node has with the
+        groups in the current summarization of each edge type
+    group_lookup: dict
+        dictionary of nodes and their current corresponding group ID
+    group_id: object
+        ID of group to be split
+
+    Returns
+    -------
+    dict
+        The updated groups based on the split
+    """
+    new_group_mappings = defaultdict(set)
+    for node in groups[group_id]:
+        signature = tuple(
+            frozenset(edge_types) for edge_types in neighbor_info[node].values()
+        )
+        new_group_mappings[signature].add(node)
+
+    # leave the biggest new_group as the original group
+    new_groups = sorted(new_group_mappings.values(), key=len)
+    for new_group in new_groups[:-1]:
+        # Assign an unused integer as the new_group_id; original group IDs
+        # are attribute tuples, so the integer IDs cannot collide with them.
+        new_group_id = len(groups)
+        groups[new_group_id] = new_group
+        groups[group_id] -= new_group
+        for node in new_group:
+            group_lookup[node] = new_group_id
+
+    return groups
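
The split key in `_snap_split` is a per-node tuple of frozensets of edge types, one entry per current group; nodes with identical tuples stay together. A minimal, self-contained illustration of that grouping idea, with hypothetical `neighbor_info` data simplified to plain sets rather than the `Counter` objects used above:

```python
from collections import defaultdict

# Hypothetical data: node -> {group ID -> set of edge types toward that group}.
neighbor_info = {
    "a": {0: {("friend",)}, 1: set()},
    "b": {0: {("friend",)}, 1: set()},
    "c": {0: {("friend",)}, 1: {("coworker",)}},
}

buckets = defaultdict(set)
for node, per_group in neighbor_info.items():
    signature = tuple(frozenset(types) for types in per_group.values())
    buckets[signature].add(node)

# "a" and "b" share a signature and stay together; "c" splits off.
print(sorted(map(sorted, buckets.values())))  # [['a', 'b'], ['c']]
```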
+
+
+@nx._dispatch(node_attrs="[node_attributes]", edge_attrs="[edge_attributes]")
+def snap_aggregation(
+    G,
+    node_attributes,
+    edge_attributes=(),
+    prefix="Supernode-",
+    supernode_attribute="group",
+    superedge_attribute="types",
+):
+    """Creates a summary graph based on attributes and connectivity.
+
+    This function uses the Summarization by Grouping Nodes on Attributes
+    and Pairwise edges (SNAP) algorithm for summarizing a given
+    graph by grouping nodes by node attributes and their edge attributes
+    into supernodes in a summary graph. The name SNAP should not be
+    confused with the Stanford Network Analysis Project (SNAP).
+
+    Here is a high-level view of how this algorithm works:
+
+    1) Group nodes by node attribute values.
+
+    2) Iteratively split groups until all nodes in each group have edges
+    to nodes in the same groups. That is, until all the groups are homogeneous
+    in their member nodes' edges to other groups. For example,
+    if all the nodes in group A only have edges to nodes in group B, then the
+    group is homogeneous and does not need to be split. If all nodes in group B
+    have edges with nodes in groups {A, C}, but some also have edges with other
+    nodes in B, then group B is not homogeneous and needs to be split into a
+    group of nodes that have edges with {A, C} and a group of nodes that have
+    edges with {A, B, C}. This way, viewers of the summary graph can
+    assume that all nodes in the group have the exact same node attributes and
+    the exact same edges.
+
+    3) Build the output summary graph, where the groups are represented by
+    super-nodes. Edges represent the edges shared between all the nodes in each
+    respective group.
+
+    A SNAP summary graph can be used to visualize graphs that are too large to
+    display or visually analyze, or to efficiently identify sets of similar
+    nodes with similar connectivity patterns to other sets of similar nodes,
+    based on specified node and/or edge attributes in a graph.
+
+    Parameters
+    ----------
+    G: graph
+        Networkx Graph to be summarized
+    node_attributes: iterable, required
+        An iterable of the node attributes used to group nodes in the
+        summarization process. Nodes with the same values for these attributes
+        will be grouped together in the summary graph.
+    edge_attributes: iterable, optional
+        An iterable of the edge attributes considered in the summarization
+        process. If provided, unique combinations of the attribute values
+        found in the graph are used to determine the edge types in the graph.
+        If not provided, all edges are considered to be of the same type.
+    prefix: str
+        The prefix used to denote supernodes in the summary graph. Defaults to
+        'Supernode-'.
+    supernode_attribute: str
+        The node attribute for recording the supernode groupings of nodes.
+        Defaults to 'group'.
+    superedge_attribute: str
+        The edge attribute for recording the edge types of multiple edges.
+        Defaults to 'types'.
+
+    Returns
+    -------
+    networkx.Graph: summary graph
+
+    Examples
+    --------
+    SNAP aggregation takes a graph and summarizes it in the context of
+    user-provided node and edge attributes such that a viewer can more easily
+    extract and analyze the information represented by the graph.
+
+    >>> nodes = {
+    ...     "A": dict(color="Red"),
+    ...     "B": dict(color="Red"),
+    ...     "C": dict(color="Red"),
+    ...     "D": dict(color="Red"),
+    ...     "E": dict(color="Blue"),
+    ...     "F": dict(color="Blue"),
+    ... }
+    >>> edges = [
+    ...     ("A", "E", "Strong"),
+    ...     ("B", "F", "Strong"),
+    ...     ("C", "E", "Weak"),
+    ...     ("D", "F", "Weak"),
+    ... ]
+    >>> G = nx.Graph()
+    >>> for node in nodes:
+    ...     attributes = nodes[node]
+    ...     G.add_node(node, **attributes)
+    ...
+    >>> for source, target, type in edges:
+    ...     G.add_edge(source, target, type=type)
+    ...
+ >>> node_attributes = ('color', ) + >>> edge_attributes = ('type', ) + >>> summary_graph = nx.snap_aggregation(G, node_attributes=node_attributes, edge_attributes=edge_attributes) + + Notes + ----- + The summary graph produced is called a maximum Attribute-edge + compatible (AR-compatible) grouping. According to [1]_, an + AR-compatible grouping means that all nodes in each group have the same + exact node attribute values and the same exact edges and + edge types to one or more nodes in the same groups. The maximal + AR-compatible grouping is the grouping with the minimal cardinality. + + The AR-compatible grouping is the most detailed grouping provided by + any of the SNAP algorithms. + + References + ---------- + .. [1] Y. Tian, R. A. Hankins, and J. M. Patel. Efficient aggregation + for graph summarization. In Proc. 2008 ACM-SIGMOD Int. Conf. + Management of Data (SIGMOD’08), pages 567–580, Vancouver, Canada, + June 2008. + """ + edge_types = { + edge: tuple(attrs.get(attr) for attr in edge_attributes) + for edge, attrs in G.edges.items() + } + if not G.is_directed(): + if G.is_multigraph(): + # list is needed to avoid mutating while iterating + edges = [((v, u, k), etype) for (u, v, k), etype in edge_types.items()] + else: + # list is needed to avoid mutating while iterating + edges = [((v, u), etype) for (u, v), etype in edge_types.items()] + edge_types.update(edges) + + group_lookup = { + node: tuple(attrs[attr] for attr in node_attributes) + for node, attrs in G.nodes.items() + } + groups = defaultdict(set) + for node, node_type in group_lookup.items(): + groups[node_type].add(node) + + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + while eligible_group_id: + groups = _snap_split(groups, neighbor_info, group_lookup, eligible_group_id) + eligible_group_id, neighbor_info = _snap_eligible_group( + G, groups, group_lookup, edge_types + ) + return _snap_build_graph( + G, + groups, + node_attributes, + edge_attributes, + neighbor_info, + edge_types, + prefix, + supernode_attribute, + superedge_attribute, + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/swap.py b/MLPY/Lib/site-packages/networkx/algorithms/swap.py new file mode 100644 index 0000000000000000000000000000000000000000..926be49831ef34129674e39b4d98f89525011b29 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/swap.py @@ -0,0 +1,405 @@ +"""Swap edges in a graph. +""" + +import math + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["double_edge_swap", "connected_double_edge_swap", "directed_edge_swap"] + + +@nx.utils.not_implemented_for("undirected") +@py_random_state(3) +@nx._dispatch +def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None): + """Swap three edges in a directed graph while keeping the node degrees fixed. + + A directed edge swap swaps three edges such that a -> b -> c -> d becomes + a -> c -> b -> d. This pattern of swapping allows all possible states with the + same in- and out-degree distribution in a directed graph to be reached. + + If the swap would create parallel edges (e.g. if a -> c already existed in the + previous example), another attempt is made to find a suitable trio of edges. 
+ + Parameters + ---------- + G : DiGraph + A directed graph + + nswap : integer (optional, default=1) + Number of three-edge (directed) swaps to perform + + max_tries : integer (optional, default=100) + Maximum number of attempts to swap edges + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : DiGraph + The graph after the edges are swapped. + + Raises + ------ + NetworkXError + If `G` is not directed, or + If nswap > max_tries, or + If there are fewer than 4 nodes or 3 edges in `G`. + NetworkXAlgorithmError + If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made + + Notes + ----- + Does not enforce any connectivity constraints. + + The graph G is modified in place. + + References + ---------- + .. [1] Erdős, Péter L., et al. “A Simple Havel-Hakimi Type Algorithm to Realize + Graphical Degree Sequences of Directed Graphs.” ArXiv:0905.4913 [Math], + Jan. 2010. https://doi.org/10.48550/arXiv.0905.4913. + Published 2010 in Elec. J. Combinatorics (17(1)). R66. + http://www.combinatorics.org/Volume_17/PDF/v17i1r66.pdf + .. [2] “Combinatorics - Reaching All Possible Simple Directed Graphs with a given + Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange, + https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022. + """ + if nswap > max_tries: + raise nx.NetworkXError("Number of swaps > number of tries allowed.") + if len(G) < 4: + raise nx.NetworkXError("DiGraph has fewer than four nodes.") + if len(G.edges) < 3: + raise nx.NetworkXError("DiGraph has fewer than 3 edges") + + # Instead of choosing uniformly at random from a generated edge list, + # this algorithm chooses nonuniformly from the set of nodes with + # probability weighted by degree. + tries = 0 + swapcount = 0 + keys, degrees = zip(*G.degree()) # keys, degree + cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree + discrete_sequence = nx.utils.discrete_sequence + + while swapcount < nswap: + # choose source node index from discrete distribution + start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0] + start = keys[start_index] + tries += 1 + + if tries > max_tries: + msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})." + raise nx.NetworkXAlgorithmError(msg) + + # If the given node doesn't have any out edges, then there isn't anything to swap + if G.out_degree(start) == 0: + continue + second = seed.choice(list(G.succ[start])) + if start == second: + continue + + if G.out_degree(second) == 0: + continue + third = seed.choice(list(G.succ[second])) + if second == third: + continue + + if G.out_degree(third) == 0: + continue + fourth = seed.choice(list(G.succ[third])) + if third == fourth: + continue + + if ( + third not in G.succ[start] + and fourth not in G.succ[second] + and second not in G.succ[third] + ): + # Swap nodes + G.add_edge(start, third) + G.add_edge(third, second) + G.add_edge(second, fourth) + G.remove_edge(start, second) + G.remove_edge(second, third) + G.remove_edge(third, fourth) + swapcount += 1 + + return G + + +@py_random_state(3) +@nx._dispatch +def double_edge_swap(G, nswap=1, max_tries=100, seed=None): + """Swap two edges in the graph while keeping the node degrees fixed. 
+
+    A double-edge swap removes two randomly chosen edges u-v and x-y
+    and creates the new edges u-x and v-y::
+
+        u--v            u  v
+               becomes  |  |
+        x--y            x  y
+
+    If either the edge u-x or the edge v-y already exists, no swap is
+    performed and another attempt is made to find a suitable edge pair.
+
+    Parameters
+    ----------
+    G : graph
+       An undirected graph
+
+    nswap : integer (optional, default=1)
+       Number of double-edge swaps to perform
+
+    max_tries : integer (optional, default=100)
+       Maximum number of attempts to swap edges
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G : graph
+       The graph after double edge swaps.
+
+    Raises
+    ------
+    NetworkXError
+        If `G` is directed, or
+        If `nswap` > `max_tries`, or
+        If there are fewer than 4 nodes or 2 edges in `G`.
+    NetworkXAlgorithmError
+        If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made
+
+    Notes
+    -----
+    Does not enforce any connectivity constraints.
+
+    The graph G is modified in place.
+    """
+    if G.is_directed():
+        raise nx.NetworkXError(
+            "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead."
+        )
+    if nswap > max_tries:
+        raise nx.NetworkXError("Number of swaps > number of tries allowed.")
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    if len(G.edges) < 2:
+        raise nx.NetworkXError("Graph has fewer than 2 edges")
+    # Instead of choosing uniformly at random from a generated edge list,
+    # this algorithm chooses nonuniformly from the set of nodes with
+    # probability weighted by degree.
+    n = 0
+    swapcount = 0
+    keys, degrees = zip(*G.degree())  # keys, degree
+    cdf = nx.utils.cumulative_distribution(degrees)  # cdf of degree
+    discrete_sequence = nx.utils.discrete_sequence
+    while swapcount < nswap:
+        #        if random.random() < 0.5: continue # trick to avoid periodicities?
+        # pick two random edges without creating edge list
+        # choose source node indices from discrete distribution
+        (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+        if ui == xi:
+            continue  # same source, skip
+        u = keys[ui]  # convert index to label
+        x = keys[xi]
+        # choose target uniformly from neighbors
+        v = seed.choice(list(G[u]))
+        y = seed.choice(list(G[x]))
+        if v == y:
+            continue  # same target, skip
+        if (x not in G[u]) and (y not in G[v]):  # don't create parallel edges
+            G.add_edge(u, x)
+            G.add_edge(v, y)
+            G.remove_edge(u, v)
+            G.remove_edge(x, y)
+            swapcount += 1
+        if n >= max_tries:
+            e = (
+                f"Maximum number of swap attempts ({n}) exceeded "
+                f"before desired swaps achieved ({nswap})."
+            )
+            raise nx.NetworkXAlgorithmError(e)
+        n += 1
+    return G
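
A short usage sketch of this public entry point (graph choice and seed are arbitrary); the degree sequence is invariant under double-edge swaps, since each swap only rewires which endpoints pair up:

```python
import networkx as nx

G = nx.barbell_graph(5, 2)
before = sorted(d for _, d in G.degree())

# Attempt 10 swaps; may raise NetworkXAlgorithmError only if
# max_tries is exhausted first, which is unlikely at this budget.
nx.double_edge_swap(G, nswap=10, max_tries=1000, seed=42)

after = sorted(d for _, d in G.degree())
assert before == after
```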
+
+
+@py_random_state(3)
+@nx._dispatch
+def connected_double_edge_swap(G, nswap=1, _window_threshold=3, seed=None):
+    """Attempts the specified number of double-edge swaps in the graph `G`.
+
+    A double-edge swap removes two randomly chosen edges `(u, v)` and `(x,
+    y)` and creates the new edges `(u, x)` and `(v, y)`::
+
+        u--v            u  v
+               becomes  |  |
+        x--y            x  y
+
+    If either `(u, x)` or `(v, y)` already exists, then no swap is performed,
+    so the actual number of swapped edges is always *at most* `nswap`.
+
+    Parameters
+    ----------
+    G : graph
+       An undirected graph
+
+    nswap : integer (optional, default=1)
+       Number of double-edge swaps to perform
+
+    _window_threshold : integer
+
+       The window size below which connectedness of the graph will be checked
+       after each swap.
+
+       The "window" in this function is a dynamically updated integer that
+       represents the number of swap attempts to make before checking if the
+       graph remains connected. It is an optimization used to decrease the
+       running time of the algorithm in exchange for increased complexity of
+       implementation.
+
+       If the window size is below this threshold, then the algorithm checks
+       after each swap if the graph remains connected by checking if there is a
+       path joining the two nodes whose edge was just removed. If the window
+       size is above this threshold, then the algorithm performs all the swaps
+       in the window and only then checks if the graph is still connected.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    int
+       The number of successful swaps
+
+    Raises
+    ------
+    NetworkXError
+       If the input graph is not connected, or if the graph has fewer than
+       four nodes.
+
+    Notes
+    -----
+    The initial graph `G` must be connected, and the resulting graph is
+    connected. The graph `G` is modified in place.
+
+    References
+    ----------
+    .. [1] C. Gkantsidis and M. Mihail and E. Zegura,
+       The Markov chain simulation method for generating connected
+       power law random graphs, 2003.
+       http://citeseer.ist.psu.edu/gkantsidis03markov.html
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected")
+    if len(G) < 4:
+        raise nx.NetworkXError("Graph has fewer than four nodes.")
+    n = 0
+    swapcount = 0
+    # Label key for nodes
+    dk = [n for n, d in G.degree()]
+    cdf = nx.utils.cumulative_distribution([d for n, d in G.degree()])
+    discrete_sequence = nx.utils.discrete_sequence
+    window = 1
+    while n < nswap:
+        wcount = 0
+        swapped = []
+        # If the window is small, we just check each time whether the graph is
+        # connected by checking if the nodes that were just separated are still
+        # connected.
+        if window < _window_threshold:
+            # This Boolean keeps track of whether there was a failure or not.
+            fail = False
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
+                (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed)
+                # If the source nodes are the same, skip this pair.
+                if ui == xi:
+                    continue
+                # Convert an index to a node label.
+                u = dk[ui]
+                x = dk[xi]
+                # Choose targets uniformly from neighbors.
+                v = seed.choice(list(G.neighbors(u)))
+                y = seed.choice(list(G.neighbors(x)))
+                # If the target nodes are the same, skip this pair.
+                if v == y:
+                    continue
+                if x not in G[u] and y not in G[v]:
+                    G.remove_edge(u, v)
+                    G.remove_edge(x, y)
+                    G.add_edge(u, x)
+                    G.add_edge(v, y)
+                    swapped.append((u, v, x, y))
+                    swapcount += 1
+                n += 1
+                # If G remains connected...
+                if nx.has_path(G, u, v):
+                    wcount += 1
+                # Otherwise, undo the changes.
+                else:
+                    G.add_edge(u, v)
+                    G.add_edge(x, y)
+                    G.remove_edge(u, x)
+                    G.remove_edge(v, y)
+                    swapcount -= 1
+                    fail = True
+            # If one of the swaps failed, reduce the window size.
+            if fail:
+                window = math.ceil(window / 2)
+            else:
+                window += 1
+        # If the window is large, then there is a good chance that a bunch of
+        # swaps will work. It's quicker to do all those swaps first and then
+        # check if the graph remains connected.
+        else:
+            while wcount < window and n < nswap:
+                # Pick two random edges without creating the edge list. Choose
+                # source nodes from the discrete degree distribution.
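+                # Sampling a node with probability proportional to its degree
+                # and then a uniform neighbor is equivalent to picking a
+                # uniformly random edge, without materializing the edge list.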
+ (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) + # If the source nodes are the same, skip this pair. + if ui == xi: + continue + # Convert an index to a node label. + u = dk[ui] + x = dk[xi] + # Choose targets uniformly from neighbors. + v = seed.choice(list(G.neighbors(u))) + y = seed.choice(list(G.neighbors(x))) + # If the target nodes are the same, skip this pair. + if v == y: + continue + if x not in G[u] and y not in G[v]: + G.remove_edge(u, v) + G.remove_edge(x, y) + G.add_edge(u, x) + G.add_edge(v, y) + swapped.append((u, v, x, y)) + swapcount += 1 + n += 1 + wcount += 1 + # If the graph remains connected, increase the window size. + if nx.is_connected(G): + window += 1 + # Otherwise, undo the changes from the previous window and decrease + # the window size. + else: + while swapped: + (u, v, x, y) = swapped.pop() + G.add_edge(u, v) + G.add_edge(x, y) + G.remove_edge(u, x) + G.remove_edge(v, y) + swapcount -= 1 + window = math.ceil(window / 2) + return swapcount diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e787bdb8595164ca65f3705fd1cd3d65a11db44 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..460adeeac47c946b3c46e603f2e60cb89d047af2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a0819f6da21084f9a392b8b200fe685ad7f8dbe Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_boundary.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34f0b1c079a2fbb36190696c5b1aef63ffb6afb6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_bridges.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c28da2752df374bb05151f28f3613028868e6413 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chains.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a76148e1d9db1165d7fb064c0a1a51f0e295a6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_chordal.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84127b370f93d43a00ddf7bd9888496d71fbbd60 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_clique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2afb197ae075bc1d904d0d0b8ddc52f492543bcc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cluster.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce13aedbd5d84d1ec131c63e05da84da6e3f25df Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_communicability.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a6713ec951b6b57e28262421458213e19545160 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_core.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68bc521db2068140049f401b9f1114948cc5f59f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_covering.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ec95fe6071f9d2516e236caab450da5d35addd8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cuts.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..544a587f1e4b77780b2be980458860a04818c5d0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_cycles.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0d9df981ff4c3da8dd8ba181c467c85842106f44 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_d_separation.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80e333d452f6c7daf2c8f3560b31a746ceadad39 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dag.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f85d246ff9cec6b76c8d7f7cf7d81f8dd64815dd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..570274432bf0259c3b24cb22f7b5afffaaeb2bd3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26c4547e203a5305ea614e21384a7c4706cbe702 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominance.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d2d4162707043ea68f9d37147e7726c5db0e718 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_dominating.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33f8fafa5130e893f220d7644bb0dab04ff21435 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_efficiency.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c77380ad37e7dac049c10559f75c26770aacce56 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_euler.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2062df4dde00a1865789a163aa7cbb07c3527c5b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ccf42bfa2cb212ac2ce8c100f5154c1a8ca3f22 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_graphical.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcd02466f29f17c69d5bdddf1ab026d0430313a6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29273f562d381f886872f35adc4d085f93986657 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_hybrid.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e90a7724366daa79ed9cca91881e98fe8bedd520 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_isolate.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a80dded65284ba0d41382d36e0404030a1fc6c8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1666869543f271b69115b1bfa6c2a95ae1855dc8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f70dad5b50e25c03d55ac8f96cad8e28339fc95f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_matching.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..511aa515ba17656c26f19730311e7e9bb97f50f9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..091e5172c718b879d2d7fdb61162f9921f4cdc24 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_mis.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf9a0600f369a40466e00fe0df69d2771c1a19d8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_moral.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03290a75eed7189c5c7c1d85efa36197d4c8185c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_node_classification.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6255bc5cceb242d297c4d6c00179ba0b28684b7a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af098680ead543d2de4aedf214890570218792ae Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0adc76148639d12f39fdff112a0e717bb90578c3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_planarity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f95b6bcff209d55a57efacf9a9f0b09bf4391e77 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_polynomials.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..accf1df5a74c8c54f5dcc12b467c2816f3710c7a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bb4e26c4984998af7b3b25409cf1f224132b6bf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_regular.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..702bdb459e1ee957b313b3b9b5df0dac2614eaf9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_richclub.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4606ced69b32ce8a2e4f37e9bea85836eb6bc9dd Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_similarity.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b56482196002c6d36c4638150884f2de0e03047 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26cc197148a3e9eae7a3de0931a36116eaf1bbf5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smallworld.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff13e4aad950a80277954a370670b6b61ac4b5e4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_smetric.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31167603d3baf8ddebf2215545cf7c63dcbb3c45 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3d4540be775b9b014dd271325687bb0b3455c0c4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f86888b8b987aad7a9da5140b2f1cf41104e24b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_summarization.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e85242fc5bf176aabd97fbe5d5902b3edbd916d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_swap.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b8ce74fc7e8e4dcdcad171bd4b1ad49d88d1c60 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_threshold.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..016247ac6124c0ab9c7de6419c6f40a827bf02e2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c1d0b8bbcb44117cf10c583145b54eb3e847b43 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_tournament.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bc454a5ca125d955f48784938e4c4900998aea5 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_triads.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e737e02b6a86bb80c21dcc6241cda289898cd09 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_vitality.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4d1deb7c953fe236779308cc99fecd5d11507a7d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_voronoi.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb95f78c2603514a4dea294c2177f305d1fd6d76 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_walks.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec3e70630d869b4f69a7ff3e1d21d2838ae40bf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tests/__pycache__/test_wiener.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_asteroidal.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_asteroidal.py new file mode 100644 index 0000000000000000000000000000000000000000..67131b2d05026317b496d06e6b382836c8c26367 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_asteroidal.py @@ -0,0 +1,23 @@ +import networkx as nx + + +def test_is_at_free(): + is_at_free = nx.asteroidal.is_at_free + + cycle = nx.cycle_graph(6) + assert not is_at_free(cycle) + + path = nx.path_graph(6) + assert is_at_free(path) + + small_graph = nx.complete_graph(2) + assert is_at_free(small_graph) + + petersen = nx.petersen_graph() + assert not is_at_free(petersen) + + clique = nx.complete_graph(6) + assert is_at_free(clique) + + line_clique = nx.line_graph(clique) + assert not is_at_free(line_clique) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_boundary.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_boundary.py new file mode 100644 index 0000000000000000000000000000000000000000..856be465556941fe6f2bfc2c8bab6d4b508cf999 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_boundary.py @@ -0,0 +1,154 @@ +"""Unit tests for the :mod:`networkx.algorithms.boundary` module.""" + +from itertools import combinations + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.utils import edges_equal + + +class TestNodeBoundary: + """Unit tests for the :func:`~networkx.node_boundary` function.""" + + def test_null_graph(self): + """Tests that the null graph has empty node boundaries.""" + null = nx.null_graph() + assert nx.node_boundary(null, []) == set() + assert nx.node_boundary(null, [], []) == set() + assert nx.node_boundary(null, [1, 2, 3]) == set() + assert nx.node_boundary(null, [1, 2, 3], [4, 5, 6]) == set() + assert nx.node_boundary(null, [1, 2, 3], [3, 4, 5]) == set() + + def test_path_graph(self): + P10 = cnlti(nx.path_graph(10), first_label=1) + assert nx.node_boundary(P10, []) == set() + assert nx.node_boundary(P10, [], []) == set() + assert nx.node_boundary(P10, [1, 2, 3]) == {4} + assert nx.node_boundary(P10, [4, 5, 6]) == {3, 7} + assert nx.node_boundary(P10, [3, 4, 5, 6, 7]) == {2, 8} + assert nx.node_boundary(P10, [8, 9, 10]) == {7} + assert nx.node_boundary(P10, [4, 5, 6], [9, 10]) == set() + + def test_complete_graph(self): + K10 = cnlti(nx.complete_graph(10), first_label=1) + assert 
nx.node_boundary(K10, []) == set() + assert nx.node_boundary(K10, [], []) == set() + assert nx.node_boundary(K10, [1, 2, 3]) == {4, 5, 6, 7, 8, 9, 10} + assert nx.node_boundary(K10, [4, 5, 6]) == {1, 2, 3, 7, 8, 9, 10} + assert nx.node_boundary(K10, [3, 4, 5, 6, 7]) == {1, 2, 8, 9, 10} + assert nx.node_boundary(K10, [4, 5, 6], []) == set() + assert nx.node_boundary(K10, K10) == set() + assert nx.node_boundary(K10, [1, 2, 3], [3, 4, 5]) == {4, 5} + + def test_petersen(self): + """Check boundaries in the petersen graph + + cheeger(G,k)=min(|bdy(S)|/|S| for |S|=k, 0<k<=|V(G)|/2) + + """ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chains.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chains.py new file mode 100644 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chains.py +"""Unit tests for the chain decomposition functions.""" +from itertools import cycle, islice + +import pytest + +import networkx as nx + + +def cycles(seq): + """Yields cyclic permutations of the given sequence. + + For example:: + + >>> list(cycles("abc")) + [('a', 'b', 'c'), ('b', 'c', 'a'), ('c', 'a', 'b')] + + """ + n = len(seq) + cycled_seq = cycle(seq) + for x in seq: + yield tuple(islice(cycled_seq, n)) + next(cycled_seq) + + +def cyclic_equals(seq1, seq2): + """Decide whether two sequences are equal up to cyclic permutations. + + For example:: + + >>> cyclic_equals("xyz", "zxy") + True + >>> cyclic_equals("xyz", "zyx") + False + + """ + # Cast seq2 to a tuple since `cycles()` yields tuples. + seq2 = tuple(seq2) + return any(x == tuple(seq2) for x in cycles(seq1)) + + +class TestChainDecomposition: + """Unit tests for the chain decomposition function.""" + + def assertContainsChain(self, chain, expected): + # A cycle could be expressed in two different orientations, one + # forward and one backward, so we need to check for cyclic + # equality in both orientations. + reversed_chain = list(reversed([tuple(reversed(e)) for e in chain])) + for candidate in expected: + if cyclic_equals(chain, candidate): + break + if cyclic_equals(reversed_chain, candidate): + break + else: + self.fail("chain not found") + + def test_decomposition(self): + edges = [ + # DFS tree edges. + (1, 2), + (2, 3), + (3, 4), + (3, 5), + (5, 6), + (6, 7), + (7, 8), + (5, 9), + (9, 10), + # Nontree edges. + (1, 3), + (1, 4), + (2, 5), + (5, 10), + (6, 8), + ] + G = nx.Graph(edges) + expected = [ + [(1, 3), (3, 2), (2, 1)], + [(1, 4), (4, 3)], + [(2, 5), (5, 3)], + [(5, 10), (10, 9), (9, 5)], + [(6, 8), (8, 7), (7, 6)], + ] + chains = list(nx.chain_decomposition(G, root=1)) + assert len(chains) == len(expected) + + # This chain decomposition isn't unique + # for chain in chains: + # print(chain) + # self.assertContainsChain(chain, expected) + + def test_barbell_graph(self): + # The (3, 0) barbell graph has two triangles joined by a single edge.
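The orientation check in assertContainsChain above can be seen in isolation: walking a chain backwards reverses every edge, so no cyclic rotation of the forward edge list matches the reversed traversal. A minimal standalone sketch (the example chain is illustrative, not taken from the tests):

chain = [(1, 3), (3, 2), (2, 1)]
# Reverse the traversal: flip each edge, then flip the order.
reversed_chain = list(reversed([tuple(reversed(e)) for e in chain]))
assert reversed_chain == [(1, 2), (2, 3), (3, 1)]
# No rotation of the forward chain equals the reversed one, even though both
# describe the same undirected cycle 1-3-2-1, so both orientations are tested.
rotations = [chain[i:] + chain[:i] for i in range(len(chain))]
assert reversed_chain not in rotations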
+ G = nx.barbell_graph(3, 0) + chains = list(nx.chain_decomposition(G, root=0)) + expected = [[(0, 1), (1, 2), (2, 0)], [(3, 4), (4, 5), (5, 3)]] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph(self): + """Test for a graph with multiple connected components.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G)) + expected = [ + [(0, 1), (1, 2), (2, 0)], + [(3, 4), (4, 5), (5, 3)], + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_disconnected_graph_root_node(self): + """Test for a single component of a disconnected graph.""" + G = nx.barbell_graph(3, 0) + H = nx.barbell_graph(3, 0) + mapping = dict(zip(range(6), "abcdef")) + nx.relabel_nodes(H, mapping, copy=False) + G = nx.union(G, H) + chains = list(nx.chain_decomposition(G, root="a")) + expected = [ + [("a", "b"), ("b", "c"), ("c", "a")], + [("d", "e"), ("e", "f"), ("f", "d")], + ] + assert len(chains) == len(expected) + for chain in chains: + self.assertContainsChain(chain, expected) + + def test_chain_decomposition_root_not_in_G(self): + """Test chain decomposition when root is not in graph""" + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + with pytest.raises(nx.NodeNotFound): + nx.has_bridges(G, root=6) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chordal.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chordal.py new file mode 100644 index 0000000000000000000000000000000000000000..148b22f2632d722522483b556f11285a8e823126 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_chordal.py @@ -0,0 +1,129 @@ +import pytest + +import networkx as nx + + +class TestMCS: + @classmethod + def setup_class(cls): + # simple graph + connected_chordal_G = nx.Graph() + connected_chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + ] + ) + cls.connected_chordal_G = connected_chordal_G + + chordal_G = nx.Graph() + chordal_G.add_edges_from( + [ + (1, 2), + (1, 3), + (2, 3), + (2, 4), + (3, 4), + (3, 5), + (3, 6), + (4, 5), + (4, 6), + (5, 6), + (7, 8), + ] + ) + chordal_G.add_node(9) + cls.chordal_G = chordal_G + + non_chordal_G = nx.Graph() + non_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5), (3, 4), (3, 5)]) + cls.non_chordal_G = non_chordal_G + + self_loop_G = nx.Graph() + self_loop_G.add_edges_from([(1, 1)]) + cls.self_loop_G = self_loop_G + + @pytest.mark.parametrize("G", (nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph())) + def test_is_chordal_not_implemented(self, G): + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_chordal(G) + + def test_is_chordal(self): + assert not nx.is_chordal(self.non_chordal_G) + assert nx.is_chordal(self.chordal_G) + assert nx.is_chordal(self.connected_chordal_G) + assert nx.is_chordal(nx.Graph()) + assert nx.is_chordal(nx.complete_graph(3)) + assert nx.is_chordal(nx.cycle_graph(3)) + assert not nx.is_chordal(nx.cycle_graph(5)) + assert nx.is_chordal(self.self_loop_G) + + def test_induced_nodes(self): + G = nx.generators.classic.path_graph(10) + Induced_nodes = nx.find_induced_nodes(G, 1, 9, 2) + assert Induced_nodes == {1, 2, 3, 4, 5, 6, 7, 8, 9} + pytest.raises( + 
nx.NetworkXTreewidthBoundExceeded, nx.find_induced_nodes, G, 1, 9, 1 + ) + Induced_nodes = nx.find_induced_nodes(self.chordal_G, 1, 6) + assert Induced_nodes == {1, 2, 4, 6} + pytest.raises(nx.NetworkXError, nx.find_induced_nodes, self.non_chordal_G, 1, 5) + + def test_graph_treewidth(self): + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + nx.chordal_graph_treewidth(self.non_chordal_G) + + def test_chordal_find_cliques(self): + cliques = { + frozenset([9]), + frozenset([7, 8]), + frozenset([1, 2, 3]), + frozenset([2, 3, 4]), + frozenset([3, 4, 5, 6]), + } + assert set(nx.chordal_graph_cliques(self.chordal_G)) == cliques + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + set(nx.chordal_graph_cliques(self.non_chordal_G)) + with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"): + set(nx.chordal_graph_cliques(self.self_loop_G)) + + def test_chordal_find_cliques_path(self): + G = nx.path_graph(10) + cliqueset = nx.chordal_graph_cliques(G) + for u, v in G.edges(): + assert frozenset([u, v]) in cliqueset or frozenset([v, u]) in cliqueset + + def test_chordal_find_cliquesCC(self): + cliques = {frozenset([1, 2, 3]), frozenset([2, 3, 4]), frozenset([3, 4, 5, 6])} + cgc = nx.chordal_graph_cliques + assert set(cgc(self.connected_chordal_G)) == cliques + + def test_complete_to_chordal_graph(self): + fgrg = nx.fast_gnp_random_graph + test_graphs = [ + nx.barbell_graph(6, 2), + nx.cycle_graph(15), + nx.wheel_graph(20), + nx.grid_graph([10, 4]), + nx.ladder_graph(15), + nx.star_graph(5), + nx.bull_graph(), + fgrg(20, 0.3, seed=1), + ] + for G in test_graphs: + H, a = nx.complete_to_chordal_graph(G) + assert nx.is_chordal(H) + assert len(a) == H.number_of_nodes() + if nx.is_chordal(G): + assert G.number_of_edges() == H.number_of_edges() + assert set(a.values()) == {0} + else: + assert len(set(a.values())) == H.number_of_nodes() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_clique.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_clique.py new file mode 100644 index 0000000000000000000000000000000000000000..3bee210982888a142f07a043bbde24bdad80fae9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_clique.py @@ -0,0 +1,291 @@ +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti + + +class TestCliques: + def setup_method(self): + z = [3, 4, 3, 4, 2, 4, 2, 1, 1, 1, 1] + self.G = cnlti(nx.generators.havel_hakimi_graph(z), first_label=1) + self.cl = list(nx.find_cliques(self.G)) + H = nx.complete_graph(6) + H = nx.relabel_nodes(H, {i: i + 1 for i in range(6)}) + H.remove_edges_from([(2, 6), (2, 5), (2, 4), (1, 3), (5, 3)]) + self.H = H + + def test_find_cliques1(self): + cl = list(nx.find_cliques(self.G)) + rcl = nx.find_cliques_recursive(self.G) + expected = [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + assert sorted(map(sorted, cl)) == sorted(map(sorted, rcl)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + def test_selfloops(self): + self.G.add_edge(1, 1) + cl = list(nx.find_cliques(self.G)) + rcl = list(nx.find_cliques_recursive(self.G)) + assert set(map(frozenset, cl)) == set(map(frozenset, rcl)) + answer = [{2, 6, 1, 3}, {2, 6, 4}, {5, 4, 7}, {8, 9}, {10, 11}] + assert len(answer) == len(cl) + assert all(set(c) in answer for c in cl) + + def test_find_cliques2(self): + hcl = list(nx.find_cliques(self.H)) + assert sorted(map(sorted, hcl)) == [[1, 2], [1, 4, 5, 6], [2, 3], [3, 4, 6]] + + def 
test_find_cliques3(self): + # all cliques are [[2, 6, 1, 3], [2, 6, 4], [5, 4, 7], [8, 9], [10, 11]] + + cl = list(nx.find_cliques(self.G, [2])) + rcl = nx.find_cliques_recursive(self.G, [2]) + expected = [[2, 6, 1, 3], [2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 3])) + rcl = nx.find_cliques_recursive(self.G, [2, 3]) + expected = [[2, 6, 1, 3]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + cl = list(nx.find_cliques(self.G, [2, 6, 4])) + rcl = nx.find_cliques_recursive(self.G, [2, 6, 4]) + expected = [[2, 6, 4]] + assert sorted(map(sorted, rcl)) == sorted(map(sorted, expected)) + assert sorted(map(sorted, cl)) == sorted(map(sorted, expected)) + + with pytest.raises(ValueError): + list(nx.find_cliques(self.G, [2, 6, 4, 1])) + + with pytest.raises(ValueError): + list(nx.find_cliques_recursive(self.G, [2, 6, 4, 1])) + + def test_number_of_cliques(self): + G = self.G + assert nx.number_of_cliques(G, 1) == 1 + assert list(nx.number_of_cliques(G, [1]).values()) == [1] + assert list(nx.number_of_cliques(G, [1, 2]).values()) == [1, 2] + assert nx.number_of_cliques(G, [1, 2]) == {1: 1, 2: 2} + assert nx.number_of_cliques(G, 2) == 2 + assert nx.number_of_cliques(G) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=list(G)) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, nodes=[2, 3, 4]) == {2: 2, 3: 1, 4: 2} + assert nx.number_of_cliques(G, cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + assert nx.number_of_cliques(G, list(G), cliques=self.cl) == { + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 1, + 6: 2, + 7: 1, + 8: 1, + 9: 1, + 10: 1, + 11: 1, + } + + def test_node_clique_number(self): + G = self.G + assert nx.node_clique_number(G, 1) == 4 + assert list(nx.node_clique_number(G, [1]).values()) == [4] + assert list(nx.node_clique_number(G, [1, 2]).values()) == [4, 4] + assert nx.node_clique_number(G, [1, 2]) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1) == 4 + assert nx.node_clique_number(G) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, cliques=self.cl) == { + 1: 4, + 2: 4, + 3: 4, + 4: 3, + 5: 3, + 6: 4, + 7: 3, + 8: 2, + 9: 2, + 10: 2, + 11: 2, + } + assert nx.node_clique_number(G, [1, 2], cliques=self.cl) == {1: 4, 2: 4} + assert nx.node_clique_number(G, 1, cliques=self.cl) == 4 + + def test_make_clique_bipartite(self): + G = self.G + B = nx.make_clique_bipartite(G) + assert sorted(B) == [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] + # Project onto the nodes of the original graph. + H = nx.projected_graph(B, range(1, 12)) + assert H.adj == G.adj + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as positive ones. 
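For orientation, the projection tested here relies on make_clique_bipartite labeling each maximal clique with a negative integer (-1, -2, ...) and joining it to that clique's members, while the original nodes keep their labels. A small sketch under that documented convention (the example graph is illustrative):

import networkx as nx

G = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])  # a triangle plus a pendant edge
B = nx.make_clique_bipartite(G)
# The two maximal cliques, {1, 2, 3} and {3, 4}, become the negative nodes.
assert sorted(n for n in B if n < 0) == [-2, -1]
assert {frozenset(B[-1]), frozenset(B[-2])} == {frozenset({1, 2, 3}), frozenset({3, 4})}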
+ H1 = nx.relabel_nodes(H1, {-v: v for v in range(1, 6)}) + assert sorted(H1) == [1, 2, 3, 4, 5] + + def test_make_max_clique_graph(self): + """Tests that the maximal clique graph is the same as the bipartite + clique graph after being projected onto the nodes representing the + cliques. + + """ + G = self.G + B = nx.make_clique_bipartite(G) + # Project onto the nodes representing the cliques. + H1 = nx.projected_graph(B, range(-5, 0)) + # Relabel the negative numbers as nonnegative ones, starting at + # 0. + H1 = nx.relabel_nodes(H1, {-v: v - 1 for v in range(1, 6)}) + H2 = nx.make_max_clique_graph(G) + assert H1.adj == H2.adj + + def test_directed(self): + with pytest.raises(nx.NetworkXNotImplemented): + next(nx.find_cliques(nx.DiGraph())) + + def test_find_cliques_trivial(self): + G = nx.Graph() + assert sorted(nx.find_cliques(G)) == [] + assert sorted(nx.find_cliques_recursive(G)) == [] + + def test_make_max_clique_graph_create_using(self): + G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)]) + E = nx.Graph([(0, 1), (0, 2), (1, 2)]) + E.add_node(3) + assert nx.is_isomorphic(nx.make_max_clique_graph(G, create_using=nx.Graph), E) + + +class TestEnumerateAllCliques: + def test_paper_figure_4(self): + # Same graph as given in Fig. 4 of paper enumerate_all_cliques is + # based on. + # http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1559964&isnumber=33129 + G = nx.Graph() + edges_fig_4 = [ + ("a", "b"), + ("a", "c"), + ("a", "d"), + ("a", "e"), + ("b", "c"), + ("b", "d"), + ("b", "e"), + ("c", "d"), + ("c", "e"), + ("d", "e"), + ("f", "b"), + ("f", "c"), + ("f", "g"), + ("g", "f"), + ("g", "c"), + ("g", "d"), + ("g", "e"), + ] + G.add_edges_from(edges_fig_4) + + cliques = list(nx.enumerate_all_cliques(G)) + clique_sizes = list(map(len, cliques)) + assert sorted(clique_sizes) == clique_sizes + + expected_cliques = [ + ["a"], + ["b"], + ["c"], + ["d"], + ["e"], + ["f"], + ["g"], + ["a", "b"], + ["a", "b", "d"], + ["a", "b", "d", "e"], + ["a", "b", "e"], + ["a", "c"], + ["a", "c", "d"], + ["a", "c", "d", "e"], + ["a", "c", "e"], + ["a", "d"], + ["a", "d", "e"], + ["a", "e"], + ["b", "c"], + ["b", "c", "d"], + ["b", "c", "d", "e"], + ["b", "c", "e"], + ["b", "c", "f"], + ["b", "d"], + ["b", "d", "e"], + ["b", "e"], + ["b", "f"], + ["c", "d"], + ["c", "d", "e"], + ["c", "d", "e", "g"], + ["c", "d", "g"], + ["c", "e"], + ["c", "e", "g"], + ["c", "f"], + ["c", "f", "g"], + ["c", "g"], + ["d", "e"], + ["d", "e", "g"], + ["d", "g"], + ["e", "g"], + ["f", "g"], + ["a", "b", "c"], + ["a", "b", "c", "d"], + ["a", "b", "c", "d", "e"], + ["a", "b", "c", "e"], + ] + + assert sorted(map(sorted, cliques)) == sorted(map(sorted, expected_cliques)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cluster.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cluster.py new file mode 100644 index 0000000000000000000000000000000000000000..d69f036ff6cc87e179af5fde6ce7ee21b6c193c4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cluster.py @@ -0,0 +1,543 @@ +import pytest + +import networkx as nx + + +class TestTriangles: + def test_empty(self): + G = nx.Graph() + assert list(nx.triangles(G).values()) == [] + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + assert nx.triangles(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.triangles(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + 
assert nx.triangles(G, 1) == 0 + assert list(nx.triangles(G, [1, 2]).values()) == [0, 0] + assert nx.triangles(G, 1) == 0 + assert nx.triangles(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.triangles(G).values()) == [6, 6, 6, 6, 6] + assert sum(nx.triangles(G).values()) / 3 == 10 + assert nx.triangles(G, 1) == 6 + G.remove_edge(1, 2) + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 1) == 3 + G.add_edge(3, 3) # ignore self-edges + assert list(nx.triangles(G).values()) == [5, 3, 3, 5, 5] + assert nx.triangles(G, 3) == 5 + + +class TestDirectedClustering: + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + assert nx.clustering(G, 0) == 0 + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) == [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + assert nx.clustering(G, 4) == 5 / 6 + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4) + assert nx.clustering(G)[0] == 1 / 6 + + +class TestDirectedWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.DiGraph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_k5(self): + G = nx.complete_graph(5, create_using=nx.DiGraph()) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 11 / 12, + 1, + 1, + 11 / 12, + 11 / 12, + ] + assert nx.clustering(G, [1, 4], weight="weight") == {1: 1, 4: 11 / 12} + G.remove_edge(2, 1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] == 1 / 6 + # Relaxed comparisons to allow graphblas-algorithms to pass tests + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 12) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 12) + + +class TestWeightedClustering: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def 
test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G, weight="weight").values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, weight="weight") == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G, weight="weight").values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2], weight="weight").values()) == [0, 0] + assert nx.clustering(G, 1, weight="weight") == 0 + assert nx.clustering(G, [1, 2], weight="weight") == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G, weight="weight").values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G, weight="weight") == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G, weight="weight").values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4], weight="weight") == { + 1: 1, + 4: 0.83333333333333337, + } + + def test_triangle_and_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 4, weight=2) + assert nx.clustering(G)[0] == 1 / 3 + np.testing.assert_allclose(nx.clustering(G, weight="weight")[0], 1 / 6) + np.testing.assert_allclose(nx.clustering(G, 0, weight="weight"), 1 / 6) + + def test_triangle_and_signed_edge(self): + G = nx.cycle_graph(3) + G.add_edge(0, 1, weight=-1) + G.add_edge(3, 0, weight=0) + assert nx.clustering(G)[0] == 1 / 3 + assert nx.clustering(G, weight="weight")[0] == -1 / 3 + + +class TestClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_clustering(self): + G = nx.Graph() + assert list(nx.clustering(G).values()) == [] + assert nx.clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.clustering(G).values()) == [0, 0, 0, 0, 0, 0, 0, 0] + assert nx.clustering(G, 1) == 0 + assert list(nx.clustering(G, [1, 2]).values()) == [0, 0] + assert nx.clustering(G, 1) == 0 + assert nx.clustering(G, [1, 2]) == {1: 0, 2: 0} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + assert list(nx.clustering(G).values()) == [ + 5 / 6, + 1, + 1, + 5 / 6, + 5 / 6, + ] + assert nx.clustering(G, [1, 4]) == {1: 1, 4: 0.83333333333333337} + + def test_k5_signed(self): + G = nx.complete_graph(5) + assert list(nx.clustering(G).values()) == [1, 1, 1, 1, 1] + assert nx.average_clustering(G) == 1 + G.remove_edge(1, 2) + G.add_edge(0, 1, weight=-1) + assert list(nx.clustering(G, weight="weight").values()) == [ + 1 / 6, + -1 / 3, + 1, + 3 / 6, + 3 / 6, + ] + + +class TestTransitivity: + def test_transitivity(self): + G = nx.Graph() + assert nx.transitivity(G) == 0 + + def test_path(self): + G = nx.path_graph(10) + assert nx.transitivity(G) == 0 + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.transitivity(G) == 0 + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.transitivity(G) == 1 + G.remove_edge(1, 2) + assert 
nx.transitivity(G) == 0.875 + + +class TestSquareClustering: + def test_clustering(self): + G = nx.Graph() + assert list(nx.square_clustering(G).values()) == [] + assert nx.square_clustering(G) == {} + + def test_path(self): + G = nx.path_graph(10) + assert list(nx.square_clustering(G).values()) == [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ] + assert nx.square_clustering(G) == { + 0: 0, + 1: 0, + 2: 0, + 3: 0, + 4: 0, + 5: 0, + 6: 0, + 7: 0, + 8: 0, + 9: 0, + } + + def test_cubical(self): + G = nx.cubical_graph() + assert list(nx.square_clustering(G).values()) == [ + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + 1 / 3, + ] + assert list(nx.square_clustering(G, [1, 2]).values()) == [1 / 3, 1 / 3] + assert nx.square_clustering(G, [1])[1] == 1 / 3 + assert nx.square_clustering(G, 1) == 1 / 3 + assert nx.square_clustering(G, [1, 2]) == {1: 1 / 3, 2: 1 / 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1] + + def test_bipartite_k5(self): + G = nx.complete_bipartite_graph(5, 5) + assert list(nx.square_clustering(G).values()) == [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + + def test_lind_square_clustering(self): + """Test C4 for figure 1 Lind et al (2005)""" + G = nx.Graph( + [ + (1, 2), + (1, 3), + (1, 6), + (1, 7), + (2, 4), + (2, 5), + (3, 4), + (3, 5), + (6, 7), + (7, 8), + (6, 8), + (7, 9), + (7, 10), + (6, 11), + (6, 12), + (2, 13), + (2, 14), + (3, 15), + (3, 16), + ] + ) + G1 = G.subgraph([1, 2, 3, 4, 5, 13, 14, 15, 16]) + G2 = G.subgraph([1, 6, 7, 8, 9, 10, 11, 12]) + assert nx.square_clustering(G, [1])[1] == 3 / 43 + assert nx.square_clustering(G1, [1])[1] == 2 / 6 + assert nx.square_clustering(G2, [1])[1] == 1 / 5 + + def test_peng_square_clustering(self): + """Test eq2 for figure 1 Peng et al (2008)""" + G = nx.Graph([(1, 2), (1, 3), (2, 4), (3, 4), (3, 5), (3, 6)]) + assert nx.square_clustering(G, [1])[1] == 1 / 3 + + +class TestAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.Graph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 4 + assert nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 3 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 2 + + def test_average_clustering_signed(self): + G = nx.cycle_graph(3) + G.add_edge(2, 3) + G.add_edge(0, 1, weight=-1) + assert nx.average_clustering(G, weight="weight") == (-1 - 1 - 1 / 3) / 4 + assert ( + nx.average_clustering(G, weight="weight", count_zeros=True) + == (-1 - 1 - 1 / 3) / 4 + ) + assert ( + nx.average_clustering(G, weight="weight", count_zeros=False) + == (-1 - 1 - 1 / 3) / 3 + ) + + +class TestDirectedAverageClustering: + @classmethod + def setup_class(cls): + pytest.importorskip("numpy") + + def test_empty(self): + G = nx.DiGraph() + with pytest.raises(ZeroDivisionError): + nx.average_clustering(G) + + def test_average_clustering(self): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(2, 3) + assert nx.average_clustering(G) == (1 + 1 + 1 / 3) / 8 + assert nx.average_clustering(G, count_zeros=True) == (1 + 1 + 1 / 3) / 8 + assert 
nx.average_clustering(G, count_zeros=False) == (1 + 1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3]) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=True) == (1 + 1 / 3) / 6 + assert nx.average_clustering(G, [1, 2, 3], count_zeros=False) == (1 + 1 / 3) / 4 + + +class TestGeneralizedDegree: + def test_generalized_degree(self): + G = nx.Graph() + assert nx.generalized_degree(G) == {} + + def test_path(self): + G = nx.path_graph(5) + assert nx.generalized_degree(G, 0) == {0: 1} + assert nx.generalized_degree(G, 1) == {0: 2} + + def test_cubical(self): + G = nx.cubical_graph() + assert nx.generalized_degree(G, 0) == {0: 3} + + def test_k5(self): + G = nx.complete_graph(5) + assert nx.generalized_degree(G, 0) == {3: 4} + G.remove_edge(0, 1) + assert nx.generalized_degree(G, 0) == {2: 3} + assert nx.generalized_degree(G, [1, 2]) == {1: {2: 3}, 2: {2: 2, 3: 2}} + assert nx.generalized_degree(G) == { + 0: {2: 3}, + 1: {2: 3}, + 2: {2: 2, 3: 2}, + 3: {2: 2, 3: 2}, + 4: {2: 2, 3: 2}, + } diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_communicability.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_communicability.py new file mode 100644 index 0000000000000000000000000000000000000000..0f447094548415c089710b9b62ac4d73a27efeb5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_communicability.py @@ -0,0 +1,80 @@ +from collections import defaultdict + +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms.communicability_alg import communicability, communicability_exp + + +class TestCommunicability: + def test_communicability(self): + answer = { + 0: {0: 1.5430806348152435, 1: 1.1752011936438012}, + 1: {0: 1.1752011936438012, 1: 1.5430806348152435}, + } + # answer={(0, 0): 1.5430806348152435, + # (0, 1): 1.1752011936438012, + # (1, 0): 1.1752011936438012, + # (1, 1): 1.5430806348152435} + + result = communicability(nx.path_graph(2)) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) + + def test_communicability2(self): + answer_orig = { + ("1", "1"): 1.6445956054135658, + ("1", "Albert"): 0.7430186221096251, + ("1", "Aric"): 0.7430186221096251, + ("1", "Dan"): 1.6208126320442937, + ("1", "Franck"): 0.42639707170035257, + ("Albert", "1"): 0.7430186221096251, + ("Albert", "Albert"): 2.4368257358712189, + ("Albert", "Aric"): 1.4368257358712191, + ("Albert", "Dan"): 2.0472097037446453, + ("Albert", "Franck"): 1.8340111678944691, + ("Aric", "1"): 0.7430186221096251, + ("Aric", "Albert"): 1.4368257358712191, + ("Aric", "Aric"): 2.4368257358712193, + ("Aric", "Dan"): 2.0472097037446457, + ("Aric", "Franck"): 1.8340111678944691, + ("Dan", "1"): 1.6208126320442937, + ("Dan", "Albert"): 2.0472097037446453, + ("Dan", "Aric"): 2.0472097037446457, + ("Dan", "Dan"): 3.1306328496328168, + ("Dan", "Franck"): 1.4860372442192515, + ("Franck", "1"): 0.42639707170035257, + ("Franck", "Albert"): 1.8340111678944691, + ("Franck", "Aric"): 1.8340111678944691, + ("Franck", "Dan"): 1.4860372442192515, + ("Franck", "Franck"): 2.3876142275231915, + } + + answer = defaultdict(dict) + for (k1, k2), v in answer_orig.items(): + answer[k1][k2] = v + + G1 = nx.Graph( + [ + ("Franck", "Aric"), + ("Aric", "Dan"), + ("Dan", "Albert"), + ("Albert", "Franck"), + ("Dan", "1"), + ("Franck", "Albert"), + ] + ) + + result = communicability(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == 
pytest.approx(result[k1][k2], abs=1e-7) + + result = communicability_exp(G1) + for k1, val in result.items(): + for k2 in val: + assert answer[k1][k2] == pytest.approx(result[k1][k2], abs=1e-7) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_core.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_core.py new file mode 100644 index 0000000000000000000000000000000000000000..535af31b680209a9b57895246c2eb2ae268c9d55 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_core.py @@ -0,0 +1,185 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + + +class TestCore: + @classmethod + def setup_class(cls): + # G is the example graph in Figure 1 from Batagelj and + # Zaversnik's paper titled An O(m) Algorithm for Cores + # Decomposition of Networks, 2003, + # http://arXiv.org/abs/cs/0310049. With nodes labeled as + # shown, the 3-core is given by nodes 1-8, the 2-core by nodes + # 9-16, the 1-core by nodes 17-20 and node 21 is in the + # 0-core. + t1 = nx.convert_node_labels_to_integers(nx.tetrahedral_graph(), 1) + t2 = nx.convert_node_labels_to_integers(t1, 5) + G = nx.union(t1, t2) + G.add_edges_from( + [ + (3, 7), + (2, 11), + (11, 5), + (11, 12), + (5, 12), + (12, 19), + (12, 18), + (3, 9), + (7, 9), + (7, 10), + (9, 10), + (9, 20), + (17, 13), + (13, 14), + (14, 15), + (15, 16), + (16, 13), + ] + ) + G.add_node(21) + cls.G = G + + # Create the graph H resulting from the degree sequence + # [0, 1, 2, 2, 2, 2, 3] when using the Havel-Hakimi algorithm. + + degseq = [0, 1, 2, 2, 2, 2, 3] + H = nx.havel_hakimi_graph(degseq) + mapping = {6: 0, 0: 1, 4: 3, 5: 6, 3: 4, 1: 2, 2: 5} + cls.H = nx.relabel_nodes(H, mapping) + + def test_trivial(self): + """Empty graph""" + G = nx.Graph() + assert nx.core_number(G) == {} + + def test_core_number(self): + core = nx.core_number(self.G) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(4)] + assert nodes_equal(nodes_by_core[0], [21]) + assert nodes_equal(nodes_by_core[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_core[2], [9, 10, 11, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8]) + + def test_core_number2(self): + core = nx.core_number(self.H) + nodes_by_core = [sorted(n for n in core if core[n] == val) for val in range(3)] + assert nodes_equal(nodes_by_core[0], [0]) + assert nodes_equal(nodes_by_core[1], [1, 3]) + assert nodes_equal(nodes_by_core[2], [2, 4, 5, 6]) + + def test_core_number_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match="Input graph has self loops"): + nx.core_number(G) + + def test_directed_core_number(self): + """core number had a bug for directed graphs found in issue #1959""" + # small example where too timid edge removal can make cn[2] = 3 + G = nx.DiGraph() + edges = [(1, 2), (2, 1), (2, 3), (2, 4), (3, 4), (4, 3)] + G.add_edges_from(edges) + assert nx.core_number(G) == {1: 2, 2: 2, 3: 2, 4: 2} + # small example where too aggressive edge removal can make cn[2] = 2 + more_edges = [(1, 5), (3, 5), (4, 5), (3, 6), (4, 6), (5, 6)] + G.add_edges_from(more_edges) + assert nx.core_number(G) == {1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3} + + def test_main_core(self): + main_core_subgraph = nx.k_core(self.H) + assert sorted(main_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_core(self): + # k=0 + k_core_subgraph = nx.k_core(self.H, k=0) + assert sorted(k_core_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_core_subgraph = 
nx.k_core(self.H, k=1) + assert sorted(k_core_subgraph.nodes()) == [1, 2, 3, 4, 5, 6] + # k = 2 + k_core_subgraph = nx.k_core(self.H, k=2) + assert sorted(k_core_subgraph.nodes()) == [2, 4, 5, 6] + + def test_main_crust(self): + main_crust_subgraph = nx.k_crust(self.H) + assert sorted(main_crust_subgraph.nodes()) == [0, 1, 3] + + def test_k_crust(self): + # k=2 + k_crust_subgraph = nx.k_crust(self.H, k=2) + assert sorted(k_crust_subgraph.nodes()) == sorted(self.H.nodes()) + # k=1 + k_crust_subgraph = nx.k_crust(self.H, k=1) + assert sorted(k_crust_subgraph.nodes()) == [0, 1, 3] + # k=0 + k_crust_subgraph = nx.k_crust(self.H, k=0) + assert sorted(k_crust_subgraph.nodes()) == [0] + + def test_main_shell(self): + main_shell_subgraph = nx.k_shell(self.H) + assert sorted(main_shell_subgraph.nodes()) == [2, 4, 5, 6] + + def test_k_shell(self): + # k=2 + k_shell_subgraph = nx.k_shell(self.H, k=2) + assert sorted(k_shell_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_shell_subgraph = nx.k_shell(self.H, k=1) + assert sorted(k_shell_subgraph.nodes()) == [1, 3] + # k=0 + k_shell_subgraph = nx.k_shell(self.H, k=0) + assert sorted(k_shell_subgraph.nodes()) == [0] + + def test_k_corona(self): + # k=2 + k_corona_subgraph = nx.k_corona(self.H, k=2) + assert sorted(k_corona_subgraph.nodes()) == [2, 4, 5, 6] + # k=1 + k_corona_subgraph = nx.k_corona(self.H, k=1) + assert sorted(k_corona_subgraph.nodes()) == [1] + # k=0 + k_corona_subgraph = nx.k_corona(self.H, k=0) + assert sorted(k_corona_subgraph.nodes()) == [0] + + def test_k_truss(self): + # k=-1 + k_truss_subgraph = nx.k_truss(self.G, -1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=0 + k_truss_subgraph = nx.k_truss(self.G, 0) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=1 + k_truss_subgraph = nx.k_truss(self.G, 1) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=2 + k_truss_subgraph = nx.k_truss(self.G, 2) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 21)) + # k=3 + k_truss_subgraph = nx.k_truss(self.G, 3) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 13)) + + k_truss_subgraph = nx.k_truss(self.G, 4) + assert sorted(k_truss_subgraph.nodes()) == list(range(1, 9)) + + k_truss_subgraph = nx.k_truss(self.G, 5) + assert sorted(k_truss_subgraph.nodes()) == [] + + def test_onion_layers(self): + layers = nx.onion_layers(self.G) + nodes_by_layer = [ + sorted(n for n in layers if layers[n] == val) for val in range(1, 7) + ] + assert nodes_equal(nodes_by_layer[0], [21]) + assert nodes_equal(nodes_by_layer[1], [17, 18, 19, 20]) + assert nodes_equal(nodes_by_layer[2], [10, 12, 13, 14, 15, 16]) + assert nodes_equal(nodes_by_layer[3], [9, 11]) + assert nodes_equal(nodes_by_layer[4], [1, 2, 4, 5, 6, 8]) + assert nodes_equal(nodes_by_layer[5], [3, 7]) + + def test_onion_self_loop(self): + G = nx.cycle_graph(3) + G.add_edge(0, 0) + with pytest.raises(nx.NetworkXError, match="Input graph contains self loops"): + nx.onion_layers(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_covering.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_covering.py new file mode 100644 index 0000000000000000000000000000000000000000..b2f97a866b0e09c199c2edb9f40f20986caa8fbc --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_covering.py @@ -0,0 +1,85 @@ +import pytest + +import networkx as nx + + +class TestMinEdgeCover: + """Tests for :func:`networkx.algorithms.min_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert
nx.min_edge_cover(G) == set() + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(0, 0) + assert nx.min_edge_cover(G) == {(0, 0)} + + def test_graph_with_isolated_v(self): + G = nx.Graph() + G.add_node(1) + with pytest.raises( + nx.NetworkXException, + match="Graph has a node with no edge incident on it, so no edge cover exists.", + ): + nx.min_edge_cover(G) + + def test_graph_single_edge(self): + G = nx.Graph([(0, 1)]) + assert nx.min_edge_cover(G) in ({(0, 1)}, {(1, 0)}) + + def test_graph_two_edge_path(self): + G = nx.path_graph(3) + min_cover = nx.min_edge_cover(G) + assert len(min_cover) == 2 + for u, v in G.edges: + assert (u, v) in min_cover or (v, u) in min_cover + + def test_bipartite_explicit(self): + G = nx.Graph() + G.add_nodes_from([1, 2, 3, 4], bipartite=0) + G.add_nodes_from(["a", "b", "c"], bipartite=1) + G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")]) + # Use bipartite method by prescribing the algorithm + min_cover = nx.min_edge_cover( + G, nx.algorithms.bipartite.matching.eppstein_matching + ) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 8 + # Use the default method which is not specialized for bipartite + min_cover2 = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover2) + assert len(min_cover2) == 4 + + def test_complete_graph_even(self): + G = nx.complete_graph(10) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 5 + + def test_complete_graph_odd(self): + G = nx.complete_graph(11) + min_cover = nx.min_edge_cover(G) + assert nx.is_edge_cover(G, min_cover) + assert len(min_cover) == 6 + + +class TestIsEdgeCover: + """Tests for :func:`networkx.algorithms.is_edge_cover`""" + + def test_empty_graph(self): + G = nx.Graph() + assert nx.is_edge_cover(G, set()) + + def test_graph_with_loop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.is_edge_cover(G, {(1, 1)}) + + def test_graph_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert nx.is_edge_cover(G, {(0, 0), (1, 1)}) + assert nx.is_edge_cover(G, {(0, 1), (1, 0)}) + assert nx.is_edge_cover(G, {(0, 1)}) + assert not nx.is_edge_cover(G, {(0, 0)}) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cuts.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cuts.py new file mode 100644 index 0000000000000000000000000000000000000000..6d8656e330e205c61d9f469560697a3875a0555b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cuts.py @@ -0,0 +1,172 @@ +"""Unit tests for the :mod:`networkx.algorithms.cuts` module.""" + + +import networkx as nx + + +class TestCutSize: + """Unit tests for the :func:`~networkx.cut_size` function.""" + + def test_symmetric(self): + """Tests that the cut size is symmetric.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 4} + T = {2, 3, 5} + assert nx.cut_size(G, S, T) == 4 + assert nx.cut_size(G, T, S) == 4 + + def test_single_edge(self): + """Tests for a cut of a single edge.""" + G = nx.barbell_graph(3, 0) + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 1 + assert nx.cut_size(G, T, S) == 1 + + def test_directed(self): + """Tests that each directed edge is counted once in the cut.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 2} + T = {3, 4, 5} + assert nx.cut_size(G, S, T) == 2 + assert nx.cut_size(G, T, S) == 2 + + def test_directed_symmetric(self): + """Tests that a cut in a directed graph is symmetric.""" + G = nx.barbell_graph(3, 0).to_directed() + S = {0, 1, 4} + T = {2, 3, 5} + 
assert nx.cut_size(G, S, T) == 8 + assert nx.cut_size(G, T, S) == 8 + + def test_multigraph(self): + """Tests that parallel edges are each counted for a cut.""" + G = nx.MultiGraph(["ab", "ab"]) + assert nx.cut_size(G, {"a"}, {"b"}) == 2 + + +class TestVolume: + """Unit tests for the :func:`~networkx.volume` function.""" + + def test_graph(self): + G = nx.cycle_graph(4) + assert nx.volume(G, {0, 1}) == 4 + + def test_digraph(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0)]) + assert nx.volume(G, {0, 1}) == 2 + + def test_multigraph(self): + edges = list(nx.cycle_graph(4).edges()) + G = nx.MultiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 8 + + def test_multidigraph(self): + edges = [(0, 1), (1, 2), (2, 3), (3, 0)] + G = nx.MultiDiGraph(edges * 2) + assert nx.volume(G, {0, 1}) == 4 + + def test_barbell(self): + G = nx.barbell_graph(3, 0) + assert nx.volume(G, {0, 1, 2}) == 7 + assert nx.volume(G, {3, 4, 5}) == 7 + + +class TestNormalizedCutSize: + """Unit tests for the :func:`~networkx.normalized_cut_size` function.""" + + def test_graph(self): + G = nx.path_graph(4) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: o-{-o--o-}-o + expected = 2 * ((1 / 4) + (1 / 2)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + def test_directed(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 3)]) + S = {1, 2} + T = set(G) - S + size = nx.normalized_cut_size(G, S, T) + # The cut looks like this: o-{->o-->o-}->o + expected = 2 * ((1 / 2) + (1 / 1)) + assert expected == size + # Test with no input T + assert expected == nx.normalized_cut_size(G, S) + + +class TestConductance: + """Unit tests for the :func:`~networkx.conductance` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + # Consider the singleton sets containing the "bridge" nodes. + # There is only one cut edge, and each set has volume five. + S = {4} + T = {5} + conductance = nx.conductance(G, S, T) + expected = 1 / 5 + assert expected == conductance + # Test with no input T + G2 = nx.barbell_graph(3, 0) + # There is only one cut edge, and each set has volume seven. + S2 = {0, 1, 2} + assert nx.conductance(G2, S2) == 1 / 7 + + +class TestEdgeExpansion: + """Unit tests for the :func:`~networkx.edge_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.edge_expansion(G, S, T) + expected = 1 / 5 + assert expected == expansion + # Test with no input T + assert expected == nx.edge_expansion(G, S) + + +class TestNodeExpansion: + """Unit tests for the :func:`~networkx.node_expansion` function.""" + + def test_graph(self): + G = nx.path_graph(8) + S = {3, 4, 5} + expansion = nx.node_expansion(G, S) + # The neighborhood of S has cardinality five, and S has + # cardinality three. + expected = 5 / 3 + assert expected == expansion + + +class TestBoundaryExpansion: + """Unit tests for the :func:`~networkx.boundary_expansion` function.""" + + def test_graph(self): + G = nx.complete_graph(10) + S = set(range(4)) + expansion = nx.boundary_expansion(G, S) + # The node boundary of S has cardinality six, and S has + # cardinality four.
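The expansion measures asserted in these classes reduce to small set arithmetic; a sketch recomputing two of them by hand (graph choices are illustrative):

import networkx as nx

# normalized_cut_size(G, S, T) = cut_size(G, S, T) * (1/volume(S) + 1/volume(T))
G = nx.path_graph(4)  # 0 - 1 - 2 - 3
S, T = {1, 2}, {0, 3}
cut = nx.cut_size(G, S, T)  # the two edges (0, 1) and (2, 3)
vol_S = sum(d for _, d in G.degree(S))  # deg(1) + deg(2) = 4
vol_T = sum(d for _, d in G.degree(T))  # deg(0) + deg(3) = 2
assert nx.normalized_cut_size(G, S) == cut * (1 / vol_S + 1 / vol_T) == 1.5

# boundary_expansion(G, S) = |node_boundary(S)| / |S|, i.e. 6 / 4 in the test above
K10 = nx.complete_graph(10)
S = set(range(4))
assert nx.boundary_expansion(K10, S) == len(nx.node_boundary(K10, S)) / len(S) == 1.5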
+ expected = 6 / 4 + assert expected == expansion + + +class TestMixingExpansion: + """Unit tests for the :func:`~networkx.mixing_expansion` function.""" + + def test_graph(self): + G = nx.barbell_graph(5, 0) + S = set(range(5)) + T = set(G) - S + expansion = nx.mixing_expansion(G, S, T) + # There is one cut edge, and the total number of edges in the + # graph is twice the total number of edges in a clique of size + # five, plus one more for the bridge. + expected = 1 / (2 * (5 * 4 + 1)) + assert expected == expansion diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cycles.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..230f513519bc751c61c7570207f886b770079627 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_cycles.py @@ -0,0 +1,971 @@ +from itertools import chain, islice, tee +from math import inf +from random import shuffle + +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +def check_independent(basis): + if len(basis) == 0: + return + try: + import numpy as np + except ImportError: + return + + H = nx.Graph() + for b in basis: + nx.add_cycle(H, b) + inc = nx.incidence_matrix(H, oriented=True) + rank = np.linalg.matrix_rank(inc.toarray(), tol=None, hermitian=False) + assert inc.shape[1] - rank == len(basis) + + +class TestCycles: + @classmethod + def setup_class(cls): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 3, 4, 5]) + nx.add_cycle(G, [0, 1, 6, 7, 8]) + G.add_edge(8, 9) + cls.G = G + + def is_cyclic_permutation(self, a, b): + n = len(a) + if len(b) != n: + return False + l = a + a + return any(l[i : i + n] == b for i in range(n)) + + def test_cycle_basis(self): + G = self.G + cy = nx.cycle_basis(G, 0) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 1) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5]] + # test disconnected graphs + nx.add_cycle(G, "ABC") + cy = nx.cycle_basis(G, 9) + sort_cy = sorted(sorted(c) for c in cy[:-1]) + [sorted(cy[-1])] + assert sort_cy == [[0, 1, 2, 3], [0, 1, 6, 7, 8], [0, 3, 4, 5], ["A", "B", "C"]] + + def test_cycle_basis2(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis3(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + cy = nx.cycle_basis(G, 0) + + def test_cycle_basis_ordered(self): + # see gh-6654 replace sets with (ordered) dicts + G = nx.cycle_graph(5) + G.update(nx.cycle_graph(range(3, 8))) + cbG = nx.cycle_basis(G) + + perm = {1: 0, 0: 1} # switch 0 and 1 + H = nx.relabel_nodes(G, perm) + cbH = [[perm.get(n, n) for n in cyc] for cyc in nx.cycle_basis(H)] + assert cbG == cbH + + def test_cycle_basis_self_loop(self): + """Tests the function for graphs with self loops""" + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2, 3]) + nx.add_cycle(G, [0, 0, 6, 2]) + cy = nx.cycle_basis(G) + sort_cy = sorted(sorted(c) for c in cy) + assert sort_cy == [[0], [0, 1, 2], [0, 2, 3], [0, 2, 6]] + + def test_simple_cycles(self): + edges = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2)] + G = nx.DiGraph(edges) + cc = sorted(nx.simple_cycles(G)) + ca = [[0], [0, 
1, 2], [0, 2], [1, 2], [2]] + assert len(cc) == len(ca) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_unsortable(self): + # this test ensures that graphs whose nodes without an intrinsic + # ordering do not cause issues + G = nx.DiGraph() + nx.add_cycle(G, ["a", 1]) + c = list(nx.simple_cycles(G)) + assert len(c) == 1 + + def test_simple_cycles_small(self): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3]) + c = sorted(nx.simple_cycles(G)) + assert len(c) == 1 + assert self.is_cyclic_permutation(c[0], [1, 2, 3]) + nx.add_cycle(G, [10, 20, 30]) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 2 + ca = [[1, 2, 3], [10, 20, 30]] + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in ca) + + def test_simple_cycles_empty(self): + G = nx.DiGraph() + assert list(nx.simple_cycles(G)) == [] + + def worst_case_graph(self, k): + # see figure 1 in Johnson's paper + # this graph has exactly 3k simple cycles + G = nx.DiGraph() + for n in range(2, k + 2): + G.add_edge(1, n) + G.add_edge(n, k + 2) + G.add_edge(2 * k + 1, 1) + for n in range(k + 2, 2 * k + 2): + G.add_edge(n, 2 * k + 2) + G.add_edge(n, n + 1) + G.add_edge(2 * k + 3, k + 2) + for n in range(2 * k + 3, 3 * k + 3): + G.add_edge(2 * k + 2, n) + G.add_edge(n, 3 * k + 3) + G.add_edge(3 * k + 3, 2 * k + 2) + return G + + def test_worst_case_graph(self): + # see figure 1 in Johnson's paper + for k in range(3, 10): + G = self.worst_case_graph(k) + l = len(list(nx.simple_cycles(G))) + assert l == 3 * k + + def test_recursive_simple_and_not(self): + for k in range(2, 10): + G = self.worst_case_graph(k) + cc = sorted(nx.simple_cycles(G)) + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, r) for r in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + def test_simple_graph_with_reported_bug(self): + G = nx.DiGraph() + edges = [ + (0, 2), + (0, 3), + (1, 0), + (1, 3), + (2, 1), + (2, 4), + (3, 2), + (3, 4), + (4, 0), + (4, 1), + (4, 5), + (5, 0), + (5, 1), + (5, 2), + (5, 3), + ] + G.add_edges_from(edges) + cc = sorted(nx.simple_cycles(G)) + assert len(cc) == 26 + rcc = sorted(nx.recursive_simple_cycles(G)) + assert len(cc) == len(rcc) + for c in cc: + assert any(self.is_cyclic_permutation(c, rc) for rc in rcc) + for rc in rcc: + assert any(self.is_cyclic_permutation(rc, c) for c in cc) + + +def pairwise(iterable): + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +def cycle_edges(c): + return pairwise(chain(c, islice(c, 1))) + + +def directed_cycle_edgeset(c): + return frozenset(cycle_edges(c)) + + +def undirected_cycle_edgeset(c): + if len(c) == 1: + return frozenset(cycle_edges(c)) + return frozenset(map(frozenset, cycle_edges(c))) + + +def multigraph_cycle_edgeset(c): + if len(c) <= 2: + return frozenset(cycle_edges(c)) + else: + return frozenset(map(frozenset, cycle_edges(c))) + + +class TestCycleEnumeration: + @staticmethod + def K(n): + return nx.complete_graph(n) + + @staticmethod + def D(n): + return nx.complete_graph(n).to_directed() + + @staticmethod + def edgeset_function(g): + if g.is_directed(): + return directed_cycle_edgeset + elif g.is_multigraph(): + return multigraph_cycle_edgeset + else: + return undirected_cycle_edgeset + + def check_cycle(self, g, c, es, cache, source, original_c, length_bound, chordless): + if length_bound is not None and len(c) > length_bound: + raise RuntimeError( + f"computed cycle {original_c} exceeds length bound 
{length_bound}" + ) + if source == "computed": + if es in cache: + raise RuntimeError( + f"computed cycle {original_c} has already been found!" + ) + else: + cache[es] = tuple(original_c) + else: + if es in cache: + cache.pop(es) + else: + raise RuntimeError(f"expected cycle {original_c} was not computed") + + if not all(g.has_edge(*e) for e in es): + raise RuntimeError( + f"{source} claimed cycle {original_c} is not a cycle of g" + ) + if chordless and len(g.subgraph(c).edges) > len(c): + raise RuntimeError(f"{source} cycle {original_c} is not chordless") + + def check_cycle_algorithm( + self, + g, + expected_cycles, + length_bound=None, + chordless=False, + algorithm=None, + ): + if algorithm is None: + algorithm = nx.chordless_cycles if chordless else nx.simple_cycles + + # note: we shuffle the labels of g to rule out accidentally-correct + # behavior which occurred during the development of chordless cycle + # enumeration algorithms + + relabel = list(range(len(g))) + shuffle(relabel) + label = dict(zip(g, relabel)) + unlabel = dict(zip(relabel, g)) + h = nx.relabel_nodes(g, label, copy=True) + + edgeset = self.edgeset_function(h) + + params = {} + if length_bound is not None: + params["length_bound"] = length_bound + + cycle_cache = {} + for c in algorithm(h, **params): + original_c = [unlabel[x] for x in c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "computed", original_c, length_bound, chordless + ) + + if isinstance(expected_cycles, int): + if len(cycle_cache) != expected_cycles: + raise RuntimeError( + f"expected {expected_cycles} cycles, got {len(cycle_cache)}" + ) + return + for original_c in expected_cycles: + c = [label[x] for x in original_c] + es = edgeset(c) + self.check_cycle( + h, c, es, cycle_cache, "expected", original_c, length_bound, chordless + ) + + if len(cycle_cache): + for c in cycle_cache.values(): + raise RuntimeError( + f"computed cycle {c} is valid but not in the expected cycle set!" 
+ ) + + def check_cycle_enumeration_integer_sequence( + self, + g_family, + cycle_counts, + length_bound=None, + chordless=False, + algorithm=None, + ): + for g, num_cycles in zip(g_family, cycle_counts): + self.check_cycle_algorithm( + g, + num_cycles, + length_bound=length_bound, + chordless=chordless, + algorithm=algorithm, + ) + + def test_directed_chordless_cycle_digons(self): + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + g.add_edge(0, 0) + expected_cycles = [(0,), (1, 2), (2, 3), (3, 4)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=2) + + expected_cycles = [c for c in expected_cycles if len(c) < 2] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=1) + + def test_directed_chordless_cycle_undirected(self): + g = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5), (5, 0), (5, 1), (0, 2)]) + expected_cycles = [(0, 2, 3, 4, 5), (1, 2, 3, 4, 5)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph() + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(4, 9)) + g.add_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (3, 4, 5, 6, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g.add_edge(3, 7) + expected_cycles = [(0, 1, 2, 3, 4), (3, 7), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + expected_cycles = [(3, 7)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True, length_bound=4) + + g.remove_edge(7, 3) + expected_cycles = [(0, 1, 2, 3, 4), (4, 5, 6, 7, 8)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + g = nx.DiGraph((i, j) for i in range(10) for j in range(i)) + expected_cycles = [] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + def test_chordless_cycles_directed(self): + G = nx.DiGraph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(3, 7) + expected[-1] = [7, 3] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + expected.pop() + G.remove_edge(7, 3) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_directed_chordless_cycle_diclique(self): + g_family = [self.D(n) for n in range(10)] + expected_cycles = [(n * n - n) // 2 for n in range(10)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, chordless=True + ) + + expected_cycles = [(n * n - n) // 2 for n in range(10)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected_cycles, length_bound=2 + ) + + def test_directed_chordless_loop_blockade(self): + g = nx.DiGraph((i, i) for i in range(10)) + nx.add_cycle(g, range(10)) + expected_cycles = [(i,) for i in range(10)] + self.check_cycle_algorithm(g, expected_cycles, chordless=True) + + self.check_cycle_algorithm(g, expected_cycles, 
length_bound=1)
+
+        g = nx.MultiDiGraph(g)
+        g.add_edges_from((i, i) for i in range(0, 10, 2))
+        expected_cycles = [(i,) for i in range(1, 10, 2)]
+        self.check_cycle_algorithm(g, expected_cycles, chordless=True)
+
+    def test_simple_cycles_notable_clique_sequences(self):
+        # A000292: the number of triangles in the complete graph K_n.
+        g_family = [self.K(n) for n in range(2, 12)]
+        expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220]
+        self.check_cycle_enumeration_integer_sequence(
+            g_family, expected, length_bound=3
+        )
+
+        def triangles(g, **kwargs):
+            yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 3)
+
+        # directed complete graphs have twice as many triangles thanks to reversal
+        g_family = [self.D(n) for n in range(2, 12)]
+        expected = [2 * e for e in expected]
+        self.check_cycle_enumeration_integer_sequence(
+            g_family, expected, length_bound=3, algorithm=triangles
+        )
+
+        def four_cycles(g, **kwargs):
+            yield from (c for c in nx.simple_cycles(g, **kwargs) if len(c) == 4)
+
+        # A050534: the number of 4-cycles in the complete graph K_{n+1}
+        expected = [0, 0, 0, 3, 15, 45, 105, 210, 378, 630, 990]
+        g_family = [self.K(n) for n in range(1, 12)]
+        self.check_cycle_enumeration_integer_sequence(
+            g_family, expected, length_bound=4, algorithm=four_cycles
+        )
+
+        # directed complete graphs have twice as many 4-cycles thanks to reversal
+        expected = [2 * e for e in expected]
+        g_family = [self.D(n) for n in range(1, 15)]
+        self.check_cycle_enumeration_integer_sequence(
+            g_family, expected, length_bound=4, algorithm=four_cycles
+        )
+
+        # A006231: the number of elementary circuits in a complete directed graph with n nodes
+        expected = [0, 1, 5, 20, 84, 409, 2365]
+        g_family = [self.D(n) for n in range(1, 8)]
+        self.check_cycle_enumeration_integer_sequence(g_family, expected)
+
+        # A002807: Number of cycles in the complete graph on n nodes K_{n}.
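+        # (Closed form: sum over k >= 3 of C(n, k) * (k - 1)! / 2 -- choose the
+        # k nodes of the cycle, then count its distinct cyclic orientations.
+        # Worked check for n = 5: 10 * 1 + 5 * 3 + 1 * 12 = 37, matching the
+        # sequence entry below.)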
+ expected = [0, 0, 0, 1, 7, 37, 197, 1172] + g_family = [self.K(n) for n in range(8)] + self.check_cycle_enumeration_integer_sequence(g_family, expected) + + def test_directed_chordless_cycle_parallel_multiedges(self): + g = nx.MultiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + expected = [] + self.check_cycle_algorithm(g, expected, chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + expected = [[*range(5)]] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + g = nx.MultiDiGraph() + + nx.add_cycle(g, range(5)) + nx.add_cycle(g, range(5)[::-1]) + expected = [*cycle_edges(range(5))] + self.check_cycle_algorithm(g, expected, chordless=True) + + nx.add_cycle(g, range(5)) + self.check_cycle_algorithm(g, [], chordless=True) + + def test_chordless_cycles_graph(self): + G = nx.Graph() + nx.add_cycle(G, range(5)) + nx.add_cycle(G, range(4, 12)) + expected = [[*range(5)], [*range(4, 12)]] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + G.add_edge(7, 3) + expected.append([*range(3, 8)]) + expected.append([4, 3, 7, 8, 9, 10, 11]) + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 5], length_bound=5, chordless=True + ) + + def test_chordless_cycles_giant_hamiltonian(self): + # ... o - e - o - e - o ... # o = odd, e = even + # ... ---/ \-----/ \--- ... # <-- "long" edges + # + # each long edge belongs to exactly one triangle, and one giant cycle + # of length n/2. The remaining edges each belong to a triangle + + n = 1000 + assert n % 2 == 0 + G = nx.Graph() + for v in range(n): + if not v % 2: + G.add_edge(v, (v + 2) % n) + G.add_edge(v, (v + 1) % n) + + expected = [[*range(0, n, 2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + # ... o -> e -> o -> e -> o ... # o = odd, e = even + # ... <---/ \---<---/ \---< ... # <-- "long" edges + # + # this time, we orient the short and long edges in opposition + # the cycle structure of this graph is the same, but we need to reverse + # the long one in our representation. 
Also, we need to drop the size + # because our partitioning algorithm uses strongly connected components + # instead of separating graphs by their strong articulation points + + n = 100 + assert n % 2 == 0 + G = nx.DiGraph() + for v in range(n): + G.add_edge(v, (v + 1) % n) + if not v % 2: + G.add_edge((v + 2) % n, v) + + expected = [[*range(n - 2, -2, -2)]] + [ + [x % n for x in range(i, i + 3)] for i in range(0, n, 2) + ] + self.check_cycle_algorithm(G, expected, chordless=True) + self.check_cycle_algorithm( + G, [c for c in expected if len(c) <= 3], length_bound=3, chordless=True + ) + + def test_simple_cycles_acyclic_tournament(self): + n = 10 + G = nx.DiGraph((x, y) for x in range(n) for y in range(x)) + self.check_cycle_algorithm(G, []) + self.check_cycle_algorithm(G, [], chordless=True) + + for k in range(n + 1): + self.check_cycle_algorithm(G, [], length_bound=k) + self.check_cycle_algorithm(G, [], length_bound=k, chordless=True) + + def test_simple_cycles_graph(self): + testG = nx.cycle_graph(8) + cyc1 = tuple(range(8)) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.add_edge(4, -1) + nx.add_path(testG, [3, -2, -3, -4]) + self.check_cycle_algorithm(testG, [cyc1]) + + testG.update(nx.cycle_graph(range(8, 16))) + cyc2 = tuple(range(8, 16)) + self.check_cycle_algorithm(testG, [cyc1, cyc2]) + + testG.update(nx.cycle_graph(range(4, 12))) + cyc3 = tuple(range(4, 12)) + expected = { + (0, 1, 2, 3, 4, 5, 6, 7), # cyc1 + (8, 9, 10, 11, 12, 13, 14, 15), # cyc2 + (4, 5, 6, 7, 8, 9, 10, 11), # cyc3 + (4, 5, 6, 7, 8, 15, 14, 13, 12, 11), # cyc2 + cyc3 + (0, 1, 2, 3, 4, 11, 10, 9, 8, 7), # cyc1 + cyc3 + (0, 1, 2, 3, 4, 11, 12, 13, 14, 15, 8, 7), # cyc1 + cyc2 + cyc3 + } + self.check_cycle_algorithm(testG, expected) + assert len(expected) == (2**3 - 1) - 1 # 1 disjoint comb: cyc1 + cyc2 + + # Basis size = 5 (2 loops overlapping gives 5 small loops + # E + # / \ Note: A-F = 10-15 + # 1-2-3-4-5 + # / | | \ cyc1=012DAB -- left + # 0 D F 6 cyc2=234E -- top + # \ | | / cyc3=45678F -- right + # B-A-9-8-7 cyc4=89AC -- bottom + # \ / cyc5=234F89AD -- middle + # C + # + # combinations of 5 basis elements: 2^5 - 1 (one includes no cycles) + # + # disjoint combs: (11 total) not simple cycles + # Any pair not including cyc5 => choose(4, 2) = 6 + # Any triple not including cyc5 => choose(4, 3) = 4 + # Any quad not including cyc5 => choose(4, 4) = 1 + # + # we expect 31 - 11 = 20 simple cycles + # + testG = nx.cycle_graph(12) + testG.update(nx.cycle_graph([12, 10, 13, 2, 14, 4, 15, 8]).edges) + expected = (2**5 - 1) - 11 # 11 disjoint combinations + self.check_cycle_algorithm(testG, expected) + + def test_simple_cycles_bounded(self): + # iteratively construct a cluster of nested cycles running in the same direction + # there should be one cycle of every length + d = nx.DiGraph() + expected = [] + for n in range(10): + nx.add_cycle(d, range(n)) + expected.append(n) + for k, e in enumerate(expected): + self.check_cycle_algorithm(d, e, length_bound=k) + + # iteratively construct a path of undirected cycles, connected at articulation + # points. 
there should be one cycle of every length except 2: no digons + g = nx.Graph() + top = 0 + expected = [] + for n in range(10): + expected.append(n if n < 2 else n - 1) + if n == 2: + # no digons in undirected graphs + continue + nx.add_cycle(g, range(top, top + n)) + top += n + for k, e in enumerate(expected): + self.check_cycle_algorithm(g, e, length_bound=k) + + def test_simple_cycles_bound_corner_cases(self): + G = nx.cycle_graph(4) + DG = nx.cycle_graph(4, create_using=nx.DiGraph) + assert list(nx.simple_cycles(G, length_bound=0)) == [] + assert list(nx.simple_cycles(DG, length_bound=0)) == [] + assert list(nx.chordless_cycles(G, length_bound=0)) == [] + assert list(nx.chordless_cycles(DG, length_bound=0)) == [] + + def test_simple_cycles_bound_error(self): + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.simple_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.Graph() + for c in nx.chordless_cycles(G, -1): + assert False + + with pytest.raises(ValueError): + G = nx.DiGraph() + for c in nx.chordless_cycles(G, -1): + assert False + + def test_chordless_cycles_clique(self): + g_family = [self.K(n) for n in range(2, 15)] + expected = [0, 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + # directed cliques have as many digons as undirected graphs have edges + expected = [(n * n - n) // 2 for n in range(15)] + g_family = [self.D(n) for n in range(15)] + self.check_cycle_enumeration_integer_sequence( + g_family, expected, chordless=True + ) + + +# These tests might fail with hash randomization since they depend on +# edge_dfs. For more information, see the comments in: +# networkx/algorithms/traversal/tests/test_edgedfs.py + + +class TestFindCycle: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(-1, 0), (0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_graph_nocycle(self): + G = nx.Graph(self.edges) + pytest.raises(nx.exception.NetworkXNoCycle, nx.find_cycle, G, self.nodes) + + def test_graph_cycle(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_none(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 2), (2, 0)] + assert x == x_ + + def test_graph_orientation_original(self): + G = nx.Graph(self.edges) + G.add_edge(2, 0) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 0, FORWARD)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(nx.find_cycle(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1)] # or (1, 0, 2) + # Hash randomization...could be any edge. 
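+        # Only the edge endpoints are asserted below: the back-edge may be
+        # either of the two parallel (1, 0) edges (key 1 or key 2), and which
+        # one the traversal picks is not deterministic.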
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+
+    def test_multidigraph(self):
+        G = nx.MultiDiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes))
+        x_ = [(0, 1, 0), (1, 0, 0)]  # or (1, 0, 1)
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+
+    def test_digraph_ignore(self):
+        G = nx.DiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
+        x_ = [(0, 1, FORWARD), (1, 0, FORWARD)]
+        assert x == x_
+
+    def test_digraph_reverse(self):
+        G = nx.DiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="reverse"))
+        x_ = [(1, 0, REVERSE), (0, 1, REVERSE)]
+        assert x == x_
+
+    def test_multidigraph_ignore(self):
+        G = nx.MultiDiGraph(self.edges)
+        x = list(nx.find_cycle(G, self.nodes, orientation="ignore"))
+        x_ = [(0, 1, 0, FORWARD), (1, 0, 0, FORWARD)]  # or (1, 0, 1, 1)
+        assert x[0] == x_[0]
+        assert x[1][:2] == x_[1][:2]
+        assert x[1][3] == x_[1][3]
+
+    def test_multidigraph_ignore2(self):
+        # Loop traversed an edge while ignoring its orientation.
+        G = nx.MultiDiGraph([(0, 1), (1, 2), (1, 2)])
+        x = list(nx.find_cycle(G, [0, 1, 2], orientation="ignore"))
+        x_ = [(1, 2, 0, FORWARD), (1, 2, 1, REVERSE)]
+        assert x == x_
+
+    def test_multidigraph_original(self):
+        # Node 2 does not need to be searched again once it has been visited
+        # from 4. The goal here is to cover the case where 2 would be
+        # re-searched from 4 when 4 is visited for the first time (so we must
+        # make sure that 4 is not visited from 2, and hence, we respect the
+        # edge orientation).
+        G = nx.MultiDiGraph([(0, 1), (1, 2), (2, 3), (4, 2)])
+        pytest.raises(
+            nx.exception.NetworkXNoCycle,
+            nx.find_cycle,
+            G,
+            [0, 1, 2, 3, 4],
+            orientation="original",
+        )
+
+    def test_dag(self):
+        G = nx.DiGraph([(0, 1), (0, 2), (1, 2)])
+        pytest.raises(
+            nx.exception.NetworkXNoCycle, nx.find_cycle, G, orientation="original"
+        )
+        x = list(nx.find_cycle(G, orientation="ignore"))
+        assert x == [(0, 1, FORWARD), (1, 2, FORWARD), (0, 2, REVERSE)]
+
+    def test_prev_explored(self):
+        # https://github.com/networkx/networkx/issues/2323
+
+        G = nx.DiGraph()
+        G.add_edges_from([(1, 0), (2, 0), (1, 2), (2, 1)])
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
+        x = list(nx.find_cycle(G, 1))
+        x_ = [(1, 2), (2, 1)]
+        assert x == x_
+
+        x = list(nx.find_cycle(G, 2))
+        x_ = [(2, 1), (1, 2)]
+        assert x == x_
+
+        x = list(nx.find_cycle(G))
+        x_ = [(1, 2), (2, 1)]
+        assert x == x_
+
+    def test_no_cycle(self):
+        # https://github.com/networkx/networkx/issues/2439
+
+        G = nx.DiGraph()
+        G.add_edges_from([(1, 2), (2, 0), (3, 1), (3, 2)])
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G, source=0)
+        pytest.raises(nx.NetworkXNoCycle, nx.find_cycle, G)
+
+
+def assert_basis_equal(a, b):
+    assert sorted(a) == sorted(b)
+
+
+class TestMinimumCycleBasis:
+    @classmethod
+    def setup_class(cls):
+        T = nx.Graph()
+        nx.add_cycle(T, [1, 2, 3, 4], weight=1)
+        T.add_edge(2, 4, weight=5)
+        cls.diamond_graph = T
+
+    def test_unweighted_diamond(self):
+        mcb = nx.minimum_cycle_basis(self.diamond_graph)
+        assert_basis_equal(mcb, [[2, 4, 1], [3, 4, 2]])
+
+    def test_weighted_diamond(self):
+        mcb = nx.minimum_cycle_basis(self.diamond_graph, weight="weight")
+        assert_basis_equal(mcb, [[2, 4, 1], [4, 3, 2, 1]])
+
+    def test_dimensionality(self):
+        # checks |MCB|=|E|-|V|+|NC|
+        ntrial = 10
+        for seed in range(1234, 1234 + ntrial):
+            rg = nx.erdos_renyi_graph(10, 0.3, seed=seed)
+            nnodes = rg.number_of_nodes()
+            nedges = rg.number_of_edges()
+            ncomp = nx.number_connected_components(rg)
+
+            mcb = nx.minimum_cycle_basis(rg)
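+            # A spanning forest uses |V| - |NC| edges; each of the remaining
+            # |E| - |V| + |NC| edges closes exactly one independent cycle, so
+            # that count is the dimension of the cycle space (circuit rank).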
+            assert len(mcb) == nedges - nnodes + ncomp
+            check_independent(mcb)
+
+    def test_complete_graph(self):
+        cg = nx.complete_graph(5)
+        mcb = nx.minimum_cycle_basis(cg)
+        assert all(len(cycle) == 3 for cycle in mcb)
+        check_independent(mcb)
+
+    def test_tree_graph(self):
+        tg = nx.balanced_tree(3, 3)
+        assert not nx.minimum_cycle_basis(tg)
+
+    def test_petersen_graph(self):
+        G = nx.petersen_graph()
+        mcb = list(nx.minimum_cycle_basis(G))
+        expected = [
+            [4, 9, 7, 5, 0],
+            [1, 2, 3, 4, 0],
+            [1, 6, 8, 5, 0],
+            [4, 3, 8, 5, 0],
+            [1, 6, 9, 4, 0],
+            [1, 2, 7, 5, 0],
+        ]
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+        # check that order of the nodes is a path
+        for c in mcb:
+            assert all(G.has_edge(u, v) for u, v in nx.utils.pairwise(c, cyclic=True))
+        # check independence of the basis
+        check_independent(mcb)
+
+    def test_gh6787_variable_weighted_complete_graph(self):
+        N = 8
+        cg = nx.complete_graph(N)
+        cg.add_weighted_edges_from([(u, v, 9) for u, v in cg.edges])
+        cg.add_weighted_edges_from([(u, v, 1) for u, v in nx.cycle_graph(N).edges])
+        mcb = nx.minimum_cycle_basis(cg, weight="weight")
+        check_independent(mcb)
+
+    def test_gh6787_and_edge_attribute_names(self):
+        G = nx.cycle_graph(4)
+        G.add_weighted_edges_from([(0, 2, 10), (1, 3, 10)], weight="dist")
+        expected = [[1, 3, 0], [3, 2, 1, 0], [1, 2, 0]]
+        mcb = list(nx.minimum_cycle_basis(G, weight="dist"))
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+        # test without passing a weight even though weight attributes exist
+        expected = [[1, 3, 0], [1, 2, 0], [3, 2, 0]]
+        mcb = list(nx.minimum_cycle_basis(G))
+        assert len(mcb) == len(expected)
+        assert all(c in expected for c in mcb)
+
+
+class TestGirth:
+    @pytest.mark.parametrize(
+        ("G", "expected"),
+        (
+            (nx.chvatal_graph(), 4),
+            (nx.tutte_graph(), 4),
+            (nx.petersen_graph(), 5),
+            (nx.heawood_graph(), 6),
+            (nx.pappus_graph(), 6),
+            (nx.random_tree(10, seed=42), inf),
+            (nx.empty_graph(10), inf),
+            (nx.Graph(chain(cycle_edges(range(5)), cycle_edges(range(6, 10)))), 4),
+            (
+                nx.Graph(
+                    [
+                        (0, 6),
+                        (0, 8),
+                        (0, 9),
+                        (1, 8),
+                        (2, 8),
+                        (2, 9),
+                        (4, 9),
+                        (5, 9),
+                        (6, 8),
+                        (6, 9),
+                        (7, 8),
+                    ]
+                ),
+                3,
+            ),
+        ),
+    )
+    def test_girth(self, G, expected):
+        assert nx.girth(G) == expected
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_d_separation.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_d_separation.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94d4dd4cfb0a3edb9e664e5bb1bc37412966475
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_d_separation.py
@@ -0,0 +1,228 @@
+from itertools import combinations
+
+import pytest
+
+import networkx as nx
+
+
+def path_graph():
+    """Return a three node path graph."""
+    G = nx.path_graph(3, create_using=nx.DiGraph)
+    G.graph["name"] = "path"
+    nx.freeze(G)
+    return G
+
+
+def fork_graph():
+    """Return a three node fork graph."""
+    G = nx.DiGraph(name="fork")
+    G.add_edges_from([(0, 1), (0, 2)])
+    nx.freeze(G)
+    return G
+
+
+def collider_graph():
+    """Return a collider/v-structure graph with three nodes."""
+    G = nx.DiGraph(name="collider")
+    G.add_edges_from([(0, 2), (1, 2)])
+    nx.freeze(G)
+    return G
+
+
+def naive_bayes_graph():
+    """Return a simple Naive Bayes PGM graph."""
+    G = nx.DiGraph(name="naive_bayes")
+    G.add_edges_from([(0, 1), (0, 2), (0, 3), (0, 4)])
+    nx.freeze(G)
+    return G
+
+
+def asia_graph():
+    """Return the 'Asia' PGM graph."""
+    G = nx.DiGraph(name="asia")
+    G.add_edges_from(
+        [
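+            # The classic "chest clinic" network (Lauritzen & Spiegelhalter,
+            # 1988): two root causes, asia and smoking, whose downstream
+            # effects collide at the "either" node.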
("asia", "tuberculosis"), + ("smoking", "cancer"), + ("smoking", "bronchitis"), + ("tuberculosis", "either"), + ("cancer", "either"), + ("either", "xray"), + ("either", "dyspnea"), + ("bronchitis", "dyspnea"), + ] + ) + nx.freeze(G) + return G + + +@pytest.fixture(name="path_graph") +def path_graph_fixture(): + return path_graph() + + +@pytest.fixture(name="fork_graph") +def fork_graph_fixture(): + return fork_graph() + + +@pytest.fixture(name="collider_graph") +def collider_graph_fixture(): + return collider_graph() + + +@pytest.fixture(name="naive_bayes_graph") +def naive_bayes_graph_fixture(): + return naive_bayes_graph() + + +@pytest.fixture(name="asia_graph") +def asia_graph_fixture(): + return asia_graph() + + +@pytest.mark.parametrize( + "graph", + [path_graph(), fork_graph(), collider_graph(), naive_bayes_graph(), asia_graph()], +) +def test_markov_condition(graph): + """Test that the Markov condition holds for each PGM graph.""" + for node in graph.nodes: + parents = set(graph.predecessors(node)) + non_descendants = graph.nodes - nx.descendants(graph, node) - {node} - parents + assert nx.d_separated(graph, {node}, non_descendants, parents) + + +def test_path_graph_dsep(path_graph): + """Example-based test of d-separation for path_graph.""" + assert nx.d_separated(path_graph, {0}, {2}, {1}) + assert not nx.d_separated(path_graph, {0}, {2}, {}) + + +def test_fork_graph_dsep(fork_graph): + """Example-based test of d-separation for fork_graph.""" + assert nx.d_separated(fork_graph, {1}, {2}, {0}) + assert not nx.d_separated(fork_graph, {1}, {2}, {}) + + +def test_collider_graph_dsep(collider_graph): + """Example-based test of d-separation for collider_graph.""" + assert nx.d_separated(collider_graph, {0}, {1}, {}) + assert not nx.d_separated(collider_graph, {0}, {1}, {2}) + + +def test_naive_bayes_dsep(naive_bayes_graph): + """Example-based test of d-separation for naive_bayes_graph.""" + for u, v in combinations(range(1, 5), 2): + assert nx.d_separated(naive_bayes_graph, {u}, {v}, {0}) + assert not nx.d_separated(naive_bayes_graph, {u}, {v}, {}) + + +def test_asia_graph_dsep(asia_graph): + """Example-based test of d-separation for asia_graph.""" + assert nx.d_separated( + asia_graph, {"asia", "smoking"}, {"dyspnea", "xray"}, {"bronchitis", "either"} + ) + assert nx.d_separated( + asia_graph, {"tuberculosis", "cancer"}, {"bronchitis"}, {"smoking", "xray"} + ) + + +def test_undirected_graphs_are_not_supported(): + """ + Test that undirected graphs are not supported. + + d-separation and its related algorithms do not apply in + the case of undirected graphs. + """ + g = nx.path_graph(3, nx.Graph) + with pytest.raises(nx.NetworkXNotImplemented): + nx.d_separated(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXNotImplemented): + nx.is_minimal_d_separator(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXNotImplemented): + nx.minimal_d_separator(g, {0}, {1}) + + +def test_cyclic_graphs_raise_error(): + """ + Test that cycle graphs should cause erroring. + + This is because PGMs assume a directed acyclic graph. + """ + g = nx.cycle_graph(3, nx.DiGraph) + with pytest.raises(nx.NetworkXError): + nx.d_separated(g, {0}, {1}, {2}) + with pytest.raises(nx.NetworkXError): + nx.minimal_d_separator(g, 0, 1) + with pytest.raises(nx.NetworkXError): + nx.is_minimal_d_separator(g, 0, 1, {2}) + + +def test_invalid_nodes_raise_error(asia_graph): + """ + Test that graphs that have invalid nodes passed in raise errors. 
+ """ + with pytest.raises(nx.NodeNotFound): + nx.d_separated(asia_graph, {0}, {1}, {2}) + with pytest.raises(nx.NodeNotFound): + nx.is_minimal_d_separator(asia_graph, 0, 1, {2}) + with pytest.raises(nx.NodeNotFound): + nx.minimal_d_separator(asia_graph, 0, 1) + + +def test_minimal_d_separator(): + # Case 1: + # create a graph A -> B <- C + # B -> D -> E; + # B -> F; + # G -> E; + edge_list = [("A", "B"), ("C", "B"), ("B", "D"), ("D", "E"), ("B", "F"), ("G", "E")] + G = nx.DiGraph(edge_list) + assert not nx.d_separated(G, {"B"}, {"E"}, set()) + + # minimal set of the corresponding graph + # for B and E should be (D,) + Zmin = nx.minimal_d_separator(G, "B", "E") + + # the minimal separating set should pass the test for minimality + assert nx.is_minimal_d_separator(G, "B", "E", Zmin) + assert Zmin == {"D"} + + # Case 2: + # create a graph A -> B -> C + # B -> D -> C; + edge_list = [("A", "B"), ("B", "C"), ("B", "D"), ("D", "C")] + G = nx.DiGraph(edge_list) + assert not nx.d_separated(G, {"A"}, {"C"}, set()) + Zmin = nx.minimal_d_separator(G, "A", "C") + + # the minimal separating set should pass the test for minimality + assert nx.is_minimal_d_separator(G, "A", "C", Zmin) + assert Zmin == {"B"} + + Znotmin = Zmin.union({"D"}) + assert not nx.is_minimal_d_separator(G, "A", "C", Znotmin) + + +def test_minimal_d_separator_checks_dsep(): + """Test that is_minimal_d_separator checks for d-separation as well.""" + g = nx.DiGraph() + g.add_edges_from( + [ + ("A", "B"), + ("A", "E"), + ("B", "C"), + ("B", "D"), + ("D", "C"), + ("D", "F"), + ("E", "D"), + ("E", "F"), + ] + ) + + assert not nx.d_separated(g, {"C"}, {"F"}, {"D"}) + + # since {'D'} and {} are not d-separators, we return false + assert not nx.is_minimal_d_separator(g, "C", "F", {"D"}) + assert not nx.is_minimal_d_separator(g, "C", "F", {}) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dag.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dag.py new file mode 100644 index 0000000000000000000000000000000000000000..540c0c55e4d6bf840b347c7b2107c6303562e567 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dag.py @@ -0,0 +1,771 @@ +from collections import deque +from itertools import combinations, permutations + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, pairwise + + +# Recipe from the itertools documentation. +def _consume(iterator): + "Consume the iterator entirely." + # Feed the entire iterator into a zero-length deque. + deque(iterator, maxlen=0) + + +class TestDagLongestPath: + """Unit tests computing the longest path in a directed acyclic graph.""" + + def test_empty(self): + G = nx.DiGraph() + assert nx.dag_longest_path(G) == [] + + def test_unweighted1(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (3, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 5, 6] + + def test_unweighted2(self): + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 4, 5] + + def test_weighted(self): + G = nx.DiGraph() + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path(G) == [2, 3, 5] + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path, G) + + def test_unorderable_nodes(self): + """Tests that computing the longest path does not depend on + nodes being orderable. 
+ + For more information, see issue #1989. + + """ + # Create the directed path graph on four nodes in a diamond shape, + # with nodes represented as (unorderable) Python objects. + nodes = [object() for n in range(4)] + G = nx.DiGraph() + G.add_edge(nodes[0], nodes[1]) + G.add_edge(nodes[0], nodes[2]) + G.add_edge(nodes[2], nodes[3]) + G.add_edge(nodes[1], nodes[3]) + + # this will raise NotImplementedError when nodes need to be ordered + nx.dag_longest_path(G) + + def test_multigraph_unweighted(self): + edges = [(1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.MultiDiGraph(edges) + assert nx.dag_longest_path(G) == [1, 2, 3, 4, 5] + + def test_multigraph_weighted(self): + G = nx.MultiDiGraph() + edges = [ + (1, 2, 2), + (2, 3, 2), + (1, 3, 1), + (1, 3, 5), + (1, 3, 2), + ] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path(G) == [1, 3] + + def test_multigraph_weighted_default_weight(self): + G = nx.MultiDiGraph([(1, 2), (2, 3)]) # Unweighted edges + G.add_weighted_edges_from([(1, 3, 1), (1, 3, 5), (1, 3, 2)]) + + # Default value for default weight is 1 + assert nx.dag_longest_path(G) == [1, 3] + assert nx.dag_longest_path(G, default_weight=3) == [1, 2, 3] + + +class TestDagLongestPathLength: + """Unit tests for computing the length of a longest path in a + directed acyclic graph. + + """ + + def test_unweighted(self): + edges = [(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + edges = [(1, 2), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.DiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + # test degenerate graphs + G = nx.DiGraph() + G.add_node(1) + assert nx.dag_longest_path_length(G) == 0 + + def test_undirected_not_implemented(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.dag_longest_path_length, G) + + def test_weighted(self): + edges = [(1, 2, -5), (2, 3, 1), (3, 4, 1), (4, 5, 0), (3, 5, 4), (1, 6, 2)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path_length(G) == 5 + + def test_multigraph_unweighted(self): + edges = [(1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (1, 3), (1, 5), (3, 5)] + G = nx.MultiDiGraph(edges) + assert nx.dag_longest_path_length(G) == 4 + + def test_multigraph_weighted(self): + G = nx.MultiDiGraph() + edges = [ + (1, 2, 2), + (2, 3, 2), + (1, 3, 1), + (1, 3, 5), + (1, 3, 2), + ] + G.add_weighted_edges_from(edges) + assert nx.dag_longest_path_length(G) == 5 + + +class TestDAG: + @classmethod + def setup_class(cls): + pass + + def test_topological_sort1(self): + DG = nx.DiGraph([(1, 2), (1, 3), (2, 3)]) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 2, 3) + + DG.add_edge(3, 2) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + pytest.raises(nx.NetworkXUnfeasible, _consume, algorithm(DG)) + + DG.remove_edge(2, 3) + + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + assert tuple(algorithm(DG)) == (1, 3, 2) + + DG.remove_edge(3, 2) + + assert tuple(nx.topological_sort(DG)) in {(1, 2, 3), (1, 3, 2)} + assert tuple(nx.lexicographical_topological_sort(DG)) == (1, 2, 3) + + def test_is_directed_acyclic_graph(self): + G = nx.generators.complete_graph(2) + assert not nx.is_directed_acyclic_graph(G) + assert not nx.is_directed_acyclic_graph(G.to_directed()) + assert not nx.is_directed_acyclic_graph(nx.Graph([(3, 4), (4, 5)])) + assert 
nx.is_directed_acyclic_graph(nx.DiGraph([(3, 4), (4, 5)])) + + def test_topological_sort2(self): + DG = nx.DiGraph( + { + 1: [2], + 2: [3], + 3: [4], + 4: [5], + 5: [1], + 11: [12], + 12: [13], + 13: [14], + 14: [15], + } + ) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + assert not nx.is_directed_acyclic_graph(DG) + + DG.remove_edge(1, 2) + _consume(nx.topological_sort(DG)) + assert nx.is_directed_acyclic_graph(DG) + + def test_topological_sort3(self): + DG = nx.DiGraph() + DG.add_edges_from([(1, i) for i in range(2, 5)]) + DG.add_edges_from([(2, i) for i in range(5, 9)]) + DG.add_edges_from([(6, i) for i in range(9, 12)]) + DG.add_edges_from([(4, i) for i in range(12, 15)]) + + def validate(order): + assert isinstance(order, list) + assert set(order) == set(DG) + for u, v in combinations(order, 2): + assert not nx.has_path(DG, v, u) + + validate(list(nx.topological_sort(DG))) + + DG.add_edge(14, 1) + pytest.raises(nx.NetworkXUnfeasible, _consume, nx.topological_sort(DG)) + + def test_topological_sort4(self): + G = nx.Graph() + G.add_edge(1, 2) + # Only directed graphs can be topologically sorted. + pytest.raises(nx.NetworkXError, _consume, nx.topological_sort(G)) + + def test_topological_sort5(self): + G = nx.DiGraph() + G.add_edge(0, 1) + assert list(nx.topological_sort(G)) == [0, 1] + + def test_topological_sort6(self): + for algorithm in [nx.topological_sort, nx.lexicographical_topological_sort]: + + def runtime_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.add_edge(5 - x, 5) + + def unfeasible_error(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(4) + + def runtime_error2(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + first = True + for x in algorithm(DG): + if first: + first = False + DG.remove_node(2) + + pytest.raises(RuntimeError, runtime_error) + pytest.raises(RuntimeError, runtime_error2) + pytest.raises(nx.NetworkXUnfeasible, unfeasible_error) + + def test_all_topological_sorts_1(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 5)]) + assert list(nx.all_topological_sorts(DG)) == [[1, 2, 3, 4, 5]] + + def test_all_topological_sorts_2(self): + DG = nx.DiGraph([(1, 3), (2, 1), (2, 4), (4, 3), (4, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == [ + [2, 1, 4, 3, 5], + [2, 1, 4, 5, 3], + [2, 4, 1, 3, 5], + [2, 4, 1, 5, 3], + [2, 4, 5, 1, 3], + ] + + def test_all_topological_sorts_3(self): + def unfeasible(): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 4), (4, 2), (4, 5)]) + # convert to list to execute generator + list(nx.all_topological_sorts(DG)) + + def not_implemented(): + G = nx.Graph([(1, 2), (2, 3)]) + # convert to list to execute generator + list(nx.all_topological_sorts(G)) + + def not_implemented_2(): + G = nx.MultiGraph([(1, 2), (1, 2), (2, 3)]) + list(nx.all_topological_sorts(G)) + + pytest.raises(nx.NetworkXUnfeasible, unfeasible) + pytest.raises(nx.NetworkXNotImplemented, not_implemented) + pytest.raises(nx.NetworkXNotImplemented, not_implemented_2) + + def test_all_topological_sorts_4(self): + DG = nx.DiGraph() + for i in range(7): + DG.add_node(i) + assert sorted(map(list, permutations(DG.nodes))) == sorted( + nx.all_topological_sorts(DG) + ) + + def test_all_topological_sorts_multigraph_1(self): + DG = nx.MultiDiGraph([(1, 2), (1, 2), (2, 3), (3, 4), (3, 5), (3, 5), (3, 5)]) + assert sorted(nx.all_topological_sorts(DG)) == sorted( + [[1, 2, 3, 4, 5], [1, 2, 3, 5, 
4]] + ) + + def test_all_topological_sorts_multigraph_2(self): + N = 9 + edges = [] + for i in range(1, N): + edges.extend([(i, i + 1)] * i) + DG = nx.MultiDiGraph(edges) + assert list(nx.all_topological_sorts(DG)) == [list(range(1, N + 1))] + + def test_ancestors(self): + G = nx.DiGraph() + ancestors = nx.algorithms.dag.ancestors + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert ancestors(G, 6) == {1, 2, 4, 5} + assert ancestors(G, 3) == {1, 4} + assert ancestors(G, 1) == set() + pytest.raises(nx.NetworkXError, ancestors, G, 8) + + def test_descendants(self): + G = nx.DiGraph() + descendants = nx.algorithms.dag.descendants + G.add_edges_from([(1, 2), (1, 3), (4, 2), (4, 3), (4, 5), (2, 6), (5, 6)]) + assert descendants(G, 1) == {2, 3, 6} + assert descendants(G, 4) == {2, 3, 5, 6} + assert descendants(G, 3) == set() + pytest.raises(nx.NetworkXError, descendants, G, 8) + + def test_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(nx.transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = [(1, 2), (2, 1), (2, 3), (3, 2), (1, 3), (3, 1)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(sorted(nx.transitive_closure(G).edges()), solution) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"f": "b", "weight": i}) for i in range(k)) + H = nx.transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.transitive_closure(G, reflexive="wrong input") + + def test_reflexive_transitive_closure(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + solution = sorted([(1, 2), (2, 1), 
(2, 3), (3, 2), (1, 3), (3, 1)]) + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(sorted(nx.transitive_closure(G).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, False).edges()), soln) + assert edges_equal(sorted(nx.transitive_closure(G, None).edges()), solution) + assert edges_equal(sorted(nx.transitive_closure(G, True).edges()), soln) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + G = nx.MultiDiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + soln = sorted(solution + [(n, n) for n in G]) + assert edges_equal(nx.transitive_closure(G).edges(), solution) + assert edges_equal(nx.transitive_closure(G, False).edges(), solution) + assert edges_equal(nx.transitive_closure(G, True).edges(), soln) + assert edges_equal(nx.transitive_closure(G, None).edges(), solution) + + def test_transitive_closure_dag(self): + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + transitive_closure = nx.algorithms.dag.transitive_closure_dag + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4)]) + solution = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)] + assert edges_equal(transitive_closure(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_closure, G) + + # test if edge data is copied + G = nx.DiGraph([(1, 2, {"a": 3}), (2, 3, {"b": 0}), (3, 4)]) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + k = 10 + G = nx.DiGraph((i, i + 1, {"foo": "bar", "weight": i}) for i in range(k)) + H = transitive_closure(G) + for u, v in G.edges(): + assert G.get_edge_data(u, v) == H.get_edge_data(u, v) + + def test_transitive_reduction(self): + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (3, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.DiGraph([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4)]) + transitive_reduction = nx.algorithms.dag.transitive_reduction + solution = [(1, 2), (2, 3), (2, 4)] + assert edges_equal(transitive_reduction(G).edges(), solution) + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, transitive_reduction, G) + + def _check_antichains(self, solution, result): + sol = [frozenset(a) for a in solution] + res = [frozenset(a) for a in result] + assert set(sol) == set(res) + + def test_antichains(self): + antichains = nx.algorithms.dag.antichains + G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + solution = [[], [4], [3], [2], [1]] + 
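+        # In the chain 1 -> 2 -> 3 -> 4 every pair of nodes is comparable, so
+        # the only antichains are the empty set and the four singletons.
+        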
self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (3, 5), (5, 6), (5, 7)]) + solution = [ + [], + [4], + [7], + [7, 4], + [6], + [6, 4], + [6, 7], + [6, 7, 4], + [5], + [5, 4], + [3], + [3, 4], + [2], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph([(1, 2), (1, 3), (3, 4), (3, 5), (5, 6)]) + solution = [ + [], + [6], + [5], + [4], + [4, 6], + [4, 5], + [3], + [2], + [2, 6], + [2, 5], + [2, 4], + [2, 4, 6], + [2, 4, 5], + [2, 3], + [1], + ] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph({0: [1, 2], 1: [4], 2: [3], 3: [4]}) + solution = [[], [4], [3], [2], [1], [1, 3], [1, 2], [0]] + self._check_antichains(list(antichains(G)), solution) + G = nx.DiGraph() + self._check_antichains(list(antichains(G)), [[]]) + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + solution = [[], [0], [1], [1, 0], [2], [2, 0], [2, 1], [2, 1, 0]] + self._check_antichains(list(antichains(G)), solution) + + def f(x): + return list(antichains(x)) + + G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + pytest.raises(nx.NetworkXNotImplemented, f, G) + G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + pytest.raises(nx.NetworkXUnfeasible, f, G) + + def test_lexicographical_topological_sort(self): + G = nx.DiGraph([(1, 2), (2, 3), (1, 4), (1, 5), (2, 6)]) + assert list(nx.lexicographical_topological_sort(G)) == [1, 2, 3, 4, 5, 6] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: x)) == [ + 1, + 2, + 3, + 4, + 5, + 6, + ] + assert list(nx.lexicographical_topological_sort(G, key=lambda x: -x)) == [ + 1, + 5, + 4, + 2, + 6, + 3, + ] + + def test_lexicographical_topological_sort2(self): + """ + Check the case of two or more nodes with same key value. + Want to avoid exception raised due to comparing nodes directly. 
+ See Issue #3493 + """ + + class Test_Node: + def __init__(self, n): + self.label = n + self.priority = 1 + + def __repr__(self): + return f"Node({self.label})" + + def sorting_key(node): + return node.priority + + test_nodes = [Test_Node(n) for n in range(4)] + G = nx.DiGraph() + edges = [(0, 1), (0, 2), (0, 3), (2, 3)] + G.add_edges_from((test_nodes[a], test_nodes[b]) for a, b in edges) + + sorting = list(nx.lexicographical_topological_sort(G, key=sorting_key)) + assert sorting == test_nodes + + +def test_topological_generations(): + G = nx.DiGraph( + {1: [2, 3], 2: [4, 5], 3: [7], 4: [], 5: [6, 7], 6: [], 7: []} + ).reverse() + # order within each generation is inconsequential + generations = [sorted(gen) for gen in nx.topological_generations(G)] + expected = [[4, 6, 7], [3, 5], [2], [1]] + assert generations == expected + + MG = nx.MultiDiGraph(G.edges) + MG.add_edge(2, 1) + generations = [sorted(gen) for gen in nx.topological_generations(MG)] + assert generations == expected + + +def test_topological_generations_empty(): + G = nx.DiGraph() + assert list(nx.topological_generations(G)) == [] + + +def test_topological_generations_cycle(): + G = nx.DiGraph([[2, 1], [3, 1], [1, 2]]) + with pytest.raises(nx.NetworkXUnfeasible): + list(nx.topological_generations(G)) + + +def test_is_aperiodic_cycle(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle2(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6, 7]) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle3(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [3, 4, 5, 6]) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_cycle4(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 3) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_selfloop(): + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + G.add_edge(1, 1) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_raise(): + G = nx.Graph() + pytest.raises(nx.NetworkXError, nx.is_aperiodic, G) + + +def test_is_aperiodic_bipartite(): + # Bipartite graph + G = nx.DiGraph(nx.davis_southern_women_graph()) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_rary_tree(): + G = nx.full_rary_tree(3, 27, create_using=nx.DiGraph()) + assert not nx.is_aperiodic(G) + + +def test_is_aperiodic_disconnected(): + # disconnected graph + G = nx.DiGraph() + nx.add_cycle(G, [1, 2, 3, 4]) + nx.add_cycle(G, [5, 6, 7, 8]) + assert not nx.is_aperiodic(G) + G.add_edge(1, 3) + G.add_edge(5, 7) + assert nx.is_aperiodic(G) + + +def test_is_aperiodic_disconnected2(): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2]) + G.add_edge(3, 3) + assert not nx.is_aperiodic(G) + + +class TestDagToBranching: + """Unit tests for the :func:`networkx.dag_to_branching` function.""" + + def test_single_root(self): + """Tests that a directed acyclic graph with a single degree + zero node produces an arborescence. + + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4)]) + assert nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + def test_multiple_roots(self): + """Tests that a directed acyclic graph with multiple degree zero + nodes creates an arborescence with multiple (weakly) connected + components. 
+ + """ + G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3), (5, 2)]) + B = nx.dag_to_branching(G) + expected = nx.DiGraph([(0, 1), (1, 3), (0, 2), (2, 4), (5, 6), (6, 7)]) + assert nx.is_branching(B) + assert not nx.is_arborescence(B) + assert nx.is_isomorphic(B, expected) + + # # Attributes are not copied by this function. If they were, this would + # # be a good test to uncomment. + # def test_copy_attributes(self): + # """Tests that node attributes are copied in the branching.""" + # G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3)]) + # for v in G: + # G.node[v]['label'] = str(v) + # B = nx.dag_to_branching(G) + # # Determine the root node of the branching. + # root = next(v for v, d in B.in_degree() if d == 0) + # assert_equal(B.node[root]['label'], '0') + # children = B[root] + # # Get the left and right children, nodes 1 and 2, respectively. + # left, right = sorted(children, key=lambda v: B.node[v]['label']) + # assert_equal(B.node[left]['label'], '1') + # assert_equal(B.node[right]['label'], '2') + # # Get the left grandchild. + # children = B[left] + # assert_equal(len(children), 1) + # left_grandchild = arbitrary_element(children) + # assert_equal(B.node[left_grandchild]['label'], '3') + # # Get the right grandchild. + # children = B[right] + # assert_equal(len(children), 1) + # right_grandchild = arbitrary_element(children) + # assert_equal(B.node[right_grandchild]['label'], '3') + + def test_already_arborescence(self): + """Tests that a directed acyclic graph that is already an + arborescence produces an isomorphic arborescence as output. + + """ + A = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + B = nx.dag_to_branching(A) + assert nx.is_isomorphic(A, B) + + def test_already_branching(self): + """Tests that a directed acyclic graph that is already a + branching produces an isomorphic branching as output. 
+ + """ + T1 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + T2 = nx.balanced_tree(2, 2, create_using=nx.DiGraph()) + G = nx.disjoint_union(T1, T2) + B = nx.dag_to_branching(G) + assert nx.is_isomorphic(G, B) + + def test_not_acyclic(self): + """Tests that a non-acyclic graph causes an exception.""" + with pytest.raises(nx.HasACycle): + G = nx.DiGraph(pairwise("abc", cyclic=True)) + nx.dag_to_branching(G) + + def test_undirected(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.Graph()) + + def test_multigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiGraph()) + + def test_multidigraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + nx.dag_to_branching(nx.MultiDiGraph()) + + +def test_ancestors_descendants_undirected(): + """Regression test to ensure ancestors and descendants work as expected on + undirected graphs.""" + G = nx.path_graph(5) + nx.ancestors(G, 2) == nx.descendants(G, 2) == {0, 1, 3, 4} + + +def test_compute_v_structures_raise(): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.compute_v_structures, G) + + +def test_compute_v_structures(): + edges = [(0, 1), (0, 2), (3, 2)] + G = nx.DiGraph(edges) + + v_structs = set(nx.compute_v_structures(G)) + assert len(v_structs) == 1 + assert (0, 2, 3) in v_structs + + edges = [("A", "B"), ("C", "B"), ("B", "D"), ("D", "E"), ("G", "E")] + G = nx.DiGraph(edges) + v_structs = set(nx.compute_v_structures(G)) + assert len(v_structs) == 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_measures.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_measures.py new file mode 100644 index 0000000000000000000000000000000000000000..d4b5cf2f1193b0c9b8f898b90096d32142bf1105 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_measures.py @@ -0,0 +1,668 @@ +from random import Random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.distance_measures import _extrema_bounding + + +def test__extrema_bounding_invalid_compute_kwarg(): + G = nx.path_graph(3) + with pytest.raises(ValueError, match="compute must be one of"): + _extrema_bounding(G, compute="spam") + + +class TestDistance: + def setup_method(self): + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + self.G = G + + def test_eccentricity(self): + assert nx.eccentricity(self.G, 1) == 6 + e = nx.eccentricity(self.G) + assert e[1] == 6 + + sp = dict(nx.shortest_path_length(self.G)) + e = nx.eccentricity(self.G, sp=sp) + assert e[1] == 6 + + e = nx.eccentricity(self.G, v=1) + assert e == 6 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1]) + assert e[1] == 6 + e = nx.eccentricity(self.G, v=[1, 2]) + assert e[1] == 6 + + # test against graph with one node + G = nx.path_graph(1) + e = nx.eccentricity(G) + assert e[0] == 0 + e = nx.eccentricity(G, v=0) + assert e == 0 + pytest.raises(nx.NetworkXError, nx.eccentricity, G, 1) + + # test against empty graph + G = nx.empty_graph() + e = nx.eccentricity(G) + assert e == {} + + def test_diameter(self): + assert nx.diameter(self.G) == 6 + + def test_radius(self): + assert nx.radius(self.G) == 4 + + def test_periphery(self): + assert set(nx.periphery(self.G)) == {1, 4, 13, 16} + + def test_center(self): + assert set(nx.center(self.G)) == {6, 7, 10, 11} + + def test_bound_diameter(self): + assert nx.diameter(self.G, usebounds=True) == 6 + + 
def test_bound_radius(self): + assert nx.radius(self.G, usebounds=True) == 4 + + def test_bound_periphery(self): + result = {1, 4, 13, 16} + assert set(nx.periphery(self.G, usebounds=True)) == result + + def test_bound_center(self): + result = {6, 7, 10, 11} + assert set(nx.center(self.G, usebounds=True)) == result + + def test_radius_exception(self): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(3, 4) + pytest.raises(nx.NetworkXError, nx.diameter, G) + + def test_eccentricity_infinite(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G) + + def test_eccentricity_undirected_not_connected(self): + with pytest.raises(nx.NetworkXError): + G = nx.Graph([(1, 2), (3, 4)]) + e = nx.eccentricity(G, sp=1) + + def test_eccentricity_directed_weakly_connected(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2), (1, 3)]) + nx.eccentricity(DG) + + +class TestWeightedDistance: + def setup_method(self): + G = nx.Graph() + G.add_edge(0, 1, weight=0.6, cost=0.6, high_cost=6) + G.add_edge(0, 2, weight=0.2, cost=0.2, high_cost=2) + G.add_edge(2, 3, weight=0.1, cost=0.1, high_cost=1) + G.add_edge(2, 4, weight=0.7, cost=0.7, high_cost=7) + G.add_edge(2, 5, weight=0.9, cost=0.9, high_cost=9) + G.add_edge(1, 5, weight=0.3, cost=0.3, high_cost=3) + self.G = G + self.weight_fn = lambda v, u, e: 2 + + def test_eccentricity_weight_None(self): + assert nx.eccentricity(self.G, 1, weight=None) == 3 + e = nx.eccentricity(self.G, weight=None) + assert e[1] == 3 + + e = nx.eccentricity(self.G, v=1, weight=None) + assert e == 3 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight=None) + assert e[1] == 3 + e = nx.eccentricity(self.G, v=[1, 2], weight=None) + assert e[1] == 3 + + def test_eccentricity_weight_attr(self): + assert nx.eccentricity(self.G, 1, weight="weight") == 1.5 + e = nx.eccentricity(self.G, weight="weight") + assert ( + e + == nx.eccentricity(self.G, weight="cost") + != nx.eccentricity(self.G, weight="high_cost") + ) + assert e[1] == 1.5 + + e = nx.eccentricity(self.G, v=1, weight="weight") + assert e == 1.5 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight="weight") + assert e[1] == 1.5 + e = nx.eccentricity(self.G, v=[1, 2], weight="weight") + assert e[1] == 1.5 + + def test_eccentricity_weight_fn(self): + assert nx.eccentricity(self.G, 1, weight=self.weight_fn) == 6 + e = nx.eccentricity(self.G, weight=self.weight_fn) + assert e[1] == 6 + + e = nx.eccentricity(self.G, v=1, weight=self.weight_fn) + assert e == 6 + + # This behavior changed in version 1.8 (ticket #739) + e = nx.eccentricity(self.G, v=[1, 1], weight=self.weight_fn) + assert e[1] == 6 + e = nx.eccentricity(self.G, v=[1, 2], weight=self.weight_fn) + assert e[1] == 6 + + def test_diameter_weight_None(self): + assert nx.diameter(self.G, weight=None) == 3 + + def test_diameter_weight_attr(self): + assert ( + nx.diameter(self.G, weight="weight") + == nx.diameter(self.G, weight="cost") + == 1.6 + != nx.diameter(self.G, weight="high_cost") + ) + + def test_diameter_weight_fn(self): + assert nx.diameter(self.G, weight=self.weight_fn) == 6 + + def test_radius_weight_None(self): + assert pytest.approx(nx.radius(self.G, weight=None)) == 2 + + def test_radius_weight_attr(self): + assert ( + pytest.approx(nx.radius(self.G, weight="weight")) + == pytest.approx(nx.radius(self.G, weight="cost")) + == 0.9 + != nx.radius(self.G, weight="high_cost") + ) + + def 
test_radius_weight_fn(self): + assert nx.radius(self.G, weight=self.weight_fn) == 4 + + def test_periphery_weight_None(self): + for v in set(nx.periphery(self.G, weight=None)): + assert nx.eccentricity(self.G, v, weight=None) == nx.diameter( + self.G, weight=None + ) + + def test_periphery_weight_attr(self): + periphery = set(nx.periphery(self.G, weight="weight")) + assert ( + periphery + == set(nx.periphery(self.G, weight="cost")) + == set(nx.periphery(self.G, weight="high_cost")) + ) + for v in periphery: + assert ( + nx.eccentricity(self.G, v, weight="high_cost") + != nx.eccentricity(self.G, v, weight="weight") + == nx.eccentricity(self.G, v, weight="cost") + == nx.diameter(self.G, weight="weight") + == nx.diameter(self.G, weight="cost") + != nx.diameter(self.G, weight="high_cost") + ) + assert nx.eccentricity(self.G, v, weight="high_cost") == nx.diameter( + self.G, weight="high_cost" + ) + + def test_periphery_weight_fn(self): + for v in set(nx.periphery(self.G, weight=self.weight_fn)): + assert nx.eccentricity(self.G, v, weight=self.weight_fn) == nx.diameter( + self.G, weight=self.weight_fn + ) + + def test_center_weight_None(self): + for v in set(nx.center(self.G, weight=None)): + assert pytest.approx(nx.eccentricity(self.G, v, weight=None)) == nx.radius( + self.G, weight=None + ) + + def test_center_weight_attr(self): + center = set(nx.center(self.G, weight="weight")) + assert ( + center + == set(nx.center(self.G, weight="cost")) + != set(nx.center(self.G, weight="high_cost")) + ) + for v in center: + assert ( + nx.eccentricity(self.G, v, weight="high_cost") + != pytest.approx(nx.eccentricity(self.G, v, weight="weight")) + == pytest.approx(nx.eccentricity(self.G, v, weight="cost")) + == nx.radius(self.G, weight="weight") + == nx.radius(self.G, weight="cost") + != nx.radius(self.G, weight="high_cost") + ) + assert nx.eccentricity(self.G, v, weight="high_cost") == nx.radius( + self.G, weight="high_cost" + ) + + def test_center_weight_fn(self): + for v in set(nx.center(self.G, weight=self.weight_fn)): + assert nx.eccentricity(self.G, v, weight=self.weight_fn) == nx.radius( + self.G, weight=self.weight_fn + ) + + def test_bound_diameter_weight_None(self): + assert nx.diameter(self.G, usebounds=True, weight=None) == 3 + + def test_bound_diameter_weight_attr(self): + assert ( + nx.diameter(self.G, usebounds=True, weight="high_cost") + != nx.diameter(self.G, usebounds=True, weight="weight") + == nx.diameter(self.G, usebounds=True, weight="cost") + == 1.6 + != nx.diameter(self.G, usebounds=True, weight="high_cost") + ) + assert nx.diameter(self.G, usebounds=True, weight="high_cost") == nx.diameter( + self.G, usebounds=True, weight="high_cost" + ) + + def test_bound_diameter_weight_fn(self): + assert nx.diameter(self.G, usebounds=True, weight=self.weight_fn) == 6 + + def test_bound_radius_weight_None(self): + assert pytest.approx(nx.radius(self.G, usebounds=True, weight=None)) == 2 + + def test_bound_radius_weight_attr(self): + assert ( + nx.radius(self.G, usebounds=True, weight="high_cost") + != pytest.approx(nx.radius(self.G, usebounds=True, weight="weight")) + == pytest.approx(nx.radius(self.G, usebounds=True, weight="cost")) + == 0.9 + != nx.radius(self.G, usebounds=True, weight="high_cost") + ) + assert nx.radius(self.G, usebounds=True, weight="high_cost") == nx.radius( + self.G, usebounds=True, weight="high_cost" + ) + + def test_bound_radius_weight_fn(self): + assert nx.radius(self.G, usebounds=True, weight=self.weight_fn) == 4 + + def test_bound_periphery_weight_None(self): + result 
= {1, 3, 4} + assert set(nx.periphery(self.G, usebounds=True, weight=None)) == result + + def test_bound_periphery_weight_attr(self): + result = {4, 5} + assert ( + set(nx.periphery(self.G, usebounds=True, weight="weight")) + == set(nx.periphery(self.G, usebounds=True, weight="cost")) + == result + ) + + def test_bound_periphery_weight_fn(self): + result = {1, 3, 4} + assert ( + set(nx.periphery(self.G, usebounds=True, weight=self.weight_fn)) == result + ) + + def test_bound_center_weight_None(self): + result = {0, 2, 5} + assert set(nx.center(self.G, usebounds=True, weight=None)) == result + + def test_bound_center_weight_attr(self): + result = {0} + assert ( + set(nx.center(self.G, usebounds=True, weight="weight")) + == set(nx.center(self.G, usebounds=True, weight="cost")) + == result + ) + + def test_bound_center_weight_fn(self): + result = {0, 2, 5} + assert set(nx.center(self.G, usebounds=True, weight=self.weight_fn)) == result + + +class TestResistanceDistance: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def setup_method(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + self.G = G + + def test_resistance_distance_directed_graph(self): + G = nx.DiGraph() + with pytest.raises(nx.NetworkXNotImplemented): + nx.resistance_distance(G) + + def test_resistance_distance_empty(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(G) + + def test_resistance_distance_not_connected(self): + with pytest.raises(nx.NetworkXError): + self.G.add_node(5) + nx.resistance_distance(self.G, 1, 5) + + def test_resistance_distance_nodeA_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 9, 1) + + def test_resistance_distance_nodeB_not_in_graph(self): + with pytest.raises(nx.NetworkXError): + nx.resistance_distance(self.G, 1, 9) + + def test_resistance_distance(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + 4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_noinv(self): + rd = nx.resistance_distance(self.G, 1, 3, "weight", False) + test_data = 1 / (1 / (1 / 2 + 1 / 4) + 1 / (1 / 1 + 1 / 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_resistance_distance_no_weight(self): + rd = nx.resistance_distance(self.G, 1, 3) + assert round(rd, 5) == 1 + + def test_resistance_distance_neg_weight(self): + self.G[2][3]["weight"] = -4 + rd = nx.resistance_distance(self.G, 1, 3, "weight", True) + test_data = 1 / (1 / (2 + -4) + 1 / (1 + 3)) + assert round(rd, 5) == round(test_data, 5) + + def test_multigraph(self): + G = nx.MultiGraph() + G.add_edge(1, 2, weight=2) + G.add_edge(2, 3, weight=4) + G.add_edge(3, 4, weight=1) + G.add_edge(1, 4, weight=3) + rd = nx.resistance_distance(G, 1, 3, "weight", True) + assert np.isclose(rd, 1 / (1 / (2 + 4) + 1 / (1 + 3))) + + def test_resistance_distance_div0(self): + with pytest.raises(ZeroDivisionError): + self.G[1][2]["weight"] = 0 + nx.resistance_distance(self.G, 1, 3, "weight") + + def test_resistance_distance_same_node(self): + assert nx.resistance_distance(self.G, 1, 1) == 0 + + def test_resistance_distance_only_nodeA(self): + rd = nx.resistance_distance(self.G, nodeA=1) + test_data = {} + test_data[1] = 0 + test_data[2] = 0.75 + test_data[3] = 1 + test_data[4] = 0.75 + assert type(rd) == dict + assert sorted(rd.keys()) == sorted(test_data.keys()) + for key in rd: 
+ assert np.isclose(rd[key], test_data[key]) + + def test_resistance_distance_only_nodeB(self): + rd = nx.resistance_distance(self.G, nodeB=1) + test_data = {} + test_data[1] = 0 + test_data[2] = 0.75 + test_data[3] = 1 + test_data[4] = 0.75 + assert type(rd) == dict + assert sorted(rd.keys()) == sorted(test_data.keys()) + for key in rd: + assert np.isclose(rd[key], test_data[key]) + + def test_resistance_distance_all(self): + rd = nx.resistance_distance(self.G) + assert type(rd) == dict + assert round(rd[1][3], 5) == 1 + + +class TestBarycenter: + """Test :func:`networkx.algorithms.distance_measures.barycenter`.""" + + def barycenter_as_subgraph(self, g, **kwargs): + """Return the subgraph induced on the barycenter of g""" + b = nx.barycenter(g, **kwargs) + assert isinstance(b, list) + assert set(b) <= set(g) + return g.subgraph(b) + + def test_must_be_connected(self): + pytest.raises(nx.NetworkXNoPath, nx.barycenter, nx.empty_graph(5)) + + def test_sp_kwarg(self): + # Complete graph K_5. Normally it works... + K_5 = nx.complete_graph(5) + sp = dict(nx.shortest_path_length(K_5)) + assert nx.barycenter(K_5, sp=sp) == list(K_5) + + # ...but not with the weight argument + for u, v, data in K_5.edges.data(): + data["weight"] = 1 + pytest.raises(ValueError, nx.barycenter, K_5, sp=sp, weight="weight") + + # ...and a corrupted sp can make it seem like K_5 is disconnected + del sp[0][1] + pytest.raises(nx.NetworkXNoPath, nx.barycenter, K_5, sp=sp) + + def test_trees(self): + """The barycenter of a tree is a single vertex or an edge. + + See [West01]_, p. 78. + """ + prng = Random(0xDEADBEEF) + for i in range(50): + RT = nx.random_labeled_tree(prng.randint(1, 75), seed=prng) + b = self.barycenter_as_subgraph(RT) + if len(b) == 2: + assert b.size() == 1 + else: + assert len(b) == 1 + assert b.size() == 0 + + def test_this_one_specific_tree(self): + """Test the tree pictured at the bottom of [West01]_, p. 
78.""" + g = nx.Graph( + { + "a": ["b"], + "b": ["a", "x"], + "x": ["b", "y"], + "y": ["x", "z"], + "z": ["y", 0, 1, 2, 3, 4], + 0: ["z"], + 1: ["z"], + 2: ["z"], + 3: ["z"], + 4: ["z"], + } + ) + b = self.barycenter_as_subgraph(g, attr="barycentricity") + assert list(b) == ["z"] + assert not b.edges + expected_barycentricity = { + 0: 23, + 1: 23, + 2: 23, + 3: 23, + 4: 23, + "a": 35, + "b": 27, + "x": 21, + "y": 17, + "z": 15, + } + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity"] == barycentricity + + # Doubling weights should do nothing but double the barycentricities + for edge in g.edges: + g.edges[edge]["weight"] = 2 + b = self.barycenter_as_subgraph(g, weight="weight", attr="barycentricity2") + assert list(b) == ["z"] + assert not b.edges + for node, barycentricity in expected_barycentricity.items(): + assert g.nodes[node]["barycentricity2"] == barycentricity * 2 + + +class TestKemenyConstant: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + + def setup_method(self): + G = nx.Graph() + w12 = 2 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + self.G = G + + def test_kemeny_constant_directed(self): + G = nx.DiGraph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + with pytest.raises(nx.NetworkXNotImplemented): + nx.kemeny_constant(G) + + def test_kemeny_constant_not_connected(self): + self.G.add_node(5) + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(self.G) + + def test_kemeny_constant_no_nodes(self): + G = nx.Graph() + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(G) + + def test_kemeny_constant_negative_weight(self): + G = nx.Graph() + w12 = 2 + w13 = 3 + w23 = -10 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + with pytest.raises(nx.NetworkXError): + nx.kemeny_constant(G, weight="weight") + + def test_kemeny_constant(self): + K = nx.kemeny_constant(self.G, weight="weight") + w12 = 2 + w13 = 3 + w23 = 4 + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_no_weight(self): + K = nx.kemeny_constant(self.G) + assert np.isclose(K, 4 / 3) + + def test_kemeny_constant_multigraph(self): + G = nx.MultiGraph() + w12_1 = 2 + w12_2 = 1 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12_1) + G.add_edge(1, 2, weight=w12_2) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + w12 = w12_1 + w12_2 + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_weight0(self): + G = nx.Graph() + w12 = 0 + w13 = 3 + w23 = 4 + G.add_edge(1, 2, weight=w12) + G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + test_data = ( + 3 + / 2 + * (w12 + w13) + * (w12 + w23) + * (w13 + w23) + / ( + w12**2 * (w13 + w23) + + w13**2 * (w12 + w23) + + w23**2 * (w12 + w13) + + 3 * w12 * w13 * w23 + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_selfloop(self): + G = nx.Graph() + w11 = 1 + w12 = 2 + w13 = 3 + w23 = 4 + G.add_edge(1, 1, weight=w11) + G.add_edge(1, 2, weight=w12) + 
G.add_edge(1, 3, weight=w13) + G.add_edge(2, 3, weight=w23) + K = nx.kemeny_constant(G, weight="weight") + test_data = ( + (2 * w11 + 3 * w12 + 3 * w13) + * (w12 + w23) + * (w13 + w23) + / ( + (w12 * w13 + w12 * w23 + w13 * w23) + * (w11 + 2 * w12 + 2 * w13 + 2 * w23) + ) + ) + assert np.isclose(K, test_data) + + def test_kemeny_constant_complete_bipartite_graph(self): + # Theorem 1 in https://www.sciencedirect.com/science/article/pii/S0166218X20302912 + n1 = 5 + n2 = 4 + G = nx.complete_bipartite_graph(n1, n2) + K = nx.kemeny_constant(G) + assert np.isclose(K, n1 + n2 - 3 / 2) + + def test_kemeny_constant_path_graph(self): + # Theorem 2 in https://www.sciencedirect.com/science/article/pii/S0166218X20302912 + n = 10 + G = nx.path_graph(n) + K = nx.kemeny_constant(G) + assert np.isclose(K, n**2 / 3 - 2 * n / 3 + 1 / 2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_regular.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_regular.py new file mode 100644 index 0000000000000000000000000000000000000000..d336b1882147f7fdae2bbfb81071164f97afa207 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_distance_regular.py @@ -0,0 +1,66 @@ +import networkx as nx +from networkx import is_strongly_regular + + +class TestDistanceRegular: + def test_is_distance_regular(self): + assert nx.is_distance_regular(nx.icosahedral_graph()) + assert nx.is_distance_regular(nx.petersen_graph()) + assert nx.is_distance_regular(nx.cubical_graph()) + assert nx.is_distance_regular(nx.complete_bipartite_graph(3, 3)) + assert nx.is_distance_regular(nx.tetrahedral_graph()) + assert nx.is_distance_regular(nx.dodecahedral_graph()) + assert nx.is_distance_regular(nx.pappus_graph()) + assert nx.is_distance_regular(nx.heawood_graph()) + assert nx.is_distance_regular(nx.cycle_graph(3)) + # no distance regular + assert not nx.is_distance_regular(nx.path_graph(4)) + + def test_not_connected(self): + G = nx.cycle_graph(4) + nx.add_cycle(G, [5, 6, 7]) + assert not nx.is_distance_regular(G) + + def test_global_parameters(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 0, 1), (1, 1, 0)] + b, c = nx.intersection_array(nx.cycle_graph(3)) + g = nx.global_parameters(b, c) + assert list(g) == [(0, 0, 2), (1, 1, 0)] + + def test_intersection_array(self): + b, c = nx.intersection_array(nx.cycle_graph(5)) + assert b == [2, 1] + assert c == [1, 1] + b, c = nx.intersection_array(nx.dodecahedral_graph()) + assert b == [3, 2, 1, 1, 1] + assert c == [1, 1, 1, 2, 3] + b, c = nx.intersection_array(nx.icosahedral_graph()) + assert b == [5, 2, 1] + assert c == [1, 2, 5] + + +class TestStronglyRegular: + """Unit tests for the :func:`~networkx.is_strongly_regular` + function. + + """ + + def test_cycle_graph(self): + """Tests that the cycle graph on five vertices is strongly + regular. 
+ + """ + G = nx.cycle_graph(5) + assert is_strongly_regular(G) + + def test_petersen_graph(self): + """Tests that the Petersen graph is strongly regular.""" + G = nx.petersen_graph() + assert is_strongly_regular(G) + + def test_path_graph(self): + """Tests that the path graph is not strongly regular.""" + G = nx.path_graph(4) + assert not is_strongly_regular(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominance.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominance.py new file mode 100644 index 0000000000000000000000000000000000000000..f026e4b0a481ab6ad3f104926297ffab33bf1fa9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominance.py @@ -0,0 +1,285 @@ +import pytest + +import networkx as nx + + +class TestImmediateDominators: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.immediate_dominators, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.immediate_dominators, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.immediate_dominators(G, 0) == {0: 0} + G.add_edge(0, 0) + assert nx.immediate_dominators(G, 0) == {0: 0} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, 0) == {i: max(i - 1, 0) for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.immediate_dominators(G, n // 2) == { + i: max(i - 1, n // 2) for i in range(n // 2, n) + } + + def test_irreducible1(self): + # Graph taken from Figure 2 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert nx.immediate_dominators(G, 5) == {i: 5 for i in range(1, 6)} + + def test_irreducible2(self): + # Graph taken from Figure 4 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 6) + assert result == {i: 6 for i in range(1, 7)} + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 1) + assert result == {1: 1, 2: 1, 3: 2, 4: 2, 5: 2, 6: 2} + # Test postdominance. + result = nx.immediate_dominators(G.reverse(copy=False), 6) + assert result == {1: 2, 2: 6, 3: 5, 4: 5, 5: 2, 6: 6} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + result = nx.immediate_dominators(G, 0) + assert result == {0: 0, 1: 0, 2: 1, 3: 1, 4: 3, 5: 4, 6: 4, 7: 1} + # Test postdominance. 
+ result = nx.immediate_dominators(G.reverse(copy=False), 7) + assert result == {0: 1, 1: 7, 2: 7, 3: 4, 4: 5, 5: 7, 6: 4, 7: 7} + + +class TestDominanceFrontiers: + def test_exceptions(self): + G = nx.Graph() + G.add_node(0) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.MultiGraph(G) + pytest.raises(nx.NetworkXNotImplemented, nx.dominance_frontiers, G, 0) + G = nx.DiGraph([[0, 0]]) + pytest.raises(nx.NetworkXError, nx.dominance_frontiers, G, 1) + + def test_singleton(self): + G = nx.DiGraph() + G.add_node(0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + G.add_edge(0, 0) + assert nx.dominance_frontiers(G, 0) == {0: set()} + + def test_path(self): + n = 5 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_cycle(self): + n = 5 + G = nx.cycle_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, 0) == {i: set() for i in range(n)} + + def test_unreachable(self): + n = 5 + assert n > 1 + G = nx.path_graph(n, create_using=nx.DiGraph()) + assert nx.dominance_frontiers(G, n // 2) == {i: set() for i in range(n // 2, n)} + + def test_irreducible1(self): + # Graph taken from Figure 2 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (3, 2), (4, 1), (5, 3), (5, 4)] + G = nx.DiGraph(edges) + assert dict(nx.dominance_frontiers(G, 5).items()) == { + 1: {2}, + 2: {1}, + 3: {2}, + 4: {1}, + 5: set(), + } + + def test_irreducible2(self): + # Graph taken from Figure 4 of + # K. D. Cooper, T. J. Harvey, and K. Kennedy. + # A simple, fast dominance algorithm. + # Software Practice & Experience, 4:110, 2001. + edges = [(1, 2), (2, 1), (2, 3), (3, 2), (4, 2), (4, 3), (5, 1), (6, 4), (6, 5)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 6) == { + 1: {2}, + 2: {1, 3}, + 3: {2}, + 4: {2, 3}, + 5: {1}, + 6: set(), + } + + def test_domrel_png(self): + # Graph taken from https://commons.wikipedia.org/wiki/File:Domrel.png + edges = [(1, 2), (2, 3), (2, 4), (2, 6), (3, 5), (4, 5), (5, 2)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 1) == { + 1: set(), + 2: {2}, + 3: {5}, + 4: {5}, + 5: {2}, + 6: set(), + } + # Test postdominance. + result = nx.dominance_frontiers(G.reverse(copy=False), 6) + assert result == {1: set(), 2: {2}, 3: {2}, 4: {2}, 5: {2}, 6: set()} + + def test_boost_example(self): + # Graph taken from Figure 1 of + # http://www.boost.org/doc/libs/1_56_0/libs/graph/doc/lengauer_tarjan_dominator.htm + edges = [(0, 1), (1, 2), (1, 3), (2, 7), (3, 4), (4, 5), (4, 6), (5, 7), (6, 4)] + G = nx.DiGraph(edges) + assert nx.dominance_frontiers(G, 0) == { + 0: set(), + 1: set(), + 2: {7}, + 3: {7}, + 4: {4, 7}, + 5: {7}, + 6: {4}, + 7: set(), + } + # Test postdominance. 
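+ # (Editor's note: frontiers computed on the reversed graph are postdominance + # frontiers; in compiler terms these essentially give the control-dependence + # relation.)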
+ result = nx.dominance_frontiers(G.reverse(copy=False), 7) + expected = { + 0: set(), + 1: set(), + 2: {1}, + 3: {1}, + 4: {1, 4}, + 5: {1}, + 6: {4}, + 7: set(), + } + assert result == expected + + def test_discard_issue(self): + # https://github.com/networkx/networkx/issues/2071 + g = nx.DiGraph() + g.add_edges_from( + [ + ("b0", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "b1"), + ("b1", "b5"), + ("b5", "b6"), + ("b5", "b8"), + ("b6", "b7"), + ("b8", "b7"), + ("b7", "b3"), + ("b3", "b4"), + ] + ) + df = nx.dominance_frontiers(g, "b0") + assert df == { + "b4": set(), + "b5": {"b3"}, + "b6": {"b7"}, + "b7": {"b3"}, + "b0": set(), + "b1": {"b1"}, + "b2": {"b3"}, + "b3": {"b1"}, + "b8": {"b7"}, + } + + def test_loop(self): + g = nx.DiGraph() + g.add_edges_from([("a", "b"), ("b", "c"), ("b", "a")]) + df = nx.dominance_frontiers(g, "a") + assert df == {"a": set(), "b": set(), "c": set()} + + def test_missing_immediate_doms(self): + # see https://github.com/networkx/networkx/issues/2070 + g = nx.DiGraph() + edges = [ + ("entry_1", "b1"), + ("b1", "b2"), + ("b2", "b3"), + ("b3", "exit"), + ("entry_2", "b3"), + ] + + # entry_1 + # | + # b1 + # | + # b2 entry_2 + # | / + # b3 + # | + # exit + + g.add_edges_from(edges) + # formerly raised KeyError on entry_2 when parsing b3 + # because entry_2 does not have immediate doms (no path) + nx.dominance_frontiers(g, "entry_1") + + def test_loops_larger(self): + # from + # http://ecee.colorado.edu/~waite/Darmstadt/motion.html + g = nx.DiGraph() + edges = [ + ("entry", "exit"), + ("entry", "1"), + ("1", "2"), + ("2", "3"), + ("3", "4"), + ("4", "5"), + ("5", "6"), + ("6", "exit"), + ("6", "2"), + ("5", "3"), + ("4", "4"), + ] + + g.add_edges_from(edges) + df = nx.dominance_frontiers(g, "entry") + answer = { + "entry": set(), + "1": {"exit"}, + "2": {"exit", "2"}, + "3": {"exit", "3", "2"}, + "4": {"exit", "4", "3", "2"}, + "5": {"exit", "3", "2"}, + "6": {"exit", "2"}, + "exit": set(), + } + for n in df: + assert set(df[n]) == set(answer[n]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominating.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominating.py new file mode 100644 index 0000000000000000000000000000000000000000..b945c7386374d7076ee08db67631cc7d845e6762 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_dominating.py @@ -0,0 +1,46 @@ +import pytest + +import networkx as nx + + +def test_dominating_set(): + G = nx.gnp_random_graph(100, 0.1) + D = nx.dominating_set(G) + assert nx.is_dominating_set(G, D) + D = nx.dominating_set(G, start_with=0) + assert nx.is_dominating_set(G, D) + + +def test_complete(): + """In complete graphs each node is a dominating set. + Thus the dominating set has to be of cardinality 1. 
+ """ + K4 = nx.complete_graph(4) + assert len(nx.dominating_set(K4)) == 1 + K5 = nx.complete_graph(5) + assert len(nx.dominating_set(K5)) == 1 + + +def test_raise_dominating_set(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + D = nx.dominating_set(G, start_with=10) + + +def test_is_dominating_set(): + G = nx.path_graph(4) + d = {1, 3} + assert nx.is_dominating_set(G, d) + d = {0, 2} + assert nx.is_dominating_set(G, d) + d = {1} + assert not nx.is_dominating_set(G, d) + + +def test_wikipedia_is_dominating_set(): + """Example from https://en.wikipedia.org/wiki/Dominating_set""" + G = nx.cycle_graph(4) + G.add_edges_from([(0, 4), (1, 4), (2, 5)]) + assert nx.is_dominating_set(G, {4, 3, 5}) + assert nx.is_dominating_set(G, {0, 2}) + assert nx.is_dominating_set(G, {1, 2}) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_efficiency.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_efficiency.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2e7d0463b3a0abeb8395df4ab870456faa64b7 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_efficiency.py @@ -0,0 +1,58 @@ +"""Unit tests for the :mod:`networkx.algorithms.efficiency` module.""" + +import networkx as nx + + +class TestEfficiency: + def setup_method(self): + # G1 is a disconnected graph + self.G1 = nx.Graph() + self.G1.add_nodes_from([1, 2, 3]) + # G2 is a cycle graph + self.G2 = nx.cycle_graph(4) + # G3 is the triangle graph with one additional edge + self.G3 = nx.lollipop_graph(3, 1) + + def test_efficiency_disconnected_nodes(self): + """ + When nodes are disconnected, efficiency is 0 + """ + assert nx.efficiency(self.G1, 1, 2) == 0 + + def test_local_efficiency_disconnected_graph(self): + """ + In a disconnected graph the efficiency is 0 + """ + assert nx.local_efficiency(self.G1) == 0 + + def test_efficiency(self): + assert nx.efficiency(self.G2, 0, 1) == 1 + assert nx.efficiency(self.G2, 0, 2) == 1 / 2 + + def test_global_efficiency(self): + assert nx.global_efficiency(self.G2) == 5 / 6 + + def test_global_efficiency_complete_graph(self): + """ + Tests that the average global efficiency of the complete graph is one. + """ + for n in range(2, 10): + G = nx.complete_graph(n) + assert nx.global_efficiency(G) == 1 + + def test_local_efficiency_complete_graph(self): + """ + Test that the local efficiency for a complete graph with at least 3 + nodes should be one. For a graph with only 2 nodes, the induced + subgraph has no edges. + """ + for n in range(3, 10): + G = nx.complete_graph(n) + assert nx.local_efficiency(G) == 1 + + def test_using_ego_graph(self): + """ + Test that the ego graph is used when computing local efficiency. + For more information, see GitHub issue #2710. 
+ """ + assert nx.local_efficiency(self.G3) == 7 / 12 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_euler.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_euler.py new file mode 100644 index 0000000000000000000000000000000000000000..08eaf7fccc668da8534ab233a86b139937ec23aa --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_euler.py @@ -0,0 +1,307 @@ +import collections + +import pytest + +import networkx as nx + + +class TestIsEulerian: + def test_is_eulerian(self): + assert nx.is_eulerian(nx.complete_graph(5)) + assert nx.is_eulerian(nx.complete_graph(7)) + assert nx.is_eulerian(nx.hypercube_graph(4)) + assert nx.is_eulerian(nx.hypercube_graph(6)) + + assert not nx.is_eulerian(nx.complete_graph(4)) + assert not nx.is_eulerian(nx.complete_graph(6)) + assert not nx.is_eulerian(nx.hypercube_graph(3)) + assert not nx.is_eulerian(nx.hypercube_graph(5)) + + assert not nx.is_eulerian(nx.petersen_graph()) + assert not nx.is_eulerian(nx.path_graph(4)) + + def test_is_eulerian2(self): + # not connected + G = nx.Graph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + # not strongly connected + G = nx.DiGraph() + G.add_nodes_from([1, 2, 3]) + assert not nx.is_eulerian(G) + G = nx.MultiDiGraph() + G.add_edge(1, 2) + G.add_edge(2, 3) + G.add_edge(2, 3) + G.add_edge(3, 1) + assert not nx.is_eulerian(G) + + +class TestEulerianCircuit: + def test_eulerian_circuit_cycle(self): + G = nx.cycle_graph(4) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 3, 2, 1] + assert edges == [(0, 3), (3, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 3, 0] + assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)] + + G = nx.complete_graph(3) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 2, 1] + assert edges == [(0, 2), (2, 1), (1, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 0] + assert edges == [(1, 2), (2, 0), (0, 1)] + + def test_eulerian_circuit_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 1, 2, 3] + assert edges == [(0, 1), (1, 2), (2, 3), (3, 0)] + + edges = list(nx.eulerian_circuit(G, source=1)) + nodes = [u for u, v in edges] + assert nodes == [1, 2, 3, 0] + assert edges == [(1, 2), (2, 3), (3, 0), (0, 1)] + + def test_multigraph(self): + G = nx.MultiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + G.add_edge(1, 2) + G.add_edge(1, 2) + edges = list(nx.eulerian_circuit(G, source=0)) + nodes = [u for u, v in edges] + assert nodes == [0, 3, 2, 1, 2, 1] + assert edges == [(0, 3), (3, 2), (2, 1), (1, 2), (2, 1), (1, 0)] + + def test_multigraph_with_keys(self): + G = nx.MultiGraph() + nx.add_cycle(G, [0, 1, 2, 3]) + G.add_edge(1, 2) + G.add_edge(1, 2) + edges = list(nx.eulerian_circuit(G, source=0, keys=True)) + nodes = [u for u, v, k in edges] + assert nodes == [0, 3, 2, 1, 2, 1] + assert edges[:2] == [(0, 3, 0), (3, 2, 0)] + assert collections.Counter(edges[2:5]) == collections.Counter( + [(2, 1, 0), (1, 2, 1), (2, 1, 2)] + ) + assert edges[5:] == [(1, 0, 0)] + + def test_not_eulerian(self): + with pytest.raises(nx.NetworkXError): + f = list(nx.eulerian_circuit(nx.complete_graph(4))) + + +class TestIsSemiEulerian: + def test_is_semieulerian(self): + # Test graphs with Eulerian paths 
but no cycles return True. + assert nx.is_semieulerian(nx.path_graph(4)) + G = nx.path_graph(6, create_using=nx.DiGraph) + assert nx.is_semieulerian(G) + + # Test graphs with Eulerian cycles return False. + assert not nx.is_semieulerian(nx.complete_graph(5)) + assert not nx.is_semieulerian(nx.complete_graph(7)) + assert not nx.is_semieulerian(nx.hypercube_graph(4)) + assert not nx.is_semieulerian(nx.hypercube_graph(6)) + + +class TestHasEulerianPath: + def test_has_eulerian_path_cyclic(self): + # Test graphs with Eulerian cycles return True. + assert nx.has_eulerian_path(nx.complete_graph(5)) + assert nx.has_eulerian_path(nx.complete_graph(7)) + assert nx.has_eulerian_path(nx.hypercube_graph(4)) + assert nx.has_eulerian_path(nx.hypercube_graph(6)) + + def test_has_eulerian_path_non_cyclic(self): + # Test graphs with Eulerian paths but no cycles return True. + assert nx.has_eulerian_path(nx.path_graph(4)) + G = nx.path_graph(6, create_using=nx.DiGraph) + assert nx.has_eulerian_path(G) + + def test_has_eulerian_path_directed_graph(self): + # Test directed graphs and returns False + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (0, 2)]) + assert not nx.has_eulerian_path(G) + + # Test directed graphs without isolated node returns True + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0)]) + assert nx.has_eulerian_path(G) + + # Test directed graphs with isolated node returns False + G.add_node(3) + assert not nx.has_eulerian_path(G) + + @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) + def test_has_eulerian_path_not_weakly_connected(self, G): + G.add_edges_from([(0, 1), (2, 3), (3, 2)]) + assert not nx.has_eulerian_path(G) + + @pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) + def test_has_eulerian_path_unbalancedins_more_than_one(self, G): + G.add_edges_from([(0, 1), (2, 3)]) + assert not nx.has_eulerian_path(G) + + +class TestFindPathStart: + def testfind_path_start(self): + find_path_start = nx.algorithms.euler._find_path_start + # Test digraphs return correct starting node. + G = nx.path_graph(6, create_using=nx.DiGraph) + assert find_path_start(G) == 0 + edges = [(0, 1), (1, 2), (2, 0), (4, 0)] + assert find_path_start(nx.DiGraph(edges)) == 4 + + # Test graph with no Eulerian path return None. 
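+ # (Editor's note: with these edges both node 0 and node 2 have out-degree + # exceeding in-degree, and a directed Eulerian path allows at most one such + # start candidate, so no valid start exists.)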
+ edges = [(0, 1), (1, 2), (2, 3), (2, 4)] + assert find_path_start(nx.DiGraph(edges)) is None + + +class TestEulerianPath: + def test_eulerian_path(self): + x = [(4, 0), (0, 1), (1, 2), (2, 0)] + for e1, e2 in zip(x, nx.eulerian_path(nx.DiGraph(x))): + assert e1 == e2 + + def test_eulerian_path_straight_link(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=5)) + + def test_eulerian_path_multigraph(self): + G = nx.MultiDiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4), (4, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=4)) + + def test_eulerian_path_eulerian_circuit(self): + G = nx.DiGraph() + result = [(1, 2), (2, 3), (3, 4), (4, 1)] + result2 = [(2, 3), (3, 4), (4, 1), (1, 2)] + result3 = [(3, 4), (4, 1), (1, 2), (2, 3)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=2)) + assert result3 == list(nx.eulerian_path(G, source=3)) + + def test_eulerian_path_undirected(self): + G = nx.Graph() + result = [(1, 2), (2, 3), (3, 4), (4, 5)] + result2 = [(5, 4), (4, 3), (3, 2), (2, 1)] + G.add_edges_from(result) + assert list(nx.eulerian_path(G)) in (result, result2) + assert result == list(nx.eulerian_path(G, source=1)) + assert result2 == list(nx.eulerian_path(G, source=5)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=2)) + + def test_eulerian_path_multigraph_undirected(self): + G = nx.MultiGraph() + result = [(2, 1), (1, 2), (2, 1), (1, 2), (2, 3), (3, 4)] + G.add_edges_from(result) + assert result == list(nx.eulerian_path(G)) + assert result == list(nx.eulerian_path(G, source=2)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=3)) + with pytest.raises(nx.NetworkXError): + list(nx.eulerian_path(G, source=1)) + + @pytest.mark.parametrize( + ("graph_type", "result"), + ( + (nx.MultiGraph, [(0, 1, 0), (1, 0, 1)]), + (nx.MultiDiGraph, [(0, 1, 0), (1, 0, 0)]), + ), + ) + def test_eulerian_with_keys(self, graph_type, result): + G = graph_type([(0, 1), (1, 0)]) + answer = nx.eulerian_path(G, keys=True) + assert list(answer) == result + + +class TestEulerize: + def test_disconnected(self): + with pytest.raises(nx.NetworkXError): + G = nx.from_edgelist([(0, 1), (2, 3)]) + nx.eulerize(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.Graph()) + + def test_null_multigraph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.eulerize(nx.MultiGraph()) + + def test_on_empty_graph(self): + with pytest.raises(nx.NetworkXError): + nx.eulerize(nx.empty_graph(3)) + + def test_on_eulerian(self): + G = nx.cycle_graph(3) + H = nx.eulerize(G) + assert nx.is_isomorphic(G, H) + + def test_on_eulerian_multigraph(self): + G = nx.MultiGraph(nx.cycle_graph(3)) + G.add_edge(0, 1) + H 
= nx.eulerize(G) + assert nx.is_eulerian(H) + + def test_on_complete_graph(self): + G = nx.complete_graph(4) + assert nx.is_eulerian(nx.eulerize(G)) + assert nx.is_eulerian(nx.eulerize(nx.MultiGraph(G))) + + def test_on_non_eulerian_graph(self): + G = nx.cycle_graph(18) + G.add_edge(0, 18) + G.add_edge(18, 19) + G.add_edge(17, 19) + G.add_edge(4, 20) + G.add_edge(20, 21) + G.add_edge(21, 22) + G.add_edge(22, 23) + G.add_edge(23, 24) + G.add_edge(24, 25) + G.add_edge(25, 26) + G.add_edge(26, 27) + G.add_edge(27, 28) + G.add_edge(28, 13) + assert not nx.is_eulerian(G) + G = nx.eulerize(G) + assert nx.is_eulerian(G) + assert nx.number_of_edges(G) == 39 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graph_hashing.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graph_hashing.py new file mode 100644 index 0000000000000000000000000000000000000000..cffa8bb22c186bd05ee2bf67b066263997f9fb69 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graph_hashing.py @@ -0,0 +1,657 @@ +import pytest + +import networkx as nx +from networkx.generators import directed + +# Unit tests for the :func:`~networkx.weisfeiler_lehman_graph_hash` function + + +def test_empty_graph_hash(): + """ + empty graphs should give hashes regardless of other params + """ + G1 = nx.empty_graph() + G2 = nx.empty_graph() + + h1 = nx.weisfeiler_lehman_graph_hash(G1) + h2 = nx.weisfeiler_lehman_graph_hash(G2) + h3 = nx.weisfeiler_lehman_graph_hash(G2, edge_attr="edge_attr1") + h4 = nx.weisfeiler_lehman_graph_hash(G2, node_attr="node_attr1") + h5 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + h6 = nx.weisfeiler_lehman_graph_hash(G2, iterations=10) + + assert h1 == h2 + assert h1 == h3 + assert h1 == h4 + assert h1 == h5 + assert h1 == h6 + + +def test_directed(): + """ + A directed graph with no bi-directional edges should yield a different graph hash + from the same graph taken as undirected if there are no hash collisions. + """ + r = 10 + for i in range(r): + G_directed = nx.gn_graph(10 + r, seed=100 + i) + G_undirected = nx.to_undirected(G_directed) + + h_directed = nx.weisfeiler_lehman_graph_hash(G_directed) + h_undirected = nx.weisfeiler_lehman_graph_hash(G_undirected) + + assert h_directed != h_undirected + + +def test_reversed(): + """ + A directed graph with no bi-directional edges should yield a different graph hash + from the same graph taken with edge directions reversed if there are no hash collisions. 
+ Here we test a cycle graph which is the minimal counterexample + """ + G = nx.cycle_graph(5, create_using=nx.DiGraph) + nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label") + + G_reversed = G.reverse() + + h = nx.weisfeiler_lehman_graph_hash(G, node_attr="label") + h_reversed = nx.weisfeiler_lehman_graph_hash(G_reversed, node_attr="label") + + assert h != h_reversed + + +def test_isomorphic(): + """ + graph hashes should be invariant to node-relabeling (when the output is reindexed + by the same mapping) + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i) + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g1_hash = nx.weisfeiler_lehman_graph_hash(G1) + g2_hash = nx.weisfeiler_lehman_graph_hash(G2) + + assert g1_hash == g2_hash + + +def test_isomorphic_edge_attr(): + """ + Isomorphic graphs with differing edge attributes should yield different graph + hashes if the 'edge_attr' argument is supplied and populated in the graph, + and there are no hash collisions. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_graph_hash(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == g2_hash_with_edge_attr1 + assert g1_hash_with_edge_attr2 == g2_hash_with_edge_attr2 + + +def test_missing_edge_attr(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, edge_attr="edge_attr1") + + +def test_isomorphic_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + there are no hash collisions. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_graph_hash(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_graph_hash( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == g2_hash_with_node_attr1 + assert g1_hash_with_node_attr2 == g2_hash_with_node_attr2 + + +def test_missing_node_attr(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises(KeyError, nx.weisfeiler_lehman_graph_hash, G, node_attr="node_attr1") + + +def test_isomorphic_edge_attr_and_node_attr(): + """ + Isomorphic graphs with differing node attributes should yield different graph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and there are no hash collisions. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_graph_hash( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_graph_hash(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_graph_hash( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == g2_hash_edge1_node1 + assert g1_hash_edge2_node2 == g2_hash_edge2_node2 + + +def test_digest_size(): + """ + The hash string lengths should be as expected for a variety of graphs and + digest sizes + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i) + + h16 = nx.weisfeiler_lehman_graph_hash(G) + h32 = nx.weisfeiler_lehman_graph_hash(G, digest_size=32) + + assert h16 != h32 + assert 
len(h16) == 16 * 2 + assert len(h32) == 32 * 2 + + +# Unit tests for the :func:`~networkx.weisfeiler_lehman_subgraph_hashes` function + + +def is_subiteration(a, b): + """ + returns True if each hash sequence in 'a' is a prefix for + the corresponding sequence indexed by the same node in 'b'. + """ + return all(b[node][: len(hashes)] == hashes for node, hashes in a.items()) + + +def hexdigest_sizes_correct(a, digest_size): + """ + returns True if all hex digest sizes are the expected length in a node:subgraph-hashes + dictionary. Hex digest string length == 2 * bytes digest length since each pair of hex + digits encodes 1 byte (https://docs.python.org/3/library/hashlib.html) + """ + hexdigest_size = digest_size * 2 + list_digest_sizes_correct = lambda l: all(len(x) == hexdigest_size for x in l) + return all(list_digest_sizes_correct(hashes) for hashes in a.values()) + + +def test_empty_graph_subgraph_hash(): + """ + empty graphs should give empty dict subgraph hashes regardless of other params + """ + G = nx.empty_graph() + + subgraph_hashes1 = nx.weisfeiler_lehman_subgraph_hashes(G) + subgraph_hashes2 = nx.weisfeiler_lehman_subgraph_hashes(G, edge_attr="edge_attr") + subgraph_hashes3 = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="edge_attr") + subgraph_hashes4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=2) + subgraph_hashes5 = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=64) + + assert subgraph_hashes1 == {} + assert subgraph_hashes2 == {} + assert subgraph_hashes3 == {} + assert subgraph_hashes4 == {} + assert subgraph_hashes5 == {} + + +def test_directed_subgraph_hash(): + """ + A directed graph with no bi-directional edges should yield different subgraph hashes + from the same graph taken as undirected, if all hashes don't collide. + """ + r = 10 + for i in range(r): + G_directed = nx.gn_graph(10 + r, seed=100 + i) + G_undirected = nx.to_undirected(G_directed) + + directed_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_directed) + undirected_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G_undirected) + + assert directed_subgraph_hashes != undirected_subgraph_hashes + + +def test_reversed_subgraph_hash(): + """ + A directed graph with no bi-directional edges should yield different subgraph hashes + from the same graph taken with edge directions reversed if there are no hash collisions. + Here we test a cycle graph which is the minimal counterexample + """ + G = nx.cycle_graph(5, create_using=nx.DiGraph) + nx.set_node_attributes(G, {n: str(n) for n in G.nodes()}, name="label") + + G_reversed = G.reverse() + + h = nx.weisfeiler_lehman_subgraph_hashes(G, node_attr="label") + h_reversed = nx.weisfeiler_lehman_subgraph_hashes(G_reversed, node_attr="label") + + assert h != h_reversed + + +def test_isomorphic_subgraph_hash(): + """ + the subgraph hashes should be invariant to node-relabeling when the output is reindexed + by the same mapping and all hashes don't collide. 
+ """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=200 + i) + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g1_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G1) + g2_subgraph_hashes = nx.weisfeiler_lehman_subgraph_hashes(G2) + + assert g1_subgraph_hashes == {-1 * k: v for k, v in g2_subgraph_hashes.items()} + + +def test_isomorphic_edge_attr_subgraph_hash(): + """ + Isomorphic graphs with differing edge attributes should yield different subgraph + hashes if the 'edge_attr' argument is supplied and populated in the graph, and + all hashes don't collide. + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=300 + i) + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1" + ) + g1_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2" + ) + g1_hash_no_edge_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, edge_attr=None) + + assert g1_hash_with_edge_attr1 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr2 != g1_hash_no_edge_attr + assert g1_hash_with_edge_attr1 != g1_hash_with_edge_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_edge_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1" + ) + g2_hash_with_edge_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2" + ) + + assert g1_hash_with_edge_attr1 == { + -1 * k: v for k, v in g2_hash_with_edge_attr1.items() + } + assert g1_hash_with_edge_attr2 == { + -1 * k: v for k, v in g2_hash_with_edge_attr2.items() + } + + +def test_missing_edge_attr_subgraph_hash(): + """ + If the 'edge_attr' argument is supplied but is missing from an edge in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_edges_from([(1, 2, {"edge_attr1": "a"}), (1, 3, {})]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, edge_attr="edge_attr1" + ) + + +def test_isomorphic_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' argument is supplied and populated in the graph, and + all hashes don't collide. 
+ The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=400 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + g1_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr1" + ) + g1_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, node_attr="node_attr2" + ) + g1_hash_no_node_attr = nx.weisfeiler_lehman_subgraph_hashes(G1, node_attr=None) + + assert g1_hash_with_node_attr1 != g1_hash_no_node_attr + assert g1_hash_with_node_attr2 != g1_hash_no_node_attr + assert g1_hash_with_node_attr1 != g1_hash_with_node_attr2 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_with_node_attr1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr1" + ) + g2_hash_with_node_attr2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, node_attr="node_attr2" + ) + + assert g1_hash_with_node_attr1 == { + -1 * k: v for k, v in g2_hash_with_node_attr1.items() + } + assert g1_hash_with_node_attr2 == { + -1 * k: v for k, v in g2_hash_with_node_attr2.items() + } + + +def test_missing_node_attr_subgraph_hash(): + """ + If the 'node_attr' argument is supplied but is missing from a node in the graph, + we should raise a KeyError + """ + G = nx.Graph() + G.add_nodes_from([(1, {"node_attr1": "a"}), (2, {})]) + G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)]) + pytest.raises( + KeyError, nx.weisfeiler_lehman_subgraph_hashes, G, node_attr="node_attr1" + ) + + +def test_isomorphic_edge_attr_and_node_attr_subgraph_hash(): + """ + Isomorphic graphs with differing node attributes should yield different subgraph + hashes if the 'node_attr' and 'edge_attr' argument is supplied and populated in + the graph, and all hashes don't collide + The output should still be invariant to node-relabeling + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G1 = nx.erdos_renyi_graph(n, p * i, seed=500 + i) + + for u in G1.nodes(): + G1.nodes[u]["node_attr1"] = f"{u}-1" + G1.nodes[u]["node_attr2"] = f"{u}-2" + + for a, b in G1.edges: + G1[a][b]["edge_attr1"] = f"{a}-{b}-1" + G1[a][b]["edge_attr2"] = f"{a}-{b}-2" + + g1_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g1_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr2", node_attr="node_attr2" + ) + g1_hash_edge1_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G1, edge_attr="edge_attr1", node_attr="node_attr2" + ) + g1_hash_no_attr = nx.weisfeiler_lehman_subgraph_hashes(G1) + + assert g1_hash_edge1_node1 != g1_hash_no_attr + assert g1_hash_edge2_node2 != g1_hash_no_attr + assert g1_hash_edge1_node1 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge2_node2 + assert g1_hash_edge1_node2 != g1_hash_edge1_node1 + + G2 = nx.relabel_nodes(G1, {u: -1 * u for u in G1.nodes()}) + + g2_hash_edge1_node1 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr1", node_attr="node_attr1" + ) + g2_hash_edge2_node2 = nx.weisfeiler_lehman_subgraph_hashes( + G2, edge_attr="edge_attr2", node_attr="node_attr2" + ) + + assert g1_hash_edge1_node1 == { + -1 * k: v for k, v in g2_hash_edge1_node1.items() + } + assert g1_hash_edge2_node2 == { + -1 * k: v for k, v in g2_hash_edge2_node2.items() + } + + +def test_iteration_depth(): + """ + All nodes should have the correct number of subgraph hashes in the output when + using degree as 
initial node labels + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=600 + i) + + depth3 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=3) + depth4 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=4) + depth5 = nx.weisfeiler_lehman_subgraph_hashes(G, iterations=5) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels empty and using an edge attribute when aggregating + neighborhoods. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=700 + i) + + for a, b in G.edges: + G[a][b]["edge_attr1"] = f"{a}-{b}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, edge_attr="edge_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute. + Subsequent iteration depths for the same graph should be additive for each node + """ + n, r = 100, 10 + p = 1.0 / r + for i in range(1, r + 1): + G = nx.erdos_renyi_graph(n, p * i, seed=800 + i) + + for u in G.nodes(): + G.nodes[u]["node_attr1"] = f"{u}-1" + + depth3 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=3 + ) + depth4 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=4 + ) + depth5 = nx.weisfeiler_lehman_subgraph_hashes( + G, node_attr="node_attr1", iterations=5 + ) + + assert all(len(hashes) == 3 for hashes in depth3.values()) + assert all(len(hashes) == 4 for hashes in depth4.values()) + assert all(len(hashes) == 5 for hashes in depth5.values()) + + assert is_subiteration(depth3, depth4) + assert is_subiteration(depth4, depth5) + assert is_subiteration(depth3, depth5) + + +def test_iteration_depth_node_edge_attr(): + """ + All nodes should have the correct number of subgraph hashes in the output when + setting initial node labels to an attribute and also using an edge attribute when + aggregating neighborhoods. 
+    Subsequent iteration depths for the same graph should be additive for each node
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G = nx.erdos_renyi_graph(n, p * i, seed=900 + i)
+
+        for u in G.nodes():
+            G.nodes[u]["node_attr1"] = f"{u}-1"
+
+        for a, b in G.edges:
+            G[a][b]["edge_attr1"] = f"{a}-{b}-1"
+
+        depth3 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=3
+        )
+        depth4 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=4
+        )
+        depth5 = nx.weisfeiler_lehman_subgraph_hashes(
+            G, edge_attr="edge_attr1", node_attr="node_attr1", iterations=5
+        )
+
+        assert all(len(hashes) == 3 for hashes in depth3.values())
+        assert all(len(hashes) == 4 for hashes in depth4.values())
+        assert all(len(hashes) == 5 for hashes in depth5.values())
+
+        assert is_subiteration(depth3, depth4)
+        assert is_subiteration(depth4, depth5)
+        assert is_subiteration(depth3, depth5)
+
+
+def test_digest_size_subgraph_hash():
+    """
+    The hash string lengths should be as expected for a variety of graphs and
+    digest sizes
+    """
+    n, r = 100, 10
+    p = 1.0 / r
+    for i in range(1, r + 1):
+        G = nx.erdos_renyi_graph(n, p * i, seed=1000 + i)
+
+        digest_size16_hashes = nx.weisfeiler_lehman_subgraph_hashes(G)
+        digest_size32_hashes = nx.weisfeiler_lehman_subgraph_hashes(G, digest_size=32)
+
+        assert digest_size16_hashes != digest_size32_hashes
+
+        assert hexdigest_sizes_correct(digest_size16_hashes, 16)
+        assert hexdigest_sizes_correct(digest_size32_hashes, 32)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graphical.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graphical.py
new file mode 100644
index 0000000000000000000000000000000000000000..99f766f799d8573e80d905482f4b685a2d16bcc0
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_graphical.py
@@ -0,0 +1,163 @@
+import pytest
+
+import networkx as nx
+
+
+def test_valid_degree_sequence1():
+    n = 100
+    p = 0.3
+    for i in range(10):
+        G = nx.erdos_renyi_graph(n, p)
+        deg = [d for n, d in G.degree()]  # a generator would be exhausted by the first call
+        assert nx.is_graphical(deg, method="eg")
+        assert nx.is_graphical(deg, method="hh")
+
+
+def test_valid_degree_sequence2():
+    n = 100
+    for i in range(10):
+        G = nx.barabasi_albert_graph(n, 1)
+        deg = [d for n, d in G.degree()]  # list, so both method checks see the data
+        assert nx.is_graphical(deg, method="eg")
+        assert nx.is_graphical(deg, method="hh")
+
+
+def test_string_input():
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [], "foo")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "hh")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, ["red"], "eg")
+
+
+def test_non_integer_input():
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "eg")
+    pytest.raises(nx.NetworkXException, nx.is_graphical, [72.5], "hh")
+
+
+def test_negative_input():
+    assert not nx.is_graphical([-1], "hh")
+    assert not nx.is_graphical([-1], "eg")
+
+
+class TestAtlas:
+    @classmethod
+    def setup_class(cls):
+        global atlas
+        from networkx.generators import atlas
+
+        cls.GAG = atlas.graph_atlas_g()
+
+    def test_atlas(self):
+        for graph in self.GAG:
+            deg = [d for n, d in graph.degree()]  # list, not a one-shot generator
+            assert nx.is_graphical(deg, method="eg")
+            assert nx.is_graphical(deg, method="hh")
+
+
+def test_small_graph_true():
+    z = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
+    assert nx.is_graphical(z, method="hh")
+    assert nx.is_graphical(z, method="eg")
+    z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2]
+    assert nx.is_graphical(z, method="hh")
+    assert
nx.is_graphical(z, method="eg") + z = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + assert nx.is_graphical(z, method="hh") + assert nx.is_graphical(z, method="eg") + + +def test_small_graph_false(): + z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + z = [6, 5, 4, 4, 2, 1, 1, 1] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + assert not nx.is_graphical(z, method="hh") + assert not nx.is_graphical(z, method="eg") + + +def test_directed_degree_sequence(): + # Test a range of valid directed degree sequences + n, r = 100, 10 + p = 1.0 / r + for i in range(r): + G = nx.erdos_renyi_graph(n, p * (i + 1), None, True) + din = (d for n, d in G.in_degree()) + dout = (d for n, d in G.out_degree()) + assert nx.is_digraphical(din, dout) + + +def test_small_directed_sequences(): + dout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + din = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1] + assert nx.is_digraphical(din, dout) + # Test nongraphical directed sequence + dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102] + assert not nx.is_digraphical(din, dout) + # Test digraphical small sequence + dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1] + assert nx.is_digraphical(din, dout) + # Test nonmatching sum + din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1] + assert not nx.is_digraphical(din, dout) + # Test for negative integer in sequence + din = [2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4] + assert not nx.is_digraphical(din, dout) + # Test for noninteger + din = dout = [1, 1, 1.1, 1] + assert not nx.is_digraphical(din, dout) + din = dout = [1, 1, "rer", 1] + assert not nx.is_digraphical(din, dout) + + +def test_multi_sequence(): + # Test nongraphical multi sequence + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1] + assert not nx.is_multigraphical(seq) + # Test small graphical multi sequence + seq = [6, 5, 4, 4, 2, 1, 1, 1] + assert nx.is_multigraphical(seq) + # Test for negative integer in sequence + seq = [6, 5, 4, -4, 2, 1, 1, 1] + assert not nx.is_multigraphical(seq) + # Test for sequence with odd sum + seq = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4] + assert not nx.is_multigraphical(seq) + # Test for noninteger + seq = [1, 1, 1.1, 1] + assert not nx.is_multigraphical(seq) + seq = [1, 1, "rer", 1] + assert not nx.is_multigraphical(seq) + + +def test_pseudo_sequence(): + # Test small valid pseudo sequence + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1] + assert nx.is_pseudographical(seq) + # Test for sequence with odd sum + seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for negative integer in sequence + seq = [1000, 3, 3, 3, 3, 2, 2, -2, 1, 1] + assert not nx.is_pseudographical(seq) + # Test for noninteger + seq = [1, 1, 1.1, 1] + assert not nx.is_pseudographical(seq) + seq = [1, 1, "rer", 1] + assert not nx.is_pseudographical(seq) + + +def test_numpy_degree_sequence(): + np = pytest.importorskip("numpy") + ds = np.array([1, 2, 2, 2, 1], dtype=np.int64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1, 2, 2, 2, 1], dtype=np.float64) + assert nx.is_graphical(ds, "eg") + assert nx.is_graphical(ds, "hh") + ds = np.array([1.1, 2, 2, 2, 1], dtype=np.float64) + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "eg") + pytest.raises(nx.NetworkXException, nx.is_graphical, ds, "hh") diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hierarchy.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hierarchy.py new file mode 100644 index 0000000000000000000000000000000000000000..227c89c222005544f8559ade55502c1f7a7003d5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hierarchy.py @@ -0,0 +1,39 @@ +import pytest + +import networkx as nx + + +def test_hierarchy_exception(): + G = nx.cycle_graph(5) + pytest.raises(nx.NetworkXError, nx.flow_hierarchy, G) + + +def test_hierarchy_cycle(): + G = nx.cycle_graph(5, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 0.0 + + +def test_hierarchy_tree(): + G = nx.full_rary_tree(2, 16, create_using=nx.DiGraph()) + assert nx.flow_hierarchy(G) == 1.0 + + +def test_hierarchy_1(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 1), (3, 4), (0, 4)]) + assert nx.flow_hierarchy(G) == 0.5 + + +def test_hierarchy_weight(): + G = nx.DiGraph() + G.add_edges_from( + [ + (0, 1, {"weight": 0.3}), + (1, 2, {"weight": 0.1}), + (2, 3, {"weight": 0.1}), + (3, 1, {"weight": 0.1}), + (3, 4, {"weight": 0.3}), + (0, 4, {"weight": 0.3}), + ] + ) + assert nx.flow_hierarchy(G, weight="weight") == 0.75 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hybrid.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hybrid.py new file mode 100644 index 0000000000000000000000000000000000000000..6af0016498549caed58772e304c93113a8b693d9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_hybrid.py @@ -0,0 +1,24 @@ +import networkx as nx + + +def test_2d_grid_graph(): + # FC article claims 2d grid graph of size n is (3,3)-connected + # and (5,9)-connected, but I don't think it is (5,9)-connected + G = nx.grid_2d_graph(8, 8, periodic=True) + assert nx.is_kl_connected(G, 3, 3) + assert not nx.is_kl_connected(G, 5, 9) + (H, graphOK) = nx.kl_connected_subgraph(G, 5, 9, same_as_graph=True) + assert not graphOK + + +def test_small_graph(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 3) + G.add_edge(2, 3) + assert nx.is_kl_connected(G, 2, 2) + H = nx.kl_connected_subgraph(G, 2, 2) + (H, graphOK) = nx.kl_connected_subgraph( + G, 2, 2, low_memory=True, same_as_graph=True + ) + assert graphOK diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_isolate.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_isolate.py new file mode 100644 index 0000000000000000000000000000000000000000..d29b306d2b13c2457905c41218e5c60793b309ba --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_isolate.py @@ -0,0 +1,26 @@ +"""Unit tests for the :mod:`networkx.algorithms.isolates` module.""" + +import networkx as nx + + +def test_is_isolate(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_node(2) + assert not nx.is_isolate(G, 0) + assert not nx.is_isolate(G, 1) + assert nx.is_isolate(G, 2) + + +def test_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert sorted(nx.isolates(G)) == [2, 3] + + +def test_number_of_isolates(): + G = nx.Graph() + G.add_edge(0, 1) + G.add_nodes_from([2, 3]) + assert nx.number_of_isolates(G) == 2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_link_prediction.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_link_prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..7fc04d20672ef13058779f55be48ddac1b1a048d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_link_prediction.py @@ -0,0 +1,582 @@ +import math +from 
functools import partial + +import pytest + +import networkx as nx + + +def _test_func(G, ebunch, expected, predict_func, **kwargs): + result = predict_func(G, ebunch, **kwargs) + exp_dict = {tuple(sorted([u, v])): score for u, v, score in expected} + res_dict = {tuple(sorted([u, v])): score for u, v, score in result} + + assert len(exp_dict) == len(res_dict) + for p in exp_dict: + assert exp_dict[p] == pytest.approx(res_dict[p], abs=1e-7) + + +class TestResourceAllocationIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.resource_allocation_index) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.75)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestJaccardCoefficient: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.jaccard_coefficient) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 0.6)]) + + def test_P4(self): + G = nx.path_graph(4) + self.test(G, [(0, 2)], [(0, 2, 0.5)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (2, 3)]) + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_isolated_nodes(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)]) + + +class TestAdamicAdarIndex: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.adamic_adar_index) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3 / math.log(4))]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1 / math.log(2))]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1 / math.log(4))]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert 
pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + self.test(G, [(0, 0)], [(0, 0, 3 / math.log(3))]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test( + G, None, [(0, 3, 1 / math.log(2)), (1, 2, 1 / math.log(2)), (1, 3, 0)] + ) + + +class TestCommonNeighborCentrality: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.common_neighbor_centrality) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 3.0)], alpha=1) + self.test(G, [(0, 1)], [(0, 1, 5.0)], alpha=0) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 2)], [(0, 2, 1.25)], alpha=0.5) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(1, 2)], [(1, 2, 1.75)], alpha=0.5) + + @pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)) + def test_notimplemented(self, graph_type): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, graph_type([(0, 1), (1, 2)]), [(0, 2)] + ) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(4) + assert pytest.raises(nx.NetworkXAlgorithmError, self.test, G, [(0, 0)], []) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 1.5), (1, 2, 1.5), (1, 3, 2 / 3)], alpha=0.5) + + +class TestPreferentialAttachment: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.preferential_attachment) + cls.test = partial(_test_func, predict_func=cls.func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, [(0, 1)], [(0, 1, 16)]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, [(0, 1)], [(0, 1, 2)]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, [(0, 2)], [(0, 2, 4)]) + + def test_notimplemented(self): + assert pytest.raises( + nx.NetworkXNotImplemented, self.func, nx.DiGraph([(0, 1), (1, 2)]), [(0, 2)] + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + assert pytest.raises( + nx.NetworkXNotImplemented, + self.func, + nx.MultiDiGraph([(0, 1), (1, 2)]), + [(0, 2)], + ) + + def test_zero_degrees(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + self.test(G, None, [(0, 3, 2), (1, 2, 2), (1, 3, 1)]) + + +class TestCNSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.cn_soundarajan_hopcroft) + cls.test = partial(_test_func, predict_func=cls.func, community="community") + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 1)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + 
G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 2)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 4)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 4)]) + + def test_custom_community_attribute_name(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 2)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 2), (1, 2, 1), (1, 3, 0)]) + + +class TestRAIndexSoundarajanHopcroft: + @classmethod + def setup_class(cls): + cls.func = staticmethod(nx.ra_index_soundarajan_hopcroft) + cls.test = partial(_test_func, predict_func=cls.func, community="community") + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 0.5)]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 0.25)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + 
G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 1)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 1)]) + + def test_custom_community_attribute_name(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 0.5), (1, 2, 0), (1, 3, 0)]) + + +class TestWithinInterCluster: + @classmethod + def setup_class(cls): + cls.delta = 0.001 + cls.func = staticmethod(nx.within_inter_cluster) + cls.test = partial( + _test_func, predict_func=cls.func, delta=cls.delta, community="community" + ) + + def test_K5(self): + G = nx.complete_graph(5) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 1 + self.test(G, [(0, 1)], [(0, 1, 2 / (1 + self.delta))]) + + def test_P3(self): + G = nx.path_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 2)], [(0, 2, 0)]) + + def test_S4(self): + G = nx.star_graph(4) + G.nodes[0]["community"] = 1 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 1 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 2)], [(1, 2, 1 / self.delta)]) + + def test_notimplemented(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], 
community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + G = nx.MultiDiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXNotImplemented, self.func, G, [(0, 2)]) + + def test_no_common_neighbor(self): + G = nx.Graph() + G.add_nodes_from([0, 1]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + self.test(G, [(0, 1)], [(0, 1, 0)]) + + def test_equal_nodes(self): + G = nx.complete_graph(3) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + self.test(G, [(0, 0)], [(0, 0, 2 / self.delta)]) + + def test_different_community(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 1 + self.test(G, [(0, 3)], [(0, 3, 0)]) + + def test_no_inter_cluster_common_neighbor(self): + G = nx.complete_graph(4) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)]) + + def test_no_community_information(self): + G = nx.complete_graph(5) + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 1)])) + + def test_insufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 0 + G.nodes[3]["community"] = 0 + assert pytest.raises(nx.NetworkXAlgorithmError, list, self.func(G, [(0, 3)])) + + def test_sufficient_community_information(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]) + G.nodes[1]["community"] = 0 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + G.nodes[4]["community"] = 0 + self.test(G, [(1, 4)], [(1, 4, 2 / self.delta)]) + + def test_invalid_delta(self): + G = nx.complete_graph(3) + G.add_nodes_from([0, 1, 2], community=0) + assert pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], 0) + assert pytest.raises(nx.NetworkXAlgorithmError, self.func, G, [(0, 1)], -0.5) + + def test_custom_community_attribute_name(self): + G = nx.complete_graph(4) + G.nodes[0]["cmty"] = 0 + G.nodes[1]["cmty"] = 0 + G.nodes[2]["cmty"] = 0 + G.nodes[3]["cmty"] = 0 + self.test(G, [(0, 3)], [(0, 3, 2 / self.delta)], community="cmty") + + def test_all_nonexistent_edges(self): + G = nx.Graph() + G.add_edges_from([(0, 1), (0, 2), (2, 3)]) + G.nodes[0]["community"] = 0 + G.nodes[1]["community"] = 1 + G.nodes[2]["community"] = 0 + G.nodes[3]["community"] = 0 + self.test(G, None, [(0, 3, 1 / self.delta), (1, 2, 0), (1, 3, 0)]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py new file mode 100644 index 0000000000000000000000000000000000000000..66d75220327cb27c8b378505aea2780ea96021af --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_lowest_common_ancestors.py @@ -0,0 +1,427 @@ +from itertools import chain, combinations, product + +import pytest + +import networkx as nx + +tree_all_pairs_lca = nx.tree_all_pairs_lowest_common_ancestor +all_pairs_lca = nx.all_pairs_lowest_common_ancestor + + +def get_pair(dictionary, n1, n2): + if (n1, n2) in dictionary: + return dictionary[n1, n2] + else: + return dictionary[n2, n1] + + +class TestTreeLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + edges = [(0, 
1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + @staticmethod + def assert_has_same_pairs(d1, d2): + for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): + assert get_pair(d1, a, b) == get_pair(d2, a, b) + + def test_tree_all_pairs_lca_default_root(self): + assert dict(tree_all_pairs_lca(self.DG)) == self.ans + + def test_tree_all_pairs_lca_return_subset(self): + test_pairs = [(0, 1), (0, 1), (1, 0)] + ans = dict(tree_all_pairs_lca(self.DG, 0, test_pairs)) + assert (0, 1) in ans and (1, 0) in ans + assert len(ans) == 2 + + def test_tree_all_pairs_lca(self): + all_pairs = chain(combinations(self.DG, 2), ((node, node) for node in self.DG)) + + ans = dict(tree_all_pairs_lca(self.DG, 0, all_pairs)) + self.assert_has_same_pairs(ans, self.ans) + + def test_tree_all_pairs_gold_example(self): + ans = dict(tree_all_pairs_lca(self.DG)) + self.assert_has_same_pairs(self.gold, ans) + + def test_tree_all_pairs_lca_invalid_input(self): + empty_digraph = tree_all_pairs_lca(nx.DiGraph()) + pytest.raises(nx.NetworkXPointlessConcept, list, empty_digraph) + + bad_pairs_digraph = tree_all_pairs_lca(self.DG, pairs=[(-1, -2)]) + pytest.raises(nx.NodeNotFound, list, bad_pairs_digraph) + + def test_tree_all_pairs_lca_subtrees(self): + ans = dict(tree_all_pairs_lca(self.DG, 1)) + gold = { + pair: lca + for (pair, lca) in self.gold.items() + if all(n in (1, 3, 4) for n in pair) + } + self.assert_has_same_pairs(gold, ans) + + def test_tree_all_pairs_lca_disconnected_nodes(self): + G = nx.DiGraph() + G.add_node(1) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G)) + + G.add_node(0) + assert {(1, 1): 1} == dict(tree_all_pairs_lca(G, 1)) + assert {(0, 0): 0} == dict(tree_all_pairs_lca(G, 0)) + + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_error_if_input_not_tree(self): + # Cycle + G = nx.DiGraph([(1, 2), (2, 1)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + # DAG + G = nx.DiGraph([(0, 2), (1, 2)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_generator(self): + pairs = iter([(0, 1), (0, 1), (1, 0)]) + some_pairs = dict(tree_all_pairs_lca(self.DG, 0, pairs)) + assert (0, 1) in some_pairs and (1, 0) in some_pairs + assert len(some_pairs) == 2 + + def test_tree_all_pairs_lca_nonexisting_pairs_exception(self): + lca = tree_all_pairs_lca(self.DG, 0, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + # check if node is None + lca = tree_all_pairs_lca(self.DG, None, [(-1, -1)]) + pytest.raises(nx.NodeNotFound, list, lca) + + def test_tree_all_pairs_lca_routine_bails_on_DAGs(self): + G = nx.DiGraph([(3, 4), (5, 4)]) + pytest.raises(nx.NetworkXError, list, tree_all_pairs_lca(G)) + + def test_tree_all_pairs_lca_not_implemented(self): + NNI = nx.NetworkXNotImplemented + G = nx.Graph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(NNI): + next(tree_all_pairs_lca(G)) + with pytest.raises(NNI): + next(all_pairs_lca(G)) + 
pytest.raises(NNI, nx.lowest_common_ancestor, G, 0, 1) + + def test_tree_all_pairs_lca_trees_without_LCAs(self): + G = nx.DiGraph() + G.add_node(3) + ans = list(tree_all_pairs_lca(G)) + assert ans == [((3, 3), 3)] + + +class TestMultiTreeLCA(TestTreeLCA): + @classmethod + def setup_class(cls): + cls.DG = nx.MultiDiGraph() + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + cls.DG.add_edges_from(edges) + cls.ans = dict(tree_all_pairs_lca(cls.DG, 0)) + # add multiedges + cls.DG.add_edges_from(edges) + + gold = {(n, n): n for n in cls.DG} + gold.update({(0, i): 0 for i in range(1, 7)}) + gold.update( + { + (1, 2): 0, + (1, 3): 1, + (1, 4): 1, + (1, 5): 0, + (1, 6): 0, + (2, 3): 0, + (2, 4): 0, + (2, 5): 2, + (2, 6): 2, + (3, 4): 1, + (3, 5): 0, + (3, 6): 0, + (4, 5): 0, + (4, 6): 0, + (5, 6): 2, + } + ) + + cls.gold = gold + + +class TestDAGLCA: + @classmethod + def setup_class(cls): + cls.DG = nx.DiGraph() + nx.add_path(cls.DG, (0, 1, 2, 3)) + nx.add_path(cls.DG, (0, 4, 3)) + nx.add_path(cls.DG, (0, 5, 6, 8, 3)) + nx.add_path(cls.DG, (5, 7, 8)) + cls.DG.add_edge(6, 2) + cls.DG.add_edge(7, 2) + + cls.root_distance = nx.shortest_path_length(cls.DG, source=0) + + cls.gold = { + (1, 1): 1, + (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + def assert_lca_dicts_same(self, d1, d2, G=None): + """Checks if d1 and d2 contain the same pairs and + have a node at the same distance from root for each. 
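+        (In a DAG a pair can have several equally deep lowest common
+        ancestors, so any one of them is accepted rather than one fixed node.)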
+        If G is None use self.DG."""
+        if G is None:
+            G = self.DG
+            root_distance = self.root_distance
+        else:
+            roots = [n for n, deg in G.in_degree if deg == 0]
+            assert len(roots) == 1
+            root_distance = nx.shortest_path_length(G, source=roots[0])
+
+        for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)):
+            assert (
+                root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)]
+            )
+
+    def test_all_pairs_lca_gold_example(self):
+        self.assert_lca_dicts_same(dict(all_pairs_lca(self.DG)), self.gold)
+
+    def test_all_pairs_lca_all_pairs_given(self):
+        all_pairs = list(product(self.DG.nodes(), self.DG.nodes()))
+        ans = all_pairs_lca(self.DG, pairs=all_pairs)
+        self.assert_lca_dicts_same(dict(ans), self.gold)
+
+    def test_all_pairs_lca_generator(self):
+        all_pairs = product(self.DG.nodes(), self.DG.nodes())
+        ans = all_pairs_lca(self.DG, pairs=all_pairs)
+        self.assert_lca_dicts_same(dict(ans), self.gold)
+
+    def test_all_pairs_lca_input_graph_with_two_roots(self):
+        G = self.DG.copy()
+        G.add_edge(9, 10)
+        G.add_edge(9, 4)
+        gold = self.gold.copy()
+        gold[9, 9] = 9
+        gold[9, 10] = 9
+        gold[9, 4] = 9
+        gold[9, 3] = 9
+        gold[10, 4] = 9
+        gold[10, 3] = 9
+        gold[10, 10] = 10
+
+        testing = dict(all_pairs_lca(G))
+
+        G.add_edge(-1, 9)
+        G.add_edge(-1, 0)
+        self.assert_lca_dicts_same(testing, gold, G)
+
+    def test_all_pairs_lca_nonexisting_pairs_exception(self):
+        pytest.raises(nx.NodeNotFound, all_pairs_lca, self.DG, [(-1, -1)])
+
+    def test_all_pairs_lca_pairs_without_lca(self):
+        G = self.DG.copy()
+        G.add_node(-1)
+        gen = all_pairs_lca(G, [(-1, -1), (-1, 0)])
+        assert dict(gen) == {(-1, -1): -1}
+
+    def test_all_pairs_lca_null_graph(self):
+        pytest.raises(nx.NetworkXPointlessConcept, all_pairs_lca, nx.DiGraph())
+
+    def test_all_pairs_lca_non_dags(self):
+        pytest.raises(nx.NetworkXError, all_pairs_lca, nx.DiGraph([(3, 4), (4, 3)]))
+
+    def test_all_pairs_lca_nonempty_graph_without_lca(self):
+        G = nx.DiGraph()
+        G.add_node(3)
+        ans = list(all_pairs_lca(G))
+        assert ans == [((3, 3), 3)]
+
+    def test_all_pairs_lca_bug_gh4942(self):
+        G = nx.DiGraph([(0, 2), (1, 2), (2, 3)])
+        ans = list(all_pairs_lca(G))
+        assert len(ans) == 9
+
+    def test_all_pairs_lca_default_kwarg(self):
+        G = nx.DiGraph([(0, 1), (2, 1)])
+        sentinel = object()
+        assert nx.lowest_common_ancestor(G, 0, 2, default=sentinel) is sentinel
+
+    def test_all_pairs_lca_identity(self):
+        G = nx.DiGraph()
+        G.add_node(3)
+        assert nx.lowest_common_ancestor(G, 3, 3) == 3
+
+    def test_all_pairs_lca_issue_4574(self):
+        G = nx.DiGraph()
+        G.add_nodes_from(range(17))
+        G.add_edges_from(
+            [
+                (2, 0),
+                (1, 2),
+                (3, 2),
+                (5, 2),
+                (8, 2),
+                (11, 2),
+                (4, 5),
+                (6, 5),
+                (7, 8),
+                (10, 8),
+                (13, 11),
+                (14, 11),
+                (15, 11),
+                (9, 10),
+                (12, 13),
+                (16, 15),
+            ]
+        )
+
+        assert nx.lowest_common_ancestor(G, 7, 9) is None
+
+    def test_all_pairs_lca_one_pair_gh4942(self):
+        G = nx.DiGraph()
+        # Note: order of edge addition is critical to the test
+        G.add_edge(0, 1)
+        G.add_edge(2, 0)
+        G.add_edge(2, 3)
+        G.add_edge(4, 0)
+        G.add_edge(5, 2)
+
+        assert nx.lowest_common_ancestor(G, 1, 3) == 2
+
+
+class TestMultiDiGraph_DAGLCA(TestDAGLCA):
+    @classmethod
+    def setup_class(cls):
+        cls.DG = nx.MultiDiGraph()
+        nx.add_path(cls.DG, (0, 1, 2, 3))
+        # add multiedges
+        nx.add_path(cls.DG, (0, 1, 2, 3))
+        nx.add_path(cls.DG, (0, 4, 3))
+        nx.add_path(cls.DG, (0, 5, 6, 8, 3))
+        nx.add_path(cls.DG, (5, 7, 8))
+        cls.DG.add_edge(6, 2)
+        cls.DG.add_edge(7, 2)
+
+        cls.root_distance = nx.shortest_path_length(cls.DG, source=0)
+
+        cls.gold = {
+            (1, 1): 1,
+ (1, 2): 1, + (1, 3): 1, + (1, 4): 0, + (1, 5): 0, + (1, 6): 0, + (1, 7): 0, + (1, 8): 0, + (2, 2): 2, + (2, 3): 2, + (2, 4): 0, + (2, 5): 5, + (2, 6): 6, + (2, 7): 7, + (2, 8): 7, + (3, 3): 3, + (3, 4): 4, + (3, 5): 5, + (3, 6): 6, + (3, 7): 7, + (3, 8): 8, + (4, 4): 4, + (4, 5): 0, + (4, 6): 0, + (4, 7): 0, + (4, 8): 0, + (5, 5): 5, + (5, 6): 5, + (5, 7): 5, + (5, 8): 5, + (6, 6): 6, + (6, 7): 5, + (6, 8): 6, + (7, 7): 7, + (7, 8): 7, + (8, 8): 8, + } + cls.gold.update(((0, n), 0) for n in cls.DG) + + +def test_all_pairs_lca_self_ancestors(): + """Self-ancestors should always be the node itself, i.e. lca of (0, 0) is 0. + See gh-4458.""" + # DAG for test - note order of node/edge addition is relevant + G = nx.DiGraph() + G.add_nodes_from(range(5)) + G.add_edges_from([(1, 0), (2, 0), (3, 2), (4, 1), (4, 3)]) + + ap_lca = nx.all_pairs_lowest_common_ancestor + assert all(u == v == a for (u, v), a in ap_lca(G) if u == v) + MG = nx.MultiDiGraph(G) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) + MG.add_edges_from([(1, 0), (2, 0)]) + assert all(u == v == a for (u, v), a in ap_lca(MG) if u == v) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_matching.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..37853e3896c0fd6bcac1f46524a844ae2e2fb518 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_matching.py @@ -0,0 +1,605 @@ +import math +from itertools import permutations + +from pytest import raises + +import networkx as nx +from networkx.algorithms.matching import matching_dict_to_set +from networkx.utils import edges_equal + + +class TestMaxWeightMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.max_weight_matching` function. 
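+    Expected matchings are written as dicts mapping each endpoint to its
+    partner and converted with matching_dict_to_set; e.g. the dict
+    {1: 2, 2: 1} collapses to the single undirected edge (1, 2).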
+ + """ + + def test_trivial1(self): + """Empty graph""" + G = nx.Graph() + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_selfloop(self): + G = nx.Graph() + G.add_edge(0, 0, weight=100) + assert nx.max_weight_matching(G) == set() + assert nx.min_weight_matching(G) == set() + + def test_single_edge(self): + G = nx.Graph() + G.add_edge(0, 1) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({0: 1, 1: 0}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({0: 1, 1: 0}) + ) + + def test_two_path(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10) + G.add_edge("two", "three", weight=11) + assert edges_equal( + nx.max_weight_matching(G), + matching_dict_to_set({"three": "two", "two": "three"}), + ) + assert edges_equal( + nx.min_weight_matching(G), + matching_dict_to_set({"one": "two", "two": "one"}), + ) + + def test_path(self): + G = nx.Graph() + G.add_edge(1, 2, weight=5) + G.add_edge(2, 3, weight=11) + G.add_edge(3, 4, weight=5) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({2: 3, 3: 2}) + ) + assert edges_equal( + nx.max_weight_matching(G, 1), matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + assert edges_equal( + nx.min_weight_matching(G, 1), matching_dict_to_set({1: 2, 3: 4}) + ) + + def test_square(self): + G = nx.Graph() + G.add_edge(1, 4, weight=2) + G.add_edge(2, 3, weight=2) + G.add_edge(1, 2, weight=1) + G.add_edge(3, 4, weight=4) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3}) + ) + + def test_edge_attribute_name(self): + G = nx.Graph() + G.add_edge("one", "two", weight=10, abcd=11) + G.add_edge("two", "three", weight=11, abcd=10) + assert edges_equal( + nx.max_weight_matching(G, weight="abcd"), + matching_dict_to_set({"one": "two", "two": "one"}), + ) + assert edges_equal( + nx.min_weight_matching(G, weight="abcd"), + matching_dict_to_set({"three": "two"}), + ) + + def test_floating_point_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=math.pi) + G.add_edge(2, 3, weight=math.exp(1)) + G.add_edge(1, 3, weight=3.0) + G.add_edge(1, 4, weight=math.sqrt(2.0)) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1}) + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 4, 2: 3, 3: 2, 4: 1}) + ) + + def test_negative_weights(self): + G = nx.Graph() + G.add_edge(1, 2, weight=2) + G.add_edge(1, 3, weight=-2) + G.add_edge(2, 3, weight=1) + G.add_edge(2, 4, weight=-1) + G.add_edge(3, 4, weight=-6) + assert edges_equal( + nx.max_weight_matching(G), matching_dict_to_set({1: 2, 2: 1}) + ) + assert edges_equal( + nx.max_weight_matching(G, maxcardinality=True), + matching_dict_to_set({1: 3, 2: 4, 3: 1, 4: 2}), + ) + assert edges_equal( + nx.min_weight_matching(G), matching_dict_to_set({1: 2, 3: 4}) + ) + + def test_s_blossom(self): + """Create S-blossom and use it for augmentation:""" + G = nx.Graph() + G.add_weighted_edges_from([(1, 2, 8), (1, 3, 9), (2, 3, 10), (3, 4, 7)]) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_weighted_edges_from([(1, 6, 5), (4, 5, 6)]) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1}) + 
assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_t_blossom(self): + """Create S-blossom, relabel as T-blossom, use for augmentation:""" + G = nx.Graph() + G.add_weighted_edges_from( + [(1, 2, 9), (1, 3, 8), (2, 3, 10), (1, 4, 5), (4, 5, 4), (1, 6, 3)] + ) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 5, 5: 4, 6: 1}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.add_edge(4, 5, weight=3) + G.add_edge(1, 6, weight=4) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + G.remove_edge(1, 6) + G.add_edge(3, 6, weight=4) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 6, 4: 5, 5: 4, 6: 3}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom(self): + """Create nested S-blossom, use for augmentation:""" + + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 9), + (1, 3, 9), + (2, 3, 10), + (2, 4, 8), + (3, 5, 8), + (4, 5, 10), + (5, 6, 6), + ] + ) + dict_format = {1: 3, 2: 4, 3: 1, 4: 2, 5: 6, 6: 5} + expected = {frozenset(e) for e in matching_dict_to_set(dict_format)} + answer = {frozenset(e) for e in nx.max_weight_matching(G)} + assert answer == expected + answer = {frozenset(e) for e in nx.min_weight_matching(G)} + assert answer == expected + + def test_nested_s_blossom_relabel(self): + """Create S-blossom, relabel as S, include in nested S-blossom:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 10), + (1, 7, 10), + (2, 3, 12), + (3, 4, 20), + (3, 5, 20), + (4, 5, 25), + (5, 6, 10), + (6, 7, 10), + (7, 8, 8), + ] + ) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 4, 4: 3, 5: 6, 6: 5, 7: 8, 8: 7}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_expand(self): + """Create nested S-blossom, augment, expand recursively:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 8), + (1, 3, 8), + (2, 3, 10), + (2, 4, 12), + (3, 5, 12), + (4, 5, 14), + (4, 6, 12), + (5, 7, 12), + (6, 7, 14), + (7, 8, 12), + ] + ) + answer = matching_dict_to_set({1: 2, 2: 1, 3: 5, 4: 6, 5: 3, 6: 4, 7: 8, 8: 7}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_s_blossom_relabel_expand(self): + """Create S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 23), + (1, 5, 22), + (1, 6, 15), + (2, 3, 25), + (3, 4, 22), + (4, 5, 25), + (4, 8, 14), + (5, 7, 13), + ] + ) + answer = matching_dict_to_set({1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nested_s_blossom_relabel_expand(self): + """Create nested S-blossom, relabel as T, expand:""" + G = nx.Graph() + G.add_weighted_edges_from( + [ + (1, 2, 19), + (1, 3, 20), + (1, 8, 8), + (2, 3, 25), + (2, 4, 18), + (3, 5, 18), + (4, 5, 13), + (4, 7, 7), + (5, 6, 7), + ] + ) + answer = matching_dict_to_set({1: 8, 2: 3, 3: 2, 4: 7, 5: 6, 6: 5, 7: 4, 8: 1}) + assert edges_equal(nx.max_weight_matching(G), answer) + assert edges_equal(nx.min_weight_matching(G), answer) + + def test_nasty_blossom1(self): + """Create blossom, relabel as T in more than one way, expand, + augment: + """ + G = nx.Graph() + 
G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 5, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 50),
+                (1, 6, 30),
+                (3, 9, 35),
+                (4, 8, 35),
+                (5, 7, 26),
+                (9, 10, 5),
+            ]
+        )
+        ansdict = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+        answer = matching_dict_to_set(ansdict)
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom2(self):
+        """Again but slightly different:"""
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 5, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 50),
+                (1, 6, 30),
+                (3, 9, 35),
+                (4, 8, 26),
+                (5, 7, 40),
+                (9, 10, 5),
+            ]
+        )
+        ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+        answer = matching_dict_to_set(ans)
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom_least_slack(self):
+        """Create blossom, relabel as T, expand such that a new
+        least-slack S-to-free edge is produced, augment:
+        """
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 5, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 50),
+                (1, 6, 30),
+                (3, 9, 35),
+                (4, 8, 28),
+                (5, 7, 26),
+                (9, 10, 5),
+            ]
+        )
+        ans = {1: 6, 2: 3, 3: 2, 4: 8, 5: 7, 6: 1, 7: 5, 8: 4, 9: 10, 10: 9}
+        answer = matching_dict_to_set(ans)
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom_augmenting(self):
+        """Create nested blossom, relabel as T in more than one way"""
+        # expand outer blossom such that inner blossom ends up on an
+        # augmenting path:
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 45),
+                (1, 7, 45),
+                (2, 3, 50),
+                (3, 4, 45),
+                (4, 5, 95),
+                (4, 6, 94),
+                (5, 6, 94),
+                (6, 7, 50),
+                (1, 8, 30),
+                (3, 11, 35),
+                (5, 9, 36),
+                (7, 10, 26),
+                (11, 12, 5),
+            ]
+        )
+        ans = {
+            1: 8,
+            2: 3,
+            3: 2,
+            4: 6,
+            5: 9,
+            6: 4,
+            7: 10,
+            8: 1,
+            9: 5,
+            10: 7,
+            11: 12,
+            12: 11,
+        }
+        answer = matching_dict_to_set(ans)
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_nasty_blossom_expand_recursively(self):
+        """Create nested S-blossom, relabel as S, expand recursively:"""
+        G = nx.Graph()
+        G.add_weighted_edges_from(
+            [
+                (1, 2, 40),
+                (1, 3, 40),
+                (2, 3, 60),
+                (2, 4, 55),
+                (3, 5, 55),
+                (4, 5, 50),
+                (1, 8, 15),
+                (5, 7, 30),
+                (7, 6, 10),
+                (8, 10, 10),
+                (4, 9, 30),
+            ]
+        )
+        ans = {1: 2, 2: 1, 3: 5, 4: 9, 5: 3, 6: 7, 7: 6, 8: 10, 9: 4, 10: 8}
+        answer = matching_dict_to_set(ans)
+        assert edges_equal(nx.max_weight_matching(G), answer)
+        assert edges_equal(nx.min_weight_matching(G), answer)
+
+    def test_wrong_graph_type(self):
+        error = nx.NetworkXNotImplemented
+        raises(error, nx.max_weight_matching, nx.MultiGraph())
+        raises(error, nx.max_weight_matching, nx.MultiDiGraph())
+        raises(error, nx.max_weight_matching, nx.DiGraph())
+        raises(error, nx.min_weight_matching, nx.DiGraph())
+
+
+class TestIsMatching:
+    """Unit tests for the
+    :func:`~networkx.algorithms.matching.is_matching` function.
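+    A matching is a set of edges that share no endpoints; e.g. on the path
+    graph 0-1-2-3, {(0, 1), (2, 3)} is a matching while {(0, 1), (1, 2)} is
+    not, since both of those edges touch node 1.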
+ + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_empty_matching(self): + G = nx.path_graph(4) + assert nx.is_matching(G, set()) + + def test_single_edge(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(1, 2)}) + + def test_edge_order(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(0, 1), (2, 3)}) + assert nx.is_matching(G, {(1, 0), (2, 3)}) + assert nx.is_matching(G, {(0, 1), (3, 2)}) + assert nx.is_matching(G, {(1, 0), (3, 2)}) + + def test_valid_matching(self): + G = nx.path_graph(4) + assert nx.is_matching(G, {(0, 1), (2, 3)}) + + def test_invalid_input(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # edge to node not in G + raises(error, nx.is_matching, G, {(0, 5), (2, 3)}) + # edge not a 2-tuple + raises(error, nx.is_matching, G, {(0, 1, 2), (2, 3)}) + raises(error, nx.is_matching, G, {(0,), (2, 3)}) + + def test_selfloops(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # selfloop for node not in G + raises(error, nx.is_matching, G, {(5, 5), (2, 3)}) + # selfloop edge not in G + assert not nx.is_matching(G, {(0, 0), (1, 2), (2, 3)}) + # selfloop edge in G + G.add_edge(0, 0) + assert not nx.is_matching(G, {(0, 0), (1, 2)}) + + def test_invalid_matching(self): + G = nx.path_graph(4) + assert not nx.is_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_invalid_edge(self): + G = nx.path_graph(4) + assert not nx.is_matching(G, {(0, 3), (1, 2)}) + raises(nx.NetworkXError, nx.is_matching, G, {(0, 55)}) + + G = nx.DiGraph(G.edges) + assert nx.is_matching(G, {(0, 1)}) + assert not nx.is_matching(G, {(1, 0)}) + + +class TestIsMaximalMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_maximal_matching` function. + + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_invalid_input(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # edge to node not in G + raises(error, nx.is_maximal_matching, G, {(0, 5)}) + raises(error, nx.is_maximal_matching, G, {(5, 0)}) + # edge not a 2-tuple + raises(error, nx.is_maximal_matching, G, {(0, 1, 2), (2, 3)}) + raises(error, nx.is_maximal_matching, G, {(0,), (2, 3)}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_maximal_matching(G, {(0, 1), (2, 3)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1), (1, 2), (2, 3)}) + assert not nx.is_maximal_matching(G, {(0, 3)}) + G.add_edge(0, 0) + assert not nx.is_maximal_matching(G, {(0, 0)}) + + def test_not_maximal(self): + G = nx.path_graph(4) + assert not nx.is_maximal_matching(G, {(0, 1)}) + + +class TestIsPerfectMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.is_perfect_matching` function. 
+ + """ + + def test_dict(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {0: 1, 1: 0, 2: 3, 3: 2}) + + def test_valid(self): + G = nx.path_graph(4) + assert nx.is_perfect_matching(G, {(0, 1), (2, 3)}) + + def test_valid_not_path(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + G.add_edge(5, 2) + + assert nx.is_perfect_matching(G, {(1, 4), (0, 3), (5, 2)}) + + def test_invalid_input(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # edge to node not in G + raises(error, nx.is_perfect_matching, G, {(0, 5)}) + raises(error, nx.is_perfect_matching, G, {(5, 0)}) + # edge not a 2-tuple + raises(error, nx.is_perfect_matching, G, {(0, 1, 2), (2, 3)}) + raises(error, nx.is_perfect_matching, G, {(0,), (2, 3)}) + + def test_selfloops(self): + error = nx.NetworkXError + G = nx.path_graph(4) + # selfloop for node not in G + raises(error, nx.is_perfect_matching, G, {(5, 5), (2, 3)}) + # selfloop edge not in G + assert not nx.is_perfect_matching(G, {(0, 0), (1, 2), (2, 3)}) + # selfloop edge in G + G.add_edge(0, 0) + assert not nx.is_perfect_matching(G, {(0, 0), (1, 2)}) + + def test_not_matching(self): + G = nx.path_graph(4) + assert not nx.is_perfect_matching(G, {(0, 3)}) + assert not nx.is_perfect_matching(G, {(0, 1), (1, 2), (2, 3)}) + + def test_maximal_but_not_perfect(self): + G = nx.cycle_graph(4) + G.add_edge(0, 4) + G.add_edge(1, 4) + + assert not nx.is_perfect_matching(G, {(1, 4), (0, 3)}) + + +class TestMaximalMatching: + """Unit tests for the + :func:`~networkx.algorithms.matching.maximal_matching`. + + """ + + def test_valid_matching(self): + edges = [(1, 2), (1, 5), (2, 3), (2, 5), (3, 4), (3, 6), (5, 6)] + G = nx.Graph(edges) + matching = nx.maximal_matching(G) + assert nx.is_maximal_matching(G, matching) + + def test_single_edge_matching(self): + # In the star graph, any maximal matching has just one edge. + G = nx.star_graph(5) + matching = nx.maximal_matching(G) + assert 1 == len(matching) + assert nx.is_maximal_matching(G, matching) + + def test_self_loops(self): + # Create the path graph with two self-loops. + G = nx.path_graph(3) + G.add_edges_from([(0, 0), (1, 1)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + # The matching should never include self-loops. + assert not any(u == v for u, v in matching) + assert nx.is_maximal_matching(G, matching) + + def test_ordering(self): + """Tests that a maximal matching is computed correctly + regardless of the order in which nodes are added to the graph. + + """ + for nodes in permutations(range(3)): + G = nx.Graph() + G.add_nodes_from(nodes) + G.add_edges_from([(0, 1), (0, 2)]) + matching = nx.maximal_matching(G) + assert len(matching) == 1 + assert nx.is_maximal_matching(G, matching) + + def test_wrong_graph_type(self): + error = nx.NetworkXNotImplemented + raises(error, nx.maximal_matching, nx.MultiGraph()) + raises(error, nx.maximal_matching, nx.MultiDiGraph()) + raises(error, nx.maximal_matching, nx.DiGraph()) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_max_weight_clique.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_max_weight_clique.py new file mode 100644 index 0000000000000000000000000000000000000000..fc3900c58a80b08f01357bd4ad75a0a68c838047 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_max_weight_clique.py @@ -0,0 +1,181 @@ +"""Maximum weight clique test suite. 
+ +""" + +import pytest + +import networkx as nx + + +class TestMaximumWeightClique: + def test_basic_cases(self): + def check_basic_case(graph_func, expected_weight, weight_accessor): + graph = graph_func() + clique, weight = nx.algorithms.max_weight_clique(graph, weight_accessor) + assert verify_clique( + graph, clique, weight, expected_weight, weight_accessor + ) + + for graph_func, (expected_weight, expected_size) in TEST_CASES.items(): + check_basic_case(graph_func, expected_weight, "weight") + check_basic_case(graph_func, expected_size, None) + + def test_key_error(self): + graph = two_node_graph() + with pytest.raises(KeyError): + nx.algorithms.max_weight_clique(graph, "nonexistent-key") + + def test_error_on_non_integer_weight(self): + graph = two_node_graph() + graph.nodes[2]["weight"] = 1.5 + with pytest.raises(ValueError): + nx.algorithms.max_weight_clique(graph) + + def test_unaffected_by_self_loops(self): + graph = two_node_graph() + graph.add_edge(1, 1) + graph.add_edge(2, 2) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 30, "weight") + graph = three_node_independent_set() + graph.add_edge(1, 1) + clique, weight = nx.algorithms.max_weight_clique(graph, "weight") + assert verify_clique(graph, clique, weight, 20, "weight") + + def test_30_node_prob(self): + G = nx.Graph() + G.add_nodes_from(range(1, 31)) + for i in range(1, 31): + G.nodes[i]["weight"] = i + 1 + # fmt: off + G.add_edges_from( + [ + (1, 12), (1, 13), (1, 15), (1, 16), (1, 18), (1, 19), (1, 20), + (1, 23), (1, 26), (1, 28), (1, 29), (1, 30), (2, 3), (2, 4), + (2, 5), (2, 8), (2, 9), (2, 10), (2, 14), (2, 17), (2, 18), + (2, 21), (2, 22), (2, 23), (2, 27), (3, 9), (3, 15), (3, 21), + (3, 22), (3, 23), (3, 24), (3, 27), (3, 28), (3, 29), (4, 5), + (4, 6), (4, 8), (4, 21), (4, 22), (4, 23), (4, 26), (4, 28), + (4, 30), (5, 6), (5, 8), (5, 9), (5, 13), (5, 14), (5, 15), + (5, 16), (5, 20), (5, 21), (5, 22), (5, 25), (5, 28), (5, 29), + (6, 7), (6, 8), (6, 13), (6, 17), (6, 18), (6, 19), (6, 24), + (6, 26), (6, 27), (6, 28), (6, 29), (7, 12), (7, 14), (7, 15), + (7, 16), (7, 17), (7, 20), (7, 25), (7, 27), (7, 29), (7, 30), + (8, 10), (8, 15), (8, 16), (8, 18), (8, 20), (8, 22), (8, 24), + (8, 26), (8, 27), (8, 28), (8, 30), (9, 11), (9, 12), (9, 13), + (9, 14), (9, 15), (9, 16), (9, 19), (9, 20), (9, 21), (9, 24), + (9, 30), (10, 12), (10, 15), (10, 18), (10, 19), (10, 20), + (10, 22), (10, 23), (10, 24), (10, 26), (10, 27), (10, 29), + (10, 30), (11, 13), (11, 15), (11, 16), (11, 17), (11, 18), + (11, 19), (11, 20), (11, 22), (11, 29), (11, 30), (12, 14), + (12, 17), (12, 18), (12, 19), (12, 20), (12, 21), (12, 23), + (12, 25), (12, 26), (12, 30), (13, 20), (13, 22), (13, 23), + (13, 24), (13, 30), (14, 16), (14, 20), (14, 21), (14, 22), + (14, 23), (14, 25), (14, 26), (14, 27), (14, 29), (14, 30), + (15, 17), (15, 18), (15, 20), (15, 21), (15, 26), (15, 27), + (15, 28), (16, 17), (16, 18), (16, 19), (16, 20), (16, 21), + (16, 29), (16, 30), (17, 18), (17, 21), (17, 22), (17, 25), + (17, 27), (17, 28), (17, 30), (18, 19), (18, 20), (18, 21), + (18, 22), (18, 23), (18, 24), (19, 20), (19, 22), (19, 23), + (19, 24), (19, 25), (19, 27), (19, 30), (20, 21), (20, 23), + (20, 24), (20, 26), (20, 28), (20, 29), (21, 23), (21, 26), + (21, 27), (21, 29), (22, 24), (22, 25), (22, 26), (22, 29), + (23, 25), (23, 30), (24, 25), (24, 26), (25, 27), (25, 29), + (26, 27), (26, 28), (26, 30), (28, 29), (29, 30), + ] + ) + # fmt: on + clique, weight = 
nx.algorithms.max_weight_clique(G)
+        assert verify_clique(G, clique, weight, 111, "weight")
+
+
+# ############################ Utility functions ############################
+def verify_clique(
+    graph, clique, reported_clique_weight, expected_clique_weight, weight_accessor
+):
+    # Every pair of distinct nodes in a clique must be adjacent.
+    for node1 in clique:
+        for node2 in clique:
+            if node1 == node2:
+                continue
+            if not graph.has_edge(node1, node2):
+                return False
+
+    if weight_accessor is None:
+        clique_weight = len(clique)
+    else:
+        clique_weight = sum(graph.nodes[v][weight_accessor] for v in clique)
+
+    if clique_weight != expected_clique_weight:
+        return False
+    if clique_weight != reported_clique_weight:
+        return False
+
+    return True
+
+
+# ############################ Graph Generation ############################
+
+
+def empty_graph():
+    return nx.Graph()
+
+
+def one_node_graph():
+    graph = nx.Graph()
+    graph.add_nodes_from([1])
+    graph.nodes[1]["weight"] = 10
+    return graph
+
+
+def two_node_graph():
+    graph = nx.Graph()
+    graph.add_nodes_from([1, 2])
+    graph.add_edges_from([(1, 2)])
+    graph.nodes[1]["weight"] = 10
+    graph.nodes[2]["weight"] = 20
+    return graph
+
+
+def three_node_clique():
+    graph = nx.Graph()
+    graph.add_nodes_from([1, 2, 3])
+    graph.add_edges_from([(1, 2), (1, 3), (2, 3)])
+    graph.nodes[1]["weight"] = 10
+    graph.nodes[2]["weight"] = 20
+    graph.nodes[3]["weight"] = 5
+    return graph
+
+
+def three_node_independent_set():
+    graph = nx.Graph()
+    graph.add_nodes_from([1, 2, 3])
+    graph.nodes[1]["weight"] = 10
+    graph.nodes[2]["weight"] = 20
+    graph.nodes[3]["weight"] = 5
+    return graph
+
+
+def disconnected():
+    graph = nx.Graph()
+    graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)])
+    graph.nodes[1]["weight"] = 10
+    graph.nodes[2]["weight"] = 20
+    graph.nodes[3]["weight"] = 5
+    graph.nodes[4]["weight"] = 100
+    graph.nodes[5]["weight"] = 200
+    graph.nodes[6]["weight"] = 50
+    return graph
+
+
+# --------------------------------------------------------------------------
+# Basic test cases
+# For each basic graph function, specify the expected weight of the maximum
+# weight clique and the expected size of the maximum clique
+TEST_CASES = {
+    empty_graph: (0, 0),
+    one_node_graph: (10, 1),
+    two_node_graph: (30, 2),
+    three_node_clique: (35, 3),
+    three_node_independent_set: (20, 1),
+    disconnected: (300, 2),
+}
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_mis.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_mis.py
new file mode 100644
index 0000000000000000000000000000000000000000..379c5c07c7a050ac2ab799355070eb5e674fc621
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_mis.py
@@ -0,0 +1,62 @@
+"""
+Tests for maximal (not maximum) independent sets.
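+
+A maximal independent set is one that cannot be enlarged by adding another
+node; it need not be a maximum (largest possible) independent set.  A small
+illustrative example of the function under test:
+
+    >>> import networkx as nx
+    >>> sorted(nx.maximal_independent_set(nx.path_graph(5), [2]))
+    [0, 2, 4]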
+ +""" + +import random + +import pytest + +import networkx as nx + + +def test_random_seed(): + G = nx.empty_graph(5) + assert nx.maximal_independent_set(G, seed=1) == [1, 0, 3, 2, 4] + + +@pytest.mark.parametrize("graph", [nx.complete_graph(5), nx.complete_graph(55)]) +def test_K5(graph): + """Maximal independent set for complete graphs""" + assert all(nx.maximal_independent_set(graph, [n]) == [n] for n in graph) + + +def test_exceptions(): + """Bad input should raise exception.""" + G = nx.florentine_families_graph() + pytest.raises(nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Smith"]) + pytest.raises( + nx.NetworkXUnfeasible, nx.maximal_independent_set, G, ["Salviati", "Pazzi"] + ) + # MaximalIndependentSet is not implemented for directed graphs + pytest.raises(nx.NetworkXNotImplemented, nx.maximal_independent_set, nx.DiGraph(G)) + + +def test_florentine_family(): + G = nx.florentine_families_graph() + indep = nx.maximal_independent_set(G, ["Medici", "Bischeri"]) + assert set(indep) == { + "Medici", + "Bischeri", + "Castellani", + "Pazzi", + "Ginori", + "Lamberteschi", + } + + +def test_bipartite(): + G = nx.complete_bipartite_graph(12, 34) + indep = nx.maximal_independent_set(G, [4, 5, 9, 10]) + assert sorted(indep) == list(range(12)) + + +def test_random_graphs(): + """Generate 5 random graphs of different types and sizes and + make sure that all sets are independent and maximal.""" + for i in range(0, 50, 10): + G = nx.erdos_renyi_graph(i * 10 + 1, random.random()) + IS = nx.maximal_independent_set(G) + assert G.subgraph(IS).number_of_edges() == 0 + neighbors_of_MIS = set.union(*(set(G.neighbors(v)) for v in IS)) + assert all(v in neighbors_of_MIS for v in set(G.nodes()).difference(IS)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_moral.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_moral.py new file mode 100644 index 0000000000000000000000000000000000000000..fc98c9729a95897857013ae22333e3b8c17202fb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_moral.py @@ -0,0 +1,15 @@ +import networkx as nx +from networkx.algorithms.moral import moral_graph + + +def test_get_moral_graph(): + graph = nx.DiGraph() + graph.add_nodes_from([1, 2, 3, 4, 5, 6, 7]) + graph.add_edges_from([(1, 2), (3, 2), (4, 1), (4, 5), (6, 5), (7, 5)]) + H = moral_graph(graph) + assert not H.is_directed() + assert H.has_edge(1, 3) + assert H.has_edge(4, 6) + assert H.has_edge(6, 7) + assert H.has_edge(4, 7) + assert not H.has_edge(1, 5) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_node_classification.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_node_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1fc79d48ae830625c3528f52e805d2e0d183ad --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_node_classification.py @@ -0,0 +1,140 @@ +import pytest + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + +import networkx as nx +from networkx.algorithms import node_classification + + +class TestHarmonicFunction: + def test_path_graph(self): + G = nx.path_graph(4) + label_name = "label" + G.nodes[0][label_name] = "A" + G.nodes[3][label_name] = "B" + predicted = node_classification.harmonic_function(G, label_name=label_name) + assert predicted[0] == "A" + assert predicted[1] == "A" + assert predicted[2] == "B" + assert predicted[3] == "B" + + def test_no_labels(self): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(4) + 
node_classification.harmonic_function(G)
+
+    def test_no_nodes(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            node_classification.harmonic_function(G)
+
+    def test_no_edges(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_node(1)
+            G.add_node(2)
+            node_classification.harmonic_function(G)
+
+    def test_digraph(self):
+        with pytest.raises(nx.NetworkXNotImplemented):
+            G = nx.DiGraph()
+            G.add_edge(0, 1)
+            G.add_edge(1, 2)
+            G.add_edge(2, 3)
+            label_name = "label"
+            G.nodes[0][label_name] = "A"
+            G.nodes[3][label_name] = "B"
+            node_classification.harmonic_function(G)
+
+    def test_one_labeled_node(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "A"
+        assert predicted[3] == "A"
+
+    def test_nodes_all_labeled(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        for i in range(len(G)):
+            assert predicted[i] == G.nodes[i][label_name]
+
+    def test_labeled_nodes_are_not_changed(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        label_removed = {0, 1, 2, 3, 4, 5, 6, 7}
+        for i in label_removed:
+            del G.nodes[i][label_name]
+        predicted = node_classification.harmonic_function(G, label_name=label_name)
+        label_not_removed = set(range(len(G))) - label_removed
+        for i in label_not_removed:
+            assert predicted[i] == G.nodes[i][label_name]
+
+
+class TestLocalAndGlobalConsistency:
+    def test_path_graph(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        G.nodes[3][label_name] = "B"
+        predicted = node_classification.local_and_global_consistency(
+            G, label_name=label_name
+        )
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "B"
+        assert predicted[3] == "B"
+
+    def test_no_labels(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.path_graph(4)
+            node_classification.local_and_global_consistency(G)
+
+    def test_no_nodes(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            node_classification.local_and_global_consistency(G)
+
+    def test_no_edges(self):
+        with pytest.raises(nx.NetworkXError):
+            G = nx.Graph()
+            G.add_node(1)
+            G.add_node(2)
+            node_classification.local_and_global_consistency(G)
+
+    def test_digraph(self):
+        with pytest.raises(nx.NetworkXNotImplemented):
+            G = nx.DiGraph()
+            G.add_edge(0, 1)
+            G.add_edge(1, 2)
+            G.add_edge(2, 3)
+            label_name = "label"
+            G.nodes[0][label_name] = "A"
+            G.nodes[3][label_name] = "B"
+            node_classification.local_and_global_consistency(G)
+
+    def test_one_labeled_node(self):
+        G = nx.path_graph(4)
+        label_name = "label"
+        G.nodes[0][label_name] = "A"
+        predicted = node_classification.local_and_global_consistency(
+            G, label_name=label_name
+        )
+        assert predicted[0] == "A"
+        assert predicted[1] == "A"
+        assert predicted[2] == "A"
+        assert predicted[3] == "A"
+
+    def test_nodes_all_labeled(self):
+        G = nx.karate_club_graph()
+        label_name = "club"
+        predicted = node_classification.local_and_global_consistency(
+            G, alpha=0, label_name=label_name
+        )
+        for i in range(len(G)):
+            assert predicted[i] == G.nodes[i][label_name]
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_non_randomness.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_non_randomness.py
new file mode 100644
index 
0000000000000000000000000000000000000000..1f6de597e7cde7942bf8480253f737d7701b58f6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_non_randomness.py @@ -0,0 +1,37 @@ +import pytest + +import networkx as nx + +np = pytest.importorskip("numpy") + + +@pytest.mark.parametrize( + "k, weight, expected", + [ + (None, None, 7.21), # infers 3 communities + (2, None, 11.7), + (None, "weight", 25.45), + (2, "weight", 38.8), + ], +) +def test_non_randomness(k, weight, expected): + G = nx.karate_club_graph() + np.testing.assert_almost_equal( + nx.non_randomness(G, k, weight)[0], expected, decimal=2 + ) + + +def test_non_connected(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_node(3) + with pytest.raises(nx.NetworkXException): + nx.non_randomness(G) + + +def test_self_loops(): + G = nx.Graph() + G.add_edge(1, 2) + G.add_edge(1, 1) + with pytest.raises(nx.NetworkXError): + nx.non_randomness(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planar_drawing.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planar_drawing.py new file mode 100644 index 0000000000000000000000000000000000000000..c1c45ece998cd15831d904ccefb393a3dde8aeab --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planar_drawing.py @@ -0,0 +1,274 @@ +import math + +import pytest + +import networkx as nx +from networkx.algorithms.planar_drawing import triangulate_embedding + + +def test_graph1(): + embedding_data = {0: [1, 2, 3], 1: [2, 0], 2: [3, 0, 1], 3: [2, 0]} + check_embedding_data(embedding_data) + + +def test_graph2(): + embedding_data = { + 0: [8, 6], + 1: [2, 6, 9], + 2: [8, 1, 7, 9, 6, 4], + 3: [9], + 4: [2], + 5: [6, 8], + 6: [9, 1, 0, 5, 2], + 7: [9, 2], + 8: [0, 2, 5], + 9: [1, 6, 2, 7, 3], + } + check_embedding_data(embedding_data) + + +def test_circle_graph(): + embedding_data = { + 0: [1, 9], + 1: [0, 2], + 2: [1, 3], + 3: [2, 4], + 4: [3, 5], + 5: [4, 6], + 6: [5, 7], + 7: [6, 8], + 8: [7, 9], + 9: [8, 0], + } + check_embedding_data(embedding_data) + + +def test_grid_graph(): + embedding_data = { + (0, 1): [(0, 0), (1, 1), (0, 2)], + (1, 2): [(1, 1), (2, 2), (0, 2)], + (0, 0): [(0, 1), (1, 0)], + (2, 1): [(2, 0), (2, 2), (1, 1)], + (1, 1): [(2, 1), (1, 2), (0, 1), (1, 0)], + (2, 0): [(1, 0), (2, 1)], + (2, 2): [(1, 2), (2, 1)], + (1, 0): [(0, 0), (2, 0), (1, 1)], + (0, 2): [(1, 2), (0, 1)], + } + check_embedding_data(embedding_data) + + +def test_one_node_graph(): + embedding_data = {0: []} + check_embedding_data(embedding_data) + + +def test_two_node_graph(): + embedding_data = {0: [1], 1: [0]} + check_embedding_data(embedding_data) + + +def test_three_node_graph(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1]} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph1(): + embedding_data = {0: [], 1: []} + check_embedding_data(embedding_data) + + +def test_multiple_component_graph2(): + embedding_data = {0: [1, 2], 1: [0, 2], 2: [0, 1], 3: [4, 5], 4: [3, 5], 5: [3, 4]} + check_embedding_data(embedding_data) + + +def test_invalid_half_edge(): + with pytest.raises(nx.NetworkXException): + embedding_data = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2, 4], 4: [1, 2, 3]} + embedding = nx.PlanarEmbedding() + embedding.set_data(embedding_data) + nx.combinatorial_embedding_to_pos(embedding) + + +def test_triangulate_embedding1(): + embedding = nx.PlanarEmbedding() + embedding.add_node(1) + expected_embedding = {1: []} + check_triangulation(embedding, expected_embedding) + + +def test_triangulate_embedding2(): + embedding = 
nx.PlanarEmbedding()
+    embedding.connect_components(1, 2)
+    expected_embedding = {1: [2], 2: [1]}
+    check_triangulation(embedding, expected_embedding)
+
+
+def check_triangulation(embedding, expected_embedding):
+    res_embedding, _ = triangulate_embedding(embedding, True)
+    assert (
+        res_embedding.get_data() == expected_embedding
+    ), "Expected embedding incorrect"
+    res_embedding, _ = triangulate_embedding(embedding, False)
+    assert (
+        res_embedding.get_data() == expected_embedding
+    ), "Expected embedding incorrect"
+
+
+def check_embedding_data(embedding_data):
+    """Checks that the planar embedding of the input is correct"""
+    embedding = nx.PlanarEmbedding()
+    embedding.set_data(embedding_data)
+    pos_fully = nx.combinatorial_embedding_to_pos(embedding, False)
+    msg = "Planar drawing does not conform to the embedding (full triangulation)"
+    assert planar_drawing_conforms_to_embedding(embedding, pos_fully), msg
+    check_edge_intersections(embedding, pos_fully)
+    pos_internally = nx.combinatorial_embedding_to_pos(embedding, True)
+    msg = "Planar drawing does not conform to the embedding (internal triangulation)"
+    assert planar_drawing_conforms_to_embedding(embedding, pos_internally), msg
+    check_edge_intersections(embedding, pos_internally)
+
+
+def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
+    # Check whether two floats are essentially equal; math.isclose in the
+    # standard library (Python >= 3.5) provides the same check.
+    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
+
+def point_in_between(a, b, p):
+    # checks if p lies on the line segment between a and b
+    x1, y1 = a
+    x2, y2 = b
+    px, py = p
+    dist_1_2 = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
+    dist_1_p = math.sqrt((x1 - px) ** 2 + (y1 - py) ** 2)
+    dist_2_p = math.sqrt((x2 - px) ** 2 + (y2 - py) ** 2)
+    return is_close(dist_1_p + dist_2_p, dist_1_2)
+
+
+def check_edge_intersections(G, pos):
+    """Check all edges in G for intersections.
+
+    Raises an exception if an intersection is found.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+    pos : dict
+        Maps every node to a tuple (x, y) representing its position
+
+    """
+    for a, b in G.edges():
+        for c, d in G.edges():
+            # Check if end points are different
+            if a != c and b != d and b != c and a != d:
+                x1, y1 = pos[a]
+                x2, y2 = pos[b]
+                x3, y3 = pos[c]
+                x4, y4 = pos[d]
+                determinant = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
+                if determinant != 0:  # the lines are not parallel
+                    # calculate intersection point, see:
+                    # https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
+                    # The whole numerator is divided by the determinant.
+                    px = (
+                        (x1 * y2 - y1 * x2) * (x3 - x4)
+                        - (x1 - x2) * (x3 * y4 - y3 * x4)
+                    ) / determinant
+                    py = (
+                        (x1 * y2 - y1 * x2) * (y3 - y4)
+                        - (y1 - y2) * (x3 * y4 - y3 * x4)
+                    ) / determinant
+
+                    # Check if intersection lies between the points
+                    if point_in_between(pos[a], pos[b], (px, py)) and point_in_between(
+                        pos[c], pos[d], (px, py)
+                    ):
+                        msg = f"There is an intersection at {px},{py}"
+                        raise nx.NetworkXException(msg)
+
+                # Check overlap
+                msg = "A node lies on an edge connecting two other nodes"
+                if (
+                    point_in_between(pos[a], pos[b], pos[c])
+                    or point_in_between(pos[a], pos[b], pos[d])
+                    or point_in_between(pos[c], pos[d], pos[a])
+                    or point_in_between(pos[c], pos[d], pos[b])
+                ):
+                    raise nx.NetworkXException(msg)
+    # No edge intersection found
+
+
+class Vector:
+    """Compare vectors by their angle without loss of precision
+
+    All vectors in direction [0, 1] are the smallest.
+    The vectors grow in clockwise direction.
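+    Quadrants are compared first; ties are broken by the sign of the 2D
+    cross product, so no floating-point angle is ever computed.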
+ """ + + __slots__ = ["x", "y", "node", "quadrant"] + + def __init__(self, x, y, node): + self.x = x + self.y = y + self.node = node + if self.x >= 0 and self.y > 0: + self.quadrant = 1 + elif self.x > 0 and self.y <= 0: + self.quadrant = 2 + elif self.x <= 0 and self.y < 0: + self.quadrant = 3 + else: + self.quadrant = 4 + + def __eq__(self, other): + return self.quadrant == other.quadrant and self.x * other.y == self.y * other.x + + def __lt__(self, other): + if self.quadrant < other.quadrant: + return True + elif self.quadrant > other.quadrant: + return False + else: + return self.x * other.y < self.y * other.x + + def __ne__(self, other): + return self != other + + def __le__(self, other): + return not other < self + + def __gt__(self, other): + return other < self + + def __ge__(self, other): + return not self < other + + +def planar_drawing_conforms_to_embedding(embedding, pos): + """Checks if pos conforms to the planar embedding + + Returns true iff the neighbors are actually oriented in the orientation + specified of the embedding + """ + for v in embedding: + nbr_vectors = [] + v_pos = pos[v] + for nbr in embedding[v]: + new_vector = Vector(pos[nbr][0] - v_pos[0], pos[nbr][1] - v_pos[1], nbr) + nbr_vectors.append(new_vector) + # Sort neighbors according to their phi angle + nbr_vectors.sort() + for idx, nbr_vector in enumerate(nbr_vectors): + cw_vector = nbr_vectors[(idx + 1) % len(nbr_vectors)] + ccw_vector = nbr_vectors[idx - 1] + if ( + embedding[v][nbr_vector.node]["cw"] != cw_vector.node + or embedding[v][nbr_vector.node]["ccw"] != ccw_vector.node + ): + return False + if cw_vector.node != nbr_vector.node and cw_vector == nbr_vector: + # Lines overlap + return False + if ccw_vector.node != nbr_vector.node and ccw_vector == nbr_vector: + # Lines overlap + return False + return True diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planarity.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planarity.py new file mode 100644 index 0000000000000000000000000000000000000000..470b1d23bb806e9e1297b8cb52f5c03a569a47ff --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_planarity.py @@ -0,0 +1,442 @@ +import pytest + +import networkx as nx +from networkx.algorithms.planarity import ( + check_planarity_recursive, + get_counterexample, + get_counterexample_recursive, +) + + +class TestLRPlanarity: + """Nose Unit tests for the :mod:`networkx.algorithms.planarity` module. + + Tests three things: + 1. Check that the result is correct + (returns planar if and only if the graph is actually planar) + 2. In case a counter example is returned: Check if it is correct + 3. In case an embedding is returned: Check if its actually an embedding + """ + + @staticmethod + def check_graph(G, is_planar=None): + """Raises an exception if the lr_planarity check returns a wrong result + + Parameters + ---------- + G : NetworkX graph + is_planar : bool + The expected result of the planarity check. + If set to None only counter example or embedding are verified. + + """ + + # obtain results of planarity check + is_planar_lr, result = nx.check_planarity(G, True) + is_planar_lr_rec, result_rec = check_planarity_recursive(G, True) + + if is_planar is not None: + # set a message for the assert + if is_planar: + msg = "Wrong planarity check result. Should be planar." + else: + msg = "Wrong planarity check result. Should be non-planar." 
+ + # check if the result is as expected + assert is_planar == is_planar_lr, msg + assert is_planar == is_planar_lr_rec, msg + + if is_planar_lr: + # check embedding + check_embedding(G, result) + check_embedding(G, result_rec) + else: + # check counter example + check_counterexample(G, result) + check_counterexample(G, result_rec) + + def test_simple_planar_graph(self): + e = [ + (1, 2), + (2, 3), + (3, 4), + (4, 6), + (6, 7), + (7, 1), + (1, 5), + (5, 2), + (2, 4), + (4, 5), + (5, 7), + ] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_planar_with_selfloop(self): + e = [ + (1, 1), + (2, 2), + (3, 3), + (4, 4), + (5, 5), + (1, 2), + (1, 3), + (1, 5), + (2, 5), + (2, 4), + (3, 4), + (3, 5), + (4, 5), + ] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_k3_3(self): + self.check_graph(nx.complete_bipartite_graph(3, 3), is_planar=False) + + def test_k5(self): + self.check_graph(nx.complete_graph(5), is_planar=False) + + def test_multiple_components_planar(self): + e = [(1, 2), (2, 3), (3, 1), (4, 5), (5, 6), (6, 4)] + self.check_graph(nx.Graph(e), is_planar=True) + + def test_multiple_components_non_planar(self): + G = nx.complete_graph(5) + # add another planar component to the non planar component + # G stays non planar + G.add_edges_from([(6, 7), (7, 8), (8, 6)]) + self.check_graph(G, is_planar=False) + + def test_non_planar_with_selfloop(self): + G = nx.complete_graph(5) + # add self loops + for i in range(5): + G.add_edge(i, i) + self.check_graph(G, is_planar=False) + + def test_non_planar1(self): + # tests a graph that has no subgraph directly isomorph to K5 or K3_3 + e = [ + (1, 5), + (1, 6), + (1, 7), + (2, 6), + (2, 3), + (3, 5), + (3, 7), + (4, 5), + (4, 6), + (4, 7), + ] + self.check_graph(nx.Graph(e), is_planar=False) + + def test_loop(self): + # test a graph with a selfloop + e = [(1, 2), (2, 2)] + G = nx.Graph(e) + self.check_graph(G, is_planar=True) + + def test_comp(self): + # test multiple component graph + e = [(1, 2), (3, 4)] + G = nx.Graph(e) + G.remove_edge(1, 2) + self.check_graph(G, is_planar=True) + + def test_goldner_harary(self): + # test goldner-harary graph (a maximal planar graph) + e = [ + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 7), + (1, 8), + (1, 10), + (1, 11), + (2, 3), + (2, 4), + (2, 6), + (2, 7), + (2, 9), + (2, 10), + (2, 11), + (3, 4), + (4, 5), + (4, 6), + (4, 7), + (5, 7), + (6, 7), + (7, 8), + (7, 9), + (7, 10), + (8, 10), + (9, 10), + (10, 11), + ] + G = nx.Graph(e) + self.check_graph(G, is_planar=True) + + def test_planar_multigraph(self): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 2), (1, 2), (2, 3), (3, 1)]) + self.check_graph(G, is_planar=True) + + def test_non_planar_multigraph(self): + G = nx.MultiGraph(nx.complete_graph(5)) + G.add_edges_from([(1, 2)] * 5) + self.check_graph(G, is_planar=False) + + def test_planar_digraph(self): + G = nx.DiGraph([(1, 2), (2, 3), (2, 4), (4, 1), (4, 2), (1, 4), (3, 2)]) + self.check_graph(G, is_planar=True) + + def test_non_planar_digraph(self): + G = nx.DiGraph(nx.complete_graph(5)) + G.remove_edge(1, 2) + G.remove_edge(4, 1) + self.check_graph(G, is_planar=False) + + def test_single_component(self): + # Test a graph with only a single node + G = nx.Graph() + G.add_node(1) + self.check_graph(G, is_planar=True) + + def test_graph1(self): + G = nx.Graph( + [ + (3, 10), + (2, 13), + (1, 13), + (7, 11), + (0, 8), + (8, 13), + (0, 2), + (0, 7), + (0, 10), + (1, 7), + ] + ) + self.check_graph(G, is_planar=True) + + def test_graph2(self): + G = nx.Graph( + [ + (1, 2), + (4, 13), + (0, 13), 
+ (4, 5), + (7, 10), + (1, 7), + (0, 3), + (2, 6), + (5, 6), + (7, 13), + (4, 8), + (0, 8), + (0, 9), + (2, 13), + (6, 7), + (3, 6), + (2, 8), + ] + ) + self.check_graph(G, is_planar=False) + + def test_graph3(self): + G = nx.Graph( + [ + (0, 7), + (3, 11), + (3, 4), + (8, 9), + (4, 11), + (1, 7), + (1, 13), + (1, 11), + (3, 5), + (5, 7), + (1, 3), + (0, 4), + (5, 11), + (5, 13), + ] + ) + self.check_graph(G, is_planar=False) + + def test_counterexample_planar(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample(G) + + def test_counterexample_planar_recursive(self): + with pytest.raises(nx.NetworkXException): + # Try to get a counterexample of a planar graph + G = nx.Graph() + G.add_node(1) + get_counterexample_recursive(G) + + +def check_embedding(G, embedding): + """Raises an exception if the combinatorial embedding is not correct + + Parameters + ---------- + G : NetworkX graph + embedding : a dict mapping nodes to a list of edges + This specifies the ordering of the outgoing edges from a node for + a combinatorial embedding + + Notes + ----- + Checks the following things: + - The type of the embedding is correct + - The nodes and edges match the original graph + - Every half edge has its matching opposite half edge + - No intersections of edges (checked by Euler's formula) + """ + + if not isinstance(embedding, nx.PlanarEmbedding): + raise nx.NetworkXException("Bad embedding. Not of type nx.PlanarEmbedding") + + # Check structure + embedding.check_structure() + + # Check that graphs are equivalent + + assert set(G.nodes) == set( + embedding.nodes + ), "Bad embedding. Nodes don't match the original graph." + + # Check that the edges are equal + g_edges = set() + for edge in G.edges: + if edge[0] != edge[1]: + g_edges.add((edge[0], edge[1])) + g_edges.add((edge[1], edge[0])) + assert g_edges == set( + embedding.edges + ), "Bad embedding. Edges don't match the original graph." + + +def check_counterexample(G, sub_graph): + """Raises an exception if the counterexample is wrong. + + Parameters + ---------- + G : NetworkX graph + subdivision_nodes : set + A set of nodes inducing a subgraph as a counterexample + """ + # 1. Create the sub graph + sub_graph = nx.Graph(sub_graph) + + # 2. Remove self loops + for u in sub_graph: + if sub_graph.has_edge(u, u): + sub_graph.remove_edge(u, u) + + # keep track of nodes we might need to contract + contract = list(sub_graph) + + # 3. Contract Edges + while len(contract) > 0: + contract_node = contract.pop() + if contract_node not in sub_graph: + # Node was already contracted + continue + degree = sub_graph.degree[contract_node] + # Check if we can remove the node + if degree == 2: + # Get the two neighbors + neighbors = iter(sub_graph[contract_node]) + u = next(neighbors) + v = next(neighbors) + # Save nodes for later + contract.append(u) + contract.append(v) + # Contract edge + sub_graph.remove_node(contract_node) + sub_graph.add_edge(u, v) + + # 4. 
Check for isomorphism with K5 or K3_3 graphs + if len(sub_graph) == 5: + if not nx.is_isomorphic(nx.complete_graph(5), sub_graph): + raise nx.NetworkXException("Bad counter example.") + elif len(sub_graph) == 6: + if not nx.is_isomorphic(nx.complete_bipartite_graph(3, 3), sub_graph): + raise nx.NetworkXException("Bad counter example.") + else: + raise nx.NetworkXException("Bad counter example.") + + +class TestPlanarEmbeddingClass: + def test_get_data(self): + embedding = self.get_star_embedding(3) + data = embedding.get_data() + data_cmp = {0: [2, 1], 1: [0], 2: [0]} + assert data == data_cmp + + def test_missing_edge_orientation(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_edge(1, 2) + embedding.add_edge(2, 1) + # Invalid structure because the orientation of the edge was not set + embedding.check_structure() + + def test_invalid_edge_orientation(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + embedding.add_half_edge_first(2, 1) + embedding.add_edge(1, 3) + embedding.check_structure() + + def test_missing_half_edge(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + # Invalid structure because other half edge is missing + embedding.check_structure() + + def test_not_fulfilling_euler_formula(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + for i in range(5): + for j in range(5): + if i != j: + embedding.add_half_edge_first(i, j) + embedding.check_structure() + + def test_missing_reference(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_cw(1, 2, 3) + + def test_connect_components(self): + embedding = nx.PlanarEmbedding() + embedding.connect_components(1, 2) + + def test_successful_face_traversal(self): + embedding = nx.PlanarEmbedding() + embedding.add_half_edge_first(1, 2) + embedding.add_half_edge_first(2, 1) + face = embedding.traverse_face(1, 2) + assert face == [1, 2] + + def test_unsuccessful_face_traversal(self): + with pytest.raises(nx.NetworkXException): + embedding = nx.PlanarEmbedding() + embedding.add_edge(1, 2, ccw=2, cw=3) + embedding.add_edge(2, 1, ccw=1, cw=3) + embedding.traverse_face(1, 2) + + @staticmethod + def get_star_embedding(n): + embedding = nx.PlanarEmbedding() + for i in range(1, n): + embedding.add_half_edge_first(0, i) + embedding.add_half_edge_first(i, 0) + return embedding diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_polynomials.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_polynomials.py new file mode 100644 index 0000000000000000000000000000000000000000..a81d6a69551ead74d3335fda408111a0b580bf6a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_polynomials.py @@ -0,0 +1,57 @@ +"""Unit tests for the :mod:`networkx.algorithms.polynomials` module.""" + +import pytest + +import networkx as nx + +sympy = pytest.importorskip("sympy") + + +# Mapping of input graphs to a string representation of their tutte polynomials +_test_tutte_graphs = { + nx.complete_graph(1): "1", + nx.complete_graph(4): "x**3 + 3*x**2 + 4*x*y + 2*x + y**3 + 3*y**2 + 2*y", + nx.cycle_graph(5): "x**4 + x**3 + x**2 + x + y", + nx.diamond_graph(): "x**3 + 2*x**2 + 2*x*y + x + y**2 + y", +} + +_test_chromatic_graphs = { + nx.complete_graph(1): "x", + nx.complete_graph(4): "x**4 - 6*x**3 + 11*x**2 - 6*x", + nx.cycle_graph(5): "x**5 - 
5*x**4 + 10*x**3 - 10*x**2 + 4*x", + nx.diamond_graph(): "x**4 - 5*x**3 + 8*x**2 - 4*x", + nx.path_graph(5): "x**5 - 4*x**4 + 6*x**3 - 4*x**2 + x", +} + + +@pytest.mark.parametrize(("G", "expected"), _test_tutte_graphs.items()) +def test_tutte_polynomial(G, expected): + assert nx.tutte_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_tutte_graphs.keys()) +def test_tutte_polynomial_disjoint(G): + """Tutte polynomial factors into the Tutte polynomials of its components. + Verify this property with the disjoint union of two copies of the input graph. + """ + t_g = nx.tutte_polynomial(G) + H = nx.disjoint_union(G, G) + t_h = nx.tutte_polynomial(H) + assert sympy.simplify(t_g * t_g).equals(t_h) + + +@pytest.mark.parametrize(("G", "expected"), _test_chromatic_graphs.items()) +def test_chromatic_polynomial(G, expected): + assert nx.chromatic_polynomial(G).equals(expected) + + +@pytest.mark.parametrize("G", _test_chromatic_graphs.keys()) +def test_chromatic_polynomial_disjoint(G): + """Chromatic polynomial factors into the Chromatic polynomials of its + components. Verify this property with the disjoint union of two copies of + the input graph. + """ + x_g = nx.chromatic_polynomial(G) + H = nx.disjoint_union(G, G) + x_h = nx.chromatic_polynomial(H) + assert sympy.simplify(x_g * x_g).equals(x_h) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_reciprocity.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_reciprocity.py new file mode 100644 index 0000000000000000000000000000000000000000..e713bc4303f9bfea1199f01d8369c6bdab1a221f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_reciprocity.py @@ -0,0 +1,37 @@ +import pytest + +import networkx as nx + + +class TestReciprocity: + # test overall reciprocity by passing whole graph + def test_reciprocity_digraph(self): + DG = nx.DiGraph([(1, 2), (2, 1)]) + reciprocity = nx.reciprocity(DG) + assert reciprocity == 1.0 + + # test empty graph's overall reciprocity which will throw an error + def test_overall_reciprocity_empty_graph(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph() + nx.overall_reciprocity(DG) + + # test for reciprocity for a list of nodes + def test_reciprocity_graph_nodes(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, [1, 2]) + expected_reciprocity = {1: 0.0, 2: 0.6666666666666666} + assert reciprocity == expected_reciprocity + + # test for reciprocity for a single node + def test_reciprocity_graph_node(self): + DG = nx.DiGraph([(1, 2), (2, 3), (3, 2)]) + reciprocity = nx.reciprocity(DG, 2) + assert reciprocity == 0.6666666666666666 + + # test for reciprocity for an isolated node + def test_reciprocity_graph_isolated_nodes(self): + with pytest.raises(nx.NetworkXError): + DG = nx.DiGraph([(1, 2)]) + DG.add_node(4) + nx.reciprocity(DG, 4) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_regular.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_regular.py new file mode 100644 index 0000000000000000000000000000000000000000..0c8e4e465824310f0b950c1db972336f0bb8c9ca --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_regular.py @@ -0,0 +1,86 @@ +import pytest + +import networkx +import networkx as nx +import networkx.algorithms.regular as reg +import networkx.generators as gen + + +class TestKFactor: + def test_k_factor_trivial(self): + g = gen.cycle_graph(4) + f = reg.k_factor(g, 2) + assert g.edges == f.edges + + def test_k_factor1(self): + g = gen.grid_2d_graph(4, 4) + 
g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 2 + + def test_k_factor2(self): + g = gen.complete_graph(6) + g_kf = reg.k_factor(g, 3) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 3 + + def test_k_factor3(self): + g = gen.grid_2d_graph(4, 4) + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 3) + + def test_k_factor4(self): + g = gen.lattice.hexagonal_lattice_graph(4, 4) + # Perfect matching doesn't exist for 4,4 hexagonal lattice graph + with pytest.raises(nx.NetworkXUnfeasible): + reg.k_factor(g, 2) + + def test_k_factor5(self): + g = gen.complete_graph(6) + # small k to exercise SmallKGadget + g_kf = reg.k_factor(g, 2) + for edge in g_kf.edges(): + assert g.has_edge(edge[0], edge[1]) + for _, degree in g_kf.degree(): + assert degree == 2 + + +class TestIsRegular: + def test_is_regular1(self): + g = gen.cycle_graph(4) + assert reg.is_regular(g) + + def test_is_regular2(self): + g = gen.complete_graph(5) + assert reg.is_regular(g) + + def test_is_regular3(self): + g = gen.lollipop_graph(5, 5) + assert not reg.is_regular(g) + + def test_is_regular4(self): + g = nx.DiGraph() + g.add_edges_from([(0, 1), (1, 2), (2, 0)]) + assert reg.is_regular(g) + + +class TestIsKRegular: + def test_is_k_regular1(self): + g = gen.cycle_graph(4) + assert reg.is_k_regular(g, 2) + assert not reg.is_k_regular(g, 3) + + def test_is_k_regular2(self): + g = gen.complete_graph(5) + assert reg.is_k_regular(g, 4) + assert not reg.is_k_regular(g, 3) + assert not reg.is_k_regular(g, 6) + + def test_is_k_regular3(self): + g = gen.lollipop_graph(5, 5) + assert not reg.is_k_regular(g, 5) + assert not reg.is_k_regular(g, 6) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_richclub.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_richclub.py new file mode 100644 index 0000000000000000000000000000000000000000..5638ddbf007a6075c3b87a1118ced18a4aa2b7a6 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_richclub.py @@ -0,0 +1,97 @@ +import pytest + +import networkx as nx + + +def test_richclub(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rc = nx.richclub.rich_club_coefficient(G, normalized=False) + assert rc == {0: 12.0 / 30, 1: 8.0 / 12} + + # test single value + rc0 = nx.richclub.rich_club_coefficient(G, normalized=False)[0] + assert rc0 == 12.0 / 30.0 + + +def test_richclub_seed(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2, seed=1) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub_normalized(): + G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)]) + rcNorm = nx.richclub.rich_club_coefficient(G, Q=2) + assert rcNorm == {0: 1.0, 1: 1.0} + + +def test_richclub2(): + T = nx.balanced_tree(2, 10) + rc = nx.richclub.rich_club_coefficient(T, normalized=False) + assert rc == { + 0: 4092 / (2047 * 2046.0), + 1: (2044.0 / (1023 * 1022)), + 2: (2040.0 / (1022 * 1021)), + } + + +def test_richclub3(): + # tests edgecase + G = nx.karate_club_graph() + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == { + 0: 156.0 / 1122, + 1: 154.0 / 1056, + 2: 110.0 / 462, + 3: 78.0 / 240, + 4: 44.0 / 90, + 5: 22.0 / 42, + 6: 10.0 / 20, + 7: 10.0 / 20, + 8: 10.0 / 20, + 9: 6.0 / 12, + 10: 2.0 / 6, + 11: 2.0 / 6, + 12: 0.0, + 13: 0.0, + 14: 0.0, + 15: 0.0, + } + + +def test_richclub4(): + G = 
nx.Graph() + G.add_edges_from( + [(0, 1), (0, 2), (0, 3), (0, 4), (4, 5), (5, 9), (6, 9), (7, 9), (8, 9)] + ) + rc = nx.rich_club_coefficient(G, normalized=False) + assert rc == {0: 18 / 90.0, 1: 6 / 12.0, 2: 0.0, 3: 0.0} + + +def test_richclub_exception(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + nx.rich_club_coefficient(G) + + +def test_rich_club_exception2(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.rich_club_coefficient(G) + + +def test_rich_club_selfloop(): + G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + G.add_edge(1, 1) # self loop + G.add_edge(1, 2) + with pytest.raises( + Exception, + match="rich_club_coefficient is not implemented for " "graphs with self loops.", + ): + nx.rich_club_coefficient(G) + + +# def test_richclub2_normalized(): +# T = nx.balanced_tree(2,10) +# rcNorm = nx.richclub.rich_club_coefficient(T,Q=2) +# assert_true(rcNorm[0] ==1.0 and rcNorm[1] < 0.9 and rcNorm[2] < 0.9) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_similarity.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_similarity.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fcf90fa4303189fc31f095fd0595453578dbca --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_similarity.py @@ -0,0 +1,923 @@ +import pytest + +import networkx as nx +from networkx.algorithms.similarity import ( + graph_edit_distance, + optimal_edit_paths, + optimize_graph_edit_distance, +) +from networkx.generators.classic import ( + circular_ladder_graph, + cycle_graph, + path_graph, + wheel_graph, +) + + +def nmatch(n1, n2): + return n1 == n2 + + +def ematch(e1, e2): + return e1 == e2 + + +def getCanonical(): + G = nx.Graph() + G.add_node("A", label="A") + G.add_node("B", label="B") + G.add_node("C", label="C") + G.add_node("D", label="D") + G.add_edge("A", "B", label="a-b") + G.add_edge("B", "C", label="b-c") + G.add_edge("B", "D", label="b-d") + return G + + +class TestSimilarity: + @classmethod + def setup_class(cls): + global np + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + + def test_graph_edit_distance_roots_and_timeout(self): + G0 = nx.star_graph(5) + G1 = G0.copy() + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2]) + pytest.raises(ValueError, graph_edit_distance, G0, G1, roots=[2, 3, 4]) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 3)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(3, 9)) + pytest.raises(nx.NodeNotFound, graph_edit_distance, G0, G1, roots=(9, 9)) + assert graph_edit_distance(G0, G1, roots=(1, 2)) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1)) == 8 + assert graph_edit_distance(G0, G1, roots=(1, 2), timeout=5) == 0 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=5) == 8 + assert graph_edit_distance(G0, G1, roots=(0, 1), timeout=0.0001) is None + # test raise on 0 timeout + pytest.raises(nx.NetworkXError, graph_edit_distance, G0, G1, timeout=0) + + def test_graph_edit_distance(self): + G0 = nx.Graph() + G1 = path_graph(6) + G2 = cycle_graph(6) + G3 = wheel_graph(7) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 11 + assert graph_edit_distance(G1, G0) == 11 + assert graph_edit_distance(G0, G2) == 12 + assert graph_edit_distance(G2, G0) == 12 + assert graph_edit_distance(G0, G3) == 19 + assert graph_edit_distance(G3, G0) == 19 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 1 + 
assert graph_edit_distance(G2, G1) == 1 + assert graph_edit_distance(G1, G3) == 8 + assert graph_edit_distance(G3, G1) == 8 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 7 + assert graph_edit_distance(G3, G2) == 7 + + assert graph_edit_distance(G3, G3) == 0 + + def test_graph_edit_distance_node_match(self): + G1 = cycle_graph(5) + G2 = cycle_graph(5) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, node_match=lambda n1, n2: n1["color"] == n2["color"] + ) + == 1 + ) + + def test_graph_edit_distance_edge_match(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + assert graph_edit_distance(G1, G2) == 0 + assert ( + graph_edit_distance( + G1, G2, edge_match=lambda e1, e2: e1["color"] == e2["color"] + ) + == 2 + ) + + def test_graph_edit_distance_node_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for n, attr in G1.nodes.items(): + attr["color"] = "red" if n % 2 == 0 else "blue" + for n, attr in G2.nodes.items(): + attr["color"] = "red" if n % 2 == 1 else "blue" + + def node_subst_cost(uattr, vattr): + if uattr["color"] == vattr["color"]: + return 1 + else: + return 10 + + def node_del_cost(attr): + if attr["color"] == "blue": + return 20 + else: + return 50 + + def node_ins_cost(attr): + if attr["color"] == "blue": + return 40 + else: + return 100 + + assert ( + graph_edit_distance( + G1, + G2, + node_subst_cost=node_subst_cost, + node_del_cost=node_del_cost, + node_ins_cost=node_ins_cost, + ) + == 6 + ) + + def test_graph_edit_distance_edge_cost(self): + G1 = path_graph(6) + G2 = path_graph(6) + for e, attr in G1.edges.items(): + attr["color"] = "red" if min(e) % 2 == 0 else "blue" + for e, attr in G2.edges.items(): + attr["color"] = "red" if min(e) // 3 == 0 else "blue" + + def edge_subst_cost(gattr, hattr): + if gattr["color"] == hattr["color"]: + return 0.01 + else: + return 0.1 + + def edge_del_cost(attr): + if attr["color"] == "blue": + return 0.2 + else: + return 0.5 + + def edge_ins_cost(attr): + if attr["color"] == "blue": + return 0.4 + else: + return 1.0 + + assert ( + graph_edit_distance( + G1, + G2, + edge_subst_cost=edge_subst_cost, + edge_del_cost=edge_del_cost, + edge_ins_cost=edge_ins_cost, + ) + == 0.23 + ) + + def test_graph_edit_distance_upper_bound(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + assert graph_edit_distance(G1, G2, upper_bound=5) is None + assert graph_edit_distance(G1, G2, upper_bound=24) == 22 + assert graph_edit_distance(G1, G2) == 22 + + def test_optimal_edit_paths(self): + G1 = path_graph(3) + G2 = cycle_graph(3) + paths, cost = optimal_edit_paths(G1, G2) + assert cost == 1 + assert len(paths) == 6 + + def canonical(vertex_path, edge_path): + return ( + tuple(sorted(vertex_path)), + tuple(sorted(edge_path, key=lambda x: (None in x, x))), + ) + + expected_paths = [ + ( + [(0, 0), (1, 1), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (1, 2)), (None, (0, 2))], + ), + ( + [(0, 0), (1, 2), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (1, 2)), (None, (0, 1))], + ), + ( + [(0, 1), (1, 0), (2, 2)], + [((0, 1), (0, 1)), ((1, 2), (0, 2)), (None, (1, 2))], + ), + ( + [(0, 1), (1, 2), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 
2)), (None, (0, 1))], + ), + ( + [(0, 2), (1, 0), (2, 1)], + [((0, 1), (0, 2)), ((1, 2), (0, 1)), (None, (1, 2))], + ), + ( + [(0, 2), (1, 1), (2, 0)], + [((0, 1), (1, 2)), ((1, 2), (0, 1)), (None, (0, 2))], + ), + ] + assert {canonical(*p) for p in paths} == {canonical(*p) for p in expected_paths} + + def test_optimize_graph_edit_distance(self): + G1 = circular_ladder_graph(2) + G2 = circular_ladder_graph(6) + bestcost = 1000 + for cost in optimize_graph_edit_distance(G1, G2): + assert cost < bestcost + bestcost = cost + assert bestcost == 22 + + # def test_graph_edit_distance_bigger(self): + # G1 = circular_ladder_graph(12) + # G2 = circular_ladder_graph(16) + # assert_equal(graph_edit_distance(G1, G2), 22) + + def test_selfloops(self): + G0 = nx.Graph() + G1 = nx.Graph() + G1.add_edges_from((("A", "A"), ("A", "B"))) + G2 = nx.Graph() + G2.add_edges_from((("A", "B"), ("B", "B"))) + G3 = nx.Graph() + G3.add_edges_from((("A", "A"), ("A", "B"), ("B", "B"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 4 + assert graph_edit_distance(G1, G0) == 4 + assert graph_edit_distance(G0, G2) == 4 + assert graph_edit_distance(G2, G0) == 4 + assert graph_edit_distance(G0, G3) == 5 + assert graph_edit_distance(G3, G0) == 5 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 0 + assert graph_edit_distance(G2, G1) == 0 + assert graph_edit_distance(G1, G3) == 1 + assert graph_edit_distance(G3, G1) == 1 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 1 + assert graph_edit_distance(G3, G2) == 1 + + assert graph_edit_distance(G3, G3) == 0 + + def test_digraph(self): + G0 = nx.DiGraph() + G1 = nx.DiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("D", "A"))) + G2 = nx.DiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("C", "D"), ("A", "D"))) + G3 = nx.DiGraph() + G3.add_edges_from((("A", "B"), ("A", "C"), ("B", "D"), ("C", "D"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 8 + assert graph_edit_distance(G1, G0) == 8 + assert graph_edit_distance(G0, G2) == 8 + assert graph_edit_distance(G2, G0) == 8 + assert graph_edit_distance(G0, G3) == 8 + assert graph_edit_distance(G3, G0) == 8 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 2 + assert graph_edit_distance(G2, G1) == 2 + assert graph_edit_distance(G1, G3) == 4 + assert graph_edit_distance(G3, G1) == 4 + + assert graph_edit_distance(G2, G2) == 0 + assert graph_edit_distance(G2, G3) == 2 + assert graph_edit_distance(G3, G2) == 2 + + assert graph_edit_distance(G3, G3) == 0 + + def test_multigraph(self): + G0 = nx.MultiGraph() + G1 = nx.MultiGraph() + G1.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"))) + G2 = nx.MultiGraph() + G2.add_edges_from((("A", "B"), ("B", "C"), ("B", "C"), ("A", "C"))) + G3 = nx.MultiGraph() + G3.add_edges_from((("A", "B"), ("B", "C"), ("A", "C"), ("A", "C"), ("A", "C"))) + + assert graph_edit_distance(G0, G0) == 0 + assert graph_edit_distance(G0, G1) == 6 + assert graph_edit_distance(G1, G0) == 6 + assert graph_edit_distance(G0, G2) == 7 + assert graph_edit_distance(G2, G0) == 7 + assert graph_edit_distance(G0, G3) == 8 + assert graph_edit_distance(G3, G0) == 8 + + assert graph_edit_distance(G1, G1) == 0 + assert graph_edit_distance(G1, G2) == 1 + assert graph_edit_distance(G2, G1) == 1 + assert graph_edit_distance(G1, G3) == 2 + assert graph_edit_distance(G3, G1) == 2 + + assert graph_edit_distance(G2, G2) == 0 + assert 
graph_edit_distance(G2, G3) == 1 + assert graph_edit_distance(G3, G2) == 1 + + assert graph_edit_distance(G3, G3) == 0 + + def test_multidigraph(self): + G1 = nx.MultiDiGraph() + G1.add_edges_from( + ( + ("hardware", "kernel"), + ("kernel", "hardware"), + ("kernel", "userspace"), + ("userspace", "kernel"), + ) + ) + G2 = nx.MultiDiGraph() + G2.add_edges_from( + ( + ("winter", "spring"), + ("spring", "summer"), + ("summer", "autumn"), + ("autumn", "winter"), + ) + ) + + assert graph_edit_distance(G1, G2) == 5 + assert graph_edit_distance(G2, G1) == 5 + + # by https://github.com/jfbeaumont + def testCopy(self): + G = nx.Graph() + G.add_node("A", label="A") + G.add_node("B", label="B") + G.add_edge("A", "B", label="a-b") + assert ( + graph_edit_distance(G, G.copy(), node_match=nmatch, edge_match=ematch) == 0 + ) + + def testSame(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 0 + + def testOneEdgeLabelDiff(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="bad") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneNodeLabelDiff(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="Z") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraNode(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_edge("A", "B", label="a-b") + G2.add_node("C", label="C") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraEdge(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_node("C", label="C") + G1.add_node("C", label="C") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("A", "C", label="a-c") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + def testOneExtraNodeAndEdge(self): + G1 = nx.Graph() + G1.add_node("A", label="A") + G1.add_node("B", label="B") + G1.add_edge("A", "B", label="a-b") + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("A", "C", label="a-c") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph1(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "D", label="b-d") + G2.add_edge("D", "E", label="d-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 3 + + def testGraph2(self): + G1 = getCanonical() + G2 = nx.Graph() + 
G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + G2.add_edge("C", "E", label="c-e") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 4 + + def testGraph3(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_node("E", label="E") + G2.add_node("F", label="F") + G2.add_node("G", label="G") + G2.add_edge("A", "C", label="a-c") + G2.add_edge("A", "D", label="a-d") + G2.add_edge("D", "E", label="d-e") + G2.add_edge("D", "F", label="d-f") + G2.add_edge("D", "G", label="d-g") + G2.add_edge("E", "B", label="e-b") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 12 + + def testGraph4(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("C", "D", label="c-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_a(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("A", "D", label="a-d") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 2 + + def testGraph4_b(self): + G1 = getCanonical() + G2 = nx.Graph() + G2.add_node("A", label="A") + G2.add_node("B", label="B") + G2.add_node("C", label="C") + G2.add_node("D", label="D") + G2.add_edge("A", "B", label="a-b") + G2.add_edge("B", "C", label="b-c") + G2.add_edge("B", "D", label="bad") + assert graph_edit_distance(G1, G2, node_match=nmatch, edge_match=ematch) == 1 + + # note: nx.simrank_similarity_numpy not included because returns np.array + simrank_algs = [ + nx.simrank_similarity, + nx.algorithms.similarity._simrank_similarity_python, + ] + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_no_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + }, + 1: { + 0: 0.3951219505902448, + 1: 1, + 2: 0.3951219505902449, + 3: 0.5707317069281646, + 4: 0.5707317069281646, + }, + 2: { + 0: 0.5707317069281646, + 1: 0.3951219505902449, + 2: 1, + 3: 0.3951219505902449, + 4: 0.5707317069281646, + }, + 3: { + 0: 0.5707317069281646, + 1: 0.5707317069281646, + 2: 0.3951219505902449, + 3: 1, + 4: 0.3951219505902449, + }, + 4: { + 0: 0.3951219505902449, + 1: 0.5707317069281646, + 2: 0.5707317069281646, + 3: 0.3951219505902449, + 4: 1, + }, + } + actual = simrank_similarity(G) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = 
{ + 0: {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443}, + 1: {0: 0.0, 1: 1, 2: 0.4135512472705618, 3: 0.0, 4: 0.10586911930126384}, + 2: { + 0: 0.1323363991265798, + 1: 0.4135512472705618, + 2: 1, + 3: 0.04234764772050554, + 4: 0.08822426608438655, + }, + 3: {0: 0.0, 1: 0.0, 2: 0.04234764772050554, 3: 1, 4: 0.3308409978164495}, + 4: { + 0: 0.03387811817640443, + 1: 0.10586911930126384, + 2: 0.08822426608438655, + 3: 0.3308409978164495, + 4: 1, + }, + } + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8) + for k, v in expected.items(): + assert v == pytest.approx(actual[k], abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_no_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = { + 0: 1, + 1: 0.3951219505902448, + 2: 0.5707317069281646, + 3: 0.5707317069281646, + 4: 0.3951219505902449, + } + actual = simrank_similarity(G, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = {0: 1, 1: 0.0, 2: 0.1323363991265798, 3: 0.0, 4: 0.03387811817640443} + # Use the importance_factor from the paper to get the same numbers. + actual = simrank_similarity(G, importance_factor=0.8, source=0) + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_noninteger_nodes(self, simrank_similarity): + G = nx.cycle_graph(5) + G = nx.relabel_nodes(G, dict(enumerate("abcde"))) + expected = { + "a": 1, + "b": 0.3951219505902448, + "c": 0.5707317069281646, + "d": 0.5707317069281646, + "e": 0.3951219505902449, + } + actual = simrank_similarity(G, source="a") + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + node_labels = dict(enumerate(nx.get_node_attributes(G, "label").values())) + G = nx.relabel_nodes(G, node_labels) + + expected = { + "Univ": 1, + "ProfA": 0.0, + "ProfB": 0.1323363991265798, + "StudentA": 0.0, + "StudentB": 0.03387811817640443, + } + # Use the importance_factor from the paper to get the same numbers. 
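+        # (importance_factor plays the role of the decay factor C = 0.8 used
+        # in the SimRank paper.)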
+ actual = simrank_similarity(G, importance_factor=0.8, source="Univ") + assert expected == pytest.approx(actual, abs=1e-2) + + @pytest.mark.parametrize("simrank_similarity", simrank_algs) + def test_simrank_source_and_target(self, simrank_similarity): + G = nx.cycle_graph(5) + expected = 1 + actual = simrank_similarity(G, source=0, target=0) + assert expected == pytest.approx(actual, abs=1e-2) + + # For a DiGraph test, use the first graph from the paper cited in + # the docs: https://dl.acm.org/doi/pdf/10.1145/775047.775126 + G = nx.DiGraph() + G.add_node(0, label="Univ") + G.add_node(1, label="ProfA") + G.add_node(2, label="ProfB") + G.add_node(3, label="StudentA") + G.add_node(4, label="StudentB") + G.add_edges_from([(0, 1), (0, 2), (1, 3), (2, 4), (4, 2), (3, 0)]) + + expected = 0.1323363991265798 + # Use the importance_factor from the paper to get the same numbers. + # Use the pair (0,2) because (0,0) and (0,1) have trivial results. + actual = simrank_similarity(G, importance_factor=0.8, source=0, target=2) + assert expected == pytest.approx(actual, abs=1e-5) + + @pytest.mark.parametrize("alg", simrank_algs) + def test_simrank_max_iterations(self, alg): + G = nx.cycle_graph(5) + pytest.raises(nx.ExceededMaxIterations, alg, G, max_iterations=10) + + def test_simrank_between_versions(self): + G = nx.cycle_graph(5) + # _python tolerance 1e-4 + expected_python_tol4 = { + 0: 1, + 1: 0.394512499239852, + 2: 0.5703550452791322, + 3: 0.5703550452791323, + 4: 0.394512499239852, + } + # _numpy tolerance 1e-4 + expected_numpy_tol4 = { + 0: 1.0, + 1: 0.3947180735764555, + 2: 0.570482097206368, + 3: 0.570482097206368, + 4: 0.3947180735764555, + } + actual = nx.simrank_similarity(G, source=0) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_python_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-3) + + actual = nx.similarity._simrank_similarity_python(G, source=0) + assert expected_python_tol4 == pytest.approx(actual, abs=1e-7) + # versions differ at 1e-4 level but equal at 1e-3 + assert expected_numpy_tol4 != pytest.approx(actual, abs=1e-4) + assert expected_numpy_tol4 == pytest.approx(actual, abs=1e-3) + + def test_simrank_numpy_no_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + 0.570482097206368, + ], + [ + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + 0.3947180735764555, + ], + [ + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + 1.0, + ], + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_no_target(self): + G = nx.cycle_graph(5) + expected = np.array( + [ + 1.0, + 0.3947180735764555, + 0.570482097206368, + 0.570482097206368, + 0.3947180735764555, + ] + ) + actual = nx.similarity._simrank_similarity_numpy(G, source=0) + np.testing.assert_allclose(expected, actual, atol=1e-7) + + def test_simrank_numpy_source_and_target(self): + G = nx.cycle_graph(5) + expected = 1.0 + actual = nx.similarity._simrank_similarity_numpy(G, source=0, target=0) + np.testing.assert_allclose(expected, actual, 
atol=1e-7) + + def test_panther_similarity_unweighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 4) + expected = {3: 0.5, 2: 0.5, 1: 0.5, 4: 0.125} + sim = nx.panther_similarity(G, 0, path_length=2) + assert sim == expected + + def test_panther_similarity_weighted(self): + np.random.seed(42) + + G = nx.Graph() + G.add_edge("v1", "v2", w=5) + G.add_edge("v1", "v3", w=1) + G.add_edge("v1", "v4", w=2) + G.add_edge("v2", "v3", w=0.1) + G.add_edge("v3", "v5", w=1) + expected = {"v3": 0.75, "v4": 0.5, "v2": 0.5, "v5": 0.25} + sim = nx.panther_similarity(G, "v1", path_length=2, weight="w") + assert sim == expected + + def test_generate_random_paths_unweighted(self): + np.random.seed(42) + + index_map = {} + num_paths = 10 + path_length = 2 + G = nx.Graph() + G.add_edge(0, 1) + G.add_edge(0, 2) + G.add_edge(0, 3) + G.add_edge(1, 2) + G.add_edge(2, 4) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map + ) + expected_paths = [ + [3, 0, 3], + [4, 2, 1], + [2, 1, 0], + [2, 0, 3], + [3, 0, 1], + [3, 0, 1], + [4, 2, 0], + [2, 1, 0], + [3, 0, 2], + [2, 1, 2], + ] + expected_map = { + 0: {0, 2, 3, 4, 5, 6, 7, 8}, + 1: {1, 2, 4, 5, 7, 9}, + 2: {1, 2, 3, 6, 7, 8, 9}, + 3: {0, 3, 4, 5, 8}, + 4: {1, 6}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_generate_random_paths_weighted(self): + np.random.seed(42) + + index_map = {} + num_paths = 10 + path_length = 6 + G = nx.Graph() + G.add_edge("a", "b", weight=0.6) + G.add_edge("a", "c", weight=0.2) + G.add_edge("c", "d", weight=0.1) + G.add_edge("c", "e", weight=0.7) + G.add_edge("c", "f", weight=0.9) + G.add_edge("a", "d", weight=0.3) + paths = nx.generate_random_paths( + G, num_paths, path_length=path_length, index_map=index_map + ) + + expected_paths = [ + ["d", "c", "f", "c", "d", "a", "b"], + ["e", "c", "f", "c", "f", "c", "e"], + ["d", "a", "b", "a", "b", "a", "c"], + ["b", "a", "d", "a", "b", "a", "b"], + ["d", "a", "b", "a", "b", "a", "d"], + ["d", "a", "b", "a", "b", "a", "c"], + ["d", "a", "b", "a", "b", "a", "b"], + ["f", "c", "f", "c", "f", "c", "e"], + ["d", "a", "d", "a", "b", "a", "b"], + ["e", "c", "f", "c", "e", "c", "d"], + ] + expected_map = { + "d": {0, 2, 3, 4, 5, 6, 8, 9}, + "c": {0, 1, 2, 5, 7, 9}, + "f": {0, 1, 9, 7}, + "a": {0, 2, 3, 4, 5, 6, 8}, + "b": {0, 2, 3, 4, 5, 6, 8}, + "e": {1, 9, 7}, + } + + assert expected_paths == list(paths) + assert expected_map == index_map + + def test_symmetry_with_custom_matching(self): + print("G2 is edge (a,b) and G3 is edge (a,a)") + print("but node order for G2 is (a,b) while for G3 it is (b,a)") + + a, b = "A", "B" + G2 = nx.Graph() + G2.add_nodes_from((a, b)) + G2.add_edges_from([(a, b)]) + G3 = nx.Graph() + G3.add_nodes_from((b, a)) + G3.add_edges_from([(a, a)]) + for G in (G2, G3): + for n in G: + G.nodes[n]["attr"] = n + for e in G.edges: + G.edges[e]["attr"] = e + match = lambda x, y: x == y + + print("Starting G2 to G3 GED calculation") + assert nx.graph_edit_distance(G2, G3, node_match=match, edge_match=match) == 1 + + print("Starting G3 to G2 GED calculation") + assert nx.graph_edit_distance(G3, G2, node_match=match, edge_match=match) == 1 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_simple_paths.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_simple_paths.py new file mode 100644 index 0000000000000000000000000000000000000000..ef9506c22b0ca06e5af62fc671586c4a4c506acb --- 
/dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_simple_paths.py @@ -0,0 +1,769 @@ +import random + +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.algorithms.simple_paths import ( + _bidirectional_dijkstra, + _bidirectional_shortest_path, +) +from networkx.utils import arbitrary_element, pairwise + + +class TestIsSimplePath: + """Unit tests for the + :func:`networkx.algorithms.simple_paths.is_simple_path` function. + + """ + + def test_empty_list(self): + """Tests that the empty list is not a valid path, since there + should be a one-to-one correspondence between paths as lists of + nodes and paths as lists of edges. + + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, []) + + def test_trivial_path(self): + """Tests that the trivial path, a path of length one, is + considered a simple path in a graph. + + """ + G = nx.trivial_graph() + assert nx.is_simple_path(G, [0]) + + def test_trivial_nonpath(self): + """Tests that a list whose sole element is an object not in the + graph is not considered a simple path. + + """ + G = nx.trivial_graph() + assert not nx.is_simple_path(G, ["not a node"]) + + def test_simple_path(self): + G = nx.path_graph(2) + assert nx.is_simple_path(G, [0, 1]) + + def test_non_simple_path(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 1, 0]) + + def test_cycle(self): + G = nx.cycle_graph(3) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_missing_node(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [0, 2]) + + def test_missing_starting_node(self): + G = nx.path_graph(2) + assert not nx.is_simple_path(G, [2, 0]) + + def test_directed_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.is_simple_path(G, [0, 1, 2]) + + def test_directed_non_path(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + assert not nx.is_simple_path(G, [2, 1, 0]) + + def test_directed_cycle(self): + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert not nx.is_simple_path(G, [0, 1, 2, 0]) + + def test_multigraph(self): + G = nx.MultiGraph([(0, 1), (0, 1)]) + assert nx.is_simple_path(G, [0, 1]) + + def test_multidigraph(self): + G = nx.MultiDiGraph([(0, 1), (0, 1), (1, 0), (1, 0)]) + assert nx.is_simple_path(G, [0, 1]) + + +# Tests for all_simple_paths +def test_all_simple_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3)} + + +def test_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_digraph_all_simple_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == {(0, 1, 2, 3), (0, 1, 2, 4)} + + +def test_all_simple_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = 
nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 2, 3)} + + +def test_all_simple_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {(0, 1, 3)} + + +def test_all_simple_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {(0, 1, 2), (0, 1, 3)} + + +def test_all_simple_paths_source_target(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 1, 1) + assert list(paths) == [] + + +def test_all_simple_paths_cutoff(): + G = nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {(0, 1)} + paths = nx.all_simple_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {(0, 1), (0, 2, 1), (0, 3, 1)} + + +def test_all_simple_paths_on_non_trivial_graph(): + """you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + (1, 5, 4, 2, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + (1, 2), + (1, 3, 4, 2), + (1, 5, 4, 2), + (1, 3), + (1, 2, 3), + (1, 5, 4, 3), + } + paths = nx.all_simple_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {(1, 2), (1, 3), (1, 2, 3)} + + +def test_all_simple_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + paths = nx.all_simple_paths(G, 1, 1) + assert list(paths) == [] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2), (1, 10, 2)} + + +def test_all_simple_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {(1, 2), (1, 2)} + + +def test_all_simple_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths = nx.all_simple_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {(1, 2, 3)} + + +def test_all_simple_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_paths_corner_cases(): + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 0)) == [] + assert list(nx.all_simple_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def hamiltonian_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_paths(G, source, target): + if len(path) == n: + yield path + + +def test_hamiltonian_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = [list(p) for p in hamiltonian_path(G, 0)] + exact = [[0] + list(p) for p in permutations([1, 2, 3], 3)] + assert sorted(paths) == sorted(exact) + + +def test_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_paths(G, 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + paths = nx.all_simple_paths(nx.MultiGraph(G), 
0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + + +def test_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 0, 3)) + + +def test_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for all_simple_edge_paths +def test_all_simple_edge_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_emits_two_paths(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4]) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_digraph_all_simple_edge_paths_with_two_targets_cutoff(): + G = nx.path_graph(4, create_using=nx.DiGraph()) + G.add_edge(2, 4) + paths = nx.all_simple_edge_paths(G, 0, [3, 4], cutoff=3) + assert {tuple(p) for p in paths} == { + ((0, 1), (1, 2), (2, 3)), + ((0, 1), (1, 2), (2, 4)), + } + + +def test_all_simple_edge_paths_with_two_targets_in_line_emits_two_paths(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 2), (2, 3))} + + +def test_all_simple_edge_paths_ignores_cycle(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, 3) + assert {tuple(p) for p in paths} == {((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_with_two_targets_inside_cycle_emits_two_paths(): + G = nx.cycle_graph(3, create_using=nx.DiGraph()) + G.add_edge(1, 3) + paths = nx.all_simple_edge_paths(G, 0, [2, 3]) + assert {tuple(p) for p in paths} == {((0, 1), (1, 2)), ((0, 1), (1, 3))} + + +def test_all_simple_edge_paths_source_target(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [] + + +def test_all_simple_edge_paths_cutoff(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=1) + assert {tuple(p) for p in paths} == {((0, 1),)} + paths = nx.all_simple_edge_paths(G, 0, 1, cutoff=2) + assert {tuple(p) for p in paths} == {((0, 1),), ((0, 2), (2, 1)), ((0, 3), (3, 1))} + + +def test_all_simple_edge_paths_on_non_trivial_graph(): + """you may need to draw this graph to make sure it is reasonable""" + G = nx.path_graph(5, create_using=nx.DiGraph()) + G.add_edges_from([(0, 5), (1, 5), (1, 3), (5, 4), (4, 2), (4, 3)]) + paths = nx.all_simple_edge_paths(G, 1, [2, 3]) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), (3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + ((1, 5), (5, 4), (4, 2), (2, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=3) + assert {tuple(p) for p in paths} == { + ((1, 2),), + ((1, 3), 
(3, 4), (4, 2)), + ((1, 5), (5, 4), (4, 2)), + ((1, 3),), + ((1, 2), (2, 3)), + ((1, 5), (5, 4), (4, 3)), + } + paths = nx.all_simple_edge_paths(G, 1, [2, 3], cutoff=2) + assert {tuple(p) for p in paths} == {((1, 2),), ((1, 3),), ((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_multigraph(): + G = nx.MultiGraph([(1, 2), (1, 2)]) + paths = nx.all_simple_edge_paths(G, 1, 1) + assert list(paths) == [] + nx.add_path(G, [3, 1, 10, 2]) + paths = list(nx.all_simple_edge_paths(G, 1, 2)) + assert len(paths) == 3 + assert {tuple(p) for p in paths} == { + ((1, 2, 0),), + ((1, 2, 1),), + ((1, 10, 0), (10, 2, 0)), + } + + +def test_all_simple_edge_paths_multigraph_with_cutoff(): + G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)]) + paths = list(nx.all_simple_edge_paths(G, 1, 2, cutoff=1)) + assert len(paths) == 2 + assert {tuple(p) for p in paths} == {((1, 2, 0),), ((1, 2, 1),)} + + +def test_all_simple_edge_paths_directed(): + G = nx.DiGraph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [3, 2, 1]) + paths = nx.all_simple_edge_paths(G, 1, 3) + assert {tuple(p) for p in paths} == {((1, 2), (2, 3))} + + +def test_all_simple_edge_paths_empty(): + G = nx.path_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=2) + assert list(paths) == [] + + +def test_all_simple_edge_paths_corner_cases(): + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 0)) == [] + assert list(nx.all_simple_edge_paths(nx.empty_graph(2), 0, 1)) == [] + assert list(nx.all_simple_edge_paths(nx.path_graph(9), 0, 8, 0)) == [] + + +def hamiltonian_edge_path(G, source): + source = arbitrary_element(G) + neighbors = set(G[source]) - {source} + n = len(G) + for target in neighbors: + for path in nx.all_simple_edge_paths(G, source, target): + if len(path) == n - 1: + yield path + + +def test_hamiltonian__edge_path(): + from itertools import permutations + + G = nx.complete_graph(4) + paths = hamiltonian_edge_path(G, 0) + exact = [list(pairwise([0] + list(p))) for p in permutations([1, 2, 3], 3)] + assert sorted(exact) == sorted(paths) + + +def test_edge_cutoff_zero(): + G = nx.complete_graph(4) + paths = nx.all_simple_edge_paths(G, 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + paths = nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3, cutoff=0) + assert [list(p) for p in paths] == [] + + +def test_edge_source_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_edge_paths(nx.MultiGraph(G), 0, 3)) + + +def test_edge_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.all_simple_edge_paths(nx.MultiGraph(G), 1, 4)) + + +# Tests for shortest_simple_paths +def test_shortest_simple_paths(): + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 7, 8, 12] + assert [len(path) for path in nx.shortest_simple_paths(G, 1, 12)] == sorted( + len(path) for path in nx.all_simple_paths(G, 1, 12) + ) + + +def test_shortest_simple_paths_directed(): + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3) + assert list(paths) == [[0, 1, 2, 3]] + + +def test_shortest_simple_paths_directed_with_weight_function(): + def cost(u, v, x): + return 1 + + G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted") + paths = nx.shortest_simple_paths(G, 1, 12) + assert next(paths) == [1, 2, 3, 4, 8, 12] + assert next(paths) == [1, 5, 6, 
7, 8, 12] + assert [ + len(path) for path in nx.shortest_simple_paths(G, 1, 12, weight=cost) + ] == sorted(len(path) for path in nx.all_simple_paths(G, 1, 12)) + + +def test_shortest_simple_paths_with_weight_function(): + def cost(u, v, x): + return 1 + + G = nx.cycle_graph(7, create_using=nx.DiGraph()) + paths = nx.shortest_simple_paths(G, 0, 3, weight=cost) + assert list(paths) == [[0, 1, 2, 3]] + + +def test_Greg_Bernstein(): + g1 = nx.Graph() + g1.add_nodes_from(["N0", "N1", "N2", "N3", "N4"]) + g1.add_edge("N4", "N1", weight=10.0, capacity=50, name="L5") + g1.add_edge("N4", "N0", weight=7.0, capacity=40, name="L4") + g1.add_edge("N0", "N1", weight=10.0, capacity=45, name="L1") + g1.add_edge("N3", "N0", weight=10.0, capacity=50, name="L0") + g1.add_edge("N2", "N3", weight=12.0, capacity=30, name="L2") + g1.add_edge("N1", "N2", weight=15.0, capacity=42, name="L3") + solution = [["N1", "N0", "N3"], ["N1", "N2", "N3"], ["N1", "N4", "N0", "N3"]] + result = list(nx.shortest_simple_paths(g1, "N1", "N3", weight="weight")) + assert result == solution + + +def test_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + +def test_directed_weighted_shortest_simple_path(): + def cost_func(path): + return sum(G.adj[u][v]["weight"] for (u, v) in zip(path, path[1:])) + + G = nx.complete_graph(5) + G = G.to_directed() + weight = {(u, v): random.randint(1, 100) for (u, v) in G.edges()} + nx.set_edge_attributes(G, weight, "weight") + cost = 0 + for path in nx.shortest_simple_paths(G, 0, 3, weight="weight"): + this_cost = cost_func(path) + assert cost <= this_cost + cost = this_cost + + +def test_weighted_shortest_simple_path_issue2427(): + G = nx.Graph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.Graph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_directed_weighted_shortest_simple_path_issue2427(): + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=2) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=2) + G.add_edge("B", "OUT", weight=2) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "OUT"], + ["IN", "B", "OUT"], + ] + G = nx.DiGraph() + G.add_edge("IN", "OUT", weight=10) + G.add_edge("IN", "A", weight=1) + G.add_edge("IN", "B", weight=1) + G.add_edge("B", "OUT", weight=1) + assert list(nx.shortest_simple_paths(G, "IN", "OUT", weight="weight")) == [ + ["IN", "B", "OUT"], + ["IN", "OUT"], + ] + + +def test_weight_name(): + G = nx.cycle_graph(7) + nx.set_edge_attributes(G, 1, "weight") + nx.set_edge_attributes(G, 1, "foo") + G.adj[1][2]["foo"] = 7 + paths = list(nx.shortest_simple_paths(G, 0, 3, weight="foo")) + solution = [[0, 6, 5, 4, 3], [0, 1, 2, 3]] + assert paths == solution + + +def test_ssp_source_missing(): + with 
pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_ssp_target_missing(): + with pytest.raises(nx.NodeNotFound): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_multigraph(): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.MultiGraph() + nx.add_path(G, [1, 2, 3]) + list(nx.shortest_simple_paths(G, 1, 4)) + + +def test_ssp_source_missing2(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [0, 1, 2]) + nx.add_path(G, [3, 4, 5]) + list(nx.shortest_simple_paths(G, 0, 3)) + + +def test_bidirectional_shortest_path_restricted_cycle(): + cycle = nx.cycle_graph(7) + length, path = _bidirectional_shortest_path(cycle, 0, 3) + assert path == [0, 1, 2, 3] + length, path = _bidirectional_shortest_path(cycle, 0, 3, ignore_nodes=[1]) + assert path == [0, 6, 5, 4, 3] + + +def test_bidirectional_shortest_path_restricted_wheel(): + wheel = nx.wheel_graph(6) + length, path = _bidirectional_shortest_path(wheel, 1, 3) + assert path in [[1, 0, 3], [1, 2, 3]] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0]) + assert path == [1, 2, 3] + length, path = _bidirectional_shortest_path(wheel, 1, 3, ignore_nodes=[0, 2]) + assert path == [1, 5, 4, 3] + length, path = _bidirectional_shortest_path( + wheel, 1, 3, ignore_edges=[(1, 0), (5, 0), (2, 3)] + ) + assert path in [[1, 2, 0, 3], [1, 5, 4, 3]] + + +def test_bidirectional_shortest_path_restricted_directed_cycle(): + directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph()) + length, path = _bidirectional_shortest_path(directed_cycle, 0, 3) + assert path == [0, 1, 2, 3] + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_nodes=[1], + ) + length, path = _bidirectional_shortest_path( + directed_cycle, 0, 3, ignore_edges=[(2, 1)] + ) + assert path == [0, 1, 2, 3] + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_shortest_path, + directed_cycle, + 0, + 3, + ignore_edges=[(1, 2)], + ) + + +def test_bidirectional_shortest_path_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2]) + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1] + ) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[2] + ) + G = nx.Graph() + nx.add_path(G, [1, 3]) + nx.add_path(G, [1, 4]) + nx.add_path(G, [3, 2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_shortest_path, G, 1, 2, ignore_nodes=[1, 2] + ) + + +def validate_path(G, s, t, soln_len, path): + assert path[0] == s + assert path[-1] == t + assert soln_len == sum( + G[u][v].get("weight", 1) for u, v in zip(path[:-1], path[1:]) + ) + + +def validate_length_path(G, s, t, soln_len, length, path): + assert soln_len == length + validate_path(G, s, t, length, path) + + +def test_bidirectional_dijkstra_restricted(): + XG = nx.DiGraph() + XG.add_weighted_edges_from( + [ + ("s", "u", 10), + ("s", "x", 5), + ("u", "v", 1), + ("u", "x", 2), + ("v", "y", 1), + ("x", "u", 3), + ("x", "v", 5), + ("x", "y", 2), + ("y", "s", 7), + ("y", "v", 6), + ] + ) + + XG3 = nx.Graph() + XG3.add_weighted_edges_from( + [[0, 1, 2], [1, 2, 12], [2, 3, 1], [3, 4, 5], [4, 5, 1], [5, 0, 10]] + ) + validate_length_path(XG, "s", "v", 9, *_bidirectional_dijkstra(XG, "s", "v")) + validate_length_path( + XG, "s", "v", 10, *_bidirectional_dijkstra(XG, "s", "v", ignore_nodes=["u"]) + ) + 
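# With node "u" ignored, the best remaining route is s -> x -> v
+ # (weight 5 + 5 = 10); ignoring the edge ("s", "x") instead forces
+ # s -> u -> v (weight 10 + 1 = 11):
+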
validate_length_path( + XG, + "s", + "v", + 11, + *_bidirectional_dijkstra(XG, "s", "v", ignore_edges=[("s", "x")]), + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG, + "s", + "v", + ignore_nodes=["u"], + ignore_edges=[("s", "x")], + ) + validate_length_path(XG3, 0, 3, 15, *_bidirectional_dijkstra(XG3, 0, 3)) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_nodes=[1]) + ) + validate_length_path( + XG3, 0, 3, 16, *_bidirectional_dijkstra(XG3, 0, 3, ignore_edges=[(2, 3)]) + ) + pytest.raises( + nx.NetworkXNoPath, + _bidirectional_dijkstra, + XG3, + 0, + 3, + ignore_nodes=[1], + ignore_edges=[(5, 4)], + ) + + +def test_bidirectional_dijkstra_no_path(): + with pytest.raises(nx.NetworkXNoPath): + G = nx.Graph() + nx.add_path(G, [1, 2, 3]) + nx.add_path(G, [4, 5, 6]) + _bidirectional_dijkstra(G, 1, 6) + + +def test_bidirectional_dijkstra_ignore(): + G = nx.Graph() + nx.add_path(G, [1, 2, 10]) + nx.add_path(G, [1, 3, 10]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1]) + pytest.raises(nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[2]) + pytest.raises( + nx.NetworkXNoPath, _bidirectional_dijkstra, G, 1, 2, ignore_nodes=[1, 2] + ) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smallworld.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smallworld.py new file mode 100644 index 0000000000000000000000000000000000000000..d115dd99b796fc256341f1e8ff75fd4bc01b9b17 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smallworld.py @@ -0,0 +1,78 @@ +import pytest + +pytest.importorskip("numpy") + +import random + +import networkx as nx +from networkx import lattice_reference, omega, random_reference, sigma + +rng = 42 + + +def test_random_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = random_reference(G, niter=1, seed=rng) + C = nx.average_clustering(G) + Cr = nx.average_clustering(Gr) + assert C > Cr + + with pytest.raises(nx.NetworkXError): + next(random_reference(nx.Graph())) + with pytest.raises(nx.NetworkXNotImplemented): + next(random_reference(nx.DiGraph())) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = random_reference(H, niter=1, seed=rng) + + +def test_lattice_reference(): + G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gl = lattice_reference(G, niter=1, seed=rng) + L = nx.average_shortest_path_length(G) + Ll = nx.average_shortest_path_length(Gl) + assert Ll > L + + pytest.raises(nx.NetworkXError, lattice_reference, nx.Graph()) + pytest.raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph()) + + H = nx.Graph(((0, 1), (2, 3))) + Hl = lattice_reference(H, niter=1) + + +def test_sigma(): + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + sigmas = sigma(Gs, niter=1, nrand=2, seed=rng) + sigmar = sigma(Gr, niter=1, nrand=2, seed=rng) + assert sigmar < sigmas + + +def test_omega(): + Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng) + Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng) + Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng) + omegal = omega(Gl, niter=1, nrand=1, seed=rng) + omegar = omega(Gr, niter=1, nrand=1, seed=rng) + omegas = omega(Gs, niter=1, nrand=1, seed=rng) + assert omegal < omegas and omegas < omegar + + # Test that omega lies within the [-1, 1] bounds + G_barbell = nx.barbell_graph(5, 1) + G_karate = nx.karate_club_graph() + + omega_barbell = 
nx.omega(G_barbell) + omega_karate = nx.omega(G_karate, nrand=2) + + omegas = (omegal, omegar, omegas, omega_barbell, omega_karate) + + for o in omegas: + assert -1 <= o <= 1 + + +@pytest.mark.parametrize("f", (nx.random_reference, nx.lattice_reference)) +def test_graph_no_edges(f): + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match="Graph has fewer that 2 edges"): + f(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smetric.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smetric.py new file mode 100644 index 0000000000000000000000000000000000000000..29389a7587264792b8b48186eae1c229178f3330 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_smetric.py @@ -0,0 +1,36 @@ +import warnings + +import pytest + +import networkx as nx + + +def test_smetric(): + g = nx.Graph() + g.add_edge(1, 2) + g.add_edge(2, 3) + g.add_edge(2, 4) + g.add_edge(1, 4) + sm = nx.s_metric(g, normalized=False) + assert sm == 19.0 + + +# NOTE: Tests below to be deleted when deprecation of `normalized` kwarg expires + + +def test_normalized_deprecation_warning(): + """Test that a deprecation warning is raised when s_metric is called with + a `normalized` kwarg.""" + G = nx.cycle_graph(7) + # No warning raised when called without kwargs (future behavior) + with warnings.catch_warnings(): + warnings.simplefilter("error") # Fail the test if warning caught + assert nx.s_metric(G) == 28 + + # Deprecation warning + with pytest.deprecated_call(): + nx.s_metric(G, normalized=True) + + # Make sure you get standard Python behavior when unrecognized keyword provided + with pytest.raises(TypeError): + nx.s_metric(G, normalize=True) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_sparsifiers.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_sparsifiers.py new file mode 100644 index 0000000000000000000000000000000000000000..78cabceed0102bf2ffe01d8675102c1ae85efac2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_sparsifiers.py @@ -0,0 +1,137 @@ +"""Unit tests for the sparsifier computation functions.""" +import pytest + +import networkx as nx +from networkx.utils import py_random_state + +_seed = 2 + + +def _test_spanner(G, spanner, stretch, weight=None): + """Test whether a spanner is valid. + + This function tests whether the given spanner is a subgraph of the + given graph G with the same node set. It also tests for all shortest + paths whether they adhere to the given stretch. + + Parameters + ---------- + G : NetworkX graph + The original graph for which the spanner was constructed. + + spanner : NetworkX graph + The spanner to be tested. + + stretch : float + The proclaimed stretch of the spanner. + + weight : object + The edge attribute to use as distance. + """ + # check node set + assert set(G.nodes()) == set(spanner.nodes()) + + # check edge set and weights + for u, v in spanner.edges(): + assert G.has_edge(u, v) + if weight: + assert spanner[u][v][weight] == G[u][v][weight] + + # check connectivity and stretch + original_length = dict(nx.shortest_path_length(G, weight=weight)) + spanner_length = dict(nx.shortest_path_length(spanner, weight=weight)) + for u in G.nodes(): + for v in G.nodes(): + if u in original_length and v in original_length[u]: + assert spanner_length[u][v] <= stretch * original_length[u][v] + + +@py_random_state(1) +def _assign_random_weights(G, seed=None): + """Assigns random weights to the edges of a graph. 
+
+    Parameters
+    ----------
+
+    G : NetworkX graph
+        The graph whose edges are assigned random weights.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    """
+    for u, v in G.edges():
+        G[u][v]["weight"] = seed.random()
+
+
+def test_spanner_trivial():
+    """Test a trivial spanner with stretch 1."""
+    G = nx.complete_graph(20)
+    spanner = nx.spanner(G, 1, seed=_seed)
+
+    for u, v in G.edges:
+        assert spanner.has_edge(u, v)
+
+
+def test_spanner_unweighted_complete_graph():
+    """Test spanner construction on a complete unweighted graph."""
+    G = nx.complete_graph(20)
+
+    spanner = nx.spanner(G, 4, seed=_seed)
+    _test_spanner(G, spanner, 4)
+
+    spanner = nx.spanner(G, 10, seed=_seed)
+    _test_spanner(G, spanner, 10)
+
+
+def test_spanner_weighted_complete_graph():
+    """Test spanner construction on a complete weighted graph."""
+    G = nx.complete_graph(20)
+    _assign_random_weights(G, seed=_seed)
+
+    spanner = nx.spanner(G, 4, weight="weight", seed=_seed)
+    _test_spanner(G, spanner, 4, weight="weight")
+
+    spanner = nx.spanner(G, 10, weight="weight", seed=_seed)
+    _test_spanner(G, spanner, 10, weight="weight")
+
+
+def test_spanner_unweighted_gnp_graph():
+    """Test spanner construction on an unweighted gnp graph."""
+    G = nx.gnp_random_graph(20, 0.4, seed=_seed)
+
+    spanner = nx.spanner(G, 4, seed=_seed)
+    _test_spanner(G, spanner, 4)
+
+    spanner = nx.spanner(G, 10, seed=_seed)
+    _test_spanner(G, spanner, 10)
+
+
+def test_spanner_weighted_gnp_graph():
+    """Test spanner construction on a weighted gnp graph."""
+    G = nx.gnp_random_graph(20, 0.4, seed=_seed)
+    _assign_random_weights(G, seed=_seed)
+
+    spanner = nx.spanner(G, 4, weight="weight", seed=_seed)
+    _test_spanner(G, spanner, 4, weight="weight")
+
+    spanner = nx.spanner(G, 10, weight="weight", seed=_seed)
+    _test_spanner(G, spanner, 10, weight="weight")
+
+
+def test_spanner_unweighted_disconnected_graph():
+    """Test spanner construction on a disconnected graph."""
+    G = nx.disjoint_union(nx.complete_graph(10), nx.complete_graph(10))
+
+    spanner = nx.spanner(G, 4, seed=_seed)
+    _test_spanner(G, spanner, 4)
+
+    spanner = nx.spanner(G, 10, seed=_seed)
+    _test_spanner(G, spanner, 10)
+
+
+def test_spanner_invalid_stretch():
+    """Check whether an invalid stretch is caught."""
+    with pytest.raises(ValueError):
+        G = nx.empty_graph()
+        nx.spanner(G, 0)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f92baa4f324595b747d4250611c37b807a4cbbf
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py
@@ -0,0 +1,139 @@
+"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module."""
+import math
+
+import pytest
+
+import networkx as nx
+from networkx.classes.tests import dispatch_interface
+
+
+class TestStructuralHoles:
+    """Unit tests for computing measures of structural holes.
+
+    The expected values for these functions were originally computed using the
+    proprietary software `UCINET`_ and the free software `IGraph`_ , and then
+    computed by hand to make sure that the results are correct.
+
+    .. _UCINET: https://sites.google.com/site/ucinetsoftware/home
+    .. 
_IGraph: http://igraph.org/ + + """ + + def setup_method(self): + self.D = nx.DiGraph() + self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) + self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1} + # Example from http://www.analytictech.com/connections/v20(1)/holes.htm + self.G = nx.Graph() + self.G.add_edges_from( + [ + ("A", "B"), + ("A", "F"), + ("A", "G"), + ("A", "E"), + ("E", "G"), + ("F", "G"), + ("B", "G"), + ("B", "D"), + ("D", "G"), + ("G", "C"), + ] + ) + self.G_weights = { + ("A", "B"): 2, + ("A", "F"): 3, + ("A", "G"): 5, + ("A", "E"): 2, + ("E", "G"): 8, + ("F", "G"): 3, + ("B", "G"): 4, + ("B", "D"): 1, + ("D", "G"): 3, + ("G", "C"): 10, + } + + # This additionally tests the @nx._dispatch mechanism, treating + # nx.mutual_weight as if it were a re-implementation from another package + @pytest.mark.parametrize("wrapper", [lambda x: x, dispatch_interface.convert]) + def test_constraint_directed(self, wrapper): + constraint = nx.constraint(wrapper(self.D)) + assert constraint[0] == pytest.approx(1.003, abs=1e-3) + assert constraint[1] == pytest.approx(1.003, abs=1e-3) + assert constraint[2] == pytest.approx(1.389, abs=1e-3) + + def test_effective_size_directed(self): + effective_size = nx.effective_size(self.D) + assert effective_size[0] == pytest.approx(1.167, abs=1e-3) + assert effective_size[1] == pytest.approx(1.167, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + constraint = nx.constraint(D, weight="weight") + assert constraint[0] == pytest.approx(0.840, abs=1e-3) + assert constraint[1] == pytest.approx(1.143, abs=1e-3) + assert constraint[2] == pytest.approx(1.378, abs=1e-3) + + def test_effective_size_weighted_directed(self): + D = self.D.copy() + nx.set_edge_attributes(D, self.D_weights, "weight") + effective_size = nx.effective_size(D, weight="weight") + assert effective_size[0] == pytest.approx(1.567, abs=1e-3) + assert effective_size[1] == pytest.approx(1.083, abs=1e-3) + assert effective_size[2] == pytest.approx(1, abs=1e-3) + + def test_constraint_undirected(self): + constraint = nx.constraint(self.G) + assert constraint["G"] == pytest.approx(0.400, abs=1e-3) + assert constraint["A"] == pytest.approx(0.595, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_undirected_borgatti(self): + effective_size = nx.effective_size(self.G) + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_effective_size_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, 1, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + constraint = nx.constraint(G, weight="weight") + assert constraint["G"] == pytest.approx(0.299, abs=1e-3) + assert constraint["A"] == pytest.approx(0.795, abs=1e-3) + assert constraint["C"] == pytest.approx(1, abs=1e-3) + + def test_effective_size_weighted_undirected(self): + G = self.G.copy() + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, 
weight="weight") + assert effective_size["G"] == pytest.approx(5.47, abs=1e-2) + assert effective_size["A"] == pytest.approx(2.47, abs=1e-2) + assert effective_size["C"] == pytest.approx(1, abs=1e-2) + + def test_constraint_isolated(self): + G = self.G.copy() + G.add_node(1) + constraint = nx.constraint(G) + assert math.isnan(constraint[1]) + + def test_effective_size_isolated(self): + G = self.G.copy() + G.add_node(1) + nx.set_edge_attributes(G, self.G_weights, "weight") + effective_size = nx.effective_size(G, weight="weight") + assert math.isnan(effective_size[1]) + + def test_effective_size_borgatti_isolated(self): + G = self.G.copy() + G.add_node(1) + effective_size = nx.effective_size(G) + assert math.isnan(effective_size[1]) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_summarization.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_summarization.py new file mode 100644 index 0000000000000000000000000000000000000000..823a645d34b14edd2db199d630df397290c543fb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_summarization.py @@ -0,0 +1,641 @@ +""" +Unit tests for dedensification and graph summarization +""" +import pytest + +import networkx as nx + + +class TestDirectedDedensification: + def build_original_graph(self): + original_matrix = [ + ("1", "BC"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.DiGraph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", "BC"), + ("2", ["ABC"]), + ("3", ["A", "B", "6"]), + ("4", ["ABC"]), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ("ABC", "ABC"), + ] + compressed_graph = nx.DiGraph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_empty(self): + """ + Verify that an empty directed graph results in no compressor nodes + """ + G = nx.DiGraph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + @staticmethod + def densify(G, compressor_nodes, copy=True): + """ + Reconstructs the original graph from a dedensified, directed graph + + Parameters + ---------- + G: dedensified graph + A networkx graph + compressor_nodes: iterable + Iterable of compressor nodes in the dedensified graph + inplace: bool, optional (default: False) + Indicates if densification should be done inplace + + Returns + ------- + G: graph + A densified networkx graph + """ + if copy: + G = G.copy() + for compressor_node in compressor_nodes: + all_neighbors = set(nx.all_neighbors(G, compressor_node)) + out_neighbors = set(G.neighbors(compressor_node)) + for out_neighbor in out_neighbors: + G.remove_edge(compressor_node, out_neighbor) + in_neighbors = all_neighbors - out_neighbors + for in_neighbor in in_neighbors: + G.remove_edge(in_neighbor, compressor_node) + for out_neighbor in out_neighbors: + G.add_edge(in_neighbor, out_neighbor) + G.remove_node(compressor_node) + return G + + def setup_method(self): + self.c_nodes = ("ABC",) + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced the correct edges to/from compressor + nodes in a directed graph + """ + G = self.build_original_graph() + compressed_G = self.build_compressed_graph() + compressed_graph, c_nodes = nx.dedensify(G, threshold=2) + for s, t in compressed_graph.edges(): + o_s = "".join(sorted(s)) 
+ o_t = "".join(sorted(t)) + compressed_graph_exists = compressed_graph.has_edge(s, t) + verified_compressed_exists = compressed_G.has_edge(o_s, o_t) + assert compressed_graph_exists == verified_compressed_exists + assert len(c_nodes) == len(self.c_nodes) + + def test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of compressor nodes + in a directed graph + """ + G = self.build_original_graph() + original_edge_count = len(G.edges()) + c_G, c_nodes = nx.dedensify(G, threshold=2) + compressed_edge_count = len(c_G.edges()) + assert compressed_edge_count <= original_edge_count + compressed_G = self.build_compressed_graph() + assert compressed_edge_count == len(compressed_G.edges()) + + def test_densify_edges(self): + """ + Verifies that densification produces the correct edges from the + original directed graph + """ + compressed_G = self.build_compressed_graph() + original_graph = self.densify(compressed_G, self.c_nodes, copy=True) + G = self.build_original_graph() + for s, t in G.edges(): + assert G.has_edge(s, t) == original_graph.has_edge(s, t) + + def test_densify_edge_count(self): + """ + Verifies that densification produces the correct number of edges in the + original directed graph + """ + compressed_G = self.build_compressed_graph() + compressed_edge_count = len(compressed_G.edges()) + original_graph = self.densify(compressed_G, self.c_nodes) + original_edge_count = len(original_graph.edges()) + assert compressed_edge_count <= original_edge_count + G = self.build_original_graph() + assert original_edge_count == len(G.edges()) + + +class TestUnDirectedDedensification: + def build_original_graph(self): + """ + Builds graph shown in the original research paper + """ + original_matrix = [ + ("1", "CB"), + ("2", "ABC"), + ("3", ["A", "B", "6"]), + ("4", "ABC"), + ("5", "AB"), + ("6", ["5"]), + ("A", ["6"]), + ] + graph = nx.Graph() + for source, targets in original_matrix: + for target in targets: + graph.add_edge(source, target) + return graph + + def test_empty(self): + """ + Verify that an empty undirected graph results in no compressor nodes + """ + G = nx.Graph() + compressed_G, c_nodes = nx.dedensify(G, threshold=2) + assert c_nodes == set() + + def setup_method(self): + self.c_nodes = ("6AB", "ABC") + + def build_compressed_graph(self): + compressed_matrix = [ + ("1", ["B", "C"]), + ("2", ["ABC"]), + ("3", ["6AB"]), + ("4", ["ABC"]), + ("5", ["6AB"]), + ("6", ["6AB", "A"]), + ("A", ["6AB", "ABC"]), + ("B", ["ABC", "6AB"]), + ("C", ["ABC"]), + ] + compressed_graph = nx.Graph() + for source, targets in compressed_matrix: + for target in targets: + compressed_graph.add_edge(source, target) + return compressed_graph + + def test_dedensify_edges(self): + """ + Verifies that dedensify produced correct compressor nodes and the + correct edges to/from the compressor nodes in an undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2) + v_compressed_G = self.build_compressed_graph() + for s, t in c_G.edges(): + o_s = "".join(sorted(s)) + o_t = "".join(sorted(t)) + has_compressed_edge = c_G.has_edge(s, t) + verified_has_compressed_edge = v_compressed_G.has_edge(o_s, o_t) + assert has_compressed_edge == verified_has_compressed_edge + assert len(c_nodes) == len(self.c_nodes) + + def test_dedensify_edge_count(self): + """ + Verifies that dedensify produced the correct number of edges in an + undirected graph + """ + G = self.build_original_graph() + c_G, c_nodes = nx.dedensify(G, threshold=2, copy=True) + 
compressed_edge_count = len(c_G.edges()) + verified_original_edge_count = len(G.edges()) + assert compressed_edge_count <= verified_original_edge_count + verified_compressed_G = self.build_compressed_graph() + verified_compressed_edge_count = len(verified_compressed_G.edges()) + assert compressed_edge_count == verified_compressed_edge_count + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_summarization_empty(graph_type): + G = graph_type() + summary_graph = nx.snap_aggregation(G, node_attributes=("color",)) + assert nx.is_isomorphic(summary_graph, G) + + +class AbstractSNAP: + node_attributes = ("color",) + + def build_original_graph(self): + pass + + def build_summary_graph(self): + pass + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes, relationship_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def deterministic_labels(self, G): + node_labels = list(G.nodes) + node_labels = sorted(node_labels, key=lambda n: sorted(G.nodes[n]["group"])[0]) + node_labels.sort() + + label_mapping = {} + for index, node in enumerate(node_labels): + label = "Supernode-%s" % index + label_mapping[node] = label + + return nx.relabel_nodes(G, label_mapping) + + +class TestSNAPNoEdgeTypes(AbstractSNAP): + relationship_attributes = () + + def test_summary_graph(self): + original_graph = self.build_original_graph() + summary_graph = self.build_summary_graph() + + relationship_attributes = ("type",) + generated_summary_graph = nx.snap_aggregation( + original_graph, self.node_attributes + ) + relabeled_summary_graph = self.deterministic_labels(generated_summary_graph) + assert nx.is_isomorphic(summary_graph, relabeled_summary_graph) + + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Red"}, + "D": {"color": "Red"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Blue"}, + "H": {"color": "Blue"}, + "I": {"color": "Yellow"}, + "J": {"color": "Yellow"}, + "K": {"color": "Yellow"}, + "L": {"color": "Yellow"}, + } + edges = [ + ("A", "B"), + ("A", "C"), + ("A", "E"), + ("A", "I"), + ("B", "D"), + ("B", "J"), + ("B", "F"), + ("C", "G"), + ("D", "H"), + ("I", "J"), + ("J", "K"), + ("I", "L"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Red"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-0"), + ("Supernode-0", "Supernode-1"), + ("Supernode-0", "Supernode-2"), + ("Supernode-0", "Supernode-4"), + ("Supernode-1", "Supernode-3"), + ("Supernode-4", "Supernode-4"), + ("Supernode-4", "Supernode-5"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target in edges: + G.add_edge(source, target) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": 
{"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirected(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Red"}, + "D": {"color": "Red"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Blue"}, + "H": {"color": "Blue"}, + "I": {"color": "Yellow"}, + "J": {"color": "Yellow"}, + "K": {"color": "Yellow"}, + "L": {"color": "Yellow"}, + } + edges = [ + ("A", "B", "Strong"), + ("A", "C", "Weak"), + ("A", "E", "Strong"), + ("A", "I", "Weak"), + ("B", "D", "Weak"), + ("B", "J", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Weak"), + ("D", "H", "Weak"), + ("I", "J", "Strong"), + ("J", "K", "Strong"), + ("I", "L", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Red"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-0", "Strong"), + ("Supernode-0", "Supernode-1", "Weak"), + ("Supernode-0", "Supernode-2", "Strong"), + ("Supernode-0", "Supernode-4", "Weak"), + ("Supernode-1", "Supernode-3", "Weak"), + ("Supernode-4", "Supernode-4", "Strong"), + ("Supernode-4", "Supernode-5", "Strong"), + ] + G = nx.Graph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, types=[{"type": type}]) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirected(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Green"}, + "D": {"color": "Green"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + } + edges = [ + ("A", "C", "Strong"), + ("A", "E", "Strong"), + ("A", "F", "Weak"), + ("B", "D", "Strong"), + ("B", "E", "Weak"), + ("B", "F", "Strong"), + ("C", "G", "Strong"), + ("C", "F", "Strong"), + ("D", "E", "Strong"), + ("D", "H", "Strong"), + ("G", "E", "Strong"), + ("H", "F", "Strong"), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, type in edges: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Green"}, + "Supernode-2": {"color": "Blue"}, + "Supernode-3": {"color": "Yellow"}, + } + edges = [ + ("Supernode-0", "Supernode-1", [{"type": "Strong"}]), + ("Supernode-0", "Supernode-2", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-1", "Supernode-2", [{"type": "Strong"}]), + ("Supernode-1", "Supernode-3", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-2", [{"type": "Strong"}]), + ] + G = nx.DiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + G.add_edge(source, target, types=types) + + 
supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPUndirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Red"}, + "D": {"color": "Blue"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + "I": {"color": "Yellow"}, + } + edges = [ + ("A", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak", "Strong"]), + ("D", "I", ["Strong"]), + ("E", "H", ["Strong"]), + ("F", "G", ["Weak"]), + ("I", "G", ["Weak", "Strong"]), + ("I", "H", ["Weak", "Strong"]), + ("G", "H", ["Weak", "Strong"]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Blue"}, + "Supernode-2": {"color": "Yellow"}, + "Supernode-3": {"color": "Blue"}, + "Supernode-4": {"color": "Yellow"}, + "Supernode-5": {"color": "Red"}, + } + edges = [ + ("Supernode-1", "Supernode-2", [{"type": "Weak"}]), + ("Supernode-2", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-3", "Supernode-4", [{"type": "Strong"}]), + ("Supernode-3", "Supernode-5", [{"type": "Weak"}, {"type": "Strong"}]), + ("Supernode-4", "Supernode-4", [{"type": "Weak"}, {"type": "Strong"}]), + ] + G = nx.MultiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + "Supernode-4": {"I", "J"}, + "Supernode-5": {"K", "L"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G + + +class TestSNAPDirectedMulti(AbstractSNAP): + def build_original_graph(self): + nodes = { + "A": {"color": "Red"}, + "B": {"color": "Red"}, + "C": {"color": "Green"}, + "D": {"color": "Green"}, + "E": {"color": "Blue"}, + "F": {"color": "Blue"}, + "G": {"color": "Yellow"}, + "H": {"color": "Yellow"}, + } + edges = [ + ("A", "C", ["Weak", "Strong"]), + ("A", "E", ["Strong"]), + ("A", "F", ["Weak"]), + ("B", "D", ["Weak", "Strong"]), + ("B", "E", ["Weak"]), + ("B", "F", ["Strong"]), + ("C", "G", ["Weak", "Strong"]), + ("C", "F", ["Strong"]), + ("D", "E", ["Strong"]), + ("D", "H", ["Weak", "Strong"]), + ("G", "E", ["Strong"]), + ("H", "F", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + return G + + def build_summary_graph(self): + nodes = { + "Supernode-0": {"color": "Red"}, + "Supernode-1": {"color": "Blue"}, + "Supernode-2": {"color": "Yellow"}, + "Supernode-3": {"color": "Blue"}, + } + edges = [ + ("Supernode-0", "Supernode-1", ["Weak", "Strong"]), + ("Supernode-0", "Supernode-2", ["Weak", "Strong"]), + ("Supernode-1", "Supernode-2", ["Strong"]), + ("Supernode-1", "Supernode-3", ["Weak", "Strong"]), + ("Supernode-3", "Supernode-2", ["Strong"]), + ] + G = nx.MultiDiGraph() + for node in nodes: + 
attributes = nodes[node] + G.add_node(node, **attributes) + + for source, target, types in edges: + for type in types: + G.add_edge(source, target, type=type) + + supernodes = { + "Supernode-0": {"A", "B"}, + "Supernode-1": {"C", "D"}, + "Supernode-2": {"E", "F"}, + "Supernode-3": {"G", "H"}, + } + nx.set_node_attributes(G, supernodes, "group") + return G diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_swap.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_swap.py new file mode 100644 index 0000000000000000000000000000000000000000..49dd5f8e8c75f95a5650a563530fab67916becc9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_swap.py @@ -0,0 +1,156 @@ +import pytest + +import networkx as nx + + +def test_directed_edge_swap(): + graph = nx.path_graph(200, create_using=nx.DiGraph) + in_degrees = sorted((n, d) for n, d in graph.in_degree()) + out_degrees = sorted((n, d) for n, d in graph.out_degree()) + G = nx.directed_edge_swap(graph, nswap=40, max_tries=500, seed=1) + assert in_degrees == sorted((n, d) for n, d in G.in_degree()) + assert out_degrees == sorted((n, d) for n, d in G.out_degree()) + + +def test_edge_cases_directed_edge_swap(): + # Tests cases when swaps are impossible, either too few edges exist, or self loops/cycles are unavoidable + # TODO: Rewrite function to explicitly check for impossible swaps and raise error + e = ( + "Maximum number of swap attempts \\(11\\) exceeded " + "before desired swaps achieved \\(\\d\\)." + ) + graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) + with pytest.raises(nx.NetworkXAlgorithmError, match=e): + nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1) + + +def test_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_double_edge_swap_seed(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.double_edge_swap(graph, 40, seed=1) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_low_window_threshold(): + graph = nx.barabasi_albert_graph(200, 1) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 40, _window_threshold=0, seed=1) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star(): + # Testing ui==xi in connected_double_edge_swap + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_connected_double_edge_swap_star_low_window_threshold(): + # Testing ui==xi in connected_double_edge_swap with low window threshold + graph = nx.star_graph(40) + degrees = sorted(d for n, d in graph.degree()) + G = nx.connected_double_edge_swap(graph, 1, _window_threshold=0, seed=4) + assert nx.is_connected(graph) + assert degrees == sorted(d for n, d in graph.degree()) + + +def test_directed_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = 
nx.directed_edge_swap(nx.path_graph(3, create_using=nx.DiGraph)) + + +def test_directed_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.directed_edge_swap( + nx.path_graph(3, create_using=nx.DiGraph), nswap=1, max_tries=0 + ) + + +def test_directed_exception_undirected(): + graph = nx.Graph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.directed_edge_swap(graph) + + +def test_directed_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.directed_edge_swap( + nx.complete_graph(4, nx.DiGraph()), nswap=1, max_tries=5 + ) + + +def test_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(3)) + + +def test_double_edge_swap_tries(): + with pytest.raises(nx.NetworkXError): + G = nx.double_edge_swap(nx.path_graph(10), nswap=1, max_tries=0) + + +def test_double_edge_directed(): + graph = nx.DiGraph([(0, 1), (2, 3)]) + with pytest.raises(nx.NetworkXError, match="not defined for directed graphs."): + G = nx.double_edge_swap(graph) + + +def test_double_edge_max_tries(): + with pytest.raises(nx.NetworkXAlgorithmError): + G = nx.double_edge_swap(nx.complete_graph(4), nswap=1, max_tries=5) + + +def test_connected_double_edge_swap_small(): + with pytest.raises(nx.NetworkXError): + G = nx.connected_double_edge_swap(nx.path_graph(3)) + + +def test_connected_double_edge_swap_not_connected(): + with pytest.raises(nx.NetworkXError): + G = nx.path_graph(3) + nx.add_path(G, [10, 11, 12]) + G = nx.connected_double_edge_swap(G) + + +def test_degree_seq_c4(): + G = nx.cycle_graph(4) + degrees = sorted(d for n, d in G.degree()) + G = nx.double_edge_swap(G, 1, 100) + assert degrees == sorted(d for n, d in G.degree()) + + +def test_fewer_than_4_nodes(): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2]) + with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."): + nx.directed_edge_swap(G) + + +def test_less_than_3_edges(): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([3, 4]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"): + nx.directed_edge_swap(G) + + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"): + nx.double_edge_swap(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_threshold.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..07aad44bb268a42944260b4217bce15b1278ebfd --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_threshold.py @@ -0,0 +1,269 @@ +""" +Threshold Graphs +================ +""" + +import pytest + +import networkx as nx +import networkx.algorithms.threshold as nxt +from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic + +cnlti = nx.convert_node_labels_to_integers + + +class TestGeneratorThreshold: + def test_threshold_sequence_graph_test(self): + G = nx.star_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence([d for n, d in G.degree()]) + + G = nx.complete_graph(10) + assert nxt.is_threshold_graph(G) + assert nxt.is_threshold_sequence([d for n, d in G.degree()]) + + deg = [3, 2, 2, 1, 1, 1] + assert not nxt.is_threshold_sequence(deg) + + deg = [3, 2, 2, 1] + assert nxt.is_threshold_sequence(deg) + + G = nx.generators.havel_hakimi_graph(deg) + assert nxt.is_threshold_graph(G) + + def test_creation_sequences(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + + with 
pytest.raises(ValueError): + nxt.creation_sequence(deg, with_labels=True, compact=True) + + cs0 = nxt.creation_sequence(deg) + H0 = nxt.threshold_graph(cs0) + assert "".join(cs0) == "ddid" + + cs1 = nxt.creation_sequence(deg, with_labels=True) + H1 = nxt.threshold_graph(cs1) + assert cs1 == [(1, "d"), (2, "d"), (3, "i"), (0, "d")] + + cs2 = nxt.creation_sequence(deg, compact=True) + H2 = nxt.threshold_graph(cs2) + assert cs2 == [2, 1, 1] + assert "".join(nxt.uncompact(cs2)) == "ddid" + assert graph_could_be_isomorphic(H0, G) + assert graph_could_be_isomorphic(H0, H1) + assert graph_could_be_isomorphic(H0, H2) + + def test_make_compact(self): + assert nxt.make_compact(["d", "d", "d", "i", "d", "d"]) == [3, 1, 2] + assert nxt.make_compact([3, 1, 2]) == [3, 1, 2] + assert pytest.raises(TypeError, nxt.make_compact, [3.0, 1.0, 2.0]) + + def test_uncompact(self): + assert nxt.uncompact([3, 1, 2]) == ["d", "d", "d", "i", "d", "d"] + assert nxt.uncompact(["d", "d", "i", "d"]) == ["d", "d", "i", "d"] + assert nxt.uncompact( + nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + ) == nxt.uncompact([(1, "d"), (2, "d"), (3, "i"), (0, "d")]) + assert pytest.raises(TypeError, nxt.uncompact, [3.0, 1.0, 2.0]) + + def test_creation_sequence_to_weights(self): + assert nxt.creation_sequence_to_weights([3, 1, 2]) == [ + 0.5, + 0.5, + 0.5, + 0.25, + 0.75, + 0.75, + ] + assert pytest.raises( + TypeError, nxt.creation_sequence_to_weights, [3.0, 1.0, 2.0] + ) + + def test_weights_to_creation_sequence(self): + deg = [3, 2, 2, 1] + with pytest.raises(ValueError): + nxt.weights_to_creation_sequence(deg, with_labels=True, compact=True) + assert nxt.weights_to_creation_sequence(deg, with_labels=True) == [ + (3, "d"), + (1, "d"), + (2, "d"), + (0, "d"), + ] + assert nxt.weights_to_creation_sequence(deg, compact=True) == [4] + + def test_find_alternating_4_cycle(self): + G = nx.Graph() + G.add_edge(1, 2) + assert not nxt.find_alternating_4_cycle(G) + + def test_shortest_path(self): + deg = [3, 2, 2, 1] + G = nx.generators.havel_hakimi_graph(deg) + cs1 = nxt.creation_sequence(deg, with_labels=True) + for n, m in [(3, 0), (0, 3), (0, 2), (0, 1), (1, 3), (3, 1), (1, 2), (2, 3)]: + assert nxt.shortest_path(cs1, n, m) == nx.shortest_path(G, n, m) + + spl = nxt.shortest_path_length(cs1, 3) + spl2 = nxt.shortest_path_length([t for v, t in cs1], 2) + assert spl == spl2 + + spld = {} + for j, pl in enumerate(spl): + n = cs1[j][0] + spld[n] = pl + assert spld == nx.single_source_shortest_path_length(G, 3) + + assert nxt.shortest_path(["d", "d", "d", "i", "d", "d"], 1, 2) == [1, 2] + assert nxt.shortest_path([3, 1, 2], 1, 2) == [1, 2] + assert pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1, 2) + assert pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], "a", 2) + assert pytest.raises(ValueError, nxt.shortest_path, [3, 1, 2], 1, "b") + assert nxt.shortest_path([3, 1, 2], 1, 1) == [1] + + def test_shortest_path_length(self): + assert nxt.shortest_path_length([3, 1, 2], 1) == [1, 0, 1, 2, 1, 1] + assert nxt.shortest_path_length(["d", "d", "d", "i", "d", "d"], 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + assert nxt.shortest_path_length(("d", "d", "d", "i", "d", "d"), 1) == [ + 1, + 0, + 1, + 2, + 1, + 1, + ] + assert pytest.raises(TypeError, nxt.shortest_path, [3.0, 1.0, 2.0], 1) + + def test_random_threshold_sequence(self): + assert len(nxt.random_threshold_sequence(10, 0.5)) == 10 + assert nxt.random_threshold_sequence(10, 0.5, seed=42) == [ + "d", + "i", + "d", + "d", + "d", + "i", + "i", + "i", + "d", + 
"d", + ] + assert pytest.raises(ValueError, nxt.random_threshold_sequence, 10, 1.5) + + def test_right_d_threshold_sequence(self): + assert nxt.right_d_threshold_sequence(3, 2) == ["d", "i", "d"] + assert pytest.raises(ValueError, nxt.right_d_threshold_sequence, 2, 3) + + def test_left_d_threshold_sequence(self): + assert nxt.left_d_threshold_sequence(3, 2) == ["d", "i", "d"] + assert pytest.raises(ValueError, nxt.left_d_threshold_sequence, 2, 3) + + def test_weights_thresholds(self): + wseq = [3, 4, 3, 3, 5, 6, 5, 4, 5, 6] + cs = nxt.weights_to_creation_sequence(wseq, threshold=10) + wseq = nxt.creation_sequence_to_weights(cs) + cs2 = nxt.weights_to_creation_sequence(wseq) + assert cs == cs2 + + wseq = nxt.creation_sequence_to_weights(nxt.uncompact([3, 1, 2, 3, 3, 2, 3])) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights([3, 1, 2, 3, 3, 2, 3]) + assert wseq == [ + s * 0.125 for s in [4, 4, 4, 3, 5, 5, 2, 2, 2, 6, 6, 6, 1, 1, 7, 7, 7] + ] + + wseq = nxt.creation_sequence_to_weights(list(enumerate("ddidiiidididi"))) + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididi") + assert wseq == [s * 0.1 for s in [5, 5, 4, 6, 3, 3, 3, 7, 2, 8, 1, 9, 0]] + + wseq = nxt.creation_sequence_to_weights("ddidiiidididid") + ws = [s / 12 for s in [6, 6, 5, 7, 4, 4, 4, 8, 3, 9, 2, 10, 1, 11]] + assert sum(abs(c - d) for c, d in zip(wseq, ws)) < 1e-14 + + def test_finding_routines(self): + G = nx.Graph({1: [2], 2: [3], 3: [4], 4: [5], 5: [6]}) + G.add_edge(2, 4) + G.add_edge(2, 5) + G.add_edge(2, 7) + G.add_edge(3, 6) + G.add_edge(4, 6) + + # Alternating 4 cycle + assert nxt.find_alternating_4_cycle(G) == [1, 2, 3, 6] + + # Threshold graph + TG = nxt.find_threshold_graph(G) + assert nxt.is_threshold_graph(TG) + assert sorted(TG.nodes()) == [1, 2, 3, 4, 5, 7] + + cs = nxt.creation_sequence(dict(TG.degree()), with_labels=True) + assert nxt.find_creation_sequence(G) == cs + + def test_fast_versions_properties_threshold_graphs(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + assert nxt.density("ddiiddid") == nx.density(G) + assert sorted(nxt.degree_sequence(cs)) == sorted(d for n, d in G.degree()) + + ts = nxt.triangle_sequence(cs) + assert ts == list(nx.triangles(G).values()) + assert sum(ts) // 3 == nxt.triangles(cs) + + c1 = nxt.cluster_sequence(cs) + c2 = list(nx.clustering(G).values()) + assert sum(abs(c - d) for c, d in zip(c1, c2)) == pytest.approx(0, abs=1e-7) + + b1 = nx.betweenness_centrality(G).values() + b2 = nxt.betweenness_sequence(cs) + assert sum(abs(c - d) for c, d in zip(b1, b2)) < 1e-7 + + assert nxt.eigenvalues(cs) == [0, 1, 3, 3, 5, 7, 7, 8] + + # Degree Correlation + assert abs(nxt.degree_correlation(cs) + 0.593038821954) < 1e-12 + assert nxt.degree_correlation("diiiddi") == -0.8 + assert nxt.degree_correlation("did") == -1.0 + assert nxt.degree_correlation("ddd") == 1.0 + assert nxt.eigenvalues("dddiii") == [0, 0, 0, 0, 3, 3] + assert nxt.eigenvalues("dddiiid") == [0, 1, 1, 1, 4, 4, 7] + + def test_tg_creation_routines(self): + s = nxt.left_d_threshold_sequence(5, 7) + s = nxt.right_d_threshold_sequence(5, 7) + s1 = nxt.swap_d(s, 1.0, 1.0) + s1 = nxt.swap_d(s, 1.0, 1.0, seed=1) + + def test_eigenvectors(self): + np = pytest.importorskip("numpy") + eigenval = np.linalg.eigvals + pytest.importorskip("scipy") + + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + (tgeval, tgevec) = nxt.eigenvectors(cs) + 
np.testing.assert_allclose([np.dot(lv, lv) for lv in tgevec], 1.0, rtol=1e-9) + lapl = nx.laplacian_matrix(G) + + def test_create_using(self): + cs = "ddiiddid" + G = nxt.threshold_graph(cs) + assert pytest.raises( + nx.exception.NetworkXError, + nxt.threshold_graph, + cs, + create_using=nx.DiGraph(), + ) + MG = nxt.threshold_graph(cs, create_using=nx.MultiGraph()) + assert sorted(MG.edges()) == sorted(G.edges()) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_time_dependent.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_time_dependent.py new file mode 100644 index 0000000000000000000000000000000000000000..1e256f4bc69389464cfa164f209bc2db713b79ee --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_time_dependent.py @@ -0,0 +1,431 @@ +"""Unit testing for time dependent algorithms.""" + +from datetime import datetime, timedelta + +import pytest + +import networkx as nx + +_delta = timedelta(days=5 * 365) + + +class TestCdIndex: + """Unit testing for the cd index function.""" + + def test_common_graph(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta) == 0.17 + + def test_common_graph_with_given_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"date": datetime(1992, 1, 1)}, + 1: {"date": datetime(1992, 1, 1)}, + 2: {"date": datetime(1993, 1, 1)}, + 3: {"date": datetime(1993, 1, 1)}, + 4: {"date": datetime(1995, 1, 1)}, + 5: {"date": datetime(1997, 1, 1)}, + 6: {"date": datetime(1998, 1, 1)}, + 7: {"date": datetime(1999, 1, 1)}, + 8: {"date": datetime(1999, 1, 1)}, + 9: {"date": datetime(1998, 1, 1)}, + 10: {"date": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=_delta, time="date") == 0.17 + + def test_common_graph_with_int_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20}, + 1: {"time": 20}, + 2: {"time": 30}, + 3: {"time": 30}, + 4: {"time": 50}, + 5: {"time": 70}, + 6: {"time": 80}, + 7: {"time": 90}, + 8: {"time": 90}, + 9: {"time": 80}, + 10: {"time": 74}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def 
test_common_graph_with_float_attributes(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 4, time_delta=50) == 0.17 + + def test_common_graph_with_weights(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 1) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1), "weight": 5}, + 7: {"time": datetime(1999, 1, 1), "weight": 2}, + 8: {"time": datetime(1999, 1, 1), "weight": 6}, + 9: {"time": datetime(1998, 1, 1), "weight": 3}, + 10: {"time": datetime(1997, 4, 1), "weight": 10}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta, weight="weight") == 0.04 + + def test_node_with_no_predecessors(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 0.0 + + def test_node_with_no_successors(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + assert nx.cd_index(G, 4, time_delta=_delta) == 1.0 + + def test_n_equals_zero(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + 
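# All of node 4's citers (added below) come more than five years after + # it, so nothing falls inside the time window, n == 0, and the cd index + # is undefined. +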
G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(2005, 1, 1)}, + 6: {"time": datetime(2010, 1, 1)}, + 7: {"time": datetime(2001, 1, 1)}, + 8: {"time": datetime(2020, 1, 1)}, + 9: {"time": datetime(2017, 1, 1)}, + 10: {"time": datetime(2004, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="The cd index cannot be defined." + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_time_timedelta_compatibility(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(4, 2) + G.add_edge(4, 0) + G.add_edge(4, 3) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": 20.2}, + 1: {"time": 20.2}, + 2: {"time": 30.7}, + 3: {"time": 30.7}, + 4: {"time": 50.9}, + 5: {"time": 70.1}, + 6: {"time": 80.6}, + 7: {"time": 90.7}, + 8: {"time": 90.7}, + 9: {"time": 80.6}, + 10: {"time": 74.2}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, + match="Addition and comparison are not supported between", + ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_node_with_no_time(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + G.add_edge(8, 2) + G.add_edge(6, 0) + G.add_edge(6, 3) + G.add_edge(5, 2) + G.add_edge(6, 2) + G.add_edge(6, 4) + G.add_edge(7, 4) + G.add_edge(8, 4) + G.add_edge(9, 4) + G.add_edge(9, 1) + G.add_edge(9, 3) + G.add_edge(10, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + with pytest.raises( + nx.NetworkXError, match="Not all nodes have a 'time' attribute." 
+ ) as ve: + nx.cd_index(G, 4, time_delta=_delta) + + def test_maximally_consolidating(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 1) + G.add_edge(6, 5) + G.add_edge(7, 1) + G.add_edge(7, 5) + G.add_edge(8, 2) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(9, 3) + G.add_edge(10, 5) + G.add_edge(10, 3) + G.add_edge(10, 4) + G.add_edge(11, 5) + G.add_edge(11, 4) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 5, time_delta=_delta) == -1 + + def test_maximally_destabilizing(self): + G = nx.DiGraph() + G.add_nodes_from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + G.add_edge(5, 1) + G.add_edge(5, 2) + G.add_edge(5, 3) + G.add_edge(5, 4) + G.add_edge(6, 5) + G.add_edge(7, 5) + G.add_edge(8, 5) + G.add_edge(9, 5) + G.add_edge(10, 5) + G.add_edge(11, 5) + + node_attrs = { + 0: {"time": datetime(1992, 1, 1)}, + 1: {"time": datetime(1992, 1, 1)}, + 2: {"time": datetime(1993, 1, 1)}, + 3: {"time": datetime(1993, 1, 1)}, + 4: {"time": datetime(1995, 1, 1)}, + 5: {"time": datetime(1997, 1, 1)}, + 6: {"time": datetime(1998, 1, 1)}, + 7: {"time": datetime(1999, 1, 1)}, + 8: {"time": datetime(1999, 1, 1)}, + 9: {"time": datetime(1998, 1, 1)}, + 10: {"time": datetime(1997, 4, 1)}, + 11: {"time": datetime(1998, 5, 1)}, + } + + nx.set_node_attributes(G, node_attrs) + + assert nx.cd_index(G, 5, time_delta=_delta) == 1 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_tournament.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_tournament.py new file mode 100644 index 0000000000000000000000000000000000000000..0a88b42ba8fe12a345c2dfcba41ebf8d4e2e4633 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_tournament.py @@ -0,0 +1,162 @@ +"""Unit tests for the :mod:`networkx.algorithms.tournament` module.""" +from itertools import combinations + +import pytest + +from networkx import DiGraph +from networkx.algorithms.tournament import ( + hamiltonian_path, + index_satisfying, + is_reachable, + is_strongly_connected, + is_tournament, + random_tournament, + score_sequence, + tournament_matrix, +) + + +def test_condition_not_satisfied(): + condition = lambda x: x > 0 + iter_in = [0] + assert index_satisfying(iter_in, condition) == 1 + + +def test_empty_iterable(): + condition = lambda x: x > 0 + with pytest.raises(ValueError): + index_satisfying([], condition) + + +def test_is_tournament(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + assert is_tournament(G) + + +def test_self_loops(): + """A tournament must have no self-loops.""" + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(0, 0) + assert not is_tournament(G) + + +def test_missing_edges(): + """A tournament must not have any pair of nodes without at least + one edge joining the pair. 
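+ (Equivalently, a tournament is an orientation of a complete graph, so + the underlying undirected graph must be complete.)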
+ + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3)]) + assert not is_tournament(G) + + + def test_bidirectional_edges(): + """A tournament must not have any pair of nodes with greater + than one edge joining the pair. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + G.add_edge(1, 0) + assert not is_tournament(G) + + + def test_graph_is_tournament(): + for _ in range(10): + G = random_tournament(5) + assert is_tournament(G) + + + def test_graph_is_tournament_seed(): + for _ in range(10): + G = random_tournament(5, seed=1) + assert is_tournament(G) + + + def test_graph_is_tournament_one_node(): + G = random_tournament(1) + assert is_tournament(G) + + + def test_graph_is_tournament_zero_node(): + G = random_tournament(0) + assert is_tournament(G) + + + def test_hamiltonian_empty_graph(): + path = hamiltonian_path(DiGraph()) + assert len(path) == 0 + + + def test_path_is_hamiltonian(): + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + + + def test_hamiltonian_cycle(): + """Tests that :func:`networkx.tournament.hamiltonian_path` + returns a Hamiltonian cycle when provided a strongly connected + tournament. + + """ + G = DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)]) + path = hamiltonian_path(G) + assert len(path) == 4 + assert all(v in G[u] for u, v in zip(path, path[1:])) + assert path[0] in G[path[-1]] + + + def test_score_sequence_edge(): + G = DiGraph([(0, 1)]) + assert score_sequence(G) == [0, 1] + + + def test_score_sequence_triangle(): + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert score_sequence(G) == [1, 1, 1] + + + def test_tournament_matrix(): + np = pytest.importorskip("numpy") + pytest.importorskip("scipy") + npt = np.testing + G = DiGraph([(0, 1)]) + m = tournament_matrix(G) + npt.assert_array_equal(m.todense(), np.array([[0, 1], [-1, 0]])) + + + def test_reachable_pair(): + """Tests for a reachable pair of nodes.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_reachable(G, 0, 2) + + + def test_same_node_is_reachable(): + """Tests that a node is always reachable from itself.""" + # G is an arbitrary tournament on ten nodes.
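+ # combinations() yields each pair once with the smaller label first, so + # every edge points from smaller to larger: the transitive tournament.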
+ G = DiGraph(sorted(p) for p in combinations(range(10), 2)) + assert all(is_reachable(G, v, v) for v in G) + + + def test_unreachable_pair(): + """Tests for an unreachable pair of nodes.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_reachable(G, 1, 0) + + + def test_is_strongly_connected(): + """Tests for a strongly connected tournament.""" + G = DiGraph([(0, 1), (1, 2), (2, 0)]) + assert is_strongly_connected(G) + + + def test_not_strongly_connected(): + """Tests for a tournament that is not strongly connected.""" + G = DiGraph([(0, 1), (0, 2), (1, 2)]) + assert not is_strongly_connected(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_triads.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_triads.py new file mode 100644 index 0000000000000000000000000000000000000000..66dedb758567fcc1bf510700e33035d9855c2549 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_triads.py @@ -0,0 +1,277 @@ +"""Tests for the :mod:`networkx.algorithms.triads` module.""" + +import itertools +from collections import defaultdict +from random import sample + +import pytest + +import networkx as nx + + +def test_triadic_census(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = nx.triadic_census(G) + assert expected == actual + + +def test_is_triad(): + """Tests the is_triad function.""" + G = nx.karate_club_graph() + G = G.to_directed() + for i in range(100): + nodes = sample(sorted(G.nodes()), 3) + G2 = G.subgraph(nodes) + assert nx.is_triad(G2) + + +def test_all_triplets(): + """Tests the all_triplets function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = [ + f"{i},{j},{k}" + for i in range(7) + for j in range(i + 1, 7) + for k in range(j + 1, 7) + ] + expected = [set(x.split(",")) for x in expected] + actual = [set(x) for x in nx.all_triplets(G)] + assert all(any(s1 == s2 for s1 in expected) for s2 in actual) + + +def test_all_triads(): + """Tests the all_triads function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = [ + f"{i},{j},{k}" + for i in range(7) + for j in range(i + 1, 7) + for k in range(j + 1, 7) + ] + expected = [G.subgraph(x.split(",")) for x in expected] + actual = list(nx.all_triads(G)) + assert all(any(nx.is_isomorphic(G1, G2) for G1 in expected) for G2 in actual) + + +def test_triad_type(): + """Tests the triad_type function.""" + # 0 edges (1 type) + G = nx.DiGraph({0: [], 1: [], 2: []}) + assert nx.triad_type(G) == "003" + # 1 edge (1 type) + G = nx.DiGraph({0: [1], 1: [], 2: []}) + assert nx.triad_type(G) == "012" + # 2 edges (4 types) + G = nx.DiGraph([(0, 1), (0, 2)]) + assert nx.triad_type(G) == "021D" + G = nx.DiGraph({0: [1], 1: [0], 2: []}) + assert nx.triad_type(G) == "102" + G = nx.DiGraph([(0, 1), (2, 1)]) + assert nx.triad_type(G) == "021U" + G = nx.DiGraph([(0, 1), (1, 2)]) + assert nx.triad_type(G) == "021C" + # 3 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1)]) + assert nx.triad_type(G) == "111D" + G = nx.DiGraph([(0, 1), (1, 0), (1, 2)]) + assert nx.triad_type(G) == "111U" + G = nx.DiGraph([(0, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) ==
"030T" + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + assert nx.triad_type(G) == "030C" + # 4 edges (4 types) + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (0, 2)]) + assert nx.triad_type(G) == "201" + G = nx.DiGraph([(0, 1), (1, 0), (2, 0), (2, 1)]) + assert nx.triad_type(G) == "120D" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (1, 2)]) + assert nx.triad_type(G) == "120U" + G = nx.DiGraph([(0, 1), (1, 0), (0, 2), (2, 1)]) + assert nx.triad_type(G) == "120C" + # 5 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (2, 1), (1, 2), (0, 2)]) + assert nx.triad_type(G) == "210" + # 6 edges (1 type) + G = nx.DiGraph([(0, 1), (1, 0), (1, 2), (2, 1), (0, 2), (2, 0)]) + assert nx.triad_type(G) == "300" + + +def test_triads_by_type(): + """Tests the all_triplets function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + all_triads = nx.all_triads(G) + expected = defaultdict(list) + for triad in all_triads: + name = nx.triad_type(triad) + expected[name].append(triad) + actual = nx.triads_by_type(G) + assert set(actual.keys()) == set(expected.keys()) + for tri_type, actual_Gs in actual.items(): + expected_Gs = expected[tri_type] + for a in actual_Gs: + assert any(nx.is_isomorphic(a, e) for e in expected_Gs) + + +def test_random_triad(): + """Tests the random_triad function""" + G = nx.karate_club_graph() + G = G.to_directed() + for i in range(100): + assert nx.is_triad(nx.random_triad(G)) + + G = nx.DiGraph() + msg = "at least 3 nodes to form a triad" + with pytest.raises(nx.NetworkXError, match=msg): + nx.random_triad(G) + + +def test_triadic_census_short_path_nodelist(): + G = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for nl in ["a", "b", "c", "ab", "ac", "bc", "abc"]: + triad_census = nx.triadic_census(G, nodelist=nl) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_correct_nodelist_values(): + G = nx.path_graph(5, create_using=nx.DiGraph) + msg = r"nodelist includes duplicate nodes or nodes not in G" + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, 2, 3]) + with pytest.raises(ValueError, match=msg): + nx.triadic_census(G, [1, 2, "a", 3]) + + +def test_triadic_census_tiny_graphs(): + tc = nx.triadic_census(nx.empty_graph(0, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(1, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.empty_graph(2, create_using=nx.DiGraph)) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + tc = nx.triadic_census(nx.DiGraph([(1, 2)])) + assert {} == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + +def test_triadic_census_selfloops(): + GG = nx.path_graph("abc", create_using=nx.DiGraph) + expected = {"021C": 1} + for n in GG: + G = GG.copy() + G.add_edge(n, n) + tc = nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in tc.items() if cnt > 0} + + GG = nx.path_graph("abcde", create_using=nx.DiGraph) + tbt = nx.triads_by_type(GG) + for n in GG: + GG.add_edge(n, n) + tc = nx.triadic_census(GG) + assert tc == {tt: len(tbt[tt]) for tt in tc} + + +def test_triadic_census_four_path(): + G = nx.path_graph("abcd", create_using=nx.DiGraph) + expected = {"012": 2, "021C": 2} + triad_census = nx.triadic_census(G) + assert expected == {typ: cnt for typ, cnt in triad_census.items() if cnt > 0} + + +def test_triadic_census_four_path_nodelist(): + G = 
nx.path_graph("abcd", create_using=nx.DiGraph) + expected_end = {"012": 2, "021C": 1} + expected_mid = {"012": 1, "021C": 2} + a_triad_census = nx.triadic_census(G, nodelist=["a"]) + assert expected_end == {typ: cnt for typ, cnt in a_triad_census.items() if cnt > 0} + b_triad_census = nx.triadic_census(G, nodelist=["b"]) + assert expected_mid == {typ: cnt for typ, cnt in b_triad_census.items() if cnt > 0} + c_triad_census = nx.triadic_census(G, nodelist=["c"]) + assert expected_mid == {typ: cnt for typ, cnt in c_triad_census.items() if cnt > 0} + d_triad_census = nx.triadic_census(G, nodelist=["d"]) + assert expected_end == {typ: cnt for typ, cnt in d_triad_census.items() if cnt > 0} + + +def test_triadic_census_nodelist(): + """Tests the triadic_census function.""" + G = nx.DiGraph() + G.add_edges_from(["01", "02", "03", "04", "05", "12", "16", "51", "56", "65"]) + expected = { + "030T": 2, + "120C": 1, + "210": 0, + "120U": 0, + "012": 9, + "102": 3, + "021U": 0, + "111U": 0, + "003": 8, + "030C": 0, + "021D": 9, + "201": 0, + "111D": 1, + "300": 0, + "120D": 0, + "021C": 2, + } + actual = {k: 0 for k in expected} + for node in G.nodes(): + node_triad_census = nx.triadic_census(G, nodelist=[node]) + for triad_key in expected: + actual[triad_key] += node_triad_census[triad_key] + # Divide all counts by 3 + for k, v in actual.items(): + actual[k] //= 3 + assert expected == actual + + +@pytest.mark.parametrize("N", [5, 10]) +def test_triadic_census_on_random_graph(N): + G = nx.binomial_graph(N, 0.3, directed=True, seed=42) + tc1 = nx.triadic_census(G) + tbt = nx.triads_by_type(G) + tc2 = {tt: len(tbt[tt]) for tt in tc1} + assert tc1 == tc2 + + for n in G: + tc1 = nx.triadic_census(G, nodelist={n}) + tc2 = {tt: sum(1 for t in tbt.get(tt, []) if n in t) for tt in tc1} + assert tc1 == tc2 + + for ns in itertools.combinations(G, 2): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 + + for ns in itertools.combinations(G, 3): + ns = set(ns) + tc1 = nx.triadic_census(G, nodelist=ns) + tc2 = { + tt: sum(1 for t in tbt.get(tt, []) if any(n in ns for n in t)) for tt in tc1 + } + assert tc1 == tc2 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_vitality.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_vitality.py new file mode 100644 index 0000000000000000000000000000000000000000..248206e670fa911f62177bb6727d6a7a6df1e6b9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_vitality.py @@ -0,0 +1,41 @@ +import networkx as nx + + +class TestClosenessVitality: + def test_unweighted(self): + G = nx.cycle_graph(3) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 2, 1: 2, 2: 2} + + def test_weighted(self): + G = nx.Graph() + nx.add_cycle(G, [0, 1, 2], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_unweighted_digraph(self): + G = nx.DiGraph(nx.cycle_graph(3)) + vitality = nx.closeness_vitality(G) + assert vitality == {0: 4, 1: 4, 2: 4} + + def test_weighted_digraph(self): + G = nx.DiGraph() + nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + assert vitality == {0: 8, 1: 8, 2: 8} + + def test_weighted_multidigraph(self): + G = nx.MultiDiGraph() + nx.add_cycle(G, [0, 1, 2], weight=2) + nx.add_cycle(G, [2, 1, 0], weight=2) + vitality = nx.closeness_vitality(G, weight="weight") + 
assert vitality == {0: 8, 1: 8, 2: 8} + + def test_disconnecting_graph(self): + """Tests that the closeness vitality of a node whose removal + disconnects the graph is negative infinity. + + """ + G = nx.path_graph(3) + assert nx.closeness_vitality(G, node=1) == -float("inf") diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_voronoi.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_voronoi.py new file mode 100644 index 0000000000000000000000000000000000000000..3269ae62a023ff0cf9fdc55122cb6e7c8d2ba319 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_voronoi.py @@ -0,0 +1,103 @@ +import networkx as nx +from networkx.utils import pairwise + + +class TestVoronoiCells: + """Unit tests for the Voronoi cells function.""" + + def test_isolates(self): + """Tests that a graph with isolated nodes has all isolates in + one block of the partition. + + """ + G = nx.empty_graph(5) + cells = nx.voronoi_cells(G, {0, 2, 4}) + expected = {0: {0}, 2: {2}, 4: {4}, "unreachable": {1, 3}} + assert expected == cells + + def test_undirected_unweighted(self): + G = nx.cycle_graph(6) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 5}, 3: {2, 3, 4}} + assert expected == cells + + def test_directed_unweighted(self): + # This is the singly-linked directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 1, 2}, 3: {3, 4, 5}} + assert expected == cells + + def test_directed_inward(self): + """Tests that reversing the graph gives the "inward" Voronoi + partition. + + """ + # This is the singly-linked reverse directed cycle graph on six nodes. + G = nx.DiGraph(pairwise(range(6), cyclic=True)) + G = G.reverse(copy=False) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0, 4, 5}, 3: {1, 2, 3}} + assert expected == cells + + def test_undirected_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1)] + G = nx.Graph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_directed_weighted(self): + edges = [(0, 1, 10), (1, 2, 1), (2, 3, 1), (3, 2, 1), (2, 1, 1)] + G = nx.DiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multigraph_unweighted(self): + """Tests that the Voronoi cells for a multigraph are the same as + for a simple graph. + + """ + edges = [(0, 1), (1, 2), (2, 3)] + G = nx.MultiGraph(2 * edges) + H = nx.Graph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multidigraph_unweighted(self): + # This is the twice-singly-linked directed cycle graph on six nodes. 
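+ # Duplicating every edge changes no shortest-path distance, so the cells + # must match those of the corresponding simple DiGraph.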
+ edges = list(pairwise(range(6), cyclic=True)) + G = nx.MultiDiGraph(2 * edges) + H = nx.DiGraph(G) + G_cells = nx.voronoi_cells(G, {0, 3}) + H_cells = nx.voronoi_cells(H, {0, 3}) + assert G_cells == H_cells + + def test_multigraph_weighted(self): + edges = [(0, 1, 10), (0, 1, 10), (1, 2, 1), (1, 2, 100), (2, 3, 1), (2, 3, 100)] + G = nx.MultiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells + + def test_multidigraph_weighted(self): + edges = [ + (0, 1, 10), + (0, 1, 10), + (1, 2, 1), + (2, 3, 1), + (3, 2, 10), + (3, 2, 1), + (2, 1, 10), + (2, 1, 1), + ] + G = nx.MultiDiGraph() + G.add_weighted_edges_from(edges) + cells = nx.voronoi_cells(G, {0, 3}) + expected = {0: {0}, 3: {1, 2, 3}} + assert expected == cells diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_walks.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_walks.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6b323932988e1b9513118162df62e9613ee65b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_walks.py @@ -0,0 +1,54 @@ +"""Unit tests for the :mod:`networkx.algorithms.walks` module.""" + +import pytest + +import networkx as nx + +pytest.importorskip("numpy") +pytest.importorskip("scipy") + + +def test_directed(): + G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 1, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 0}, 2: {0: 0, 1: 0, 2: 1}} + assert num_walks == expected + + +def test_undirected(): + G = nx.cycle_graph(3) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 2, 1: 3, 2: 3}, 1: {0: 3, 1: 2, 2: 3}, 2: {0: 3, 1: 3, 2: 2}} + assert num_walks == expected + + +def test_non_integer_nodes(): + G = nx.DiGraph([("A", "B"), ("B", "C"), ("C", "A")]) + num_walks = nx.number_of_walks(G, 2) + expected = { + "A": {"A": 0, "B": 0, "C": 1}, + "B": {"A": 1, "B": 0, "C": 0}, + "C": {"A": 0, "B": 1, "C": 0}, + } + assert num_walks == expected + + +def test_zero_length(): + G = nx.cycle_graph(3) + num_walks = nx.number_of_walks(G, 0) + expected = {0: {0: 1, 1: 0, 2: 0}, 1: {0: 0, 1: 1, 2: 0}, 2: {0: 0, 1: 0, 2: 1}} + assert num_walks == expected + + +def test_negative_length_exception(): + G = nx.cycle_graph(3) + with pytest.raises(ValueError): + nx.number_of_walks(G, -1) + + +def test_hidden_weight_attr(): + G = nx.cycle_graph(3) + G.add_edge(1, 2, weight=5) + num_walks = nx.number_of_walks(G, 3) + expected = {0: {0: 2, 1: 3, 2: 3}, 1: {0: 3, 1: 2, 2: 3}, 2: {0: 3, 1: 3, 2: 2}} + assert num_walks == expected diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tests/test_wiener.py b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_wiener.py new file mode 100644 index 0000000000000000000000000000000000000000..1cb404064fe3bedbf1695f5bc4a773a7d146da90 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tests/test_wiener.py @@ -0,0 +1,66 @@ +"""Unit tests for the :mod:`networkx.algorithms.wiener` module.""" + + +from networkx import DiGraph, complete_graph, empty_graph, path_graph, wiener_index + + +class TestWienerIndex: + """Unit tests for computing the Wiener index of a graph.""" + + def test_disconnected_graph(self): + """Tests that the Wiener index of a disconnected graph is + positive infinity. + + """ + assert wiener_index(empty_graph(2)) == float("inf") + + def test_directed(self): + """Tests that each pair of nodes in the directed graph is + counted once when computing the Wiener index. 
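+ (Directing the complete graph turns each unordered pair into two + ordered pairs at distance one, which doubles the index.)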
+ + """ + G = complete_graph(3) + H = DiGraph(G) + assert (2 * wiener_index(G)) == wiener_index(H) + + def test_complete_graph(self): + """Tests that the Wiener index of the complete graph is simply + the number of edges. + + """ + n = 10 + G = complete_graph(n) + assert wiener_index(G) == (n * (n - 1) / 2) + + def test_path_graph(self): + """Tests that the Wiener index of the path graph is correctly + computed. + + """ + # In P_n, there are n - 1 pairs of vertices at distance one, n - + # 2 pairs at distance two, n - 3 at distance three, ..., 1 at + # distance n - 1, so the Wiener index should be + # + # 1 * (n - 1) + 2 * (n - 2) + ... + (n - 2) * 2 + (n - 1) * 1 + # + # For example, in P_5, + # + # 1 * 4 + 2 * 3 + 3 * 2 + 4 * 1 = 2 (1 * 4 + 2 * 3) + # + # and in P_6, + # + # 1 * 5 + 2 * 4 + 3 * 3 + 4 * 2 + 5 * 1 = 2 (1 * 5 + 2 * 4) + 3 * 3 + # + # assuming n is *odd*, this gives the formula + # + # 2 \sum_{i = 1}^{(n - 1) / 2} [i * (n - i)] + # + # assuming n is *even*, this gives the formula + # + # 2 \sum_{i = 1}^{n / 2} [i * (n - i)] - (n / 2) ** 2 + # + n = 9 + G = path_graph(n) + expected = 2 * sum(i * (n - i) for i in range(1, (n // 2) + 1)) + actual = wiener_index(G) + assert expected == actual diff --git a/MLPY/Lib/site-packages/networkx/algorithms/threshold.py b/MLPY/Lib/site-packages/networkx/algorithms/threshold.py new file mode 100644 index 0000000000000000000000000000000000000000..0839321de0d9037f64e1f654c00ecd1e5ccfb470 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/threshold.py @@ -0,0 +1,979 @@ +""" +Threshold Graphs - Creation, manipulation and identification. +""" +from math import sqrt + +import networkx as nx +from networkx.utils import py_random_state + +__all__ = ["is_threshold_graph", "find_threshold_graph"] + + +@nx._dispatch +def is_threshold_graph(G): + """ + Returns `True` if `G` is a threshold graph. + + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, `DiGraph`, `MultiGraph` or `MultiDiGraph` + + Returns + ------- + bool + `True` if `G` is a threshold graph, `False` otherwise. + + Examples + -------- + >>> from networkx.algorithms.threshold import is_threshold_graph + >>> G = nx.path_graph(3) + >>> is_threshold_graph(G) + True + >>> G = nx.barbell_graph(3, 3) + >>> is_threshold_graph(G) + False + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return is_threshold_sequence([d for n, d in G.degree()]) + + +def is_threshold_sequence(degree_sequence): + """ + Returns True if the sequence is a threshold degree sequence. + + Uses the property that a threshold graph must be constructed by + adding either dominating or isolated nodes. Thus, it can be + deconstructed iteratively by removing a node of degree zero or a + node that connects to the remaining nodes. If this deconstruction + fails then the sequence is not a threshold sequence. + """ + ds = degree_sequence[:] # get a copy so we don't destroy original + ds.sort() + while ds: + if ds[0] == 0: # if isolated node + ds.pop(0) # remove it + continue + if ds[-1] != len(ds) - 1: # is the largest degree node dominating? + return False # no, not a threshold degree sequence + ds.pop() # yes, largest is the dominating node + ds = [d - 1 for d in ds] # remove it and decrement all degrees + return True + + +def creation_sequence(degree_sequence, with_labels=False, compact=False): + """ + Determines the creation sequence for the given threshold degree sequence. 
+ + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Each dominating vertex is connected to all vertices present when it + is added. The first node added is by convention 'd'. + This list can be converted to a string if desired using "".join(cs). + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. + + Returns None if the sequence is not a threshold sequence. + """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(degree_sequence, dict): # labeled degree sequence + ds = [[degree, label] for (label, degree) in degree_sequence.items()] + else: + ds = [[d, i] for i, d in enumerate(degree_sequence)] + ds.sort() + cs = [] # creation sequence + while ds: + if ds[0][0] == 0: # isolated node + (d, v) = ds.pop(0) + if len(ds) > 0: # make sure we start with a d + cs.insert(0, (v, "i")) + else: + cs.insert(0, (v, "d")) + continue + if ds[-1][0] != len(ds) - 1: # Not dominating node + return None # not a threshold degree sequence + (d, v) = ds.pop() + cs.insert(0, (v, "d")) + ds = [[d[0] - 1, d[1]] for d in ds] # decrement due to removing node + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + + def make_compact(creation_sequence): + """ + Returns the creation sequence in a compact form + that is the number of 'i's and 'd's alternating. + + Examples + -------- + >>> from networkx.algorithms.threshold import make_compact + >>> make_compact(["d", "i", "i", "d", "d", "i", "i", "i"]) + [1, 2, 2, 3] + >>> make_compact(["d", "d", "d", "i", "d", "d"]) + [3, 1, 2] + + Notice that the first number is the first vertex + to be used for construction and so is always 'd'. + + Labeled creation sequences lose their labels in the + compact representation. + + >>> make_compact([3, 1, 2]) + [3, 1, 2] + """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + cs = creation_sequence[:] + elif isinstance(first, tuple): # labeled creation sequence + cs = [s[1] for s in creation_sequence] + elif isinstance(first, int): # compact creation sequence + return creation_sequence + else: + raise TypeError("Not a valid creation sequence type") + + ccs = [] + count = 1 # count the run lengths of d's or i's. + for i in range(1, len(cs)): + if cs[i] == cs[i - 1]: + count += 1 + else: + ccs.append(count) + count = 1 + ccs.append(count) # don't forget the last one + return ccs + + + def uncompact(creation_sequence): + """ + Converts a compact creation sequence for a threshold + graph to a standard creation sequence (unlabeled). + If the creation_sequence is already standard, return it. + See creation_sequence.
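+ + For example, a small doctest-style sketch of the run-length decoding + described above: + + >>> from networkx.algorithms.threshold import uncompact + >>> uncompact([3, 1, 2]) + ['d', 'd', 'd', 'i', 'd', 'd']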
+ """ + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + return creation_sequence + elif isinstance(first, tuple): # labeled creation sequence + return creation_sequence + elif isinstance(first, int): # compact creation sequence + ccscopy = creation_sequence[:] + else: + raise TypeError("Not a valid creation sequence type") + cs = [] + while ccscopy: + cs.extend(ccscopy.pop(0) * ["d"]) + if ccscopy: + cs.extend(ccscopy.pop(0) * ["i"]) + return cs + + +def creation_sequence_to_weights(creation_sequence): + """ + Returns a list of node weights which create the threshold + graph designated by the creation sequence. The weights + are scaled so that the threshold is 1.0. The order of the + nodes is the same as that in the creation sequence. + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + if isinstance(creation_sequence, list): + wseq = creation_sequence[:] + else: + wseq = list(creation_sequence) # string like 'ddidid' + elif isinstance(first, tuple): # labeled creation sequence + wseq = [v[1] for v in creation_sequence] + elif isinstance(first, int): # compact creation sequence + wseq = uncompact(creation_sequence) + else: + raise TypeError("Not a valid creation sequence type") + # pass through twice--first backwards + wseq.reverse() + w = 0 + prev = "i" + for j, s in enumerate(wseq): + if s == "i": + wseq[j] = w + prev = s + elif prev == "i": + prev = s + w += 1 + wseq.reverse() # now pass through forwards + for j, s in enumerate(wseq): + if s == "d": + wseq[j] = w + prev = s + elif prev == "d": + prev = s + w += 1 + # Now scale weights + if prev == "d": + w += 1 + wscale = 1 / w + return [ww * wscale for ww in wseq] + # return wseq + + +def weights_to_creation_sequence( + weights, threshold=1, with_labels=False, compact=False +): + """ + Returns a creation sequence for a threshold graph + determined by the weights and threshold given as input. + If the sum of two node weights is greater than the + threshold value, an edge is created between these nodes. + + The creation sequence is a list of single characters 'd' + or 'i': 'd' for dominating or 'i' for isolated vertices. + Dominating vertices are connected to all vertices present + when it is added. The first node added is by convention 'd'. + + If with_labels==True: + Returns a list of 2-tuples containing the vertex number + and a character 'd' or 'i' which describes the type of vertex. + + If compact==True: + Returns the creation sequence in a compact form that is the number + of 'i's and 'd's alternating. + Examples: + [1,2,2,3] represents d,i,i,d,d,i,i,i + [3,1,2] represents d,d,d,i,d,d + + Notice that the first number is the first vertex to be used for + construction and so is always 'd'. + + with_labels and compact cannot both be True. 
+ """ + if with_labels and compact: + raise ValueError("compact sequences cannot be labeled") + + # make an indexed copy + if isinstance(weights, dict): # labeled weights + wseq = [[w, label] for (label, w) in weights.items()] + else: + wseq = [[w, i] for i, w in enumerate(weights)] + wseq.sort() + cs = [] # creation sequence + cutoff = threshold - wseq[-1][0] + while wseq: + if wseq[0][0] < cutoff: # isolated node + (w, label) = wseq.pop(0) + cs.append((label, "i")) + else: + (w, label) = wseq.pop() + cs.append((label, "d")) + cutoff = threshold - wseq[-1][0] + if len(wseq) == 1: # make sure we start with a d + (w, label) = wseq.pop() + cs.append((label, "d")) + # put in correct order + cs.reverse() + + if with_labels: + return cs + if compact: + return make_compact(cs) + return [v[1] for v in cs] # not labeled + + +# Manipulating NetworkX.Graphs in context of threshold graphs +@nx._dispatch(graphs=None) +def threshold_graph(creation_sequence, create_using=None): + """ + Create a threshold graph from the creation sequence or compact + creation_sequence. + + The input sequence can be a + + creation sequence (e.g. ['d','i','d','d','d','i']) + labeled creation sequence (e.g. [(0,'d'),(2,'d'),(1,'i')]) + compact creation sequence (e.g. [2,1,1,2,0]) + + Use cs=creation_sequence(degree_sequence,labeled=True) + to convert a degree sequence to a creation sequence. + + Returns None if the sequence is not valid + """ + # Turn input sequence into a labeled creation sequence + first = creation_sequence[0] + if isinstance(first, str): # creation sequence + ci = list(enumerate(creation_sequence)) + elif isinstance(first, tuple): # labeled creation sequence + ci = creation_sequence[:] + elif isinstance(first, int): # compact creation sequence + cs = uncompact(creation_sequence) + ci = list(enumerate(cs)) + else: + print("not a valid creation sequence type") + return None + + G = nx.empty_graph(0, create_using) + if G.is_directed(): + raise nx.NetworkXError("Directed Graph not supported") + + G.name = "Threshold Graph" + + # add nodes and edges + # if type is 'i' just add nodea + # if type is a d connect to everything previous + while ci: + (v, node_type) = ci.pop(0) + if node_type == "d": # dominating type, connect to all existing nodes + # We use `for u in list(G):` instead of + # `for u in G:` because we edit the graph `G` in + # the loop. Hence using an iterator will result in + # `RuntimeError: dictionary changed size during iteration` + for u in list(G): + G.add_edge(v, u) + G.add_node(v) + return G + + +@nx._dispatch +def find_alternating_4_cycle(G): + """ + Returns False if there aren't any alternating 4 cycles. + Otherwise returns the cycle as [a,b,c,d] where (a,b) + and (c,d) are edges and (a,c) and (b,d) are not. + """ + for u, v in G.edges(): + for w in G.nodes(): + if not G.has_edge(u, w) and u != w: + for x in G.neighbors(w): + if not G.has_edge(v, x) and v != x: + return [u, v, w, x] + return False + + +@nx._dispatch +def find_threshold_graph(G, create_using=None): + """ + Returns a threshold subgraph that is close to largest in `G`. + + The threshold graph will contain the largest degree node in G. + + Parameters + ---------- + G : NetworkX graph instance + An instance of `Graph`, or `MultiDiGraph` + create_using : NetworkX graph class or `None` (default), optional + Type of graph to use when constructing the threshold graph. + If `None`, infer the appropriate graph type from the input. 
+ + Returns + ------- + graph : + A graph instance representing the threshold graph + + Examples + -------- + >>> from networkx.algorithms.threshold import find_threshold_graph + >>> G = nx.barbell_graph(3, 3) + >>> T = find_threshold_graph(G) + >>> T.nodes # may vary + NodeView((7, 8, 5, 6)) + + References + ---------- + .. [1] Threshold graphs: https://en.wikipedia.org/wiki/Threshold_graph + """ + return threshold_graph(find_creation_sequence(G), create_using) + + +@nx._dispatch +def find_creation_sequence(G): + """ + Find a threshold subgraph that is close to largest in G. + Returns the labeled creation sequence of that threshold graph. + """ + cs = [] + # get a local pointer to the working part of the graph + H = G + while H.order() > 0: + # get new degree sequence on subgraph + dsdict = dict(H.degree()) + ds = [(d, v) for v, d in dsdict.items()] + ds.sort() + # Update threshold graph nodes + if ds[-1][0] == 0: # all are isolated + cs.extend(zip(dsdict, ["i"] * (len(ds) - 1) + ["d"])) + break # Done! + # pull off isolated nodes + while ds[0][0] == 0: + (d, iso) = ds.pop(0) + cs.append((iso, "i")) + # find new biggest node + (d, bigv) = ds.pop() + # add edges of star to t_g + cs.append((bigv, "d")) + # form subgraph of neighbors of big node + H = H.subgraph(H.neighbors(bigv)) + cs.reverse() + return cs + + +# Properties of Threshold Graphs +def triangles(creation_sequence): + """ + Compute number of triangles in the threshold graph with the + given creation sequence. + """ + # shortcut algorithm that doesn't require computing number + # of triangles at each node. + cs = creation_sequence # alias + dr = cs.count("d") # number of d's in sequence + ntri = dr * (dr - 1) * (dr - 2) / 6 # number of triangles in clique of nd d's + # now add dr choose 2 triangles for every 'i' in sequence where + # dr is the number of d's to the right of the current i + for i, typ in enumerate(cs): + if typ == "i": + ntri += dr * (dr - 1) / 2 + else: + dr -= 1 + return ntri + + +def triangle_sequence(creation_sequence): + """ + Return triangle sequence for the given threshold graph creation sequence. + + """ + cs = creation_sequence + seq = [] + dr = cs.count("d") # number of d's to the right of the current pos + dcur = (dr - 1) * (dr - 2) // 2 # number of triangles through a node of clique dr + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + for i, sym in enumerate(cs): + if sym == "d": + drun += 1 + tri = dcur + (dr - 1) * irun # new triangles at this d + else: # cs[i]="i": + if prevsym == "d": # new string of i's + dcur += (dr - 1) * irun # accumulate shared shortest paths + irun = 0 # reset i run counter + dr -= drun # reduce number of d's to right + drun = 0 # reset d run counter + irun += 1 + tri = dr * (dr - 1) // 2 # new triangles at this i + seq.append(tri) + prevsym = sym + return seq + + +def cluster_sequence(creation_sequence): + """ + Return cluster sequence for the given threshold graph creation sequence. 
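+
+    Examples
+    --------
+    Hand-checked illustration: the sequence ['d', 'd', 'd', 'i'] builds a
+    triangle plus one isolated node, so the three clique nodes each have
+    clustering coefficient 1 and the isolated node gets 0:
+
+    >>> from networkx.algorithms.threshold import cluster_sequence
+    >>> cluster_sequence(["d", "d", "d", "i"])
+    [1.0, 1.0, 1.0, 0]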
+ """ + triseq = triangle_sequence(creation_sequence) + degseq = degree_sequence(creation_sequence) + cseq = [] + for i, deg in enumerate(degseq): + tri = triseq[i] + if deg <= 1: # isolated vertex or single pair gets cc 0 + cseq.append(0) + continue + max_size = (deg * (deg - 1)) // 2 + cseq.append(tri / max_size) + return cseq + + +def degree_sequence(creation_sequence): + """ + Return degree sequence for the threshold graph with the given + creation sequence + """ + cs = creation_sequence # alias + seq = [] + rd = cs.count("d") # number of d to the right + for i, sym in enumerate(cs): + if sym == "d": + rd -= 1 + seq.append(rd + i) + else: + seq.append(rd) + return seq + + +def density(creation_sequence): + """ + Return the density of the graph with this creation_sequence. + The density is the fraction of possible edges present. + """ + N = len(creation_sequence) + two_size = sum(degree_sequence(creation_sequence)) + two_possible = N * (N - 1) + den = two_size / two_possible + return den + + +def degree_correlation(creation_sequence): + """ + Return the degree-degree correlation over all edges. + """ + cs = creation_sequence + s1 = 0 # deg_i*deg_j + s2 = 0 # deg_i^2+deg_j^2 + s3 = 0 # deg_i+deg_j + m = 0 # number of edges + rd = cs.count("d") # number of d nodes to the right + rdi = [i for i, sym in enumerate(cs) if sym == "d"] # index of "d"s + ds = degree_sequence(cs) + for i, sym in enumerate(cs): + if sym == "d": + if i != rdi[0]: + print("Logic error in degree_correlation", i, rdi) + raise ValueError + rdi.pop(0) + degi = ds[i] + for dj in rdi: + degj = ds[dj] + s1 += degj * degi + s2 += degi**2 + degj**2 + s3 += degi + degj + m += 1 + denom = 2 * m * s2 - s3 * s3 + numer = 4 * m * s1 - s3 * s3 + if denom == 0: + if numer == 0: + return 1 + raise ValueError(f"Zero Denominator but Numerator is {numer}") + return numer / denom + + +def shortest_path(creation_sequence, u, v): + """ + Find the shortest path between u and v in a + threshold graph G with the given creation_sequence. + + For an unlabeled creation_sequence, the vertices + u and v must be integers in (0,len(sequence)) referring + to the position of the desired vertices in the sequence. + + For a labeled creation_sequence, u and v are labels of vertices. + + Use cs=creation_sequence(degree_sequence,with_labels=True) + to convert a degree sequence to a creation sequence. + + Returns a list of vertices from u to v. 
+    Example: if they are neighbors, it returns [u,v]
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        cs = [(i, creation_sequence[i]) for i in range(len(creation_sequence))]
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = creation_sequence[:]
+    elif isinstance(first, int):  # compact creation sequence
+        ci = uncompact(creation_sequence)
+        cs = [(i, ci[i]) for i in range(len(ci))]
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    verts = [s[0] for s in cs]
+    if v not in verts:
+        raise ValueError(f"Vertex {v} not in graph from creation_sequence")
+    if u not in verts:
+        raise ValueError(f"Vertex {u} not in graph from creation_sequence")
+    # Done checking
+    if u == v:
+        return [u]
+
+    uindex = verts.index(u)
+    vindex = verts.index(v)
+    bigind = max(uindex, vindex)
+    if cs[bigind][1] == "d":
+        return [u, v]
+    # must be that cs[bigind][1]=='i'
+    cs = cs[bigind:]
+    while cs:
+        vert = cs.pop()
+        if vert[1] == "d":
+            return [u, vert[0], v]
+    # All after u are type 'i' so no connection
+    return -1
+
+
+def shortest_path_length(creation_sequence, i):
+    """
+    Return the shortest path length from indicated node to
+    every other node for the threshold graph with the given
+    creation sequence.
+    Node is indicated by index i in creation_sequence unless
+    creation_sequence is labeled in which case, i is taken to
+    be the label of the node.
+
+    Path lengths in threshold graphs are at most 2.
+    Length to unreachable nodes is set to -1.
+    """
+    # Turn input sequence into a labeled creation sequence
+    first = creation_sequence[0]
+    if isinstance(first, str):  # creation sequence
+        if isinstance(creation_sequence, list):
+            cs = creation_sequence[:]
+        else:
+            cs = list(creation_sequence)
+    elif isinstance(first, tuple):  # labeled creation sequence
+        cs = [v[1] for v in creation_sequence]
+        i = [v[0] for v in creation_sequence].index(i)
+    elif isinstance(first, int):  # compact creation sequence
+        cs = uncompact(creation_sequence)
+    else:
+        raise TypeError("Not a valid creation sequence type")
+
+    # Compute
+    N = len(cs)
+    spl = [2] * N  # length 2 to every node
+    spl[i] = 0  # except self which is 0
+    # 1 for all d's to the right
+    for j in range(i + 1, N):
+        if cs[j] == "d":
+            spl[j] = 1
+    if cs[i] == "d":  # 1 for all nodes to the left
+        for j in range(i):
+            spl[j] = 1
+    # and -1 for any trailing i to indicate unreachable
+    for j in range(N - 1, 0, -1):
+        if cs[j] == "d":
+            break
+        spl[j] = -1
+    return spl
+
+
+def betweenness_sequence(creation_sequence, normalized=True):
+    """
+    Return betweenness for the threshold graph with the given creation
+    sequence. If `normalized` is True (the default), the values are
+    scaled by 1/((n-1)*(n-2)) so that they lie in the interval [0,1];
+    otherwise the raw (unscaled) betweenness values are returned.
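+
+    Examples
+    --------
+    Hand-checked example: ['d', 'i', 'd'] builds a path on three nodes
+    whose last-added node is the center, so it carries all shortest
+    paths between the other two:
+
+    >>> from networkx.algorithms.threshold import betweenness_sequence
+    >>> betweenness_sequence(["d", "i", "d"])
+    [0.0, 0.0, 1.0]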
+ """ + cs = creation_sequence + seq = [] # betweenness + lastchar = "d" # first node is always a 'd' + dr = float(cs.count("d")) # number of d's to the right of current pos + irun = 0 # number of i's in the last run + drun = 0 # number of d's in the last run + dlast = 0.0 # betweenness of last d + for i, c in enumerate(cs): + if c == "d": # cs[i]=="d": + # betweenness = amt shared with earlier d's and i's + # + new isolated nodes covered + # + new paths to all previous nodes + b = dlast + (irun - 1) * irun / dr + 2 * irun * (i - drun - irun) / dr + drun += 1 # update counter + else: # cs[i]="i": + if lastchar == "d": # if this is a new run of i's + dlast = b # accumulate betweenness + dr -= drun # update number of d's to the right + drun = 0 # reset d counter + irun = 0 # reset i counter + b = 0 # isolated nodes have zero betweenness + irun += 1 # add another i to the run + seq.append(float(b)) + lastchar = c + + # normalize by the number of possible shortest paths + if normalized: + order = len(cs) + scale = 1.0 / ((order - 1) * (order - 2)) + seq = [s * scale for s in seq] + + return seq + + +def eigenvectors(creation_sequence): + """ + Return a 2-tuple of Laplacian eigenvalues and eigenvectors + for the threshold network with creation_sequence. + The first value is a list of eigenvalues. + The second value is a list of eigenvectors. + The lists are in the same order so corresponding eigenvectors + and eigenvalues are in the same position in the two lists. + + Notice that the order of the eigenvalues returned by eigenvalues(cs) + may not correspond to the order of these eigenvectors. + """ + ccs = make_compact(creation_sequence) + N = sum(ccs) + vec = [0] * N + val = vec[:] + # get number of type d nodes to the right (all for first node) + dr = sum(ccs[::2]) + + nn = ccs[0] + vec[0] = [1.0 / sqrt(N)] * N + val[0] = 0 + e = dr + dr -= nn + type_d = True + i = 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(dd * dd + i) + vec[i] = i * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + if len(ccs) == 1: + return (val, vec) + for nn in ccs[1:]: + scale = 1.0 / sqrt(nn * i * (i + nn)) + vec[i] = i * [-nn * scale] + nn * [i * scale] + [0] * (N - i - nn) + # find eigenvalue + type_d = not type_d + if type_d: + e = i + dr + dr -= nn + else: + e = dr + val[i] = e + st = i + i += 1 + dd = 1 + while dd < nn: + scale = 1.0 / sqrt(i - st + dd * dd) + vec[i] = [0] * st + (i - st) * [-scale] + [dd * scale] + [0] * (N - i - 1) + val[i] = e + i += 1 + dd += 1 + return (val, vec) + + +def spectral_projection(u, eigenpairs): + """ + Returns the coefficients of each eigenvector + in a projection of the vector u onto the normalized + eigenvectors which are contained in eigenpairs. + + eigenpairs should be a list of two objects. The + first is a list of eigenvalues and the second a list + of eigenvectors. The eigenvectors should be lists. + + There's not a lot of error checking on lengths of + arrays, etc. so be careful. + """ + coeff = [] + evect = eigenpairs[1] + for ev in evect: + c = sum(evv * uv for (evv, uv) in zip(ev, u)) + coeff.append(c) + return coeff + + +def eigenvalues(creation_sequence): + """ + Return sequence of eigenvalues of the Laplacian of the threshold + graph for the given creation_sequence. + + Based on the Ferrer's diagram method. The spectrum is integral + and is the conjugate of the degree sequence. 
+
+    See::
+
+      @Article{degree-merris-1994,
+       author = {Russell Merris},
+       title = {Degree maximal graphs are Laplacian integral},
+       journal = {Linear Algebra Appl.},
+       year = {1994},
+       volume = {199},
+       pages = {381--389},
+      }
+
+    """
+    degseq = degree_sequence(creation_sequence)
+    degseq.sort()
+    eiglist = []  # zero is always one eigenvalue
+    eig = 0
+    row = len(degseq)
+    bigdeg = degseq.pop()
+    while row:
+        if bigdeg < row:
+            eiglist.append(eig)
+            row -= 1
+        else:
+            eig += 1
+            if degseq:
+                bigdeg = degseq.pop()
+            else:
+                bigdeg = 0
+    return eiglist
+
+
+# Threshold graph creation routines
+
+
+@py_random_state(2)
+def random_threshold_sequence(n, p, seed=None):
+    """
+    Create a random threshold sequence of size n.
+    A creation sequence is built by randomly choosing d's with
+    probability p and i's with probability 1-p.
+
+    s=nx.random_threshold_sequence(10,0.5)
+
+    returns a threshold sequence of length 10 with equal
+    probability of an i or a d at each position.
+
+    A "random" threshold graph can be built with
+
+    G=nx.threshold_graph(s)
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+    """
+    if not (0 <= p <= 1):
+        raise ValueError("p must be in [0,1]")
+
+    cs = ["d"]  # threshold sequences always start with a d
+    for i in range(1, n):
+        if seed.random() < p:
+            cs.append("d")
+        else:
+            cs.append("i")
+    return cs
+
+
+# maybe *_d_threshold_sequence routines should
+# be (or be called from) a single routine with a more descriptive name
+# and a keyword parameter?
+def right_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    FIXME: describe algorithm
+
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # connected case m > n - 1
+    ind = n - 1
+    sum = n - 1
+    while sum < m:
+        cs[ind] = "d"
+        ind -= 1
+        sum += ind
+    ind = m - (sum - ind)
+    cs[ind] = "d"
+    return cs
+
+
+def left_d_threshold_sequence(n, m):
+    """
+    Create a skewed threshold graph with a given number
+    of vertices (n) and a given number of edges (m).
+
+    The routine returns an unlabeled creation sequence
+    for the threshold graph.
+
+    FIXME: describe algorithm
+
+    """
+    cs = ["d"] + ["i"] * (n - 1)  # create sequence with n isolated nodes
+
+    #  m < n : not enough edges, make disconnected
+    if m < n:
+        cs[m] = "d"
+        return cs
+
+    # too many edges
+    if m > n * (n - 1) / 2:
+        raise ValueError("Too many edges for this many nodes.")
+
+    # Connected case when m > n - 1
+    cs[n - 1] = "d"
+    sum = n - 1
+    ind = 1
+    while sum < m:
+        cs[ind] = "d"
+        sum += ind
+        ind += 1
+    if sum > m:  # be sure not to change the first vertex
+        cs[sum - m] = "i"
+    return cs
+
+
+@py_random_state(3)
+def swap_d(cs, p_split=1.0, p_combine=1.0, seed=None):
+    """
+    Perform a "swap" operation on a threshold sequence.
+
+    The swap preserves the number of nodes and edges
+    in the graph for the given sequence.
+    The resulting sequence is still a threshold sequence.
+
+    Perform one split and one combine operation on the
+    'd's of a creation sequence for a threshold graph.
+    This operation maintains the number of nodes and edges
+    in the graph, but shifts the edges from node to node
+    maintaining the threshold quality of the graph.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
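+
+    Returns the modified creation sequence. Note that `cs` is modified
+    in place, so pass a copy if the original sequence must be kept.
+
+    Illustrative usage (an integer `seed` gives a reproducible swap)::
+
+        cs = list("diiddi")
+        cs = swap_d(cs, p_split=1.0, p_combine=1.0, seed=42)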
+ """ + # preprocess the creation sequence + dlist = [i for (i, node_type) in enumerate(cs[1:-1]) if node_type == "d"] + # split + if seed.random() < p_split: + choice = seed.choice(dlist) + split_to = seed.choice(range(choice)) + flip_side = choice - split_to + if split_to != flip_side and cs[split_to] == "i" and cs[flip_side] == "i": + cs[choice] = "i" + cs[split_to] = "d" + cs[flip_side] = "d" + dlist.remove(choice) + # don't add or combine may reverse this action + # dlist.extend([split_to,flip_side]) + # print >>sys.stderr,"split at %s to %s and %s"%(choice,split_to,flip_side) + # combine + if seed.random() < p_combine and dlist: + first_choice = seed.choice(dlist) + second_choice = seed.choice(dlist) + target = first_choice + second_choice + if target >= len(cs) or cs[target] == "d" or first_choice == second_choice: + return cs + # OK to combine + cs[first_choice] = "i" + cs[second_choice] = "i" + cs[target] = "d" + # print >>sys.stderr,"combine %s and %s to make %s."%(first_choice,second_choice,target) + + return cs diff --git a/MLPY/Lib/site-packages/networkx/algorithms/time_dependent.py b/MLPY/Lib/site-packages/networkx/algorithms/time_dependent.py new file mode 100644 index 0000000000000000000000000000000000000000..e83f42ad92cb8eeba13041f83e791dc166ee473c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/time_dependent.py @@ -0,0 +1,142 @@ +"""Time dependent algorithms.""" + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = ["cd_index"] + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(node_attrs={"time": None, "weight": 1}) +def cd_index(G, node, time_delta, *, time="time", weight=None): + r"""Compute the CD index for `node` within the graph `G`. + + Calculates the CD index for the given node of the graph, + considering only its predecessors who have the `time` attribute + smaller than or equal to the `time` attribute of the `node` + plus `time_delta`. + + Parameters + ---------- + G : graph + A directed networkx graph whose nodes have `time` attributes and optionally + `weight` attributes (if a weight is not given, it is considered 1). + node : node + The node for which the CD index is calculated. + time_delta : numeric or timedelta + Amount of time after the `time` attribute of the `node`. The value of + `time_delta` must support comparison with the `time` node attribute. For + example, if the `time` attribute of the nodes are `datetime.datetime` + objects, then `time_delta` should be a `datetime.timedelta` object. + time : string (Optional, default is "time") + The name of the node attribute that will be used for the calculations. + weight : string (Optional, default is None) + The name of the node attribute used as weight. + + Returns + ------- + float + The CD index calculated for the node `node` within the graph `G`. + + Raises + ------ + NetworkXError + If not all nodes have a `time` attribute or + `time_delta` and `time` attribute types are not compatible or + `n` equals 0. + + NetworkXNotImplemented + If `G` is a non-directed graph or a multigraph. + + Examples + -------- + >>> from datetime import datetime, timedelta + >>> G = nx.DiGraph() + >>> nodes = { + ... 1: {"time": datetime(2015, 1, 1)}, + ... 2: {"time": datetime(2012, 1, 1), 'weight': 4}, + ... 3: {"time": datetime(2010, 1, 1)}, + ... 4: {"time": datetime(2008, 1, 1)}, + ... 5: {"time": datetime(2014, 1, 1)} + ... 
} + >>> G.add_nodes_from([(n, nodes[n]) for n in nodes]) + >>> edges = [(1, 3), (1, 4), (2, 3), (3, 4), (3, 5)] + >>> G.add_edges_from(edges) + >>> delta = timedelta(days=5 * 365) + >>> nx.cd_index(G, 3, time_delta=delta, time="time") + 0.5 + >>> nx.cd_index(G, 3, time_delta=delta, time="time", weight="weight") + 0.12 + + Integers can also be used for the time values: + >>> node_times = {1: 2015, 2: 2012, 3: 2010, 4: 2008, 5: 2014} + >>> nx.set_node_attributes(G, node_times, "new_time") + >>> nx.cd_index(G, 3, time_delta=4, time="new_time") + 0.5 + >>> nx.cd_index(G, 3, time_delta=4, time="new_time", weight="weight") + 0.12 + + Notes + ----- + This method implements the algorithm for calculating the CD index, + as described in the paper by Funk and Owen-Smith [1]_. The CD index + is used in order to check how consolidating or destabilizing a patent + is, hence the nodes of the graph represent patents and the edges show + the citations between these patents. The mathematical model is given + below: + + .. math:: + CD_{t}=\frac{1}{n_{t}}\sum_{i=1}^{n}\frac{-2f_{it}b_{it}+f_{it}}{w_{it}}, + + where `f_{it}` equals 1 if `i` cites the focal patent else 0, `b_{it}` equals + 1 if `i` cites any of the focal patents successors else 0, `n_{t}` is the number + of forward citations in `i` and `w_{it}` is a matrix of weight for patent `i` + at time `t`. + + The `datetime.timedelta` package can lead to off-by-one issues when converting + from years to days. In the example above `timedelta(days=5 * 365)` looks like + 5 years, but it isn't because of leap year days. So it gives the same result + as `timedelta(days=4 * 365)`. But using `timedelta(days=5 * 365 + 1)` gives + a 5 year delta **for this choice of years** but may not if the 5 year gap has + more than 1 leap year. To avoid these issues, use integers to represent years, + or be very careful when you convert units of time. + + References + ---------- + .. [1] Funk, Russell J., and Jason Owen-Smith. + "A dynamic network measure of technological change." + Management science 63, no. 3 (2017): 791-817. + http://russellfunk.org/cdindex/static/papers/funk_ms_2017.pdf + + """ + if not all(time in G.nodes[n] for n in G): + raise nx.NetworkXError("Not all nodes have a 'time' attribute.") + + try: + # get target_date + target_date = G.nodes[node][time] + time_delta + # keep the predecessors that existed before the target date + pred = {i for i in G.pred[node] if G.nodes[i][time] <= target_date} + except: + raise nx.NetworkXError( + "Addition and comparison are not supported between 'time_delta' " + "and 'time' types." 
+ ) + + # -1 if any edge between node's predecessors and node's successors, else 1 + b = [-1 if any(j in G[i] for j in G[node]) else 1 for i in pred] + + # n is size of the union of the focal node's predecessors and its successors' predecessors + n = len(pred.union(*(G.pred[s].keys() - {node} for s in G[node]))) + if n == 0: + raise nx.NetworkXError("The cd index cannot be defined.") + + # calculate cd index + if weight is None: + return round(sum(bi for bi in b) / n, 2) + else: + # If a node has the specified weight attribute, its weight is used in the calculation + # otherwise, a weight of 1 is assumed for that node + weights = [G.nodes[i].get(weight, 1) for i in pred] + return round(sum(bi / wt for bi, wt in zip(b, weights)) / n, 2) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tournament.py b/MLPY/Lib/site-packages/networkx/algorithms/tournament.py new file mode 100644 index 0000000000000000000000000000000000000000..0b164cb3b16cb74a77a15225e5ea8b1d8e8fdad2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tournament.py @@ -0,0 +1,406 @@ +"""Functions concerning tournament graphs. + +A `tournament graph`_ is a complete oriented graph. In other words, it +is a directed graph in which there is exactly one directed edge joining +each pair of distinct nodes. For each function in this module that +accepts a graph as input, you must provide a tournament graph. The +responsibility is on the caller to ensure that the graph is a tournament +graph: + + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> nx.is_tournament(G) + True + +To access the functions in this module, you must access them through the +:mod:`networkx.tournament` module:: + + >>> nx.tournament.is_reachable(G, 0, 1) + True + +.. _tournament graph: https://en.wikipedia.org/wiki/Tournament_%28graph_theory%29 + +""" +from itertools import combinations + +import networkx as nx +from networkx.algorithms.simple_paths import is_simple_path as is_path +from networkx.utils import arbitrary_element, not_implemented_for, py_random_state + +__all__ = [ + "hamiltonian_path", + "is_reachable", + "is_strongly_connected", + "is_tournament", + "random_tournament", + "score_sequence", +] + + +def index_satisfying(iterable, condition): + """Returns the index of the first element in `iterable` that + satisfies the given condition. + + If no such element is found (that is, when the iterable is + exhausted), this returns the length of the iterable (that is, one + greater than the last index of the iterable). + + `iterable` must not be empty. If `iterable` is empty, this + function raises :exc:`ValueError`. + + """ + # Pre-condition: iterable must not be empty. + for i, x in enumerate(iterable): + if condition(x): + return i + # If we reach the end of the iterable without finding an element + # that satisfies the condition, return the length of the iterable, + # which is one greater than the index of its last element. If the + # iterable was empty, `i` will not be defined, so we raise an + # exception. + try: + return i + 1 + except NameError as err: + raise ValueError("iterable must be non-empty") from err + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch +def is_tournament(G): + """Returns True if and only if `G` is a tournament. + + A tournament is a directed graph, with neither self-loops nor + multi-edges, in which there is exactly one directed edge joining + each pair of distinct nodes. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. 
+ + Returns + ------- + bool + Whether the given graph is a tournament graph. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 0)]) + >>> nx.is_tournament(G) + True + + Notes + ----- + Some definitions require a self-loop on each node, but that is not + the convention used here. + + """ + # In a tournament, there is exactly one directed edge joining each pair. + return ( + all((v in G[u]) ^ (u in G[v]) for u, v in combinations(G, 2)) + and nx.number_of_selfloops(G) == 0 + ) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch +def hamiltonian_path(G): + """Returns a Hamiltonian path in the given tournament graph. + + Each tournament has a Hamiltonian path. If furthermore, the + tournament is strongly connected, then the returned Hamiltonian path + is a Hamiltonian cycle (by joining the endpoints of the path). + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + path : list + A list of nodes which form a Hamiltonian path in `G`. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]) + >>> nx.is_tournament(G) + True + >>> nx.tournament.hamiltonian_path(G) + [0, 1, 2, 3] + + Notes + ----- + This is a recursive implementation with an asymptotic running time + of $O(n^2)$, ignoring multiplicative polylogarithmic factors, where + $n$ is the number of nodes in the graph. + + """ + if len(G) == 0: + return [] + if len(G) == 1: + return [arbitrary_element(G)] + v = arbitrary_element(G) + hampath = hamiltonian_path(G.subgraph(set(G) - {v})) + # Get the index of the first node in the path that does *not* have + # an edge to `v`, then insert `v` before that node. + index = index_satisfying(hampath, lambda u: v not in G[u]) + hampath.insert(index, v) + return hampath + + +@py_random_state(1) +@nx._dispatch(graphs=None) +def random_tournament(n, seed=None): + r"""Returns a random tournament graph on `n` nodes. + + Parameters + ---------- + n : int + The number of nodes in the returned graph. + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + G : DiGraph + A tournament on `n` nodes, with exactly one directed edge joining + each pair of distinct nodes. + + Notes + ----- + This algorithm adds, for each pair of distinct nodes, an edge with + uniformly random orientation. In other words, `\binom{n}{2}` flips + of an unbiased coin decide the orientations of the edges in the + graph. + + """ + # Flip an unbiased coin for each pair of distinct nodes. + coins = (seed.random() for i in range((n * (n - 1)) // 2)) + pairs = combinations(range(n), 2) + edges = ((u, v) if r < 0.5 else (v, u) for (u, v), r in zip(pairs, coins)) + return nx.DiGraph(edges) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch +def score_sequence(G): + """Returns the score sequence for the given tournament graph. + + The score sequence is the sorted list of the out-degrees of the + nodes of the graph. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + list + A sorted list of the out-degrees of the nodes of `G`. 
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (0, 2), (0, 3), (2, 1), (3, 2)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.score_sequence(G)
+    [1, 1, 2, 2]
+
+    """
+    return sorted(d for v, d in G.out_degree())
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def tournament_matrix(G):
+    r"""Returns the tournament matrix for the given tournament graph.
+
+    This function requires SciPy.
+
+    The *tournament matrix* of a tournament graph with edge set *E* is
+    the matrix *T* defined by
+
+    .. math::
+
+       T_{i j} =
+       \begin{cases}
+       +1 & \text{if } (i, j) \in E \\
+       -1 & \text{if } (j, i) \in E \\
+       0 & \text{if } i == j.
+       \end{cases}
+
+    An equivalent definition is `T = A - A^T`, where *A* is the
+    adjacency matrix of the graph `G`.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    Returns
+    -------
+    SciPy sparse array
+        The tournament matrix of the tournament graph `G`.
+
+    Raises
+    ------
+    ImportError
+        If SciPy is not available.
+
+    """
+    A = nx.adjacency_matrix(G)
+    return A - A.T
+
+
+@not_implemented_for("undirected")
+@not_implemented_for("multigraph")
+@nx._dispatch
+def is_reachable(G, s, t):
+    """Decides whether there is a path from `s` to `t` in the
+    tournament.
+
+    This function is more theoretically efficient than the reachability
+    checks performed by the shortest path algorithms in
+    :mod:`networkx.algorithms.shortest_paths`.
+
+    The given graph **must** be a tournament, otherwise this function's
+    behavior is undefined.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A directed graph representing a tournament.
+
+    s : node
+        A node in the graph.
+
+    t : node
+        A node in the graph.
+
+    Returns
+    -------
+    bool
+        Whether there is a path from `s` to `t` in `G`.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)])
+    >>> nx.is_tournament(G)
+    True
+    >>> nx.tournament.is_reachable(G, 1, 3)
+    True
+    >>> nx.tournament.is_reachable(G, 3, 2)
+    False
+
+    Notes
+    -----
+    Although this function is more theoretically efficient than the
+    generic shortest path functions, a speedup requires the use of
+    parallelism. Though it may in the future, the current implementation
+    does not use parallelism, thus you may not see much of a speedup.
+
+    This algorithm comes from [1]_.
+
+    References
+    ----------
+    .. [1] Tantau, Till.
+           "A note on the complexity of the reachability problem for
+           tournaments."
+           *Electronic Colloquium on Computational Complexity*. 2001.
+
+    """
+
+    def two_neighborhood(G, v):
+        """Returns the set of nodes at distance at most two from `v`.
+
+        `G` must be a graph and `v` a node in that graph.
+
+        The returned set includes the nodes at distance zero (that is,
+        the node `v` itself), the nodes at distance one (that is, the
+        out-neighbors of `v`), and the nodes at distance two.
+
+        """
+        # TODO This is trivially parallelizable.
+        return {
+            x for x in G if x == v or x in G[v] or any(is_path(G, [v, z, x]) for z in G)
+        }
+
+    def is_closed(G, nodes):
+        """Decides whether the given set of nodes is closed.
+
+        A set *S* of nodes is *closed* if for each node *u* in the graph
+        not in *S* and for each node *v* in *S*, there is an edge from
+        *u* to *v*.
+
+        """
+        # TODO This is trivially parallelizable.
+        return all(v in G[u] for u in set(G) - nodes for v in nodes)
+
+    # TODO This is trivially parallelizable.
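+    # Characterization from [1]_ used below: `t` is reachable from `s`
+    # unless some closed set separates them, i.e. contains `s` but not
+    # `t`, and it suffices to test the closed sets among the
+    # two-neighborhoods computed here.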
+ neighborhoods = [two_neighborhood(G, v) for v in G] + return all(not (is_closed(G, S) and s in S and t not in S) for S in neighborhoods) + + +@not_implemented_for("undirected") +@not_implemented_for("multigraph") +@nx._dispatch(name="tournament_is_strongly_connected") +def is_strongly_connected(G): + """Decides whether the given tournament is strongly connected. + + This function is more theoretically efficient than the + :func:`~networkx.algorithms.components.is_strongly_connected` + function. + + The given graph **must** be a tournament, otherwise this function's + behavior is undefined. + + Parameters + ---------- + G : NetworkX graph + A directed graph representing a tournament. + + Returns + ------- + bool + Whether the tournament is strongly connected. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 0)]) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + True + >>> G.remove_edge(3, 0) + >>> G.add_edge(0, 3) + >>> nx.is_tournament(G) + True + >>> nx.tournament.is_strongly_connected(G) + False + + Notes + ----- + Although this function is more theoretically efficient than the + generic strong connectivity function, a speedup requires the use of + parallelism. Though it may in the future, the current implementation + does not use parallelism, thus you may not see much of a speedup. + + This algorithm comes from [1]. + + References + ---------- + .. [1] Tantau, Till. + "A note on the complexity of the reachability problem for + tournaments." + *Electronic Colloquium on Computational Complexity*. 2001. + + + """ + # TODO This is trivially parallelizable. + return all(is_reachable(G, u, v) for u in G for v in G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..93e6cdd08ca959835cc5c5f9a3e6dc353f4b217d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__init__.py @@ -0,0 +1,5 @@ +from .beamsearch import * +from .breadth_first_search import * +from .depth_first_search import * +from .edgedfs import * +from .edgebfs import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..456f37a25ce4f48ec220cccee594876ecc91596d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c7f54f3db70901c45b01e4f4011ad580839aad3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/beamsearch.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c00ced86400f402ce956447785c7276c9aeda1f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-39.pyc differ diff --git 
a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..575fa9abfcb0c4873f72540310b979dd1e5203b2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9ba31ccdde294a819e7e7a5394e5b1cda1923d3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgebfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9df3ec6e7fefe67b0455e4dfef551623d8f22aed Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/__pycache__/edgedfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/beamsearch.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/beamsearch.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe93669a3a18f50e8b46ffa8c667db1e2d4a97a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/beamsearch.py @@ -0,0 +1,106 @@ +"""Basic algorithms for breadth-first searching the nodes of a graph.""" +import networkx as nx + +from .breadth_first_search import generic_bfs_edges + +__all__ = ["bfs_beam_edges"] + + +@nx._dispatch +def bfs_beam_edges(G, source, value, width=None): + """Iterates over edges in a beam search. + + The beam search is a generalized breadth-first search in which only + the "best" *w* neighbors of the current node are enqueued, where *w* + is the beam width and "best" is an application-specific + heuristic. In general, a beam search with a small beam width might + not visit each node in the graph. + + Parameters + ---------- + G : NetworkX graph + + source : node + Starting node for the breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + value : function + A function that takes a node of the graph as input and returns a + real number indicating how "good" it is. A higher value means it + is more likely to be visited sooner during the search. When + visiting a new node, only the `width` neighbors with the highest + `value` are enqueued (in decreasing order of `value`). + + width : int (default = None) + The beam width for the search. This is the number of neighbors + (ordered by `value`) to enqueue when visiting each new node. + + Yields + ------ + edge + Edges in the beam search starting from `source`, given as a pair + of nodes. + + Examples + -------- + To give nodes with, for example, a higher centrality precedence + during the search, set the `value` function to return the centrality + value of the node: + + >>> G = nx.karate_club_graph() + >>> centrality = nx.eigenvector_centrality(G) + >>> source = 0 + >>> width = 5 + >>> for u, v in nx.bfs_beam_edges(G, source, centrality.get, width): + ... print((u, v)) + ... 
+    (0, 2)
+    (0, 1)
+    (0, 8)
+    (0, 13)
+    (0, 3)
+    (2, 32)
+    (1, 30)
+    (8, 33)
+    (3, 7)
+    (32, 31)
+    (31, 28)
+    (31, 25)
+    (25, 23)
+    (25, 24)
+    (23, 29)
+    (23, 27)
+    (29, 26)
+    """
+
+    if width is None:
+        width = len(G)
+
+    def successors(v):
+        """Returns a list of the best neighbors of a node.
+
+        `v` is a node in the graph `G`.
+
+        The "best" neighbors are chosen according to the `value`
+        function (higher is better). Only the `width` best neighbors of
+        `v` are returned.
+
+        The list returned by this function is sorted in decreasing
+        order of the `value` function.
+
+        """
+        # TODO The Python documentation states that for small values, it
+        # is better to use `heapq.nlargest`. We should determine the
+        # threshold at which it's better to use `heapq.nlargest()`
+        # instead of `sorted()[:]` and apply that optimization here.
+        #
+        # If `width` is greater than the number of neighbors of `v`, all
+        # neighbors are returned by the semantics of slicing in
+        # Python. This occurs in the special case that the user did not
+        # specify a `width`: in this case all neighbors are always
+        # returned, so this is just a (slower) implementation of
+        # `bfs_edges(G, source)` but with a sorted enqueue step.
+        return iter(sorted(G.neighbors(v), key=value, reverse=True)[:width])
+
+    yield from generic_bfs_edges(G, source, successors)
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/breadth_first_search.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/breadth_first_search.py
new file mode 100644
index 0000000000000000000000000000000000000000..33b0df8e1746f41c143b208a589628a5fc81be9f
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/breadth_first_search.py
@@ -0,0 +1,581 @@
+"""Basic algorithms for breadth-first searching the nodes of a graph."""
+import math
+from collections import deque
+
+import networkx as nx
+
+__all__ = [
+    "bfs_edges",
+    "bfs_tree",
+    "bfs_predecessors",
+    "bfs_successors",
+    "descendants_at_distance",
+    "bfs_layers",
+    "bfs_labeled_edges",
+    "generic_bfs_edges",
+]
+
+
+@nx._dispatch
+def generic_bfs_edges(G, source, neighbors=None, depth_limit=None, sort_neighbors=None):
+    """Iterate over edges in a breadth-first search.
+
+    The breadth-first search begins at `source` and enqueues the
+    neighbors of newly visited nodes specified by the `neighbors`
+    function.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+
+    source : node
+        Starting node for the breadth-first search; this function
+        iterates over only those edges in the component reachable from
+        this node.
+
+    neighbors : function
+        A function that takes a newly visited node of the graph as input
+        and returns an *iterator* (not just a list) of nodes that are
+        neighbors of that node with custom ordering. If not specified, this is
+        just the ``G.neighbors`` method, but in general it can be any function
+        that returns an iterator over some or all of the neighbors of a
+        given node, in any order.
+
+    depth_limit : int, optional(default=len(G))
+        Specify the maximum search depth.
+
+    sort_neighbors : Callable
+
+        .. deprecated:: 3.2
+
+           The sort_neighbors parameter is deprecated and will be removed in
+           version 3.4. A custom (e.g. sorted) ordering of neighbors can be
+           specified with the `neighbors` parameter.
+
+        A function that takes the list of neighbors of a given node as input,
+        and returns an iterator over these neighbors but with a custom
+        ordering.
+
+    Yields
+    ------
+    edge
+        Edges in the breadth-first search starting from `source`.
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> list(nx.bfs_edges(G, 0)) + [(0, 1), (1, 2)] + >>> list(nx.bfs_edges(G, source=0, depth_limit=1)) + [(0, 1)] + + Notes + ----- + This implementation is from `PADS`_, which was in the public domain + when it was first accessed in July, 2004. The modifications + to allow depth limits are based on the Wikipedia article + "`Depth-limited-search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS/BFS.py + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + """ + if neighbors is None: + neighbors = G.neighbors + if sort_neighbors is not None: + import warnings + + warnings.warn( + ( + "The sort_neighbors parameter is deprecated and will be removed\n" + "in NetworkX 3.4, use the neighbors parameter instead." + ), + DeprecationWarning, + stacklevel=2, + ) + _neighbors = neighbors + neighbors = lambda node: iter(sort_neighbors(_neighbors(node))) + if depth_limit is None: + depth_limit = len(G) + + seen = {source} + n = len(G) + depth = 0 + next_parents_children = [(source, neighbors(source))] + while next_parents_children and depth < depth_limit: + this_parents_children = next_parents_children + next_parents_children = [] + for parent, children in this_parents_children: + for child in children: + if child not in seen: + seen.add(child) + next_parents_children.append((child, neighbors(child))) + yield parent, child + if len(seen) == n: + return + depth += 1 + + +@nx._dispatch +def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Iterate over edges in a breadth-first-search starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search; this function + iterates over only those edges in the component reachable from + this node. + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the breadth-first search. + + Examples + -------- + To get the edges in a breadth-first search:: + + >>> G = nx.path_graph(3) + >>> list(nx.bfs_edges(G, 0)) + [(0, 1), (1, 2)] + >>> list(nx.bfs_edges(G, source=0, depth_limit=1)) + [(0, 1)] + + To get the nodes in a breadth-first search order:: + + >>> G = nx.path_graph(3) + >>> root = 2 + >>> edges = nx.bfs_edges(G, root) + >>> nodes = [root] + [v for u, v in edges] + >>> nodes + [2, 1, 0] + + Notes + ----- + The naming of this function is very similar to + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs`. The difference + is that ``edge_bfs`` yields edges even if they extend back to an already + explored node while this generator yields the edges of the tree that results + from a breadth-first-search (BFS) so no edges are reported if they extend + to already explored nodes. That means ``edge_bfs`` reports all edges while + ``bfs_edges`` only reports those traversed by a node-based BFS. Yet another + description is that ``bfs_edges`` reports the edges traversed during BFS + while ``edge_bfs`` reports all edges in the order they are explored. + + Based on the breadth-first search implementation in PADS [1]_ + by D. Eppstein, July 2004; with modifications to allow depth limits + as described in [2]_. 
+ + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS/BFS.py. + .. [2] https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` + :func:`~networkx.algorithms.traversal.edgebfs.edge_bfs` + + """ + if reverse and G.is_directed(): + successors = G.predecessors + else: + successors = G.neighbors + + if callable(sort_neighbors): + yield from generic_bfs_edges( + G, source, lambda node: iter(sort_neighbors(successors(node))), depth_limit + ) + else: + yield from generic_bfs_edges(G, source, successors, depth_limit) + + +@nx._dispatch +def bfs_tree(G, source, reverse=False, depth_limit=None, sort_neighbors=None): + """Returns an oriented tree constructed from of a breadth-first-search + starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + reverse : bool, optional + If True traverse a directed graph in the reverse direction + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + T: NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(3) + >>> list(nx.bfs_tree(G, 1).edges()) + [(1, 0), (1, 2)] + >>> H = nx.Graph() + >>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(H, [2, 7, 8, 9, 10]) + >>> sorted(list(nx.bfs_tree(H, source=3, depth_limit=3).edges())) + [(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)] + + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_tree + bfs_edges + edge_bfs + """ + T = nx.DiGraph() + T.add_node(source) + edges_gen = bfs_edges( + G, + source, + reverse=reverse, + depth_limit=depth_limit, + sort_neighbors=sort_neighbors, + ) + T.add_edges_from(edges_gen) + return T + + +@nx._dispatch +def bfs_predecessors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of predecessors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + pred: iterator + (node, predecessor) iterator where `predecessor` is the predecessor of + `node` in a breadth first search starting from `source`. 
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> dict(nx.bfs_predecessors(G, 0)) + {1: 0, 2: 1} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> dict(nx.bfs_predecessors(H, 0)) + {1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2} + >>> M = nx.Graph() + >>> nx.add_path(M, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(M, [2, 7, 8, 9, 10]) + >>> sorted(nx.bfs_predecessors(M, source=1, depth_limit=3)) + [(0, 1), (2, 1), (3, 2), (4, 3), (7, 2), (8, 7)] + >>> N = nx.DiGraph() + >>> nx.add_path(N, [0, 1, 2, 3, 4, 7]) + >>> nx.add_path(N, [3, 5, 6, 7]) + >>> sorted(nx.bfs_predecessors(N, source=2)) + [(3, 2), (4, 3), (5, 3), (6, 5), (7, 4)] + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004. The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + bfs_edges + edge_bfs + """ + for s, t in bfs_edges( + G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ): + yield (t, s) + + +@nx._dispatch +def bfs_successors(G, source, depth_limit=None, sort_neighbors=None): + """Returns an iterator of successors in breadth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node + Specify starting node for breadth-first search + + depth_limit : int, optional(default=len(G)) + Specify the maximum search depth + + sort_neighbors : function + A function that takes the list of neighbors of given node as input, and + returns an *iterator* over these neighbors but with custom ordering. + + Returns + ------- + succ: iterator + (node, successors) iterator where `successors` is the non-empty list of + successors of `node` in a breadth first search from `source`. + To appear in the iterator, `node` must have successors. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> dict(nx.bfs_successors(G, 0)) + {0: [1], 1: [2]} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> dict(nx.bfs_successors(H, 0)) + {0: [1, 2], 1: [3, 4], 2: [5, 6]} + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + >>> nx.add_path(G, [2, 7, 8, 9, 10]) + >>> dict(nx.bfs_successors(G, source=1, depth_limit=3)) + {1: [0, 2], 2: [3, 7], 3: [4], 7: [8]} + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3, 4, 5]) + >>> dict(nx.bfs_successors(G, source=3)) + {3: [4], 4: [5]} + + Notes + ----- + Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py + by D. Eppstein, July 2004.The modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited-search`_". + + .. _Depth-limited-search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + bfs_tree + bfs_edges + edge_bfs + """ + parent = source + children = [] + for p, c in bfs_edges( + G, source, depth_limit=depth_limit, sort_neighbors=sort_neighbors + ): + if p == parent: + children.append(c) + continue + yield (parent, children) + children = [c] + parent = p + yield (parent, children) + + +@nx._dispatch +def bfs_layers(G, sources): + """Returns an iterator of all the layers in breadth-first search traversal. + + Parameters + ---------- + G : NetworkX graph + A graph over which to find the layers using breadth-first search. 
+ + sources : node in `G` or list of nodes in `G` + Specify starting nodes for single source or multiple sources breadth-first search + + Yields + ------ + layer: list of nodes + Yields list of nodes at the same distance from sources + + Examples + -------- + >>> G = nx.path_graph(5) + >>> dict(enumerate(nx.bfs_layers(G, [0, 4]))) + {0: [0, 4], 1: [1, 3], 2: [2]} + >>> H = nx.Graph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> dict(enumerate(nx.bfs_layers(H, [1]))) + {0: [1], 1: [0, 3, 4], 2: [2], 3: [5, 6]} + >>> dict(enumerate(nx.bfs_layers(H, [1, 6]))) + {0: [1, 6], 1: [0, 3, 4, 2], 2: [5]} + """ + if sources in G: + sources = [sources] + + current_layer = list(sources) + visited = set(sources) + + for source in current_layer: + if source not in G: + raise nx.NetworkXError(f"The node {source} is not in the graph.") + + # this is basically BFS, except that the current layer only stores the nodes at + # same distance from sources at each iteration + while current_layer: + yield current_layer + next_layer = [] + for node in current_layer: + for child in G[node]: + if child not in visited: + visited.add(child) + next_layer.append(child) + current_layer = next_layer + + +REVERSE_EDGE = "reverse" +TREE_EDGE = "tree" +FORWARD_EDGE = "forward" +LEVEL_EDGE = "level" + + +@nx._dispatch +def bfs_labeled_edges(G, sources): + """Iterate over edges in a breadth-first search (BFS) labeled by type. + + We generate triple of the form (*u*, *v*, *d*), where (*u*, *v*) is the + edge being explored in the breadth-first search and *d* is one of the + strings 'tree', 'forward', 'level', or 'reverse'. A 'tree' edge is one in + which *v* is first discovered and placed into the layer below *u*. A + 'forward' edge is one in which *u* is on the layer above *v* and *v* has + already been discovered. A 'level' edge is one in which both *u* and *v* + occur on the same layer. A 'reverse' edge is one in which *u* is on a layer + below *v*. + + We emit each edge exactly once. In an undirected graph, 'reverse' edges do + not occur, because each is discovered either as a 'tree' or 'forward' edge. + + Parameters + ---------- + G : NetworkX graph + A graph over which to find the layers using breadth-first search. + + sources : node in `G` or list of nodes in `G` + Starting nodes for single source or multiple sources breadth-first search + + Yields + ------ + edges: generator + A generator of triples (*u*, *v*, *d*) where (*u*, *v*) is the edge being + explored and *d* is described above. + + Examples + -------- + >>> G = nx.cycle_graph(4, create_using = nx.DiGraph) + >>> list(nx.bfs_labeled_edges(G, 0)) + [(0, 1, 'tree'), (1, 2, 'tree'), (2, 3, 'tree'), (3, 0, 'reverse')] + >>> G = nx.complete_graph(3) + >>> list(nx.bfs_labeled_edges(G, 0)) + [(0, 1, 'tree'), (0, 2, 'tree'), (1, 2, 'level')] + >>> list(nx.bfs_labeled_edges(G, [0, 1])) + [(0, 1, 'level'), (0, 2, 'tree'), (1, 2, 'forward')] + """ + if sources in G: + sources = [sources] + + neighbors = G._adj + directed = G.is_directed() + visited = set() + visit = visited.discard if directed else visited.add + # We use visited in a negative sense, so the visited set stays empty for the + # directed case and level edges are reported on their first occurrence in + # the undirected case. 
Note our use of visited.discard -- this is built-in + # thus somewhat faster than a python-defined def nop(x): pass + depth = {s: 0 for s in sources} + queue = deque(depth.items()) + push = queue.append + pop = queue.popleft + while queue: + u, du = pop() + for v in neighbors[u]: + if v not in depth: + depth[v] = dv = du + 1 + push((v, dv)) + yield u, v, TREE_EDGE + else: + dv = depth[v] + if du == dv: + if v not in visited: + yield u, v, LEVEL_EDGE + elif du < dv: + yield u, v, FORWARD_EDGE + elif directed: + yield u, v, REVERSE_EDGE + visit(u) + + +@nx._dispatch +def descendants_at_distance(G, source, distance): + """Returns all nodes at a fixed `distance` from `source` in `G`. + + Parameters + ---------- + G : NetworkX graph + A graph + source : node in `G` + distance : the distance of the wanted nodes from `source` + + Returns + ------- + set() + The descendants of `source` in `G` at the given `distance` from `source` + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.descendants_at_distance(G, 2, 2) + {0, 4} + >>> H = nx.DiGraph() + >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]) + >>> nx.descendants_at_distance(H, 0, 2) + {3, 4, 5, 6} + >>> nx.descendants_at_distance(H, 5, 0) + {5} + >>> nx.descendants_at_distance(H, 5, 1) + set() + """ + if source not in G: + raise nx.NetworkXError(f"The node {source} is not in the graph.") + + bfs_generator = nx.bfs_layers(G, source) + for i, layer in enumerate(bfs_generator): + if i == distance: + return set(layer) + return set() diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/depth_first_search.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/depth_first_search.py new file mode 100644 index 0000000000000000000000000000000000000000..185a99c3dd148cf7bdeaeb4b848f0fd070e661fc --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/depth_first_search.py @@ -0,0 +1,469 @@ +"""Basic algorithms for depth-first searching the nodes of a graph.""" +from collections import defaultdict + +import networkx as nx + +__all__ = [ + "dfs_edges", + "dfs_tree", + "dfs_predecessors", + "dfs_successors", + "dfs_preorder_nodes", + "dfs_postorder_nodes", + "dfs_labeled_edges", +] + + +@nx._dispatch +def dfs_edges(G, source=None, depth_limit=None): + """Iterate over edges in a depth-first-search (DFS). + + Perform a depth-first-search over the nodes of `G` and yield + the edges in order. This may not generate all edges in `G` + (see `~networkx.algorithms.traversal.edgedfs.edge_dfs`). + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and yield edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Yields + ------ + edge: 2-tuple of nodes + Yields edges resulting from the depth-first-search. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_edges(G, source=0)) + [(0, 1), (1, 2), (2, 3), (3, 4)] + >>> list(nx.dfs_edges(G, source=0, depth_limit=2)) + [(0, 1), (1, 2)] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in PADS [1]_, with modifications + to allow depth limits based on the Wikipedia article + "Depth-limited search" [2]_. 
+ + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + :func:`~networkx.algorithms.traversal.edgedfs.edge_dfs` + :func:`~networkx.algorithms.traversal.breadth_first_search.bfs_edges` + + References + ---------- + .. [1] http://www.ics.uci.edu/~eppstein/PADS + .. [2] https://en.wikipedia.org/wiki/Depth-limited_search + """ + if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + visited = set() + for start in nodes: + if start in visited: + continue + visited.add(start) + stack = [(start, iter(G[start]))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child not in visited: + yield parent, child + visited.add(child) + if depth_now < depth_limit: + stack.append((child, iter(G[child]))) + depth_now += 1 + break + else: + stack.pop() + depth_now -= 1 + + +@nx._dispatch +def dfs_tree(G, source=None, depth_limit=None): + """Returns oriented tree constructed from a depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + T : NetworkX DiGraph + An oriented tree + + Examples + -------- + >>> G = nx.path_graph(5) + >>> T = nx.dfs_tree(G, source=0, depth_limit=2) + >>> list(T.edges()) + [(0, 1), (1, 2)] + >>> T = nx.dfs_tree(G, source=0) + >>> list(T.edges()) + [(0, 1), (1, 2), (2, 3), (3, 4)] + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + T = nx.DiGraph() + if source is None: + T.add_nodes_from(G) + else: + T.add_node(source) + T.add_edges_from(dfs_edges(G, source, depth_limit)) + return T + + +@nx._dispatch +def dfs_predecessors(G, source=None, depth_limit=None): + """Returns dictionary of predecessors in depth-first-search from source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get predecessors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + pred: dict + A dictionary with nodes as keys and predecessor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.dfs_predecessors(G, source=0) + {1: 0, 2: 1, 3: 2} + >>> nx.dfs_predecessors(G, source=0, depth_limit=2) + {1: 0, 2: 1} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + return {t: s for s, t in dfs_edges(G, source, depth_limit)} + + +@nx._dispatch +def dfs_successors(G, source=None, depth_limit=None): + """Returns dictionary of successors in depth-first-search from source. 
+ + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + Note that you will get successors for all nodes in the + component containing `source`. This input only specifies + where the DFS starts. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + succ: dict + A dictionary with nodes as keys and list of successor nodes as values. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> nx.dfs_successors(G, source=0) + {0: [1], 1: [2], 2: [3], 3: [4]} + >>> nx.dfs_successors(G, source=0, depth_limit=2) + {0: [1], 1: [2]} + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_preorder_nodes + dfs_postorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + d = defaultdict(list) + for s, t in dfs_edges(G, source=source, depth_limit=depth_limit): + d[s].append(t) + return dict(d) + + +@nx._dispatch +def dfs_postorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search post-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search post-ordering. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_postorder_nodes(G, source=0)) + [4, 3, 2, 1, 0] + >>> list(nx.dfs_postorder_nodes(G, source=0, depth_limit=2)) + [1, 0] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_labeled_edges + edge_dfs + bfs_tree + """ + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == "reverse") + + +@nx._dispatch +def dfs_preorder_nodes(G, source=None, depth_limit=None): + """Generate nodes in a depth-first-search pre-ordering starting at source. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return nodes in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + nodes: generator + A generator of nodes in a depth-first-search pre-ordering. 
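+        (These are exactly the heads of the 'forward' edges yielded by
+        `dfs_labeled_edges`, which is how this function is implemented.)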
+ + Examples + -------- + >>> G = nx.path_graph(5) + >>> list(nx.dfs_preorder_nodes(G, source=0)) + [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G, source=0, depth_limit=2)) + [0, 1, 2] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_postorder_nodes + dfs_labeled_edges + bfs_edges + """ + edges = nx.dfs_labeled_edges(G, source=source, depth_limit=depth_limit) + return (v for u, v, d in edges if d == "forward") + + +@nx._dispatch +def dfs_labeled_edges(G, source=None, depth_limit=None): + """Iterate over edges in a depth-first-search (DFS) labeled by type. + + Parameters + ---------- + G : NetworkX graph + + source : node, optional + Specify starting node for depth-first search and return edges in + the component reachable from source. + + depth_limit : int, optional (default=len(G)) + Specify the maximum search depth. + + Returns + ------- + edges: generator + A generator of triples of the form (*u*, *v*, *d*), where (*u*, + *v*) is the edge being explored in the depth-first search and *d* + is one of the strings 'forward', 'nontree', 'reverse', or 'reverse-depth_limit'. + A 'forward' edge is one in which *u* has been visited but *v* has + not. A 'nontree' edge is one in which both *u* and *v* have been + visited but the edge is not in the DFS tree. A 'reverse' edge is + one in which both *u* and *v* have been visited and the edge is in + the DFS tree. When the `depth_limit` is reached via a 'forward' edge, + a 'reverse' edge is immediately generated rather than the subtree + being explored. To indicate this flavor of 'reverse' edge, the string + yielded is 'reverse-depth_limit'. + + Examples + -------- + + The labels reveal the complete transcript of the depth-first search + algorithm in more detail than, for example, :func:`dfs_edges`:: + + >>> from pprint import pprint + >>> + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 1)]) + >>> pprint(list(nx.dfs_labeled_edges(G, source=0))) + [(0, 0, 'forward'), + (0, 1, 'forward'), + (1, 2, 'forward'), + (2, 1, 'nontree'), + (1, 2, 'reverse'), + (0, 1, 'reverse'), + (0, 0, 'reverse')] + + Notes + ----- + If a source is not specified then a source is chosen arbitrarily and + repeatedly until all components in the graph are searched. + + The implementation of this function is adapted from David Eppstein's + depth-first search function in `PADS`_, with modifications + to allow depth limits based on the Wikipedia article + "`Depth-limited search`_". + + .. _PADS: http://www.ics.uci.edu/~eppstein/PADS + .. _Depth-limited search: https://en.wikipedia.org/wiki/Depth-limited_search + + See Also + -------- + dfs_edges + dfs_preorder_nodes + dfs_postorder_nodes + """ + # Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py + # by D. Eppstein, July 2004. 
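+    # The stack holds (node, iterator-over-neighbors) pairs so the traversal
+    # state survives each yield. `depth_now` tracks the length of the current
+    # stack path; once it reaches `depth_limit`, a newly discovered child is
+    # yielded as 'forward' immediately followed by 'reverse-depth_limit'
+    # instead of being expanded.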
+ if source is None: + # edges for all components + nodes = G + else: + # edges for components with source + nodes = [source] + if depth_limit is None: + depth_limit = len(G) + + visited = set() + for start in nodes: + if start in visited: + continue + yield start, start, "forward" + visited.add(start) + stack = [(start, iter(G[start]))] + depth_now = 1 + while stack: + parent, children = stack[-1] + for child in children: + if child in visited: + yield parent, child, "nontree" + else: + yield parent, child, "forward" + visited.add(child) + if depth_now < depth_limit: + stack.append((child, iter(G[child]))) + depth_now += 1 + break + else: + yield parent, child, "reverse-depth_limit" + else: + stack.pop() + depth_now -= 1 + if stack: + yield stack[-1][0], parent, "reverse" + yield start, start, "reverse" diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgebfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgebfs.py new file mode 100644 index 0000000000000000000000000000000000000000..c29ef5e02696f85a6e3aedad8995811135f042f5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgebfs.py @@ -0,0 +1,177 @@ +""" +============================= +Breadth First Search on Edges +============================= + +Algorithms for a breadth-first traversal of edges in a graph. + +""" +from collections import deque + +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_bfs"] + + +@nx._dispatch +def edge_bfs(G, source=None, orientation=None): + """A directed, breadth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a breadth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. + + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the breadth-first-search. + For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. 
+
+    Examples
+    --------
+    >>> nodes = [0, 1, 2, 3]
+    >>> edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)]
+
+    >>> list(nx.edge_bfs(nx.Graph(edges), nodes))
+    [(0, 1), (0, 2), (1, 2), (1, 3)]
+
+    >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes))
+    [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)]
+
+    >>> list(nx.edge_bfs(nx.MultiGraph(edges), nodes))
+    [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)]
+
+    >>> list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes))
+    [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)]
+
+    >>> list(nx.edge_bfs(nx.DiGraph(edges), nodes, orientation="ignore"))
+    [(0, 1, 'forward'), (1, 0, 'reverse'), (2, 0, 'reverse'), (2, 1, 'reverse'), (3, 1, 'reverse')]
+
+    >>> list(nx.edge_bfs(nx.MultiDiGraph(edges), nodes, orientation="ignore"))
+    [(0, 1, 0, 'forward'), (1, 0, 0, 'reverse'), (1, 0, 1, 'reverse'), (2, 0, 0, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')]
+
+    Notes
+    -----
+    The goal of this function is to visit edges. It differs from the more
+    familiar breadth-first-search of nodes, as provided by
+    :func:`networkx.algorithms.traversal.breadth_first_search.bfs_edges`, in
+    that it does not stop once every node has been visited. In a directed graph
+    with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited
+    if not for the functionality provided by this function.
+
+    The naming of this function is very similar to bfs_edges. The difference
+    is that 'edge_bfs' yields edges even if they extend back to an already
+    explored node while 'bfs_edges' yields the edges of the tree that results
+    from a breadth-first-search (BFS) so no edges are reported if they extend
+    to already explored nodes. That means 'edge_bfs' reports all edges while
+    'bfs_edges' only reports those traversed by a node-based BFS. Yet another
+    description is that 'bfs_edges' reports the edges traversed during BFS
+    while 'edge_bfs' reports all edges in the order they are explored.
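+
+    For undirected (multi)graphs, each edge is reported only once even though
+    it is reachable from both endpoints: internally an edge is identified by
+    the frozenset of its two endpoints (plus its key, for multigraphs).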
+ + See Also + -------- + bfs_edges + bfs_tree + edge_dfs + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + if directed: + neighbors = G.successors + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + neighbors = G.neighbors + + def edge_id(edge): + return (frozenset(edge[:2]),) + edge[2:] + + check_reverse = directed and orientation in ("reverse", "ignore") + + # start BFS + visited_nodes = set(nodes) + visited_edges = set() + queue = deque([(n, edges_from(n)) for n in nodes]) + while queue: + parent, children_edges = queue.popleft() + for edge in children_edges: + if check_reverse and edge[-1] == REVERSE: + child = edge[0] + else: + child = edge[1] + if child not in visited_nodes: + visited_nodes.add(child) + queue.append((child, edges_from(child))) + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + yield edge diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgedfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgedfs.py new file mode 100644 index 0000000000000000000000000000000000000000..1e583de6ec4d4dd1ea90129ef5ad1e473b3b6c1c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/edgedfs.py @@ -0,0 +1,175 @@ +""" +=========================== +Depth First Search on Edges +=========================== + +Algorithms for a depth-first traversal of edges in a graph. + +""" +import networkx as nx + +FORWARD = "forward" +REVERSE = "reverse" + +__all__ = ["edge_dfs"] + + +@nx._dispatch +def edge_dfs(G, source=None, orientation=None): + """A directed, depth-first-search of edges in `G`, beginning at `source`. + + Yield the edges of G in a depth-first-search order continuing until + all edges are generated. + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + source : node, list of nodes + The node from which the traversal begins. If None, then a source + is chosen arbitrarily and repeatedly until all edges from each node in + the graph are searched. + + orientation : None | 'original' | 'reverse' | 'ignore' (default: None) + For directed graphs and directed multigraphs, edge traversals need not + respect the original orientation of the edges. + When set to 'reverse' every edge is traversed in the reverse direction. + When set to 'ignore', every edge is treated as undirected. + When set to 'original', every edge is treated as directed. + In all three cases, the yielded edge tuples add a last entry to + indicate the direction in which that edge was traversed. + If orientation is None, the yielded edge has no direction indicated. + The direction is respected, but not reported. 
+ + Yields + ------ + edge : directed edge + A directed edge indicating the path taken by the depth-first traversal. + For graphs, `edge` is of the form `(u, v)` where `u` and `v` + are the tail and head of the edge as determined by the traversal. + For multigraphs, `edge` is of the form `(u, v, key)`, where `key` is + the key of the edge. When the graph is directed, then `u` and `v` + are always in the order of the actual directed edge. + If orientation is not None then the edge tuple is extended to include + the direction of traversal ('forward' or 'reverse') on that edge. + + Examples + -------- + >>> nodes = [0, 1, 2, 3] + >>> edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.Graph(edges), nodes)) + [(0, 1), (1, 2), (1, 3)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes)) + [(0, 1), (1, 0), (2, 1), (3, 1)] + + >>> list(nx.edge_dfs(nx.MultiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + + >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes)) + [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + + >>> list(nx.edge_dfs(nx.DiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 'forward'), (1, 0, 'forward'), (2, 1, 'reverse'), (3, 1, 'reverse')] + + >>> list(nx.edge_dfs(nx.MultiDiGraph(edges), nodes, orientation="ignore")) + [(0, 1, 0, 'forward'), (1, 0, 0, 'forward'), (1, 0, 1, 'reverse'), (2, 1, 0, 'reverse'), (3, 1, 0, 'reverse')] + + Notes + ----- + The goal of this function is to visit edges. It differs from the more + familiar depth-first traversal of nodes, as provided by + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges`, in + that it does not stop once every node has been visited. In a directed graph + with edges [(0, 1), (1, 2), (2, 1)], the edge (2, 1) would not be visited + if not for the functionality provided by this function. 
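+
+    The same edge bookkeeping is used here as in `edge_bfs`: undirected edges
+    are identified by the frozenset of their endpoints so each is yielded
+    exactly once. The traversal itself is iterative, keeping a stack of nodes
+    whose edge iterators are consumed lazily.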
+ + See Also + -------- + :func:`~networkx.algorithms.traversal.depth_first_search.dfs_edges` + + """ + nodes = list(G.nbunch_iter(source)) + if not nodes: + return + + directed = G.is_directed() + kwds = {"data": False} + if G.is_multigraph() is True: + kwds["keys"] = True + + # set up edge lookup + if orientation is None: + + def edges_from(node): + return iter(G.edges(node, **kwds)) + + elif not directed or orientation == "original": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + + elif orientation == "reverse": + + def edges_from(node): + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + elif orientation == "ignore": + + def edges_from(node): + for e in G.edges(node, **kwds): + yield e + (FORWARD,) + for e in G.in_edges(node, **kwds): + yield e + (REVERSE,) + + else: + raise nx.NetworkXError("invalid orientation argument.") + + # set up formation of edge_id to easily look up if edge already returned + if directed: + + def edge_id(edge): + # remove direction indicator + return edge[:-1] if orientation is not None else edge + + else: + + def edge_id(edge): + # single id for undirected requires frozenset on nodes + return (frozenset(edge[:2]),) + edge[2:] + + # Basic setup + check_reverse = directed and orientation in ("reverse", "ignore") + + visited_edges = set() + visited_nodes = set() + edges = {} + + # start DFS + for start_node in nodes: + stack = [start_node] + while stack: + current_node = stack[-1] + if current_node not in visited_nodes: + edges[current_node] = edges_from(current_node) + visited_nodes.add(current_node) + + try: + edge = next(edges[current_node]) + except StopIteration: + # No more edges from the current node. + stack.pop() + else: + edgeid = edge_id(edge) + if edgeid not in visited_edges: + visited_edges.add(edgeid) + # Mark the traversed "to" node as to-be-explored. 
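+                    # If the edge was traversed against its stored orientation
+                    # (a REVERSE traversal), the node we arrived at is edge[0],
+                    # not edge[1].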
+ if check_reverse and edge[-1] == REVERSE: + stack.append(edge[0]) + else: + stack.append(edge[1]) + yield edge diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c08e36f9ab76cd63e27cf95a458c44baa0f4a23e Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9befb7c048ea5644c4f6141f1ad1cd45ad0ea7e6 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ede4499cfa79ceef3480b8968c9b91dc2fef00f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40ed32017aa2ed7e3bae27dc50f5170b53e918f9 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a29d6a01af1b651a091343df21f7ce302957c01 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bf73cec7d00fbddee4e2b08766624c6c2961fdf Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py new file mode 100644 index 0000000000000000000000000000000000000000..8945b418457b0c5c31b956f40a9c4ded2a439d9d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_beamsearch.py @@ -0,0 +1,32 @@ +"""Unit tests for the beam search functions.""" + 
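+# The value function given to bfs_beam_edges scores nodes; with `identity`,
+# higher-numbered neighbors are preferred, which makes the expected edge
+# orders in the tests below deterministic.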
+import networkx as nx
+
+
+def identity(x):
+    return x
+
+
+class TestBeamSearch:
+    """Unit tests for the beam search function."""
+
+    def test_narrow(self):
+        """Tests that a narrow beam width may cause an incomplete search."""
+        # In this search, we enqueue only the neighbor 3 at the first
+        # step, then only the neighbor 2 at the second step. Once at
+        # node 2, the search chooses node 3, since it has a higher value
+        # than node 1, but node 3 has already been visited, so the
+        # search terminates.
+        G = nx.cycle_graph(4)
+        edges = nx.bfs_beam_edges(G, 0, identity, width=1)
+        assert list(edges) == [(0, 3), (3, 2)]
+
+    def test_wide(self):
+        G = nx.cycle_graph(4)
+        edges = nx.bfs_beam_edges(G, 0, identity, width=2)
+        assert list(edges) == [(0, 3), (0, 1), (3, 2)]
+
+    def test_width_none(self):
+        G = nx.cycle_graph(4)
+        edges = nx.bfs_beam_edges(G, 0, identity, width=None)
+        assert list(edges) == [(0, 3), (0, 1), (3, 2)]
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_bfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_bfs.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9207cdd21a649b711f912b77dbc79cdb1152b2a
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_bfs.py
@@ -0,0 +1,212 @@
+from functools import partial
+
+import pytest
+
+import networkx as nx
+
+
+class TestBFS:
+    @classmethod
+    def setup_class(cls):
+        # simple graph
+        G = nx.Graph()
+        G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
+        cls.G = G
+
+    def test_successor(self):
+        assert dict(nx.bfs_successors(self.G, source=0)) == {0: [1], 1: [2, 3], 2: [4]}
+
+    def test_predecessor(self):
+        assert dict(nx.bfs_predecessors(self.G, source=0)) == {1: 0, 2: 1, 3: 1, 4: 2}
+
+    def test_bfs_tree(self):
+        T = nx.bfs_tree(self.G, source=0)
+        assert sorted(T.nodes()) == sorted(self.G.nodes())
+        assert sorted(T.edges()) == [(0, 1), (1, 2), (1, 3), (2, 4)]
+
+    def test_bfs_edges(self):
+        edges = nx.bfs_edges(self.G, source=0)
+        assert list(edges) == [(0, 1), (1, 2), (1, 3), (2, 4)]
+
+    def test_bfs_edges_reverse(self):
+        D = nx.DiGraph()
+        D.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
+        edges = nx.bfs_edges(D, source=4, reverse=True)
+        assert list(edges) == [(4, 2), (4, 3), (2, 1), (1, 0)]
+
+    def test_bfs_edges_sorting(self):
+        D = nx.DiGraph()
+        D.add_edges_from([(0, 1), (0, 2), (1, 4), (1, 3), (2, 5)])
+        sort_desc = partial(sorted, reverse=True)
+        edges_asc = nx.bfs_edges(D, source=0, sort_neighbors=sorted)
+        edges_desc = nx.bfs_edges(D, source=0, sort_neighbors=sort_desc)
+        assert list(edges_asc) == [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5)]
+        assert list(edges_desc) == [(0, 2), (0, 1), (2, 5), (1, 4), (1, 3)]
+
+    def test_bfs_tree_isolates(self):
+        G = nx.Graph()
+        G.add_node(1)
+        G.add_node(2)
+        T = nx.bfs_tree(G, source=1)
+        assert sorted(T.nodes()) == [1]
+        assert sorted(T.edges()) == []
+
+    def test_bfs_layers(self):
+        expected = {
+            0: [0],
+            1: [1],
+            2: [2, 3],
+            3: [4],
+        }
+        assert dict(enumerate(nx.bfs_layers(self.G, sources=[0]))) == expected
+        assert dict(enumerate(nx.bfs_layers(self.G, sources=0))) == expected
+
+    def test_bfs_layers_missing_source(self):
+        with pytest.raises(nx.NetworkXError):
+            next(nx.bfs_layers(self.G, sources="abc"))
+        with pytest.raises(nx.NetworkXError):
+            next(nx.bfs_layers(self.G, sources=["abc"]))
+
+    def test_descendants_at_distance(self):
+        for distance, descendants in enumerate([{0}, {1}, {2, 3}, {4}]):
+            assert nx.descendants_at_distance(self.G, 0, distance) ==
descendants + + def test_descendants_at_distance_missing_source(self): + with pytest.raises(nx.NetworkXError): + nx.descendants_at_distance(self.G, "abc", 0) + + def test_bfs_labeled_edges_directed(self): + D = nx.cycle_graph(5, create_using=nx.DiGraph) + expected = [ + (0, 1, "tree"), + (1, 2, "tree"), + (2, 3, "tree"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(4, 4) + expected.append((4, 4, "level")) + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + D.add_edge(0, 2) + D.add_edge(1, 5) + D.add_edge(2, 5) + D.remove_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (3, 4, "tree"), + (4, 0, "reverse"), + ] + answer = list(nx.bfs_labeled_edges(D, 0)) + assert expected == answer + + G = D.to_undirected() + G.add_edge(4, 4) + expected = [ + (0, 1, "tree"), + (0, 2, "tree"), + (0, 4, "tree"), + (1, 2, "level"), + (1, 5, "tree"), + (2, 3, "tree"), + (2, 5, "forward"), + (4, 3, "forward"), + (4, 4, "level"), + ] + answer = list(nx.bfs_labeled_edges(G, 0)) + assert expected == answer + + +class TestBreadthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_limited_bfs_successor(self): + assert dict(nx.bfs_successors(self.G, source=1, depth_limit=3)) == { + 1: [0, 2], + 2: [3, 7], + 3: [4], + 7: [8], + } + result = { + n: sorted(s) for n, s in nx.bfs_successors(self.D, source=7, depth_limit=2) + } + assert result == {8: [9], 2: [3], 7: [2, 8]} + + def test_limited_bfs_predecessor(self): + assert dict(nx.bfs_predecessors(self.G, source=1, depth_limit=3)) == { + 0: 1, + 2: 1, + 3: 2, + 4: 3, + 7: 2, + 8: 7, + } + assert dict(nx.bfs_predecessors(self.D, source=7, depth_limit=2)) == { + 2: 7, + 3: 2, + 8: 7, + 9: 8, + } + + def test_limited_bfs_tree(self): + T = nx.bfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_limited_bfs_edges(self): + edges = nx.bfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (9, 10), (8, 7), (7, 2), (2, 1), (2, 3)] + + def test_limited_bfs_layers(self): + assert dict(enumerate(nx.bfs_layers(self.G, sources=[0]))) == { + 0: [0], + 1: [1], + 2: [2], + 3: [3, 7], + 4: [4, 8], + 5: [5, 9], + 6: [6, 10], + } + assert dict(enumerate(nx.bfs_layers(self.D, sources=2))) == { + 0: [2], + 1: [3, 7], + 2: [8], + 3: [9], + 4: [10], + } + + def test_limited_descendants_at_distance(self): + for distance, descendants in enumerate( + [{0}, {1}, {2}, {3, 7}, {4, 8}, {5, 9}, {6, 10}] + ): + assert nx.descendants_at_distance(self.G, 0, distance) == descendants + for distance, descendants in enumerate([{2}, {3, 7}, {8}, {9}, {10}]): + assert nx.descendants_at_distance(self.D, 2, distance) == descendants + + +def test_deprecations(): + G = nx.Graph([(1, 2)]) + generic_bfs = nx.breadth_first_search.generic_bfs_edges + with pytest.deprecated_call(): + list(generic_bfs(G, source=1, sort_neighbors=sorted)) + with pytest.deprecated_call(): + list(generic_bfs(G, source=1, neighbors=G.neighbors, sort_neighbors=sorted)) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_dfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_dfs.py new 
file mode 100644 index 0000000000000000000000000000000000000000..0eb698b0f2da734c564c83a2d23ff7f191c7137a --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_dfs.py @@ -0,0 +1,251 @@ +import networkx as nx + + +class TestDFS: + @classmethod + def setup_class(cls): + # simple graph + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 0), (0, 4)]) + cls.G = G + # simple graph, disconnected + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + cls.D = D + + def test_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0)) == [0, 1, 2, 4, 3] + assert list(nx.dfs_preorder_nodes(self.D)) == [0, 1, 2, 3] + assert list(nx.dfs_preorder_nodes(self.D, source=2)) == [2, 3] + + def test_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=0)) == [4, 2, 3, 1, 0] + assert list(nx.dfs_postorder_nodes(self.D)) == [1, 0, 3, 2] + assert list(nx.dfs_postorder_nodes(self.D, source=0)) == [1, 0] + + def test_successor(self): + assert nx.dfs_successors(self.G, source=0) == {0: [1], 1: [2, 3], 2: [4]} + assert nx.dfs_successors(self.G, source=1) == {0: [3, 4], 1: [0], 4: [2]} + assert nx.dfs_successors(self.D) == {0: [1], 2: [3]} + assert nx.dfs_successors(self.D, source=1) == {1: [0]} + + def test_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0) == {1: 0, 2: 1, 3: 1, 4: 2} + assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2} + + def test_dfs_tree(self): + exp_nodes = sorted(self.G.nodes()) + exp_edges = [(0, 1), (1, 2), (1, 3), (2, 4)] + # Search from first node + T = nx.dfs_tree(self.G, source=0) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None + T = nx.dfs_tree(self.G, source=None) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + # Check source=None is the default + T = nx.dfs_tree(self.G) + assert sorted(T.nodes()) == exp_nodes + assert sorted(T.edges()) == exp_edges + + def test_dfs_edges(self): + edges = nx.dfs_edges(self.G, source=0) + assert list(edges) == [(0, 1), (1, 2), (2, 4), (1, 3)] + edges = nx.dfs_edges(self.D) + assert list(edges) == [(0, 1), (2, 3)] + + def test_dfs_labeled_edges(self): + edges = list(nx.dfs_labeled_edges(self.G, source=0)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (1, 2), (2, 4), (1, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (1, 2, "forward"), + (2, 1, "nontree"), + (2, 4, "forward"), + (4, 2, "nontree"), + (4, 0, "nontree"), + (2, 4, "reverse"), + (1, 2, "reverse"), + (1, 3, "forward"), + (3, 1, "nontree"), + (3, 0, "nontree"), + (1, 3, "reverse"), + (0, 1, "reverse"), + (0, 3, "nontree"), + (0, 4, "nontree"), + (0, 0, "reverse"), + ] + + def test_dfs_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(0, 0), (0, 1), (2, 2), (2, 3)] + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 2, "reverse"), + ] + + def test_dfs_tree_isolates(self): + G = nx.Graph() + G.add_node(1) + G.add_node(2) + T = nx.dfs_tree(G, source=1) + assert sorted(T.nodes()) == [1] + assert sorted(T.edges()) == [] + T = nx.dfs_tree(G, source=None) + assert sorted(T.nodes()) == [1, 2] + assert sorted(T.edges()) == [] + + +class 
TestDepthLimitedSearch: + @classmethod + def setup_class(cls): + # a tree + G = nx.Graph() + nx.add_path(G, [0, 1, 2, 3, 4, 5, 6]) + nx.add_path(G, [2, 7, 8, 9, 10]) + cls.G = G + # a disconnected graph + D = nx.Graph() + D.add_edges_from([(0, 1), (2, 3)]) + nx.add_path(D, [2, 7, 8, 9, 10]) + cls.D = D + + def test_dls_preorder_nodes(self): + assert list(nx.dfs_preorder_nodes(self.G, source=0, depth_limit=2)) == [0, 1, 2] + assert list(nx.dfs_preorder_nodes(self.D, source=1, depth_limit=2)) == ([1, 0]) + + def test_dls_postorder_nodes(self): + assert list(nx.dfs_postorder_nodes(self.G, source=3, depth_limit=3)) == [ + 1, + 7, + 2, + 5, + 4, + 3, + ] + assert list(nx.dfs_postorder_nodes(self.D, source=2, depth_limit=2)) == ( + [3, 7, 2] + ) + + def test_dls_successor(self): + result = nx.dfs_successors(self.G, source=4, depth_limit=3) + assert {n: set(v) for n, v in result.items()} == { + 2: {1, 7}, + 3: {2}, + 4: {3, 5}, + 5: {6}, + } + result = nx.dfs_successors(self.D, source=7, depth_limit=2) + assert {n: set(v) for n, v in result.items()} == {8: {9}, 2: {3}, 7: {8, 2}} + + def test_dls_predecessor(self): + assert nx.dfs_predecessors(self.G, source=0, depth_limit=3) == { + 1: 0, + 2: 1, + 3: 2, + 7: 2, + } + assert nx.dfs_predecessors(self.D, source=2, depth_limit=3) == { + 8: 7, + 9: 8, + 3: 2, + 7: 2, + } + + def test_dls_tree(self): + T = nx.dfs_tree(self.G, source=3, depth_limit=1) + assert sorted(T.edges()) == [(3, 2), (3, 4)] + + def test_dls_edges(self): + edges = nx.dfs_edges(self.G, source=9, depth_limit=4) + assert list(edges) == [(9, 8), (8, 7), (7, 2), (2, 1), (2, 3), (9, 10)] + + def test_dls_labeled_edges_depth_1(self): + edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(5, 5), (5, 4), (5, 6)] + # Note: reverse-depth_limit edge types were not reported before gh-6240 + assert edges == [ + (5, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "forward"), + (5, 6, "reverse-depth_limit"), + (5, 5, "reverse"), + ] + + def test_dls_labeled_edges_depth_2(self): + edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2)) + forward = [(u, v) for (u, v, d) in edges if d == "forward"] + assert forward == [(6, 6), (6, 5), (5, 4)] + assert edges == [ + (6, 6, "forward"), + (6, 5, "forward"), + (5, 4, "forward"), + (5, 4, "reverse-depth_limit"), + (5, 6, "nontree"), + (6, 5, "reverse"), + (6, 6, "reverse"), + ] + + def test_dls_labeled_disconnected_edges(self): + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=1)) + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (0, 1, "reverse-depth_limit"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (2, 3, "reverse-depth_limit"), + (2, 7, "forward"), + (2, 7, "reverse-depth_limit"), + (2, 2, "reverse"), + (8, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (8, 9, "reverse-depth_limit"), + (8, 8, "reverse"), + (10, 10, "forward"), + (10, 9, "nontree"), + (10, 10, "reverse"), + ] + # large depth_limit has no impact + edges = list(nx.dfs_labeled_edges(self.D, depth_limit=19)) + assert edges == [ + (0, 0, "forward"), + (0, 1, "forward"), + (1, 0, "nontree"), + (0, 1, "reverse"), + (0, 0, "reverse"), + (2, 2, "forward"), + (2, 3, "forward"), + (3, 2, "nontree"), + (2, 3, "reverse"), + (2, 7, "forward"), + (7, 2, "nontree"), + (7, 8, "forward"), + (8, 7, "nontree"), + (8, 9, "forward"), + (9, 8, "nontree"), + (9, 10, "forward"), + (10, 9, "nontree"), + (9, 10, 
"reverse"), + (8, 9, "reverse"), + (7, 8, "reverse"), + (2, 7, "reverse"), + (2, 2, "reverse"), + ] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf3fae0bd067dd548281e3382a6125f6e50ee22 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgebfs.py @@ -0,0 +1,147 @@ +import pytest + +import networkx as nx +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + + +class TestEdgeBFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(nx.edge_bfs(G)) + assert edges == [] + + def test_graph_single_source(self): + G = nx.Graph(self.edges) + G.add_edge(4, 5) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (0, 2), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = nx.edge_bfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="original")) + x_ = [ + (0, 1, FORWARD), + (1, 0, FORWARD), + (2, 0, FORWARD), + (2, 1, FORWARD), + (3, 1, FORWARD), + ] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, REVERSE), + (2, 0, REVERSE), + (0, 1, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 2, 0), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, FORWARD), + (1, 0, REVERSE), + (2, 0, REVERSE), + (2, 1, REVERSE), + (3, 1, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(nx.edge_bfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(nx.edge_bfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 0, 0, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py new file mode 100644 index 0000000000000000000000000000000000000000..7c1967cce04b3a0c9db80f9af39d7b1dfd8ef4cb --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/traversal/tests/test_edgedfs.py @@ -0,0 +1,131 @@ +import pytest + +import networkx as nx +from networkx.algorithms import edge_dfs +from networkx.algorithms.traversal.edgedfs import FORWARD, REVERSE + +# These tests can fail with hash randomization. The easiest and clearest way +# to write these unit tests is for the edges to be output in an expected total +# order, but we cannot guarantee the order amongst outgoing edges from a node, +# unless each class uses an ordered data structure for neighbors. This is +# painful to do with the current API. The alternative is that the tests are +# written (IMO confusingly) so that there is not a total order over the edges, +# but only a partial order. Due to the small size of the graphs, hopefully +# failures due to hash randomization will not occur. For an example of how +# this can fail, see TestEdgeDFS.test_multigraph. 
+ + +class TestEdgeDFS: + @classmethod + def setup_class(cls): + cls.nodes = [0, 1, 2, 3] + cls.edges = [(0, 1), (1, 0), (1, 0), (2, 1), (3, 1)] + + def test_empty(self): + G = nx.Graph() + edges = list(edge_dfs(G)) + assert edges == [] + + def test_graph(self): + G = nx.Graph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 2), (1, 3)] + assert x == x_ + + def test_digraph(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_invalid(self): + G = nx.DiGraph(self.edges) + edge_iterator = edge_dfs(G, self.nodes, orientation="hello") + pytest.raises(nx.NetworkXError, list, edge_iterator) + + def test_digraph_orientation_none(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation=None)) + x_ = [(0, 1), (1, 0), (2, 1), (3, 1)] + assert x == x_ + + def test_digraph_orientation_original(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="original")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, FORWARD), (3, 1, FORWARD)] + assert x == x_ + + def test_digraph2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0])) + x_ = [(0, 1), (1, 2), (2, 3)] + assert x == x_ + + def test_digraph_rev(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [(1, 0, REVERSE), (0, 1, REVERSE), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_rev2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [3], orientation="reverse")) + x_ = [(2, 3, REVERSE), (1, 2, REVERSE), (0, 1, REVERSE)] + assert x == x_ + + def test_multigraph(self): + G = nx.MultiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 1), (0, 1, 2), (1, 2, 0), (1, 3, 0)] + # This is an example of where hash randomization can break. + # There are 3! * 2 alternative outputs, such as: + # [(0, 1, 1), (1, 0, 0), (0, 1, 2), (1, 3, 0), (1, 2, 0)] + # But note, the edges (1,2,0) and (1,3,0) always follow the (0,1,k) + # edges. So the algorithm only guarantees a partial order. A total + # order is guaranteed only if the graph data structures are ordered. 
+ assert x == x_ + + def test_multidigraph(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes)) + x_ = [(0, 1, 0), (1, 0, 0), (1, 0, 1), (2, 1, 0), (3, 1, 0)] + assert x == x_ + + def test_multidigraph_rev(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="reverse")) + x_ = [ + (1, 0, 0, REVERSE), + (0, 1, 0, REVERSE), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ + + def test_digraph_ignore(self): + G = nx.DiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 0, FORWARD), (2, 1, REVERSE), (3, 1, REVERSE)] + assert x == x_ + + def test_digraph_ignore2(self): + G = nx.DiGraph() + nx.add_path(G, range(4)) + x = list(edge_dfs(G, [0], orientation="ignore")) + x_ = [(0, 1, FORWARD), (1, 2, FORWARD), (2, 3, FORWARD)] + assert x == x_ + + def test_multidigraph_ignore(self): + G = nx.MultiDiGraph(self.edges) + x = list(edge_dfs(G, self.nodes, orientation="ignore")) + x_ = [ + (0, 1, 0, FORWARD), + (1, 0, 0, FORWARD), + (1, 0, 1, REVERSE), + (2, 1, 0, REVERSE), + (3, 1, 0, REVERSE), + ] + assert x == x_ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7120d4bc7ef25279b68eaa23690b6ff4574ed676 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/__init__.py @@ -0,0 +1,6 @@ +from .branchings import * +from .coding import * +from .mst import * +from .recognition import * +from .operations import * +from .decomposition import * diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..945118e30e20fea30e95783808a92912b8c6d37f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f01796cb0a5826e6966ebd9c28d91a0ea12535b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/branchings.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0881b69ff508fe3b4390730282645c8414b5e15 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/coding.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6334b8ec3d34652a5d4defae07b2512328a21160 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/decomposition.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1840e0ffe62f10c37e6414cd032fa122739b8c51 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/mst.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ff02e54d04ed2c9d23c858785236fbca6413de Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/operations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24d862fc81a4152a15a2317acaf1b0b46080cb1c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/__pycache__/recognition.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/branchings.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/branchings.py new file mode 100644 index 0000000000000000000000000000000000000000..653266915e77b2ac0e936fb9e28db432b979b85f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/branchings.py @@ -0,0 +1,1600 @@ +""" +Algorithms for finding optimum branchings and spanning arborescences. + +This implementation is based on: + + J. Edmonds, Optimum branchings, J. Res. Natl. Bur. Standards 71B (1967), + 233–240. URL: http://archive.org/details/jresv71Bn4p233 + +""" +# TODO: Implement method from Gabow, Galil, Spence and Tarjan: +# +# @article{ +# year={1986}, +# issn={0209-9683}, +# journal={Combinatorica}, +# volume={6}, +# number={2}, +# doi={10.1007/BF02579168}, +# title={Efficient algorithms for finding minimum spanning trees in +# undirected and directed graphs}, +# url={https://doi.org/10.1007/BF02579168}, +# publisher={Springer-Verlag}, +# keywords={68 B 15; 68 C 05}, +# author={Gabow, Harold N. and Galil, Zvi and Spencer, Thomas and Tarjan, +# Robert E.}, +# pages={109-122}, +# language={English} +# } +import string +from dataclasses import dataclass, field +from enum import Enum +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import py_random_state + +from .recognition import is_arborescence, is_branching + +__all__ = [ + "branching_weight", + "greedy_branching", + "maximum_branching", + "minimum_branching", + "minimal_branching", + "maximum_spanning_arborescence", + "minimum_spanning_arborescence", + "ArborescenceIterator", + "Edmonds", +] + +KINDS = {"max", "min"} + +STYLES = { + "branching": "branching", + "arborescence": "arborescence", + "spanning arborescence": "arborescence", +} + +INF = float("inf") + + +@py_random_state(1) +def random_string(L=15, seed=None): + return "".join([seed.choice(string.ascii_letters) for n in range(L)]) + + +def _min_weight(weight): + return -weight + + +def _max_weight(weight): + return weight + + +@nx._dispatch(edge_attrs={"attr": "default"}) +def branching_weight(G, attr="weight", default=1): + """ + Returns the total weight of a branching. + + You must access this function through the networkx.algorithms.tree module. + + Parameters + ---------- + G : DiGraph + The directed graph. + attr : str + The attribute to use as weights. If None, then each edge will be + treated equally with a weight of 1. 
+    default : float
+        When `attr` is not None, then if an edge does not have that attribute,
+        `default` specifies what value it should take.
+
+    Returns
+    -------
+    weight: int or float
+        The total weight of the branching.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph()
+    >>> G.add_weighted_edges_from([(0, 1, 2), (1, 2, 4), (2, 3, 3), (3, 4, 2)])
+    >>> nx.tree.branching_weight(G)
+    11
+
+    """
+    return sum(edge[2].get(attr, default) for edge in G.edges(data=True))
+
+
+@py_random_state(4)
+@nx._dispatch(edge_attrs={"attr": "default"})
+def greedy_branching(G, attr="weight", default=1, kind="max", seed=None):
+    """
+    Returns a branching obtained through a greedy algorithm.
+
+    This algorithm is wrong, and cannot give a proper optimal branching.
+    However, we include it for pedagogical reasons, as it can be helpful to
+    see what its outputs are.
+
+    The output is a branching, and possibly, a spanning arborescence. However,
+    it is not guaranteed to be optimal in either case.
+
+    Parameters
+    ----------
+    G : DiGraph
+        The directed graph to scan.
+    attr : str
+        The attribute to use as weights. If None, then each edge will be
+        treated equally with a weight of 1.
+    default : float
+        When `attr` is not None, then if an edge does not have that attribute,
+        `default` specifies what value it should take.
+    kind : str
+        The type of optimum to search for: 'min' or 'max' greedy branching.
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    B : directed graph
+        The greedily obtained branching.
+
+    """
+    if kind not in KINDS:
+        raise nx.NetworkXException("Unknown value for `kind`.")
+
+    if kind == "min":
+        reverse = False
+    else:
+        reverse = True
+
+    if attr is None:
+        # Generate a random string the graph probably won't have.
+        attr = random_string(seed=seed)
+
+    edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)]
+
+    # We sort by weight, but also by nodes to normalize behavior across runs.
+    try:
+        edges.sort(key=itemgetter(2, 0, 1), reverse=reverse)
+    except TypeError:
+        # This will fail in Python 3.x if the nodes are of varying types.
+        # In that case, we use the arbitrary order.
+        edges.sort(key=itemgetter(2), reverse=reverse)
+
+    # The branching begins with a forest of no edges.
+    B = nx.DiGraph()
+    B.add_nodes_from(G)
+
+    # Now we add edges greedily so long as we maintain the branching.
+    uf = nx.utils.UnionFind()
+    for i, (u, v, w) in enumerate(edges):
+        if uf[u] == uf[v]:
+            # Adding this edge would form a directed cycle.
+            continue
+        elif B.in_degree(v) == 1:
+            # Adding this edge would give node v an in-degree greater than one.
+            continue
+        else:
+            # If attr was None, then don't insert weights...
+            data = {}
+            if attr is not None:
+                data[attr] = w
+            B.add_edge(u, v, **data)
+            uf.union(u, v)
+
+    return B
+
+
+class MultiDiGraph_EdgeKey(nx.MultiDiGraph):
+    """
+    MultiDiGraph which assigns unique keys to every edge.
+
+    Adds a dictionary edge_index which maps edge keys to (u, v, data) tuples.
+
+    This is not a complete implementation. For Edmonds algorithm, we only use
+    add_node and add_edge, so that is all that is implemented here. During
+    additions, any specified keys are ignored---this means that you also
+    cannot update edge attributes through add_node and add_edge.
+
+    Why do we need this? Edmonds algorithm requires that we track edges, even
+    as we change the head and tail of an edge, and even as we change the
+    weight of edges. We must reliably track edges across graph mutations.
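+
+    Each edge key is supplied by the caller and must be unique across the
+    graph; `edge_index` is what lets the algorithm refer back to an edge even
+    after its endpoints or weight have been rewritten.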
+ """ + + def __init__(self, incoming_graph_data=None, **attr): + cls = super() + cls.__init__(incoming_graph_data=incoming_graph_data, **attr) + + self._cls = cls + self.edge_index = {} + + import warnings + + msg = "MultiDiGraph_EdgeKey has been deprecated and will be removed in NetworkX 3.4." + warnings.warn(msg, DeprecationWarning) + + def remove_node(self, n): + keys = set() + for keydict in self.pred[n].values(): + keys.update(keydict) + for keydict in self.succ[n].values(): + keys.update(keydict) + + for key in keys: + del self.edge_index[key] + + self._cls.remove_node(n) + + def remove_nodes_from(self, nbunch): + for n in nbunch: + self.remove_node(n) + + def add_edge(self, u_for_edge, v_for_edge, key_for_edge, **attr): + """ + Key is now required. + + """ + u, v, key = u_for_edge, v_for_edge, key_for_edge + if key in self.edge_index: + uu, vv, _ = self.edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + self._cls.add_edge(u, v, key, **attr) + self.edge_index[key] = (u, v, self.succ[u][v][key]) + + def add_edges_from(self, ebunch_to_add, **attr): + for u, v, k, d in ebunch_to_add: + self.add_edge(u, v, k, **d) + + def remove_edge_with_key(self, key): + try: + u, v, _ = self.edge_index[key] + except KeyError as err: + raise KeyError(f"Invalid edge key {key!r}") from err + else: + del self.edge_index[key] + self._cls.remove_edge(u, v, key) + + def remove_edges_from(self, ebunch): + raise NotImplementedError + + +def get_path(G, u, v): + """ + Returns the edge keys of the unique path between u and v. + + This is not a generic function. G must be a branching and an instance of + MultiDiGraph_EdgeKey. + + """ + nodes = nx.shortest_path(G, u, v) + + # We are guaranteed that there is only one edge connected every node + # in the shortest path. + + def first_key(i, vv): + # Needed for 2.x/3.x compatibility + keys = G[nodes[i]][vv].keys() + # Normalize behavior + keys = list(keys) + return keys[0] + + edges = [first_key(i, vv) for i, vv in enumerate(nodes[1:])] + return nodes, edges + + +class Edmonds: + """ + Edmonds algorithm [1]_ for finding optimal branchings and spanning + arborescences. + + This algorithm can find both minimum and maximum spanning arborescences and + branchings. + + Notes + ----- + While this algorithm can find a minimum branching, since it isn't required + to be spanning, the minimum branching is always from the set of negative + weight edges which is most likely the empty set for most graphs. + + References + ---------- + .. [1] J. Edmonds, Optimum Branchings, Journal of Research of the National + Bureau of Standards, 1967, Vol. 71B, p.233-240, + https://archive.org/details/jresv71Bn4p233 + + """ + + def __init__(self, G, seed=None): + self.G_original = G + + # Need to fix this. We need the whole tree. + self.store = True + + # The final answer. + self.edges = [] + + # Since we will be creating graphs with new nodes, we need to make + # sure that our node names do not conflict with the real node names. + self.template = random_string(seed=seed) + "_{0}" + + import warnings + + msg = "Edmonds has been deprecated and will be removed in NetworkX 3.4. Please use the appropriate minimum or maximum branching or arborescence function directly." + warnings.warn(msg, DeprecationWarning) + + def _init(self, attr, default, kind, style, preserve_attrs, seed, partition): + """ + So we need the code in _init and find_optimum to successfully run edmonds algorithm. 
+        Responsibilities of the _init function:
+        - Check that the kind argument is in {min, max} or raise a NetworkXException.
+        - Transform the graph if we need a minimum arborescence/branching.
+            - The current method is to map weight -> -weight. This is NOT a good approach since
+              the algorithm can and does choose to ignore negative weights when creating a branching
+              since that is always optimal when maximizing the weights. I think we should set the edge
+              weights to be (max_weight + 1) - edge_weight.
+        - Transform the graph into a MultiDiGraph, adding the partition information and potentially
+          other edge attributes if we set preserve_attrs = True.
+        - Set up the buckets and union find data structures required for the algorithm.
+        """
+        if kind not in KINDS:
+            raise nx.NetworkXException("Unknown value for `kind`.")
+
+        # Store inputs.
+        self.attr = attr
+        self.default = default
+        self.kind = kind
+        self.style = style
+
+        # Determine how we are going to transform the weights.
+        if kind == "min":
+            self.trans = trans = _min_weight
+        else:
+            self.trans = trans = _max_weight
+
+        if attr is None:
+            # Generate a random attr the graph probably won't have.
+            attr = random_string(seed=seed)
+
+        # This is the actual attribute used by the algorithm.
+        self._attr = attr
+
+        # This attribute is used to store whether a particular edge is still
+        # a candidate. We generate a random attr to remove clashes with
+        # preserved edges
+        self.candidate_attr = "candidate_" + random_string(seed=seed)
+
+        # The object we manipulate at each step is a multidigraph.
+        self.G = G = MultiDiGraph_EdgeKey()
+        for key, (u, v, data) in enumerate(self.G_original.edges(data=True)):
+            d = {attr: trans(data.get(attr, default))}
+
+            if data.get(partition) is not None:
+                d[partition] = data.get(partition)
+
+            if preserve_attrs:
+                for d_k, d_v in data.items():
+                    if d_k != attr:
+                        d[d_k] = d_v
+
+            G.add_edge(u, v, key, **d)
+
+        self.level = 0
+
+        # These are the "buckets" from the paper.
+        #
+        # As in the paper, G^i are modified versions of the original graph.
+        # D^i and E^i are nodes and edges of the maximal edges that are
+        # consistent with G^i. These are dashed edges in figures A-F of the
+        # paper. In this implementation, we store D^i and E^i together as a
+        # graph B^i. So we will have strictly more B^i than the paper does.
+        self.B = MultiDiGraph_EdgeKey()
+        self.B.edge_index = {}
+        self.graphs = []  # G^i
+        self.branchings = []  # B^i
+        self.uf = nx.utils.UnionFind()
+
+        # A list of lists of edge indexes. Each list is a circuit for graph G^i.
+        # Note the edge list will not, in general, be a circuit in graph G^0.
+        self.circuits = []
+        # Stores the index of the minimum edge in the circuit found in G^i
+        # and B^i. The ordering of the edges seems to preserve the weight
+        # ordering from G^0. So even if the circuit does not form a circuit
+        # in G^0, it is still true that the minimum edge of the circuit in
+        # G^i is still the minimum edge in circuit G^0 (despite their weights
+        # being different).
+        self.minedge_circuit = []
+
+        # TODO: separate each step into an inner function. Then the overall loop would become
+        # while True:
+        #     step_I1()
+        #     if cycle detected:
+        #         step_I2()
+        #     elif every node of G is in D and E is a branching
+        #         break
+
+    def find_optimum(
+        self,
+        attr="weight",
+        default=1,
+        kind="max",
+        style="branching",
+        preserve_attrs=False,
+        partition=None,
+        seed=None,
+    ):
+        """
+        Returns a branching from G.
+
+        Parameters
+        ----------
+        attr : str
+            The edge attribute used in determining optimality.
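The weight transform the docstring above proposes is essentially what `minimal_branching` further down in this file implements: shift the weights so every transformed weight is strictly positive, then invert the shift afterwards. A quick self-contained check of those two properties:

```python
# Sketch of the shift w -> (max + 1 + (max - min)) - w used by
# minimal_branching below; the constant is chosen so nothing goes negative.
weights = [4, -2, 7]
lo, hi = min(weights), max(weights)
shift = hi + 1 + (hi - lo)
shifted = [shift - w for w in weights]
assert all(s > 0 for s in shifted)  # no edge can be skipped as "unhelpful"
assert [shift - s for s in shifted] == weights  # the map is an involution
```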
+        default : float
+            The value of the edge attribute used if an edge does not have
+            the attribute `attr`.
+        kind : {'min', 'max'}
+            The type of optimum to search for, either 'min' or 'max'.
+        style : {'branching', 'arborescence'}
+            If 'branching', then an optimal branching is found. If `style` is
+            'arborescence', then a branching is found, such that if the
+            branching is also an arborescence, then the branching is an
+            optimal spanning arborescence. A given graph G need not have
+            an optimal spanning arborescence.
+        preserve_attrs : bool
+            If True, preserve the other edge attributes of the original
+            graph (that are not the one passed to `attr`)
+        partition : str
+            The edge attribute holding edge partition data. Used in the
+            spanning arborescence iterator.
+        seed : integer, random_state, or None (default)
+            Indicator of random number generation state.
+            See :ref:`Randomness`.
+
+        Returns
+        -------
+        H : (multi)digraph
+            The branching.
+
+        """
+        self._init(attr, default, kind, style, preserve_attrs, seed, partition)
+        uf = self.uf
+
+        # This enormous while loop could use some refactoring...
+
+        G, B = self.G, self.B
+        D = set()
+        nodes = iter(list(G.nodes()))
+        attr = self._attr
+        G_pred = G.pred
+
+        def desired_edge(v):
+            """
+            Find the edge directed toward v with maximal weight.
+
+            If an edge partition exists in this graph, return the included edge
+            if it exists and do not return any excluded edges. There can only
+            be one included edge for each vertex otherwise the edge partition is
+            empty.
+            """
+            edge = None
+            weight = -INF
+            for u, _, key, data in G.in_edges(v, data=True, keys=True):
+                # Skip excluded edges
+                if data.get(partition) == nx.EdgePartition.EXCLUDED:
+                    continue
+                new_weight = data[attr]
+                # Return the included edge
+                if data.get(partition) == nx.EdgePartition.INCLUDED:
+                    weight = new_weight
+                    edge = (u, v, key, new_weight, data)
+                    return edge, weight
+                # Find the best open edge
+                if new_weight > weight:
+                    weight = new_weight
+                    edge = (u, v, key, new_weight, data)
+
+            return edge, weight
+
+        while True:
+            # (I1): Choose a node v in G^i not in D^i.
+            try:
+                v = next(nodes)
+            except StopIteration:
+                # If there are no more new nodes to consider, then we *should*
+                # meet the break condition (b) from the paper:
+                #   (b) every node of G^i is in D^i and E^i is a branching
+                # Construction guarantees that it's a branching.
+                assert len(G) == len(B)
+                if len(B):
+                    assert is_branching(B)
+
+                if self.store:
+                    self.graphs.append(G.copy())
+                    self.branchings.append(B.copy())
+
+                    # Add these to keep the lengths equal. Element i is the
+                    # circuit at level i that was merged to form branching i+1.
+                    # There is no circuit for the last level.
+                    self.circuits.append([])
+                    self.minedge_circuit.append(None)
+                break
+            else:
+                if v in D:
+                    # print("v in D", v)
+                    continue
+
+            # Put v into bucket D^i.
+            # print(f"Adding node {v}")
+            D.add(v)
+            B.add_node(v)
+            # End (I1)
+
+            # Start cycle detection
+            edge, weight = desired_edge(v)
+            # print(f"Max edge is {edge!r}")
+            if edge is None:
+                # If there is no edge, continue with a new node at (I1).
+                continue
+            else:
+                # Determine if adding the edge to E^i would mean it's no longer
+                # a branching. Presently, v has indegree 0 in B---it is a root.
+                u = edge[0]
+
+                if uf[u] == uf[v]:
+                    # Then adding the edge will create a circuit. Then B
+                    # contains a unique path P from v to u. So condition (a)
+                    # from the paper does hold. We need to store the circuit
+                    # for future reference.
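The `uf[u] == uf[v]` test above is the entire circuit detector: two nodes share a union-find representative exactly when `B` already contains a path between them. A minimal sketch of that invariant:

```python
import networkx as nx

# After these unions, "a" and "c" share a representative, so an edge
# c -> a would close a circuit in the partial branching.
uf = nx.utils.UnionFind()
uf.union("a", "b")
uf.union("b", "c")
print(uf["a"] == uf["c"])  # True: condition (a) from the paper would hold
```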
+ Q_nodes, Q_edges = get_path(B, v, u) + Q_edges.append(edge[2]) # Edge key + else: + # Then B with the edge is still a branching and condition + # (a) from the paper does not hold. + Q_nodes, Q_edges = None, None + # End cycle detection + + # THIS WILL PROBABLY BE REMOVED? MAYBE A NEW ARG FOR THIS FEATURE? + # Conditions for adding the edge. + # If weight < 0, then it cannot help in finding a maximum branching. + # This is the root of the problem with minimum branching. + if self.style == "branching" and weight <= 0: + acceptable = False + else: + acceptable = True + + # print(f"Edge is acceptable: {acceptable}") + if acceptable: + dd = {attr: weight} + if edge[4].get(partition) is not None: + dd[partition] = edge[4].get(partition) + B.add_edge(u, v, edge[2], **dd) + G[u][v][edge[2]][self.candidate_attr] = True + uf.union(u, v) + if Q_edges is not None: + # print("Edge introduced a simple cycle:") + # print(Q_nodes, Q_edges) + + # Move to method + # Previous meaning of u and v is no longer important. + + # Apply (I2). + # Get the edge in the cycle with the minimum weight. + # Also, save the incoming weights for each node. + minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B.edge_index[edge_key] + # We cannot remove an included edges, even if it is + # the minimum edge in the circuit + w = data[attr] + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + self.circuits.append(Q_edges) + self.minedge_circuit.append(minedge) + + if self.store: + self.graphs.append(G.copy()) + # Always need the branching with circuits. + self.branchings.append(B.copy()) + + # Now we mutate it. + new_node = self.template.format(self.level) + + # print(minweight, minedge, Q_incoming_weight) + + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge, do nothing for now. + # Eventually delete it. + continue + else: + # Outgoing edge. Make it from new node + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge. Change its weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification necessary. + continue + + G.remove_nodes_from(Q_nodes) + B.remove_nodes_from(Q_nodes) + D.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + G.add_edge(u, v, key, **data) + if self.candidate_attr in data: + del data[self.candidate_attr] + B.add_edge(u, v, key, **data) + uf.union(u, v) + + nodes = iter(list(G.nodes())) + self.level += 1 + # END STEP (I2)? + + # (I3) Branch construction. + # print(self.level) + H = self.G_original.__class__() + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. + + Node `u` will be a root node if its in-degree, restricted to the + specified edges, is equal to 0. + + """ + if u not in G: + # print(G.nodes(), u) + raise Exception(f"{u!r} not in G") + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + # Start with the branching edges in the last level. + edges = set(self.branchings[self.level].edge_index) + while self.level > 0: + self.level -= 1 + + # The current level is i, and we start counting from 0. 
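The update `w += minweight - Q_incoming_weight[v]` above re-prices an edge entering the contracted circuit so that selecting it later exactly pays for evicting the circuit edge it displaces. A numeric sketch with made-up weights:

```python
# An external edge u -> v of weight 9 enters the circuit at a node whose
# circuit edge weighs 10; the lightest circuit edge weighs 8. Inside the
# contracted graph the external edge is therefore re-priced to 7.
minweight = 8             # lightest (removable) edge on the circuit
q_incoming_weight_v = 10  # weight of the circuit edge entering v
w = 9                     # weight of the external edge u -> v
print(w + minweight - q_incoming_weight_v)  # 9 + 8 - 10 == 7
```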
+ + # We need the node at level i+1 that results from merging a circuit + # at level i. randomname_0 is the first merged node and this + # happens at level 1. That is, randomname_0 is a node at level 1 + # that results from merging a circuit at level 0. + merged_node = self.template.format(self.level) + + # The circuit at level i that was merged as a node the graph + # at level i+1. + circuit = self.circuits[self.level] + # print + # print(merged_node, self.level, circuit) + # print("before", edges) + # Note, we ask if it is a root in the full graph, not the branching. + # The branching alone doesn't have all the edges. + isroot, edgekey = is_root(self.graphs[self.level + 1], merged_node, edges) + edges.update(circuit) + if isroot: + minedge = self.minedge_circuit[self.level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight. + edges.remove(minedge) + else: + # We have identified an edge at next higher level that + # transitions into the merged node at the level. That edge + # transitions to some corresponding node at the current level. + # We want to remove an edge from the cycle that transitions + # into the corresponding node. + # print("edgekey is: ", edgekey) + # print("circuit is: ", circuit) + # The branching at level i + G = self.graphs[self.level] + # print(G.edge_index) + target = G.edge_index[edgekey][1] + for edgekey in circuit: + u, v, data = G.edge_index[edgekey] + if v == target: + break + else: + raise Exception("Couldn't find edge incoming to merged node.") + + edges.remove(edgekey) + + self.edges = edges + + H.add_nodes_from(self.G_original) + for edgekey in edges: + u, v, d = self.graphs[0].edge_index[edgekey] + dd = {self.attr: self.trans(d[self.attr])} + + # Optionally, preserve the other edge attributes of the original + # graph + if preserve_attrs: + for key, value in d.items(): + if key not in [self.attr, self.candidate_attr]: + dd[key] = value + + # TODO: make this preserve the key. + H.add_edge(u, v, **dd) + + return H + + +@nx._dispatch( + edge_attrs={"attr": "default", "partition": 0}, + preserve_edge_attrs="preserve_attrs", +) +def maximum_branching( + G, + attr="weight", + default=1, + preserve_attrs=False, + partition=None, +): + ####################################### + ### Data Structure Helper Functions ### + ####################################### + + def edmonds_add_edge(G, edge_index, u, v, key, **d): + """ + Adds an edge to `G` while also updating the edge index. + + This algorithm requires the use of an external dictionary to track + the edge keys since it is possible that the source or destination + node of an edge will be changed and the default key-handling + capabilities of the MultiDiGraph class do not account for this. + + Parameters + ---------- + G : MultiDiGraph + The graph to insert an edge into. + edge_index : dict + A mapping from integers to the edges of the graph. + u : node + The source node of the new edge. + v : node + The destination node of the new edge. + key : int + The key to use from `edge_index`. + d : keyword arguments, optional + Other attributes to store on the new edge. + """ + + if key in edge_index: + uu, vv, _ = edge_index[key] + if (u != uu) or (v != vv): + raise Exception(f"Key {key!r} is already in use.") + + G.add_edge(u, v, key, **d) + edge_index[key] = (u, v, G.succ[u][v][key]) + + def edmonds_remove_node(G, edge_index, n): + """ + Remove a node from the graph, updating the edge index to match. 
+
+        Parameters
+        ----------
+        G : MultiDiGraph
+            The graph to remove an edge from.
+        edge_index : dict
+            A mapping from integers to the edges of the graph.
+        n : node
+            The node to remove from `G`.
+        """
+        keys = set()
+        for keydict in G.pred[n].values():
+            keys.update(keydict)
+        for keydict in G.succ[n].values():
+            keys.update(keydict)
+
+        for key in keys:
+            del edge_index[key]
+
+        G.remove_node(n)
+
+    #######################
+    ### Algorithm Setup ###
+    #######################
+
+    # Pick an attribute name that the original graph is unlikely to have
+    candidate_attr = "edmonds' secret candidate attribute"
+    new_node_base_name = "edmonds new node base name "
+
+    G_original = G
+    G = nx.MultiDiGraph()
+    # A dict to reliably track mutations to the edges using the key of the edge.
+    G_edge_index = {}
+    # Each edge is given an arbitrary numerical key
+    for key, (u, v, data) in enumerate(G_original.edges(data=True)):
+        d = {attr: data.get(attr, default)}
+
+        if data.get(partition) is not None:
+            d[partition] = data.get(partition)
+
+        if preserve_attrs:
+            for d_k, d_v in data.items():
+                if d_k != attr:
+                    d[d_k] = d_v
+
+        edmonds_add_edge(G, G_edge_index, u, v, key, **d)
+
+    level = 0  # Stores the number of contracted nodes
+
+    # These are the buckets from the paper.
+    #
+    # In the paper, G^i are modified versions of the original graph.
+    # D^i and E^i are the nodes and edges of the maximal edges that are
+    # consistent with G^i. In this implementation, D^i and E^i are stored
+    # together as the graph B^i. We will have strictly more B^i than the
+    # paper will have.
+    #
+    # Note that the data in graphs and branchings are tuples with the graph as
+    # the first element and the edge index as the second.
+    B = nx.MultiDiGraph()
+    B_edge_index = {}
+    graphs = []  # G^i list
+    branchings = []  # B^i list
+    selected_nodes = set()  # D^i bucket
+    uf = nx.utils.UnionFind()
+
+    # A list of lists of edge indices. Each list is a circuit for graph G^i.
+    # Note the edge list is not required to be a circuit in G^0.
+    circuits = []
+
+    # Stores the index of the minimum edge in the circuit found in G^i and B^i.
+    # The ordering of the edges seems to preserve the weight ordering from
+    # G^0. So even if the circuit does not form a circuit in G^0, it is still
+    # true that the minimum edge of the circuit in G^i is the minimum edge in
+    # the circuit in G^0 (despite their weights being different).
+    minedge_circuit = []
+
+    ###########################
+    ### Algorithm Structure ###
+    ###########################
+
+    # Each step listed in the algorithm is an inner function. Thus, the overall
+    # loop structure is:
+    #
+    # while True:
+    #     step_I1()
+    #     if cycle detected:
+    #         step_I2()
+    #     elif every node of G is in D and E is a branching:
+    #         break
+
+    ##################################
+    ### Algorithm Helper Functions ###
+    ##################################
+
+    def edmonds_find_desired_edge(v):
+        """
+        Find the edge directed towards v with maximal weight.
+
+        If an edge partition exists in this graph, return the included
+        edge if it exists and never return any excluded edge.
+
+        Note: There can only be one included edge for each vertex otherwise
+        the edge partition is empty.
+
+        Parameters
+        ----------
+        v : node
+            The node to search for the maximal weight incoming edge.
+ """ + edge = None + max_weight = -INF + for u, _, key, data in G.in_edges(v, data=True, keys=True): + # Skip excluded edges + if data.get(partition) == nx.EdgePartition.EXCLUDED: + continue + + new_weight = data[attr] + + # Return the included edge + if data.get(partition) == nx.EdgePartition.INCLUDED: + max_weight = new_weight + edge = (u, v, key, new_weight, data) + break + + # Find the best open edge + if new_weight > max_weight: + max_weight = new_weight + edge = (u, v, key, new_weight, data) + + return edge, max_weight + + def edmonds_step_I2(v, desired_edge, level): + """ + Perform step I2 from Edmonds' paper + + First, check if the last step I1 created a cycle. If it did not, do nothing. + If it did, store the cycle for later reference and contract it. + + Parameters + ---------- + v : node + The current node to consider + desired_edge : edge + The minimum desired edge to remove from the cycle. + level : int + The current level, i.e. the number of cycles that have already been removed. + """ + u = desired_edge[0] + + Q_nodes = nx.shortest_path(B, v, u) + Q_edges = [ + list(B[Q_nodes[i]][vv].keys())[0] for i, vv in enumerate(Q_nodes[1:]) + ] + Q_edges.append(desired_edge[2]) # Add the new edge key to complete the circuit + + # Get the edge in the circuit with the minimum weight. + # Also, save the incoming weights for each node. + minweight = INF + minedge = None + Q_incoming_weight = {} + for edge_key in Q_edges: + u, v, data = B_edge_index[edge_key] + w = data[attr] + # We cannot remove an included edge, even if it is the + # minimum edge in the circuit + Q_incoming_weight[v] = w + if data.get(partition) == nx.EdgePartition.INCLUDED: + continue + if w < minweight: + minweight = w + minedge = edge_key + + circuits.append(Q_edges) + minedge_circuit.append(minedge) + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + + # Mutate the graph to contract the circuit + new_node = new_node_base_name + str(level) + G.add_node(new_node) + new_edges = [] + for u, v, key, data in G.edges(data=True, keys=True): + if u in Q_incoming_weight: + if v in Q_incoming_weight: + # Circuit edge. For the moment do nothing, + # eventually it will be removed. + continue + else: + # Outgoing edge from a node in the circuit. + # Make it come from the new node instead + dd = data.copy() + new_edges.append((new_node, v, key, dd)) + else: + if v in Q_incoming_weight: + # Incoming edge to the circuit. + # Update it's weight + w = data[attr] + w += minweight - Q_incoming_weight[v] + dd = data.copy() + dd[attr] = w + new_edges.append((u, new_node, key, dd)) + else: + # Outside edge. No modification needed + continue + + for node in Q_nodes: + edmonds_remove_node(G, G_edge_index, node) + edmonds_remove_node(B, B_edge_index, node) + + selected_nodes.difference_update(set(Q_nodes)) + + for u, v, key, data in new_edges: + edmonds_add_edge(G, G_edge_index, u, v, key, **data) + if candidate_attr in data: + del data[candidate_attr] + edmonds_add_edge(B, B_edge_index, u, v, key, **data) + uf.union(u, v) + + def is_root(G, u, edgekeys): + """ + Returns True if `u` is a root node in G. + + Node `u` is a root node if its in-degree over the specified edges is zero. + + Parameters + ---------- + G : Graph + The current graph. + u : node + The node in `G` to check if it is a root. + edgekeys : iterable of edges + The edges for which to check if `u` is a root of. 
+ """ + if u not in G: + raise Exception(f"{u!r} not in G") + + for v in G.pred[u]: + for edgekey in G.pred[u][v]: + if edgekey in edgekeys: + return False, edgekey + else: + return True, None + + nodes = iter(list(G.nodes)) + while True: + try: + v = next(nodes) + except StopIteration: + # If there are no more new nodes to consider, then we should + # meet stopping condition (b) from the paper: + # (b) every node of G^i is in D^i and E^i is a branching + assert len(G) == len(B) + if len(B): + assert is_branching(B) + + graphs.append((G.copy(), G_edge_index.copy())) + branchings.append((B.copy(), B_edge_index.copy())) + circuits.append([]) + minedge_circuit.append(None) + + break + else: + ##################### + ### BEGIN STEP I1 ### + ##################### + + # This is a very simple step, so I don't think it needs a method of it's own + if v in selected_nodes: + continue + + selected_nodes.add(v) + B.add_node(v) + desired_edge, desired_edge_weight = edmonds_find_desired_edge(v) + + # There might be no desired edge if all edges are excluded or + # v is the last node to be added to B, the ultimate root of the branching + if desired_edge is not None and desired_edge_weight > 0: + u = desired_edge[0] + # Flag adding the edge will create a circuit before merging the two + # connected components of u and v in B + circuit = uf[u] == uf[v] + dd = {attr: desired_edge_weight} + if desired_edge[4].get(partition) is not None: + dd[partition] = desired_edge[4].get(partition) + + edmonds_add_edge(B, B_edge_index, u, v, desired_edge[2], **dd) + G[u][v][desired_edge[2]][candidate_attr] = True + uf.union(u, v) + + ################### + ### END STEP I1 ### + ################### + + ##################### + ### BEGIN STEP I2 ### + ##################### + + if circuit: + edmonds_step_I2(v, desired_edge, level) + nodes = iter(list(G.nodes())) + level += 1 + + ################### + ### END STEP I2 ### + ################### + + ##################### + ### BEGIN STEP I3 ### + ##################### + + # Create a new graph of the same class as the input graph + H = G_original.__class__() + + # Start with the branching edges in the last level. + edges = set(branchings[level][1]) + while level > 0: + level -= 1 + + # The current level is i, and we start counting from 0. + # + # We need the node at level i+1 that results from merging a circuit + # at level i. basename_0 is the first merged node and this happens + # at level 1. That is basename_0 is a node at level 1 that results + # from merging a circuit at level 0. + + merged_node = new_node_base_name + str(level) + circuit = circuits[level] + isroot, edgekey = is_root(graphs[level + 1][0], merged_node, edges) + edges.update(circuit) + + if isroot: + minedge = minedge_circuit[level] + if minedge is None: + raise Exception + + # Remove the edge in the cycle with minimum weight + edges.remove(minedge) + else: + # We have identified an edge at the next higher level that + # transitions into the merged node at this level. That edge + # transitions to some corresponding node at the current level. + # + # We want to remove an edge from the cycle that transitions + # into the corresponding node, otherwise the result would not + # be a branching. 
+
+            G, G_edge_index = graphs[level]
+            target = G_edge_index[edgekey][1]
+            for edgekey in circuit:
+                u, v, data = G_edge_index[edgekey]
+                if v == target:
+                    break
+            else:
+                raise Exception("Couldn't find edge incoming to merged node.")
+
+            edges.remove(edgekey)
+
+    H.add_nodes_from(G_original)
+    for edgekey in edges:
+        u, v, d = graphs[0][1][edgekey]
+        dd = {attr: d[attr]}
+
+        if preserve_attrs:
+            for key, value in d.items():
+                if key not in [attr, candidate_attr]:
+                    dd[key] = value
+
+        H.add_edge(u, v, **dd)
+
+    ###################
+    ### END STEP I3 ###
+    ###################
+
+    return H
+
+
+@nx._dispatch(
+    edge_attrs={"attr": "default", "partition": None},
+    preserve_edge_attrs="preserve_attrs",
+)
+def minimum_branching(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d[attr]
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    for _, _, d in G.edges(data=True):
+        d[attr] = -d[attr]
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = -d[attr]
+
+    return B
+
+
+@nx._dispatch(
+    edge_attrs={"attr": "default", "partition": None},
+    preserve_edge_attrs="preserve_attrs",
+)
+def minimal_branching(
+    G, /, *, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    """
+    Returns a minimal branching from `G`.
+
+    A minimal branching is a branching similar to a minimal arborescence but
+    without the requirement that the result is actually a spanning arborescence.
+    This allows minimal branchings to be computed over graphs which may not
+    have an arborescence (such as graphs with multiple components).
+
+    Parameters
+    ----------
+    G : (multi)digraph-like
+        The graph to be searched.
+    attr : str
+        The edge attribute used in determining optimality.
+    default : float
+        The value of the edge attribute used if an edge does not have
+        the attribute `attr`.
+    preserve_attrs : bool
+        If True, preserve the other attributes of the original graph (that are not
+        passed to `attr`)
+    partition : str
+        The key for the edge attribute containing the partition
+        data on the graph. Edges can be included, excluded or open using the
+        `EdgePartition` enum.
+
+    Returns
+    -------
+    B : (multi)digraph-like
+        A minimal branching.
+    """
+    max_weight = -INF
+    min_weight = INF
+    for _, _, w in G.edges(data=attr):
+        if w > max_weight:
+            max_weight = w
+        if w < min_weight:
+            min_weight = w
+
+    for _, _, d in G.edges(data=True):
+        # Transform the weights so that the minimum weight is larger than
+        # the difference between the max and min weights. This is important
+        # in order to prevent the edge weights from becoming negative during
+        # computation
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d[attr]
+
+    B = maximum_branching(G, attr, default, preserve_attrs, partition)
+
+    # Reverse the weight transformations
+    for _, _, d in G.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d[attr]
+
+    for _, _, d in B.edges(data=True):
+        d[attr] = max_weight + 1 + (max_weight - min_weight) - d[attr]
+
+    return B
+
+
+@nx._dispatch(
+    edge_attrs={"attr": "default", "partition": None},
+    preserve_edge_attrs="preserve_attrs",
+)
+def maximum_spanning_arborescence(
+    G, attr="weight", default=1, preserve_attrs=False, partition=None
+):
+    # In order to use the same algorithm as the maximum branching, we need to adjust
+    # the weights of the graph. The branching algorithm can choose to not include an
+    # edge if it doesn't help find a branching, mainly triggered by edges with negative
+    # weights.
+ # + # To prevent this from happening while trying to find a spanning arborescence, we + # just have to tweak the edge weights so that they are all positive and cannot + # become negative during the branching algorithm, find the maximum branching and + # then return them to their original values. + + min_weight = INF + max_weight = -INF + for _, _, w in G.edges(data=attr): + if w < min_weight: + min_weight = w + if w > max_weight: + max_weight = w + + for _, _, d in G.edges(data=True): + d[attr] = d[attr] - min_weight + 1 - (min_weight - max_weight) + + B = maximum_branching(G, attr, default, preserve_attrs, partition) + + for _, _, d in G.edges(data=True): + d[attr] = d[attr] + min_weight - 1 + (min_weight - max_weight) + + for _, _, d in B.edges(data=True): + d[attr] = d[attr] + min_weight - 1 + (min_weight - max_weight) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No maximum spanning arborescence in G.") + + return B + + +@nx._dispatch( + edge_attrs={"attr": "default", "partition": None}, + preserve_edge_attrs="preserve_attrs", +) +def minimum_spanning_arborescence( + G, attr="weight", default=1, preserve_attrs=False, partition=None +): + B = minimal_branching( + G, + attr=attr, + default=default, + preserve_attrs=preserve_attrs, + partition=partition, + ) + + if not is_arborescence(B): + raise nx.exception.NetworkXException("No minimum spanning arborescence in G.") + + return B + + +docstring_branching = """ +Returns a {kind} {style} from G. + +Parameters +---------- +G : (multi)digraph-like + The graph to be searched. +attr : str + The edge attribute used to in determining optimality. +default : float + The value of the edge attribute used if an edge does not have + the attribute `attr`. +preserve_attrs : bool + If True, preserve the other attributes of the original graph (that are not + passed to `attr`) +partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + +Returns +------- +B : (multi)digraph-like + A {kind} {style}. +""" + +docstring_arborescence = ( + docstring_branching + + """ +Raises +------ +NetworkXException + If the graph does not contain a {kind} {style}. + +""" +) + +maximum_branching.__doc__ = docstring_branching.format( + kind="maximum", style="branching" +) + +minimum_branching.__doc__ = ( + docstring_branching.format(kind="minimum", style="branching") + + """ +See Also +-------- + minimal_branching +""" +) + +maximum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="maximum", style="spanning arborescence" +) + +minimum_spanning_arborescence.__doc__ = docstring_arborescence.format( + kind="minimum", style="spanning arborescence" +) + + +class ArborescenceIterator: + """ + Iterate over all spanning arborescences of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges). It generates minimum spanning + arborescences using a modified Edmonds' Algorithm which respects the + partition of edges. For arborescences with the same weight, ties are + broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 
219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning arborescence of the + partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return ArborescenceIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, init_partition=None): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.DiGraph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + init_partition : tuple, default = None + In the case that certain edges have to be included or excluded from + the arborescences, `init_partition` should be in the form + `(included_edges, excluded_edges)` where each edges is a + `(u, v)`-tuple inside an iterable such as a list or set. + + """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.method = ( + minimum_spanning_arborescence if minimum else maximum_spanning_arborescence + ) + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "ArborescenceIterators super secret partition attribute name" + ) + if init_partition is not None: + partition_dict = {} + for e in init_partition[0]: + partition_dict[e] = nx.EdgePartition.INCLUDED + for e in init_partition[1]: + partition_dict[e] = nx.EdgePartition.EXCLUDED + self.init_partition = ArborescenceIterator.Partition(0, partition_dict) + else: + self.init_partition = None + + def __iter__(self): + """ + Returns + ------- + ArborescenceIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + + # Write the initial partition if it exists. + if self.init_partition is not None: + self._write_partition(self.init_partition) + + mst_weight = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition( + mst_weight if self.minimum else -mst_weight, + {} + if self.init_partition is None + else self.init_partition.partition_dict, + ) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_arborescence = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + self._partition(partition, next_arborescence) + + self._clear_partition(next_arborescence) + return next_arborescence + + def _partition(self, partition, partition_arborescence): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_arborescence : nx.Graph + The minimum spanning arborescence of the input partition. 
+ """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_arborescence.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = nx.EdgePartition.EXCLUDED + p2.partition_dict[e] = nx.EdgePartition.INCLUDED + + self._write_partition(p1) + try: + p1_mst = self.method( + self.G, + self.weight, + partition=self.partition_key, + preserve_attrs=True, + ) + + p1_mst_weight = p1_mst.size(weight=self.weight) + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + except nx.NetworkXException: + pass + + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. Also, if one incoming edge is included, mark all others + as excluded so that if that vertex is merged during Edmonds' algorithm + we cannot still pick another of that vertex's included edges. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. + """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = nx.EdgePartition.OPEN + + for n in self.G: + included_count = 0 + excluded_count = 0 + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) == nx.EdgePartition.INCLUDED: + included_count += 1 + elif d.get(self.partition_key) == nx.EdgePartition.EXCLUDED: + excluded_count += 1 + # Check that if there is an included edges, all other incoming ones + # are excluded. If not fix it! + if included_count == 1 and excluded_count != self.G.in_degree(n) - 1: + for u, v, d in self.G.in_edges(nbunch=n, data=True): + if d.get(self.partition_key) != nx.EdgePartition.INCLUDED: + d[self.partition_key] = nx.EdgePartition.EXCLUDED + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/coding.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/coding.py new file mode 100644 index 0000000000000000000000000000000000000000..a74fd48cff0d9f304e2a731842e9122b52daa025 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/coding.py @@ -0,0 +1,412 @@ +"""Functions for encoding and decoding trees. + +Since a tree is a highly restricted form of graph, it can be represented +concisely in several ways. This module includes functions for encoding +and decoding trees in the form of nested tuples and Prüfer +sequences. The former requires a rooted tree, whereas the latter can be +applied to unrooted trees. Furthermore, there is a bijection from Prüfer +sequences to labeled trees. + +""" +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_nested_tuple", + "from_prufer_sequence", + "NotATree", + "to_nested_tuple", + "to_prufer_sequence", +] + + +class NotATree(nx.NetworkXException): + """Raised when a function expects a tree (that is, a connected + undirected graph with no cycles) but gets a non-tree graph as input + instead. 
+ + """ + + +@not_implemented_for("directed") +@nx._dispatch(graphs="T") +def to_nested_tuple(T, root, canonical_form=False): + """Returns a nested tuple representation of the given tree. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + root : node + The node in ``T`` to interpret as the root of the tree. + + canonical_form : bool + If ``True``, each tuple is sorted so that the function returns + a canonical form for rooted trees. This means "lighter" subtrees + will appear as nested tuples before "heavier" subtrees. In this + way, each isomorphic rooted tree has the same nested tuple + representation. + + Returns + ------- + tuple + A nested tuple representation of the tree. + + Notes + ----- + This function is *not* the inverse of :func:`from_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + from_nested_tuple + to_prufer_sequence + + Examples + -------- + The tree need not be a balanced binary tree:: + + >>> T = nx.Graph() + >>> T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + >>> T.add_edges_from([(1, 4), (1, 5)]) + >>> T.add_edges_from([(3, 6), (3, 7)]) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + (((), ()), (), ((), ())) + + Continuing the above example, if ``canonical_form`` is ``True``, the + nested tuples will be sorted:: + + >>> nx.to_nested_tuple(T, root, canonical_form=True) + ((), ((), ()), ((), ())) + + Even the path graph can be interpreted as a tree:: + + >>> T = nx.path_graph(4) + >>> root = 0 + >>> nx.to_nested_tuple(T, root) + ((((),),),) + + """ + + def _make_tuple(T, root, _parent): + """Recursively compute the nested tuple representation of the + given rooted tree. + + ``_parent`` is the parent node of ``root`` in the supertree in + which ``T`` is a subtree, or ``None`` if ``root`` is the root of + the supertree. This argument is used to determine which + neighbors of ``root`` are children and which is the parent. + + """ + # Get the neighbors of `root` that are not the parent node. We + # are guaranteed that `root` is always in `T` by construction. + children = set(T[root]) - {_parent} + if len(children) == 0: + return () + nested = (_make_tuple(T, v, root) for v in children) + if canonical_form: + nested = sorted(nested) + return tuple(nested) + + # Do some sanity checks on the input. + if not nx.is_tree(T): + raise nx.NotATree("provided graph is not a tree") + if root not in T: + raise nx.NodeNotFound(f"Graph {T} contains no node {root}") + + return _make_tuple(T, root, None) + + +@nx._dispatch(graphs=None) +def from_nested_tuple(sequence, sensible_relabeling=False): + """Returns the rooted tree corresponding to the given nested tuple. + + The nested tuple representation of a tree is defined + recursively. The tree with one node and no edges is represented by + the empty tuple, ``()``. A tree with ``k`` subtrees is represented + by a tuple of length ``k`` in which each element is the nested tuple + representation of a subtree. + + Parameters + ---------- + sequence : tuple + A nested tuple representing a rooted tree. 
+ + sensible_relabeling : bool + Whether to relabel the nodes of the tree so that nodes are + labeled in increasing order according to their breadth-first + search order from the root node. + + Returns + ------- + NetworkX graph + The tree corresponding to the given nested tuple, whose root + node is node 0. If ``sensible_labeling`` is ``True``, nodes will + be labeled in breadth-first search order starting from the root + node. + + Notes + ----- + This function is *not* the inverse of :func:`to_nested_tuple`; the + only guarantee is that the rooted trees are isomorphic. + + See also + -------- + to_nested_tuple + from_prufer_sequence + + Examples + -------- + Sensible relabeling ensures that the nodes are labeled from the root + starting at 0:: + + >>> balanced = (((), ()), ((), ())) + >>> T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + >>> edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + >>> all((u, v) in T.edges() or (v, u) in T.edges() for (u, v) in edges) + True + + """ + + def _make_tree(sequence): + """Recursively creates a tree from the given sequence of nested + tuples. + + This function employs the :func:`~networkx.tree.join` function + to recursively join subtrees into a larger tree. + + """ + # The empty sequence represents the empty tree, which is the + # (unique) graph with a single node. We mark the single node + # with an attribute that indicates that it is the root of the + # graph. + if len(sequence) == 0: + return nx.empty_graph(1) + # For a nonempty sequence, get the subtrees for each child + # sequence and join all the subtrees at their roots. After + # joining the subtrees, the root is node 0. + return nx.tree.join_trees([(_make_tree(child), 0) for child in sequence]) + + # Make the tree and remove the `is_root` node attribute added by the + # helper function. + T = _make_tree(sequence) + if sensible_relabeling: + # Relabel the nodes according to their breadth-first search + # order, starting from the root node (that is, the node 0). + bfs_nodes = chain([0], (v for u, v in nx.bfs_edges(T, 0))) + labels = {v: i for i, v in enumerate(bfs_nodes)} + # We would like to use `copy=False`, but `relabel_nodes` doesn't + # allow a relabel mapping that can't be topologically sorted. + T = nx.relabel_nodes(T, labels) + return T + + +@not_implemented_for("directed") +@nx._dispatch(graphs="T") +def to_prufer_sequence(T): + r"""Returns the Prüfer sequence of the given tree. + + A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and + *n* - 1, inclusive. The tree corresponding to a given Prüfer + sequence can be recovered by repeatedly joining a node in the + sequence with a node with the smallest potential degree according to + the sequence. + + Parameters + ---------- + T : NetworkX graph + An undirected graph object representing a tree. + + Returns + ------- + list + The Prüfer sequence of the given tree. + + Raises + ------ + NetworkXPointlessConcept + If the number of nodes in `T` is less than two. + + NotATree + If `T` is not a tree. + + KeyError + If the set of nodes in `T` is not {0, …, *n* - 1}. + + Notes + ----- + There is a bijection from labeled trees to Prüfer sequences. This + function is the inverse of the :func:`from_prufer_sequence` + function. + + Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead + of from 0 to *n* - 1. This function requires nodes to be labeled in + the latter form. You can use :func:`~networkx.relabel_nodes` to + relabel the nodes of your tree to the appropriate format. 
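The labeling requirement just restated (nodes must be exactly 0 through *n* - 1) is the usual stumbling block in practice; a minimal sketch of relabeling an arbitrary tree before encoding, with made-up node names:

```python
import networkx as nx

# Prüfer encoding needs labels {0, ..., n-1}: relabel, then encode.
T = nx.Graph([("a", "b"), ("b", "c"), ("c", "d")])
mapping = {node: i for i, node in enumerate(sorted(T))}
print(nx.to_prufer_sequence(nx.relabel_nodes(T, mapping)))
# [1, 2] for the path a-b-c-d under this labeling
```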
+
+    This implementation is from [1]_ and has a running time of
+    $O(n)$.
+
+    See also
+    --------
+    to_nested_tuple
+    from_prufer_sequence
+
+    References
+    ----------
+    .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
+           "An optimal algorithm for Prufer codes."
+           *Journal of Software Engineering and Applications* 2.02 (2009): 111.
+
+
+    Examples
+    --------
+    There is a bijection between Prüfer sequences and labeled trees, so
+    this function is the inverse of the :func:`from_prufer_sequence`
+    function:
+
+    >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
+    >>> tree = nx.Graph(edges)
+    >>> sequence = nx.to_prufer_sequence(tree)
+    >>> sequence
+    [3, 3, 3, 4]
+    >>> tree2 = nx.from_prufer_sequence(sequence)
+    >>> list(tree2.edges()) == edges
+    True
+
+    """
+    # Perform some sanity checks on the input.
+    n = len(T)
+    if n < 2:
+        msg = "Prüfer sequence undefined for trees with fewer than two nodes"
+        raise nx.NetworkXPointlessConcept(msg)
+    if not nx.is_tree(T):
+        raise nx.NotATree("provided graph is not a tree")
+    if set(T) != set(range(n)):
+        raise KeyError("tree must have node labels {0, ..., n - 1}")
+
+    degree = dict(T.degree())
+
+    def parents(u):
+        return next(v for v in T[u] if degree[v] > 1)
+
+    index = u = next(k for k in range(n) if degree[k] == 1)
+    result = []
+    for i in range(n - 2):
+        v = parents(u)
+        result.append(v)
+        degree[v] -= 1
+        if v < index and degree[v] == 1:
+            u = v
+        else:
+            index = u = next(k for k in range(index + 1, n) if degree[k] == 1)
+    return result
+
+
+@nx._dispatch(graphs=None)
+def from_prufer_sequence(sequence):
+    r"""Returns the tree corresponding to the given Prüfer sequence.
+
+    A *Prüfer sequence* is a list of *n* - 2 numbers between 0 and
+    *n* - 1, inclusive. The tree corresponding to a given Prüfer
+    sequence can be recovered by repeatedly joining a node in the
+    sequence with a node with the smallest potential degree according to
+    the sequence.
+
+    Parameters
+    ----------
+    sequence : list
+        A Prüfer sequence, which is a list of *n* - 2 integers between
+        zero and *n* - 1, inclusive.
+
+    Returns
+    -------
+    NetworkX graph
+        The tree corresponding to the given Prüfer sequence.
+
+    Raises
+    ------
+    NetworkXError
+        If the Prüfer sequence is not valid.
+
+    Notes
+    -----
+    There is a bijection from labeled trees to Prüfer sequences. This
+    function is the inverse of the :func:`to_prufer_sequence` function.
+
+    Sometimes Prüfer sequences use nodes labeled from 1 to *n* instead
+    of from 0 to *n* - 1. This function requires nodes to be labeled in
+    the latter form. You can use :func:`networkx.relabel_nodes` to
+    relabel the nodes of your tree to the appropriate format.
+
+    This implementation is from [1]_ and has a running time of
+    $O(n)$.
+
+    References
+    ----------
+    .. [1] Wang, Xiaodong, Lei Wang, and Yingjie Wu.
+           "An optimal algorithm for Prufer codes."
+           *Journal of Software Engineering and Applications* 2.02 (2009): 111.
+
+
+    See also
+    --------
+    from_nested_tuple
+    to_prufer_sequence
+
+    Examples
+    --------
+    There is a bijection between Prüfer sequences and labeled trees, so
+    this function is the inverse of the :func:`to_prufer_sequence`
+    function:
+
+    >>> edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
+    >>> tree = nx.Graph(edges)
+    >>> sequence = nx.to_prufer_sequence(tree)
+    >>> sequence
+    [3, 3, 3, 4]
+    >>> tree2 = nx.from_prufer_sequence(sequence)
+    >>> list(tree2.edges()) == edges
+    True
+
+    """
+    n = len(sequence) + 2
+    # `degree` stores the remaining degree (plus one) for each node.
The + # degree of a node in the decoded tree is one more than the number + # of times it appears in the code. + degree = Counter(chain(sequence, range(n))) + T = nx.empty_graph(n) + # `not_orphaned` is the set of nodes that have a parent in the + # tree. After the loop, there should be exactly two nodes that are + # not in this set. + not_orphaned = set() + index = u = next(k for k in range(n) if degree[k] == 1) + for v in sequence: + # check the validity of the prufer sequence + if v < 0 or v > n - 1: + raise nx.NetworkXError( + f"Invalid Prufer sequence: Values must be between 0 and {n-1}, got {v}" + ) + T.add_edge(u, v) + not_orphaned.add(u) + degree[v] -= 1 + if v < index and degree[v] == 1: + u = v + else: + index = u = next(k for k in range(index + 1, n) if degree[k] == 1) + # At this point, there must be exactly two orphaned nodes; join them. + orphans = set(T) - not_orphaned + u, v = orphans + T.add_edge(u, v) + return T diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/decomposition.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..0517100016fc909459a222ad9a31388fa2fd3164 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/decomposition.py @@ -0,0 +1,88 @@ +r"""Function for computing a junction tree of a graph.""" + +from itertools import combinations + +import networkx as nx +from networkx.algorithms import chordal_graph_cliques, complete_to_chordal_graph, moral +from networkx.utils import not_implemented_for + +__all__ = ["junction_tree"] + + +@not_implemented_for("multigraph") +@nx._dispatch +def junction_tree(G): + r"""Returns a junction tree of a given graph. + + A junction tree (or clique tree) is constructed from a (un)directed graph G. + The tree is constructed based on a moralized and triangulated version of G. + The tree's nodes consist of maximal cliques and sepsets of the revised graph. + The sepset of two cliques is the intersection of the nodes of these cliques, + e.g. the sepset of (A,B,C) and (A,C,E,F) is (A,C). These nodes are often called + "variables" in this literature. The tree is bipartite with each sepset + connected to its two cliques. + + Junction Trees are not unique as the order of clique consideration determines + which sepsets are included. + + The junction tree algorithm consists of five steps [1]_: + + 1. Moralize the graph + 2. Triangulate the graph + 3. Find maximal cliques + 4. Build the tree from cliques, connecting cliques with shared + nodes, set edge-weight to number of shared variables + 5. Find maximum spanning tree + + + Parameters + ---------- + G : networkx.Graph + Directed or undirected graph. + + Returns + ------- + junction_tree : networkx.Graph + The corresponding junction tree of `G`. + + Raises + ------ + NetworkXNotImplemented + Raised if `G` is an instance of `MultiGraph` or `MultiDiGraph`. + + References + ---------- + .. [1] Junction tree algorithm: + https://en.wikipedia.org/wiki/Junction_tree_algorithm + + .. [2] Finn V. Jensen and Frank Jensen. 1994. Optimal + junction trees. In Proceedings of the Tenth international + conference on Uncertainty in artificial intelligence (UAI’94). + Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 360–366. 
+ """ + + clique_graph = nx.Graph() + + if G.is_directed(): + G = moral.moral_graph(G) + chordal_graph, _ = complete_to_chordal_graph(G) + + cliques = [tuple(sorted(i)) for i in chordal_graph_cliques(chordal_graph)] + clique_graph.add_nodes_from(cliques, type="clique") + + for edge in combinations(cliques, 2): + set_edge_0 = set(edge[0]) + set_edge_1 = set(edge[1]) + if not set_edge_0.isdisjoint(set_edge_1): + sepset = tuple(sorted(set_edge_0.intersection(set_edge_1))) + clique_graph.add_edge(edge[0], edge[1], weight=len(sepset), sepset=sepset) + + junction_tree = nx.maximum_spanning_tree(clique_graph) + + for edge in list(junction_tree.edges(data=True)): + junction_tree.add_node(edge[2]["sepset"], type="sepset") + junction_tree.add_edge(edge[0], edge[2]["sepset"]) + junction_tree.add_edge(edge[1], edge[2]["sepset"]) + junction_tree.remove_edge(edge[0], edge[1]) + + return junction_tree diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/mst.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/mst.py new file mode 100644 index 0000000000000000000000000000000000000000..f4cec03dc30536122e95440f4052f0650e824601 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/mst.py @@ -0,0 +1,1133 @@ +""" +Algorithms for calculating min/max spanning trees/forests. + +""" +from dataclasses import dataclass, field +from enum import Enum +from heapq import heappop, heappush +from itertools import count +from math import isnan +from operator import itemgetter +from queue import PriorityQueue + +import networkx as nx +from networkx.utils import UnionFind, not_implemented_for, py_random_state + +__all__ = [ + "minimum_spanning_edges", + "maximum_spanning_edges", + "minimum_spanning_tree", + "maximum_spanning_tree", + "random_spanning_tree", + "partition_spanning_tree", + "EdgePartition", + "SpanningTreeIterator", +] + + +class EdgePartition(Enum): + """ + An enum to store the state of an edge partition. The enum is written to the + edges of a graph before being pasted to `kruskal_mst_edges`. Options are: + + - EdgePartition.OPEN + - EdgePartition.INCLUDED + - EdgePartition.EXCLUDED + """ + + OPEN = 0 + INCLUDED = 1 + EXCLUDED = 2 + + +@not_implemented_for("multigraph") +@nx._dispatch(edge_attrs="weight", preserve_edge_attrs="data") +def boruvka_mst_edges( + G, minimum=True, weight="weight", keys=False, data=True, ignore_nan=False +): + """Iterate over edges of a Borůvka's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The edges of `G` must have distinct weights, + otherwise the edges may not form a tree. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + This argument is ignored since this function is not + implemented for multigraphs; it exists only for consistency + with the other minimum spanning tree functions. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + """ + # Initialize a forest, assuming initially that it is the discrete + # partition of the nodes of the graph. 
+ forest = UnionFind(G) + + def best_edge(component): + """Returns the optimum (minimum or maximum) edge on the edge + boundary of the given set of nodes. + + A return value of ``None`` indicates an empty boundary. + + """ + sign = 1 if minimum else -1 + minwt = float("inf") + boundary = None + for e in nx.edge_boundary(G, component, data=True): + wt = e[-1].get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {e}" + raise ValueError(msg) + if wt < minwt: + minwt = wt + boundary = e + return boundary + + # Determine the optimum edge in the edge boundary of each component + # in the forest. + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # If each entry was ``None``, that means the graph was disconnected, + # so we are done generating the forest. + while best_edges: + # Determine the optimum edge in the edge boundary of each + # component in the forest. + # + # This must be a sequence, not an iterator. In this list, the + # same edge may appear twice, in different orientations (but + # that's okay, since a union operation will be called on the + # endpoints the first time it is seen, but not the second time). + # + # Any ``None`` indicates that the edge boundary for that + # component was empty, so that part of the forest has been + # completed. + # + # TODO This can be parallelized, both in the outer loop over + # each component in the forest and in the computation of the + # minimum. (Same goes for the identical lines outside the loop.) + best_edges = (best_edge(component) for component in forest.to_sets()) + best_edges = [edge for edge in best_edges if edge is not None] + # Join trees in the forest using the best edges, and yield that + # edge, since it is part of the spanning tree. + # + # TODO This loop can be parallelized, to an extent (the union + # operation must be atomic). + for u, v, d in best_edges: + if forest[u] != forest[v]: + if data: + yield u, v, d + else: + yield u, v + forest.union(u, v) + + +@nx._dispatch( + edge_attrs={"weight": None, "partition": None}, preserve_edge_attrs="data" +) +def kruskal_mst_edges( + G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, partition=None +): + """ + Iterate over edges of a Kruskal's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys are yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + partition : string (default: None) + The name of the edge attribute holding the partition data, if it exists. + Partition data is written to the edges using the `EdgePartition` enum. + If a partition exists, all included edges and none of the excluded edges + will appear in the final tree. Open edges may or may not be used.
+ + Yields + ------ + edge tuple + The edges as discovered by Kruskal's method. Each edge can + take the following forms: `(u, v)`, `(u, v, d)` or `(u, v, k, d)` + depending on the `keys` and `data` parameters + """ + subtrees = UnionFind() + if G.is_multigraph(): + edges = G.edges(keys=True, data=True) + else: + edges = G.edges(data=True) + + """ + Sort the edges of the graph with respect to the partition data. + Edges are returned in the following order: + + * Included edges + * Open edges from smallest to largest weight + * Excluded edges + """ + included_edges = [] + open_edges = [] + for e in edges: + d = e[-1] + wt = d.get(weight, 1) + if isnan(wt): + if ignore_nan: + continue + raise ValueError(f"NaN found as an edge weight. Edge {e}") + + edge = (wt,) + e + if d.get(partition) == EdgePartition.INCLUDED: + included_edges.append(edge) + elif d.get(partition) == EdgePartition.EXCLUDED: + continue + else: + open_edges.append(edge) + + if minimum: + sorted_open_edges = sorted(open_edges, key=itemgetter(0)) + else: + sorted_open_edges = sorted(open_edges, key=itemgetter(0), reverse=True) + + # Condense the lists into one + included_edges.extend(sorted_open_edges) + sorted_edges = included_edges + del open_edges, sorted_open_edges, included_edges + + # Multigraphs need to handle edge keys in addition to edge data. + if G.is_multigraph(): + for wt, u, v, k, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + else: + for wt, u, v, d in sorted_edges: + if subtrees[u] != subtrees[v]: + if data: + yield u, v, d + else: + yield u, v + subtrees.union(u, v) + + +@nx._dispatch(edge_attrs="weight", preserve_edge_attrs="data") +def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False): + """Iterate over edges of Prim's algorithm min/max spanning tree. + + Parameters + ---------- + G : NetworkX Graph + The graph holding the tree of interest. + + minimum : bool (default: True) + Find the minimum (True) or maximum (False) spanning tree. + + weight : string (default: 'weight') + The name of the edge attribute holding the edge weights. + + keys : bool (default: True) + If `G` is a multigraph, `keys` controls whether edge keys are yielded. + Otherwise `keys` is ignored. + + data : bool (default: True) + Flag for whether to yield edge attribute dicts. + If True, yield edges `(u, v, d)`, where `d` is the attribute dict. + If False, yield edges `(u, v)`. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + """ + is_multigraph = G.is_multigraph() + push = heappush + pop = heappop + + nodes = set(G) + c = count() + + sign = 1 if minimum else -1 + + while nodes: + u = nodes.pop() + frontier = [] + visited = {u} + if is_multigraph: + for v, keydict in G.adj[u].items(): + for k, d in keydict.items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, k, d)) + else: + for v, d in G.adj[u].items(): + wt = d.get(weight, 1) * sign + if isnan(wt): + if ignore_nan: + continue + msg = f"NaN found as an edge weight.
Edge {(u, v, d)}" + raise ValueError(msg) + push(frontier, (wt, next(c), u, v, d)) + while nodes and frontier: + if is_multigraph: + W, _, u, v, k, d = pop(frontier) + else: + W, _, u, v, d = pop(frontier) + if v in visited or v not in nodes: + continue + # Multigraphs need to handle edge keys in addition to edge data. + if is_multigraph and keys: + if data: + yield u, v, k, d + else: + yield u, v, k + else: + if data: + yield u, v, d + else: + yield u, v + # update frontier + visited.add(v) + nodes.discard(v) + if is_multigraph: + for w, keydict in G.adj[v].items(): + if w in visited: + continue + for k2, d2 in keydict.items(): + new_weight = d2.get(weight, 1) * sign + if isnan(new_weight): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(v, w, k2, d2)}" + raise ValueError(msg) + push(frontier, (new_weight, next(c), v, w, k2, d2)) + else: + for w, d2 in G.adj[v].items(): + if w in visited: + continue + new_weight = d2.get(weight, 1) * sign + if isnan(new_weight): + if ignore_nan: + continue + msg = f"NaN found as an edge weight. Edge {(v, w, d2)}" + raise ValueError(msg) + push(frontier, (new_weight, next(c), v, w, d2)) + + +ALGORITHMS = { + "boruvka": boruvka_mst_edges, + "borůvka": boruvka_mst_edges, + "kruskal": kruskal_mst_edges, + "prim": prim_mst_edges, +} + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight", preserve_edge_attrs="data") +def minimum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a minimum spanning forest of an undirected + weighted graph. + + A minimum spanning tree is a subgraph of the graph (a tree) + with the minimum sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. 
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find minimum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Find minimum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.minimum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [1, 2], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=True, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@not_implemented_for("directed") +@nx._dispatch(edge_attrs="weight", preserve_edge_attrs="data") +def maximum_spanning_edges( + G, algorithm="kruskal", weight="weight", keys=True, data=True, ignore_nan=False +): + """Generate edges in a maximum spanning forest of an undirected + weighted graph. + + A maximum spanning tree is a subgraph of the graph (a tree) + with the maximum possible sum of edge weights. A spanning forest is a + union of the spanning trees for each connected component of the graph. + + Parameters + ---------- + G : undirected Graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is 'kruskal'. + + weight : string + Edge data key to use for weight (default 'weight'). + + keys : bool + Whether to yield edge key in multigraphs in addition to the edge. + If `G` is not a multigraph, this is ignored. + + data : bool, optional + If True yield the edge data along with the edge. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + edges : iterator + An iterator over edges in a maximum spanning tree of `G`. + Edges connecting nodes `u` and `v` are represented as tuples: + `(u, v, k, d)` or `(u, v, k)` or `(u, v, d)` or `(u, v)` + + If `G` is a multigraph, `keys` indicates whether the edge key `k` will + be reported in the third position in the edge tuple. `data` indicates + whether the edge datadict `d` will appear at the end of the edge tuple. + + If `G` is not a multigraph, the tuples are `(u, v, d)` if `data` is True + or `(u, v)` if `data` is False. 
+ + Examples + -------- + >>> from networkx.algorithms import tree + + Find maximum spanning edges by Kruskal's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> mst = tree.maximum_spanning_edges(G, algorithm="kruskal", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [1, 2]] + + Find maximum spanning edges by Prim's algorithm + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) # assign weight 2 to edge 0-3 + >>> mst = tree.maximum_spanning_edges(G, algorithm="prim", data=False) + >>> edgelist = list(mst) + >>> sorted(sorted(e) for e in edgelist) + [[0, 1], [0, 3], [2, 3]] + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + Modified code from David Eppstein, April 2006 + http://www.ics.uci.edu/~eppstein/PADS/ + """ + try: + algo = ALGORITHMS[algorithm] + except KeyError as err: + msg = f"{algorithm} is not a valid choice for an algorithm." + raise ValueError(msg) from err + + return algo( + G, minimum=False, weight=weight, keys=keys, data=data, ignore_nan=ignore_nan + ) + + +@nx._dispatch(preserve_all_attrs=True) +def minimum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a minimum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a minimum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree or forest. + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.minimum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (1, 2, {}), (2, 3, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = minimum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@nx._dispatch(preserve_all_attrs=True) +def partition_spanning_tree( + G, minimum=True, weight="weight", partition="partition", ignore_nan=False +): + """ + Find a spanning tree while respecting a partition of edges. + + Edges can be flagged as either `INCLUDED` which are required to be in the + returned tree, `EXCLUDED`, which cannot be in the returned tree and `OPEN`. + + This is used in the SpanningTreeIterator to create new partitions following + the algorithm of Sörensen and Janssens [1]_. 
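+ + For example, a minimal sketch (assuming the default edge weight of 1 on + unweighted edges and the default ``partition`` attribute name): + + >>> G = nx.cycle_graph(4) + >>> G.edges[0, 3]["partition"] = nx.EdgePartition.INCLUDED + >>> sorted(nx.partition_spanning_tree(G).edges()) + [(0, 1), (0, 3), (1, 2)]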
+ + Parameters + ---------- + G : undirected graph + An undirected graph. + + minimum : bool (default: True) + Determines whether the returned tree is the minimum spanning tree of + the partition or the maximum one. + + weight : str + Data key to use for edge weights. + + partition : str + The key for the edge attribute containing the partition + data on the graph. Edges can be included, excluded or open using the + `EdgePartition` enum. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + + Returns + ------- + G : NetworkX Graph + A minimum spanning tree using all of the included edges in the graph and + none of the excluded edges. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + edges = kruskal_mst_edges( + G, + minimum, + weight, + keys=True, + data=True, + ignore_nan=ignore_nan, + partition=partition, + ) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@nx._dispatch(preserve_all_attrs=True) +def maximum_spanning_tree(G, weight="weight", algorithm="kruskal", ignore_nan=False): + """Returns a maximum spanning tree or forest on an undirected graph `G`. + + Parameters + ---------- + G : undirected graph + An undirected graph. If `G` is connected, then the algorithm finds a + spanning tree. Otherwise, a spanning forest is found. + + weight : str + Data key to use for edge weights. + + algorithm : string + The algorithm to use when finding a maximum spanning tree. Valid + choices are 'kruskal', 'prim', or 'boruvka'. The default is + 'kruskal'. + + ignore_nan : bool (default: False) + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. + + + Returns + ------- + G : NetworkX Graph + A maximum spanning tree or forest. + + + Examples + -------- + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> T = nx.maximum_spanning_tree(G) + >>> sorted(T.edges(data=True)) + [(0, 1, {}), (0, 3, {'weight': 2}), (1, 2, {})] + + + Notes + ----- + For Borůvka's algorithm, each edge must have a weight attribute, and + each edge weight must be distinct. + + For the other algorithms, if the graph edges do not have a weight + attribute a default weight of 1 will be used. + + There may be more than one tree with the same minimum or maximum weight. + See :mod:`networkx.tree.recognition` for more detailed definitions. + + Isolated nodes with self-loops are in the tree as edgeless isolated nodes. + + """ + edges = maximum_spanning_edges( + G, algorithm, weight, keys=True, data=True, ignore_nan=ignore_nan + ) + edges = list(edges) + T = G.__class__() # Same graph class as G + T.graph.update(G.graph) + T.add_nodes_from(G.nodes.items()) + T.add_edges_from(edges) + return T + + +@py_random_state(3) +@nx._dispatch(preserve_edge_attrs=True) +def random_spanning_tree(G, weight=None, *, multiplicative=True, seed=None): + """ + Sample a random spanning tree using the edge weights of `G`. + + This function supports two different methods for determining the + probability of each spanning tree.
If ``multiplicative=True``, the probability + is based on the product of edge weights, and if ``multiplicative=False`` + it is based on the sum of the edge weights. However, since it is + easier to determine the total weight of all spanning trees for the + multiplicative version, that is significantly faster and should be used if + possible. Additionally, setting `weight` to `None` will cause a spanning tree + to be selected with uniform probability. + + The function uses algorithm A8 in [1]_. + + Parameters + ---------- + G : nx.Graph + An undirected version of the original graph. + + weight : string + The edge key for the edge attribute holding edge weight. + + multiplicative : bool, default=True + If `True`, the probability of each tree is the product of its edge weights + divided by the sum of those products over all spanning trees in the graph. + If `False`, the probability is the sum of its edge weights divided by the + sum of those sums over all spanning trees in the graph. + + seed : integer, random_state, or None (default) + Indicator of random number generation state. + See :ref:`Randomness`. + + Returns + ------- + nx.Graph + A spanning tree using the distribution defined by the weight of the tree. + + References + ---------- + .. [1] V. Kulkarni, Generating random combinatorial objects, Journal of + Algorithms, 11 (1990), pp. 185–207 + """ + + def find_node(merged_nodes, node): + """ + We can think of clusters of contracted nodes as having one + representative in the graph. Each node which is not in merged_nodes + is still its own representative. Since a representative can be later + contracted, we need to recursively search through the dict to find + the final representative, but once we know it we can use path + compression to speed up the access of the representative for next time. + + This cannot be replaced by the standard NetworkX union_find since that + data structure merges the cluster with fewer represented nodes into the + one with more, but this function requires that we merge them in the + order in which contract_edges contracts them. + + Parameters + ---------- + merged_nodes : dict + The dict storing the mapping from node to representative + node + The node whose representative we seek + + Returns + ------- + The representative of the `node` + """ + if node not in merged_nodes: + return node + else: + rep = find_node(merged_nodes, merged_nodes[node]) + merged_nodes[node] = rep + return rep + + def prepare_graph(): + """ + For the graph `G`, remove all edges not in the set `V` and then + contract all edges in the set `U`. + + Returns + ------- + A copy of `G` which has had all edges not in `V` removed and all edges + in `U` contracted. + """ + + # The result is a MultiGraph version of G so that parallel edges are + # allowed during edge contraction + result = nx.MultiGraph(incoming_graph_data=G) + + # Remove all edges not in V + edges_to_remove = set(result.edges()).difference(V) + result.remove_edges_from(edges_to_remove) + + # Contract all edges in U + # + # Imagine that you have two edges to contract and they share an + # endpoint like this: + # [0] ----- [1] ----- [2] + # If we contract (0, 1) first, the contraction function will always + # delete the second node it is passed so the resulting graph would be + # [0] ----- [2] + # and edge (1, 2) no longer exists but (0, 2) would need to be contracted + # in its place now. That is why I use the below dict as a merge-find + # data structure with path compression to track how the nodes are merged.
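+ # (Illustrative walk-through: with U = {(0, 1), (1, 2)}, contracting + # (0, 1) merges node 1 into node 0 and records merged_nodes[1] = 0; when + # (1, 2) is processed, find_node resolves 1 -> 0, so node 2 is merged into + # node 0 rather than into the already-removed node 1.)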
+ merged_nodes = {} + + for u, v in U: + u_rep = find_node(merged_nodes, u) + v_rep = find_node(merged_nodes, v) + # We cannot contract a node with itself + if u_rep == v_rep: + continue + nx.contracted_nodes(result, u_rep, v_rep, self_loops=False, copy=False) + merged_nodes[v_rep] = u_rep + + return merged_nodes, result + + def spanning_tree_total_weight(G, weight): + """ + Find the sum of weights of the spanning trees of `G` using the + method selected by the enclosing ``multiplicative`` flag. + + This is easy if the chosen method is 'multiplicative', since we can + use Kirchhoff's Tree Matrix Theorem directly. However, with the + 'additive' method, this process is slightly more complex and less + computationally efficient as we have to find the number of spanning + trees which contain each possible edge in the graph. + + Parameters + ---------- + G : NetworkX Graph + The graph to find the total weight of all spanning trees on. + + weight : string + The key for the weight edge attribute of the graph. + + Returns + ------- + float + The sum of either the multiplicative or additive weight for all + spanning trees in the graph. + """ + if multiplicative: + return nx.total_spanning_tree_weight(G, weight) + else: + # There are two cases for the total spanning tree additive weight. + # 1. There is one edge in the graph. Then the only spanning tree is + # that edge itself, whose total weight is the weight of that + # edge. + if G.number_of_edges() == 1: + return G.edges(data=weight).__iter__().__next__()[2] + # 2. There is more than one edge in the graph. Then, we can find the + # total weight of the spanning trees using the formula in the + # reference paper: take the weight of each edge and multiply it by + # the number of spanning trees which have to include that edge. This + # can be accomplished by contracting the edge and finding the + # multiplicative total spanning tree weight if the weight of each edge + # is assumed to be 1, which is conveniently built into networkx already, + # by calling total_spanning_tree_weight with weight=None + else: + total = 0 + for u, v, w in G.edges(data=weight): + total += w * nx.total_spanning_tree_weight( + nx.contracted_edge(G, edge=(u, v), self_loops=False), None + ) + return total + + U = set() + st_cached_value = 0 + V = set(G.edges()) + shuffled_edges = list(G.edges()) + seed.shuffle(shuffled_edges) + + for u, v in shuffled_edges: + e_weight = G[u][v][weight] if weight is not None else 1 + node_map, prepared_G = prepare_graph() + G_total_tree_weight = spanning_tree_total_weight(prepared_G, weight) + # Add the edge to U so that we can compute the total tree weight + # assuming we include that edge + # Now, if (u, v) cannot exist in G because it is fully contracted out + # of existence, then it by definition cannot influence G_e's Kirchhoff + # value. But, we also cannot pick it. + rep_edge = (find_node(node_map, u), find_node(node_map, v)) + # Check to see if the 'representative edge' for the current edge is + # in prepared_G. If so, then we can pick it.
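+ # (Editorial note: for the multiplicative method, the threshold computed + # below equals w(e) * W(G/e) / W(G), where W(.) is the total spanning-tree + # weight; this is exactly the probability that edge e appears in a tree + # drawn from the target distribution, i.e. the acceptance probability in + # algorithm A8.)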
+ if rep_edge in prepared_G.edges: + prepared_G_e = nx.contracted_edge( + prepared_G, edge=rep_edge, self_loops=False + ) + G_e_total_tree_weight = spanning_tree_total_weight(prepared_G_e, weight) + if multiplicative: + threshold = e_weight * G_e_total_tree_weight / G_total_tree_weight + else: + numerator = ( + st_cached_value + e_weight + ) * nx.total_spanning_tree_weight(prepared_G_e) + G_e_total_tree_weight + denominator = ( + st_cached_value * nx.total_spanning_tree_weight(prepared_G) + + G_total_tree_weight + ) + threshold = numerator / denominator + else: + threshold = 0.0 + z = seed.uniform(0.0, 1.0) + if z > threshold: + # Remove the edge from V since we did not pick it. + V.remove((u, v)) + else: + # Add the edge to U since we picked it. + st_cached_value += e_weight + U.add((u, v)) + # If we decide to keep an edge, it may complete the spanning tree. + if len(U) == G.number_of_nodes() - 1: + spanning_tree = nx.Graph() + spanning_tree.add_edges_from(U) + return spanning_tree + raise Exception(f"Something went wrong! Only {len(U)} edges in the spanning tree!") + + +class SpanningTreeIterator: + """ + Iterate over all spanning trees of a graph in either increasing or + decreasing cost. + + Notes + ----- + This iterator uses the partition scheme from [1]_ (included edges, + excluded edges and open edges) as well as a modified Kruskal's Algorithm + to generate minimum spanning trees which respect the partition of edges. + For spanning trees with the same weight, ties are broken arbitrarily. + + References + ---------- + .. [1] G.K. Janssens, K. Sörensen, An algorithm to generate all spanning + trees in order of increasing cost, Pesquisa Operacional, 2005-08, + Vol. 25 (2), p. 219-229, + https://www.scielo.br/j/pope/a/XHswBwRwJyrfL88dmMwYNWp/?lang=en + """ + + @dataclass(order=True) + class Partition: + """ + This dataclass represents a partition and stores a dict with the edge + data and the weight of the minimum spanning tree of the partition dict. + """ + + mst_weight: float + partition_dict: dict = field(compare=False) + + def __copy__(self): + return SpanningTreeIterator.Partition( + self.mst_weight, self.partition_dict.copy() + ) + + def __init__(self, G, weight="weight", minimum=True, ignore_nan=False): + """ + Initialize the iterator + + Parameters + ---------- + G : nx.Graph + The directed graph which we need to iterate trees over + + weight : String, default = "weight" + The edge attribute used to store the weight of the edge + + minimum : bool, default = True + Return the trees in increasing order while true and decreasing order + while false. + + ignore_nan : bool, default = False + If a NaN is found as an edge weight normally an exception is raised. + If `ignore_nan is True` then that edge is ignored instead. 
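+ + Examples + -------- + A short usage sketch (only the tree weights are shown, since ties + between equal-weight trees may be broken arbitrarily): + + >>> G = nx.cycle_graph(4) + >>> G.add_edge(0, 3, weight=2) + >>> [t.size(weight="weight") for t in nx.SpanningTreeIterator(G)] + [3.0, 4.0, 4.0, 4.0]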
+ """ + self.G = G.copy() + self.weight = weight + self.minimum = minimum + self.ignore_nan = ignore_nan + # Randomly create a key for an edge attribute to hold the partition data + self.partition_key = ( + "SpanningTreeIterators super secret partition attribute name" + ) + + def __iter__(self): + """ + Returns + ------- + SpanningTreeIterator + The iterator object for this graph + """ + self.partition_queue = PriorityQueue() + self._clear_partition(self.G) + mst_weight = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ).size(weight=self.weight) + + self.partition_queue.put( + self.Partition(mst_weight if self.minimum else -mst_weight, {}) + ) + + return self + + def __next__(self): + """ + Returns + ------- + (multi)Graph + The spanning tree of next greatest weight, which ties broken + arbitrarily. + """ + if self.partition_queue.empty(): + del self.G, self.partition_queue + raise StopIteration + + partition = self.partition_queue.get() + self._write_partition(partition) + next_tree = partition_spanning_tree( + self.G, self.minimum, self.weight, self.partition_key, self.ignore_nan + ) + self._partition(partition, next_tree) + + self._clear_partition(next_tree) + return next_tree + + def _partition(self, partition, partition_tree): + """ + Create new partitions based of the minimum spanning tree of the + current minimum partition. + + Parameters + ---------- + partition : Partition + The Partition instance used to generate the current minimum spanning + tree. + partition_tree : nx.Graph + The minimum spanning tree of the input partition. + """ + # create two new partitions with the data from the input partition dict + p1 = self.Partition(0, partition.partition_dict.copy()) + p2 = self.Partition(0, partition.partition_dict.copy()) + for e in partition_tree.edges: + # determine if the edge was open or included + if e not in partition.partition_dict: + # This is an open edge + p1.partition_dict[e] = EdgePartition.EXCLUDED + p2.partition_dict[e] = EdgePartition.INCLUDED + + self._write_partition(p1) + p1_mst = partition_spanning_tree( + self.G, + self.minimum, + self.weight, + self.partition_key, + self.ignore_nan, + ) + p1_mst_weight = p1_mst.size(weight=self.weight) + if nx.is_connected(p1_mst): + p1.mst_weight = p1_mst_weight if self.minimum else -p1_mst_weight + self.partition_queue.put(p1.__copy__()) + p1.partition_dict = p2.partition_dict.copy() + + def _write_partition(self, partition): + """ + Writes the desired partition into the graph to calculate the minimum + spanning tree. + + Parameters + ---------- + partition : Partition + A Partition dataclass describing a partition on the edges of the + graph. 
+ """ + for u, v, d in self.G.edges(data=True): + if (u, v) in partition.partition_dict: + d[self.partition_key] = partition.partition_dict[(u, v)] + else: + d[self.partition_key] = EdgePartition.OPEN + + def _clear_partition(self, G): + """ + Removes partition data from the graph + """ + for u, v, d in G.edges(data=True): + if self.partition_key in d: + del d[self.partition_key] diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/operations.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/operations.py new file mode 100644 index 0000000000000000000000000000000000000000..df1b4e7bec059b54fa5998cca180bff84e517847 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/operations.py @@ -0,0 +1,128 @@ +"""Operations on trees.""" +from functools import partial +from itertools import accumulate, chain + +import networkx as nx + +__all__ = ["join", "join_trees"] + + +def join(rooted_trees, label_attribute=None): + """A deprecated name for `join_trees` + + Returns a new rooted tree with a root node joined with the roots + of each of the given rooted trees. + + .. deprecated:: 3.2 + + `join` is deprecated in NetworkX v3.2 and will be removed in v3.4. + It has been renamed join_trees with the same syntax/interface. + + """ + import warnings + + warnings.warn( + "The function `join` is deprecated and is renamed `join_trees`.\n" + "The ``join`` function itself will be removed in v3.4", + DeprecationWarning, + stacklevel=2, + ) + + return join_trees(rooted_trees, label_attribute=label_attribute) + + +# Argument types don't match dispatching, but allow manual selection of backend +@nx._dispatch(graphs=None) +def join_trees(rooted_trees, *, label_attribute=None, first_label=0): + """Returns a new rooted tree made by joining `rooted_trees` + + Constructs a new tree by joining each tree in `rooted_trees`. + A new root node is added and connected to each of the roots + of the input trees. While copying the nodes from the trees, + relabeling to integers occurs. If the `label_attribute` is provided, + the old node labels will be stored in the new tree under this attribute. + + Parameters + ---------- + rooted_trees : list + A list of pairs in which each left element is a NetworkX graph + object representing a tree and each right element is the root + node of that tree. The nodes of these trees will be relabeled to + integers. + + label_attribute : str + If provided, the old node labels will be stored in the new tree + under this node attribute. If not provided, the original labels + of the nodes in the input trees are not stored. + + first_label : int, optional (default=0) + Specifies the label for the new root node. If provided, the root node of the joined tree + will have this label. If not provided, the root node will default to a label of 0. + + Returns + ------- + NetworkX graph + The rooted tree resulting from joining the provided `rooted_trees`. The new tree has a root node + labeled as specified by `first_label` (defaulting to 0 if not provided). Subtrees from the input + `rooted_trees` are attached to this new root node. Each non-root node, if the `label_attribute` + is provided, has an attribute that indicates the original label of the node in the input tree. + + Notes + ----- + Trees are stored in NetworkX as NetworkX Graphs. There is no specific + enforcement of the fact that these are trees. Testing for each tree + can be done using :func:`networkx.is_tree`. + + Graph, edge, and node attributes are propagated from the given + rooted trees to the created tree. 
If there are any overlapping graph + attributes, those from later trees will overwrite those from earlier + trees in the tuple of positional arguments. + + Examples + -------- + Join two full balanced binary trees of height *h* to get a full + balanced binary tree of height *h* + 1:: + + >>> h = 4 + >>> left = nx.balanced_tree(2, h) + >>> right = nx.balanced_tree(2, h) + >>> joined_tree = nx.join_trees([(left, 0), (right, 0)]) + >>> nx.is_isomorphic(joined_tree, nx.balanced_tree(2, h + 1)) + True + + """ + if not rooted_trees: + return nx.empty_graph(1) + + # Unzip the zipped list of (tree, root) pairs. + trees, roots = zip(*rooted_trees) + + # The join of the trees has the same type as the type of the first tree. + R = type(trees[0])() + + lengths = (len(tree) for tree in trees[:-1]) + first_labels = list(accumulate(lengths, initial=first_label + 1)) + + new_roots = [] + for tree, root, first_node in zip(trees, roots, first_labels): + new_root = first_node + list(tree.nodes()).index(root) + new_roots.append(new_root) + + # Relabel the nodes so that their union is the integers starting at first_label. + relabel = partial( + nx.convert_node_labels_to_integers, label_attribute=label_attribute + ) + new_trees = [ + relabel(tree, first_label=first_label) + for tree, first_label in zip(trees, first_labels) + ] + + # Add all the nodes, edges, and their attributes + for tree in new_trees: + R.update(tree) + + # Finally, join the subtrees at the root. We know first_label is unused by the way we relabeled the subtrees. + R.add_node(first_label) + R.add_edges_from((first_label, root) for root in new_roots) + + return R diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/recognition.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/recognition.py new file mode 100644 index 0000000000000000000000000000000000000000..15bbdf7d83b8a7e1b6061c860f18d118663afbc2 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/recognition.py @@ -0,0 +1,273 @@ +""" +Recognition Tests +================= + +A *forest* is an acyclic, undirected graph, and a *tree* is a connected forest. +Depending on the subfield, there are various conventions for generalizing these +definitions to directed graphs. + +In one convention, directed variants of forest and tree are defined in an +identical manner, except that the direction of the edges is ignored. In effect, +each directed edge is treated as a single undirected edge. Then, additional +restrictions are imposed to define *branchings* and *arborescences*. + +In another convention, directed variants of forest and tree correspond to +the previous convention's branchings and arborescences, respectively. Then two +new terms, *polyforest* and *polytree*, are defined to correspond to the other +convention's forest and tree. + +Summarizing:: + + +-----------------------------+ + | Convention A | Convention B | + +=============================+ + | forest | polyforest | + | tree | polytree | + | branching | forest | + | arborescence | tree | + +-----------------------------+ + +Each convention has its reasons. The first convention emphasizes definitional +similarity in that directed forests and trees are only concerned with +acyclicity and do not have an in-degree constraint, just as their undirected +counterparts do not. The second convention emphasizes functional similarity +in the sense that the directed analog of a spanning tree is a spanning +arborescence. That is, take any spanning tree and choose one node as the root.
+Then every edge is assigned a direction such that there is a directed path from +the root to every other node. The result is a spanning arborescence. + +NetworkX follows convention "A". Explicitly, these are: + +undirected forest + An undirected graph with no undirected cycles. + +undirected tree + A connected, undirected forest. + +directed forest + A directed graph with no undirected cycles. Equivalently, the underlying + graph structure (which ignores edge orientations) is an undirected forest. + In convention B, this is known as a polyforest. + +directed tree + A weakly connected, directed forest. Equivalently, the underlying graph + structure (which ignores edge orientations) is an undirected tree. In + convention B, this is known as a polytree. + +branching + A directed forest with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a forest. + +arborescence + A directed tree with each node having, at most, one parent. So the maximum + in-degree is equal to 1. In convention B, this is known as a tree. + +For trees and arborescences, the adjective "spanning" may be added to designate +that the graph, when considered as a forest/branching, consists of a single +tree/arborescence that includes all nodes in the graph. It is true, by +definition, that every tree/arborescence is spanning with respect to the nodes +that define the tree/arborescence and so, it might seem redundant to introduce +the notion of "spanning". However, the nodes may represent a subset of +nodes from a larger graph, and it is in this context that the term "spanning" +becomes a useful notion. + +""" + +import networkx as nx + +__all__ = ["is_arborescence", "is_branching", "is_forest", "is_tree"] + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatch +def is_arborescence(G): + """ + Returns True if `G` is an arborescence. + + An arborescence is a directed tree with maximum in-degree equal to 1. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is an arborescence. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (3, 4)]) + >>> nx.is_arborescence(G) + True + >>> G.remove_edge(0, 1) + >>> G.add_edge(1, 2) # maximum in-degree is 2 + >>> nx.is_arborescence(G) + False + + Notes + ----- + In another convention, an arborescence is known as a *tree*. + + See Also + -------- + is_tree + + """ + return is_tree(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx.utils.not_implemented_for("undirected") +@nx._dispatch +def is_branching(G): + """ + Returns True if `G` is a branching. + + A branching is a directed forest with maximum in-degree equal to 1. + + Parameters + ---------- + G : directed graph + The directed graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a branching. + + Examples + -------- + >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)]) + >>> nx.is_branching(G) + True + >>> G.remove_edge(2, 3) + >>> G.add_edge(3, 1) # maximum in-degree is 2 + >>> nx.is_branching(G) + False + + Notes + ----- + In another convention, a branching is also known as a *forest*. + + See Also + -------- + is_forest + + """ + return is_forest(G) and max(d for n, d in G.in_degree()) <= 1 + + +@nx._dispatch +def is_forest(G): + """ + Returns True if `G` is a forest. + + A forest is a graph with no undirected cycles. + + For directed graphs, `G` is a forest if the underlying graph is a forest.
+ The underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a forest. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_forest(G) + True + >>> G.add_edge(4, 1) + >>> nx.is_forest(G) + False + + Notes + ----- + In another convention, a directed forest is known as a *polyforest* and + then *forest* corresponds to a *branching*. + + See Also + -------- + is_branching + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + components = (G.subgraph(c) for c in nx.weakly_connected_components(G)) + else: + components = (G.subgraph(c) for c in nx.connected_components(G)) + + return all(len(c) - 1 == c.number_of_edges() for c in components) + + +@nx._dispatch +def is_tree(G): + """ + Returns True if `G` is a tree. + + A tree is a connected graph with no undirected cycles. + + For directed graphs, `G` is a tree if the underlying graph is a tree. The + underlying graph is obtained by treating each directed edge as a single + undirected edge in a multigraph. + + Parameters + ---------- + G : graph + The graph to test. + + Returns + ------- + b : bool + A boolean that is True if `G` is a tree. + + Raises + ------ + NetworkXPointlessConcept + If `G` is empty. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)]) + >>> nx.is_tree(G) # n-1 edges + True + >>> G.add_edge(3, 4) + >>> nx.is_tree(G) # n edges + False + + Notes + ----- + In another convention, a directed tree is known as a *polytree* and then + *tree* corresponds to an *arborescence*. + + See Also + -------- + is_arborescence + + """ + if len(G) == 0: + raise nx.exception.NetworkXPointlessConcept("G has no nodes.") + + if G.is_directed(): + is_connected = nx.is_weakly_connected + else: + is_connected = nx.is_connected + + # A connected graph with no cycles has n-1 edges. 
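+ # (Conversely, any connected graph on n nodes with exactly n - 1 edges is + # acyclic, so the edge-count check combined with the connectivity check + # below is necessary and sufficient; for directed graphs, weak connectivity + # plays the same role.)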
+ return len(G) - 1 == G.number_of_edges() and is_connected(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__init__.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11ac9b6cfbd53a69c718d8dc4a21362cb5d87731 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..245ff26ba8a87ecdf7f9cc726ff36312e03d08fc Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7554a51018f8566522ee8dd26fbe1e1985acb780 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e95629e5d86c8fcae038d3f30ec01aa35e9f782 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45427a9925ec7402ef1f6ff9a8774b8f50975b13 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9242bb5fb7b4feaaec119123a9fd103cea427108 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57de3e1e7218704915681693fe929a064097cfd7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_branchings.py 
b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_branchings.py new file mode 100644 index 0000000000000000000000000000000000000000..ad1cc33b9a494577638dc34b45736f9f3d1a23fe --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_branchings.py @@ -0,0 +1,632 @@ +import math +from operator import itemgetter + +import pytest + +np = pytest.importorskip("numpy") + +import networkx as nx +from networkx.algorithms.tree import branchings, recognition + +# +# Explicitly discussed examples from Edmonds paper. +# + +# Used in Figures A-F. +# +# fmt: off +G_array = np.array([ + # 0 1 2 3 4 5 6 7 8 + [0, 0, 12, 0, 12, 0, 0, 0, 0], # 0 + [4, 0, 0, 0, 0, 13, 0, 0, 0], # 1 + [0, 17, 0, 21, 0, 12, 0, 0, 0], # 2 + [5, 0, 0, 0, 17, 0, 18, 0, 0], # 3 + [0, 0, 0, 0, 0, 0, 0, 12, 0], # 4 + [0, 0, 0, 0, 0, 0, 14, 0, 12], # 5 + [0, 0, 21, 0, 0, 0, 0, 0, 15], # 6 + [0, 0, 0, 19, 0, 0, 15, 0, 0], # 7 + [0, 0, 0, 0, 0, 0, 0, 18, 0], # 8 +], dtype=int) + +# Two copies of the graph from the original paper as disconnected components +G_big_array = np.zeros(np.array(G_array.shape) * 2, dtype=int) +G_big_array[:G_array.shape[0], :G_array.shape[1]] = G_array +G_big_array[G_array.shape[0]:, G_array.shape[1]:] = G_array + +# fmt: on + + +def G1(): + G = nx.from_numpy_array(G_array, create_using=nx.MultiDiGraph) + return G + + +def G2(): + # Now we shift all the weights by -10. + # Should not affect optimal arborescence, but does affect optimal branching. + Garr = G_array.copy() + Garr[np.nonzero(Garr)] -= 10 + G = nx.from_numpy_array(Garr, create_using=nx.MultiDiGraph) + return G + + +# An optimal branching for G1 that is also a spanning arborescence. So it is +# also an optimal spanning arborescence. +# +optimal_arborescence_1 = [ + (0, 2, 12), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 4, 17), + (3, 6, 18), + (6, 8, 15), + (8, 7, 18), +] + +# For G2, the optimal branching of G1 (with shifted weights) is no longer +# an optimal branching, but it is still an optimal spanning arborescence +# (just with shifted weights). An optimal branching for G2 is similar to what +# appears in figure G (this is greedy_subopt_branching_1a below), but with the +# edge (3, 0, 5), which is now (3, 0, -5), removed. Thus, the optimal branching +# is not a spanning arborescence. The code finds optimal_branching_2a. +# An alternative and equivalent branching is optimal_branching_2b. We would +# need to modify the code to iterate through all equivalent optimal branchings. +# +# These are maximal branchings or arborescences. +optimal_branching_2a = [ + (5, 6, 4), + (6, 2, 11), + (6, 8, 5), + (8, 7, 8), + (2, 1, 7), + (2, 3, 11), + (3, 4, 7), +] +optimal_branching_2b = [ + (8, 7, 8), + (7, 3, 9), + (3, 4, 7), + (3, 6, 8), + (6, 2, 11), + (2, 1, 7), + (1, 5, 3), +] +optimal_arborescence_2 = [ + (0, 2, 2), + (2, 1, 7), + (2, 3, 11), + (1, 5, 3), + (3, 4, 7), + (3, 6, 8), + (6, 8, 5), + (8, 7, 8), +] + +# Two suboptimal maximal branchings on G1 obtained from a greedy algorithm. +# 1a matches what is shown in Figure G in Edmonds's paper. 
+greedy_subopt_branching_1a = [ + (5, 6, 14), + (6, 2, 21), + (6, 8, 15), + (8, 7, 18), + (2, 1, 17), + (2, 3, 21), + (3, 0, 5), + (3, 4, 17), +] +greedy_subopt_branching_1b = [ + (8, 7, 18), + (7, 6, 15), + (6, 2, 21), + (2, 1, 17), + (2, 3, 21), + (1, 5, 13), + (3, 0, 5), + (3, 4, 17), +] + + +def build_branching(edges, double=False): + G = nx.DiGraph() + for u, v, weight in edges: + G.add_edge(u, v, weight=weight) + if double: + G.add_edge(u + 9, v + 9, weight=weight) + return G + + +def sorted_edges(G, attr="weight", default=1): + edges = [(u, v, data.get(attr, default)) for (u, v, data) in G.edges(data=True)] + edges = sorted(edges, key=lambda x: (x[2], x[1], x[0])) + return edges + + +def assert_equal_branchings(G1, G2, attr="weight", default=1): + edges1 = list(G1.edges(data=True)) + edges2 = list(G2.edges(data=True)) + assert len(edges1) == len(edges2) + + # Grab the weights only. + e1 = sorted_edges(G1, attr, default) + e2 = sorted_edges(G2, attr, default) + + for a, b in zip(e1, e2): + assert a[:2] == b[:2] + np.testing.assert_almost_equal(a[2], b[2]) + + +################ + + +def test_optimal_branching1(): + G = build_branching(optimal_arborescence_1) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 131 + + +def test_optimal_branching2a(): + G = build_branching(optimal_branching_2a) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 53 + + +def test_optimal_branching2b(): + G = build_branching(optimal_branching_2b) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 53 + + +def test_optimal_arborescence2(): + G = build_branching(optimal_arborescence_2) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 51 + + +def test_greedy_suboptimal_branching1a(): + G = build_branching(greedy_subopt_branching_1a) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 128 + + +def test_greedy_suboptimal_branching1b(): + G = build_branching(greedy_subopt_branching_1b) + assert recognition.is_arborescence(G) + assert branchings.branching_weight(G) == 127 + + +def test_greedy_max1(): + # Standard test. + # + G = G1() + B = branchings.greedy_branching(G) + # There are only two possible greedy branchings. The sorting is such + # that it should equal the second suboptimal branching: 1b. + B_ = build_branching(greedy_subopt_branching_1b) + assert_equal_branchings(B, B_) + + +def test_greedy_branching_kwarg_kind(): + G = G1() + with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."): + B = branchings.greedy_branching(G, kind="lol") + + +def test_greedy_branching_for_unsortable_nodes(): + G = nx.DiGraph() + G.add_weighted_edges_from([((2, 3), 5, 1), (3, "a", 1), (2, 4, 5)]) + edges = [(u, v, data.get("weight", 1)) for (u, v, data) in G.edges(data=True)] + with pytest.raises(TypeError): + edges.sort(key=itemgetter(2, 0, 1), reverse=True) + B = branchings.greedy_branching(G, kind="max").edges(data=True) + assert list(B) == [ + ((2, 3), 5, {"weight": 1}), + (3, "a", {"weight": 1}), + (2, 4, {"weight": 5}), + ] + + +def test_greedy_max2(): + # Different default weight. + # + G = G1() + del G[1][0][0]["weight"] + B = branchings.greedy_branching(G, default=6) + # Chosen so that edge (3,0,5) is not selected and (1,0,6) is instead.
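+ # (Editorial note: `del G[1][0][0]["weight"]` above strips the weight from + # multi-edge (1, 0, key=0), so with default=6 that edge scores 6 and + # becomes the single permitted in-edge of node 0, displacing (3, 0, 5).)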
+ + edges = [ + (1, 0, 6), + (1, 5, 13), + (7, 6, 15), + (2, 1, 17), + (3, 4, 17), + (8, 7, 18), + (2, 3, 21), + (6, 2, 21), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_greedy_max3(): + # All equal weights. + # + G = G1() + B = branchings.greedy_branching(G, attr=None) + + # This is mostly arbitrary...the output was generated by running the algo. + edges = [ + (2, 1, 1), + (3, 0, 1), + (3, 4, 1), + (5, 8, 1), + (6, 2, 1), + (7, 3, 1), + (7, 6, 1), + (8, 7, 1), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_, default=1) + + +def test_greedy_min(): + G = G1() + B = branchings.greedy_branching(G, kind="min") + + edges = [ + (1, 0, 4), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (7, 3, 19), + ] + B_ = build_branching(edges) + assert_equal_branchings(B, B_) + + +def test_edmonds1_maxbranch(): + G = G1() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_maxarbor(): + G = G1() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_1) + assert_equal_branchings(x, x_) + + +def test_edmonds1_minimal_branching(): + # graph will have something like a minimum arborescence but no spanning one + G = nx.from_numpy_array(G_big_array, create_using=nx.DiGraph) + B = branchings.minimal_branching(G) + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + B_ = build_branching(edges, double=True) + assert_equal_branchings(B, B_) + + +def test_edmonds2_maxbranch(): + G = G2() + x = branchings.maximum_branching(G) + x_ = build_branching(optimal_branching_2a) + assert_equal_branchings(x, x_) + + +def test_edmonds2_maxarbor(): + G = G2() + x = branchings.maximum_spanning_arborescence(G) + x_ = build_branching(optimal_arborescence_2) + assert_equal_branchings(x, x_) + + +def test_edmonds2_minarbor(): + G = G1() + x = branchings.minimum_spanning_arborescence(G) + # This was obtained from algorithm. Need to verify it independently. + # Branch weight is: 96 + edges = [ + (3, 0, 5), + (0, 2, 12), + (0, 4, 12), + (2, 5, 12), + (4, 7, 12), + (5, 8, 12), + (5, 6, 14), + (2, 1, 17), + ] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch1(): + G = G1() + x = branchings.minimum_branching(G) + edges = [] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edmonds3_minbranch2(): + G = G1() + G.add_edge(8, 9, weight=-10) + x = branchings.minimum_branching(G) + edges = [(8, 9, -10)] + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +# Need more tests + + +def test_mst(): + # Make sure we get the same results for undirected graphs. 
+ # Example from: https://en.wikipedia.org/wiki/Kruskal's_algorithm + G = nx.Graph() + edgelist = [ + (0, 3, [("weight", 5)]), + (0, 1, [("weight", 7)]), + (1, 3, [("weight", 9)]), + (1, 2, [("weight", 8)]), + (1, 4, [("weight", 7)]), + (3, 4, [("weight", 15)]), + (3, 5, [("weight", 6)]), + (2, 4, [("weight", 5)]), + (4, 5, [("weight", 8)]), + (4, 6, [("weight", 9)]), + (5, 6, [("weight", 11)]), + ] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + edges = [ + ({0, 1}, 7), + ({0, 3}, 5), + ({3, 5}, 6), + ({1, 4}, 7), + ({4, 2}, 5), + ({4, 6}, 9), + ] + + assert x.number_of_edges() == len(edges) + for u, v, d in x.edges(data=True): + assert ({u, v}, d["weight"]) in edges + + +def test_mixed_nodetypes(): + # Smoke test to make sure no TypeError is raised for mixed node types. + G = nx.Graph() + edgelist = [(0, 3, [("weight", 5)]), (0, "1", [("weight", 5)])] + G.add_edges_from(edgelist) + G = G.to_directed() + x = branchings.minimum_spanning_arborescence(G) + + +def test_edmonds1_minbranch(): + # Using -G_array and min should give the same as optimal_arborescence_1, + # but with all edges negative. + edges = [(u, v, -w) for (u, v, w) in optimal_arborescence_1] + + G = nx.from_numpy_array(-G_array, create_using=nx.DiGraph) + + # Quickly make sure max branching is empty. + x = branchings.maximum_branching(G) + x_ = build_branching([]) + assert_equal_branchings(x, x_) + + # Now test the min branching. + x = branchings.minimum_branching(G) + x_ = build_branching(edges) + assert_equal_branchings(x, x_) + + +def test_edge_attribute_preservation_normal_graph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for normal graphs. + G = nx.Graph() + + edgelist = [ + (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]), + (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]), + (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]), + ] + G.add_edges_from(edgelist) + + B = branchings.maximum_branching(G, preserve_attrs=True) + + assert B[0][1]["otherattr"] == 1 + assert B[0][1]["otherattr2"] == 3 + + +def test_edge_attribute_preservation_multigraph(): + # Test that edge attributes are preserved when finding an optimum graph + # using the Edmonds class for multigraphs. 
+    G = nx.MultiGraph()
+
+    edgelist = [
+        (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]),
+        (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]),
+        (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]),
+    ]
+    G.add_edges_from(edgelist * 2)  # Make sure we have duplicate edge paths
+
+    B = branchings.maximum_branching(G, preserve_attrs=True)
+
+    assert B[0][1][0]["otherattr"] == 1
+    assert B[0][1][0]["otherattr2"] == 3
+
+
+# TODO remove with Edmonds
+def test_Edmond_kind():
+    G = nx.MultiGraph()
+
+    edgelist = [
+        (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]),
+        (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]),
+        (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]),
+    ]
+    G.add_edges_from(edgelist * 2)  # Make sure we have duplicate edge paths
+    ed = branchings.Edmonds(G)
+    with pytest.raises(nx.NetworkXException, match="Unknown value for `kind`."):
+        ed.find_optimum(kind="lol", preserve_attrs=True)
+
+
+# TODO remove with MultiDiGraph_EdgeKey
+def test_MultiDiGraph_EdgeKey():
+    # test that adding more than one edge with the same key raises
+    G = branchings.MultiDiGraph_EdgeKey()
+    G.add_edge(1, 2, "A")
+    with pytest.raises(Exception, match="Key 'A' is already in use."):
+        G.add_edge(3, 4, "A")
+    # test that an invalid edge key raises
+    with pytest.raises(KeyError, match="Invalid edge key 'B'"):
+        G.remove_edge_with_key("B")
+    # test remove_edge_with_key works; it returns None, so assert directly
+    # rather than guarding the assert behind an if (which would never run).
+    G.remove_edge_with_key("A")
+    assert list(G.edges(data=True)) == []
+    # test that remove_edges_from doesn't work
+    G.add_edge(1, 3, "A")
+    with pytest.raises(NotImplementedError):
+        G.remove_edges_from([(1, 3)])
+
+
+def test_edge_attribute_discard():
+    # Test that edge attributes are discarded if we do not specify to keep them
+    G = nx.Graph()
+
+    edgelist = [
+        (0, 1, [("weight", 5), ("otherattr", 1), ("otherattr2", 3)]),
+        (0, 2, [("weight", 5), ("otherattr", 2), ("otherattr2", 2)]),
+        (1, 2, [("weight", 6), ("otherattr", 3), ("otherattr2", 1)]),
+    ]
+    G.add_edges_from(edgelist)
+
+    B = branchings.maximum_branching(G, preserve_attrs=False)
+
+    edge_dict = B[0][1]
+    with pytest.raises(KeyError):
+        _ = edge_dict["otherattr"]
+
+
+def test_partition_spanning_arborescence():
+    """
+    Test that we can generate minimum spanning arborescences which respect the
+    given partition.
+    """
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    G[3][0]["partition"] = nx.EdgePartition.EXCLUDED
+    G[2][3]["partition"] = nx.EdgePartition.INCLUDED
+    G[7][3]["partition"] = nx.EdgePartition.EXCLUDED
+    G[0][2]["partition"] = nx.EdgePartition.EXCLUDED
+    G[6][2]["partition"] = nx.EdgePartition.INCLUDED
+
+    actual_edges = [
+        (0, 4, 12),
+        (1, 0, 4),
+        (1, 5, 13),
+        (2, 3, 21),
+        (4, 7, 12),
+        (5, 6, 14),
+        (5, 8, 12),
+        (6, 2, 21),
+    ]
+
+    B = branchings.minimum_spanning_arborescence(G, partition="partition")
+    assert_equal_branchings(build_branching(actual_edges), B)
+
+
+def test_arborescence_iterator_min():
+    """
+    Tests the arborescence iterator.
+
+    A brute force method found 680 arborescences in this graph.
+ This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly increasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator(G): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_max(): + """ + Tests the arborescence iterator. + + A brute force method found 680 arborescences in this graph. + This test will not verify all of them individually, but will check two + things + + * The iterator returns 680 arborescences + * The weight of the arborescences is non-strictly decreasing + + for more information please visit + https://mjschwenne.github.io/2021/06/10/implementing-the-iterators.html + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + + arborescence_count = 0 + arborescence_weight = math.inf + for B in branchings.ArborescenceIterator(G, minimum=False): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight <= arborescence_weight + arborescence_weight = new_arborescence_weight + + assert arborescence_count == 680 + + +def test_arborescence_iterator_initial_partition(): + """ + Tests the arborescence iterator with three included edges and three excluded + in the initial partition. + + A brute force method similar to the one used in the above tests found that + there are 16 arborescences which contain the included edges and not the + excluded edges. + """ + G = nx.from_numpy_array(G_array, create_using=nx.DiGraph) + included_edges = [(1, 0), (5, 6), (8, 7)] + excluded_edges = [(0, 2), (3, 6), (1, 5)] + + arborescence_count = 0 + arborescence_weight = -math.inf + for B in branchings.ArborescenceIterator( + G, init_partition=(included_edges, excluded_edges) + ): + arborescence_count += 1 + new_arborescence_weight = B.size(weight="weight") + assert new_arborescence_weight >= arborescence_weight + arborescence_weight = new_arborescence_weight + for e in included_edges: + assert e in B.edges + for e in excluded_edges: + assert e not in B.edges + assert arborescence_count == 16 diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_coding.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_coding.py new file mode 100644 index 0000000000000000000000000000000000000000..c695fea5fdab4f09f79c69f8837bb07f65d16540 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_coding.py @@ -0,0 +1,113 @@ +"""Unit tests for the :mod:`~networkx.algorithms.tree.coding` module.""" +from itertools import product + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestPruferSequence: + """Unit tests for the Prüfer sequence encoding and decoding + functions. 
+ + """ + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_prufer_sequence(G) + + def test_null_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.null_graph()) + + def test_trivial_graph(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.to_prufer_sequence(nx.trivial_graph()) + + def test_bad_integer_labels(self): + with pytest.raises(KeyError): + T = nx.Graph(nx.utils.pairwise("abc")) + nx.to_prufer_sequence(T) + + def test_encoding(self): + """Tests for encoding a tree as a Prüfer sequence using the + iterative strategy. + + """ + # Example from Wikipedia. + tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]) + sequence = nx.to_prufer_sequence(tree) + assert sequence == [3, 3, 3, 4] + + def test_decoding(self): + """Tests for decoding a tree from a Prüfer sequence.""" + # Example from Wikipedia. + sequence = [3, 3, 3, 4] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(6))) + edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)] + assert edges_equal(list(tree.edges()), edges) + + def test_decoding2(self): + # Example from "An Optimal Algorithm for Prufer Codes". + sequence = [2, 4, 0, 1, 3, 3] + tree = nx.from_prufer_sequence(sequence) + assert nodes_equal(list(tree), list(range(8))) + edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)] + assert edges_equal(list(tree.edges()), edges) + + def test_inverse(self): + """Tests that the encoding and decoding functions are inverses.""" + for T in nx.nonisomorphic_trees(4): + T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T)) + assert nodes_equal(list(T), list(T2)) + assert edges_equal(list(T.edges()), list(T2.edges())) + + for seq in product(range(4), repeat=2): + seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq)) + assert list(seq) == seq2 + + +class TestNestedTuple: + """Unit tests for the nested tuple encoding and decoding functions.""" + + def test_nontree(self): + with pytest.raises(nx.NotATree): + G = nx.cycle_graph(3) + nx.to_nested_tuple(G, 0) + + def test_unknown_root(self): + with pytest.raises(nx.NodeNotFound): + G = nx.path_graph(2) + nx.to_nested_tuple(G, "bogus") + + def test_encoding(self): + T = nx.full_rary_tree(2, 2**3 - 1) + expected = (((), ()), ((), ())) + actual = nx.to_nested_tuple(T, 0) + assert nodes_equal(expected, actual) + + def test_canonical_form(self): + T = nx.Graph() + T.add_edges_from([(0, 1), (0, 2), (0, 3)]) + T.add_edges_from([(1, 4), (1, 5)]) + T.add_edges_from([(3, 6), (3, 7)]) + root = 0 + actual = nx.to_nested_tuple(T, root, canonical_form=True) + expected = ((), ((), ()), ((), ())) + assert actual == expected + + def test_decoding(self): + balanced = (((), ()), ((), ())) + expected = nx.full_rary_tree(2, 2**3 - 1) + actual = nx.from_nested_tuple(balanced) + assert nx.is_isomorphic(expected, actual) + + def test_sensible_relabeling(self): + balanced = (((), ()), ((), ())) + T = nx.from_nested_tuple(balanced, sensible_relabeling=True) + edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)] + assert nodes_equal(list(T), list(range(2**3 - 1))) + assert edges_equal(list(T.edges()), edges) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_decomposition.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_decomposition.py new file mode 100644 index 0000000000000000000000000000000000000000..8c376053794537611f46c038ed074eb92b1ba676 --- /dev/null +++ 
b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_decomposition.py @@ -0,0 +1,79 @@ +import networkx as nx +from networkx.algorithms.tree.decomposition import junction_tree + + +def test_junction_tree_directed_confounders(): + B = nx.DiGraph() + B.add_edges_from([("A", "C"), ("B", "C"), ("C", "D"), ("C", "E")]) + + G = junction_tree(B) + J = nx.Graph() + J.add_edges_from( + [ + (("C", "E"), ("C",)), + (("C",), ("A", "B", "C")), + (("A", "B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_nodes(): + B = nx.DiGraph() + B.add_nodes_from([("A", "B", "C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B", "C", "D")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_cascade(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("B", "C"), ("C", "D")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "B"), ("B",)), + (("B",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "D")), + ] + ) + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_directed_unconnected_edges(): + B = nx.DiGraph() + B.add_edges_from([("A", "B"), ("C", "D"), ("E", "F")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_nodes_from([("A", "B"), ("C", "D"), ("E", "F")]) + + assert nx.is_isomorphic(G, J) + + +def test_junction_tree_undirected(): + B = nx.Graph() + B.add_edges_from([("A", "C"), ("A", "D"), ("B", "C"), ("C", "E")]) + G = junction_tree(B) + + J = nx.Graph() + J.add_edges_from( + [ + (("A", "D"), ("A",)), + (("A",), ("A", "C")), + (("A", "C"), ("C",)), + (("C",), ("B", "C")), + (("B", "C"), ("C",)), + (("C",), ("C", "E")), + ] + ) + + assert nx.is_isomorphic(G, J) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_mst.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_mst.py new file mode 100644 index 0000000000000000000000000000000000000000..373f16cf7a016f1ed56ba2826745783568f20b5e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_mst.py @@ -0,0 +1,708 @@ +"""Unit tests for the :mod:`networkx.algorithms.tree.mst` module.""" + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +def test_unknown_algorithm(): + with pytest.raises(ValueError): + nx.minimum_spanning_tree(nx.Graph(), algorithm="random") + with pytest.raises( + ValueError, match="random is not a valid choice for an algorithm." + ): + nx.maximum_spanning_edges(nx.Graph(), algorithm="random") + + +class MinimumSpanningTreeTestBase: + """Base class for test classes for minimum spanning tree algorithms. + This class contains some common tests that will be inherited by + subclasses. Each subclass must have a class attribute + :data:`algorithm` that is a string representing the algorithm to + run, as described under the ``algorithm`` keyword argument for the + :func:`networkx.minimum_spanning_edges` function. Subclasses can + then implement any algorithm-specific tests. + """ + + def setup_method(self, method): + """Creates an example graph and stores the expected minimum and + maximum spanning tree edges. + """ + # This stores the class attribute `algorithm` in an instance attribute. 
+ self.algo = self.algorithm + # This example graph comes from Wikipedia: + # https://en.wikipedia.org/wiki/Kruskal's_algorithm + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + self.minimum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (0, 3, {"weight": 5}), + (1, 4, {"weight": 7}), + (2, 4, {"weight": 5}), + (3, 5, {"weight": 6}), + (4, 6, {"weight": 9}), + ] + self.maximum_spanning_edgelist = [ + (0, 1, {"weight": 7}), + (1, 2, {"weight": 8}), + (1, 3, {"weight": 9}), + (3, 4, {"weight": 15}), + (4, 6, {"weight": 9}), + (5, 6, {"weight": 11}), + ] + + def test_minimum_edges(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_edges(self): + edges = nx.maximum_spanning_edges(self.G, algorithm=self.algo) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_without_data(self): + edges = nx.minimum_spanning_edges(self.G, algorithm=self.algo, data=False) + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + + def test_nan_weights(self): + # Edge weights NaN never appear in the spanning tree. 
see #2164 + G = self.G + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + expected = [(u, v) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, expected) + # Now test for raising exception + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm=self.algo, data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_MultiGraph(self): + G = nx.MultiGraph() + G.add_edge(0, 12, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm="prim", data=False, ignore_nan=False + ) + with pytest.raises(ValueError): + list(edges) + # test default for ignore_nan as False + edges = nx.minimum_spanning_edges(G, algorithm="prim", data=False) + with pytest.raises(ValueError): + list(edges) + + def test_nan_weights_order(self): + # now try again with a nan edge at the beginning of G.nodes + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_edge(0, 7, weight=float("nan")) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_isolated_node(self): + # now try again with an isolated node + edges = [ + (0, 1, 7), + (0, 3, 5), + (1, 2, 8), + (1, 3, 9), + (1, 4, 7), + (2, 4, 5), + (3, 4, 15), + (3, 5, 6), + (4, 5, 8), + (4, 6, 9), + (5, 6, 11), + ] + G = nx.Graph() + G.add_weighted_edges_from([(u + 1, v + 1, wt) for u, v, wt in edges]) + G.add_node(0) + edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, data=False, ignore_nan=True + ) + actual = sorted((min(u, v), max(u, v)) for u, v in edges) + shift = [(u + 1, v + 1) for u, v, d in self.minimum_spanning_edgelist] + assert edges_equal(actual, shift) + + def test_minimum_tree(self): + T = nx.minimum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + def test_maximum_tree(self): + T = nx.maximum_spanning_tree(self.G, algorithm=self.algo) + actual = sorted(T.edges(data=True)) + assert edges_equal(actual, self.maximum_spanning_edgelist) + + def test_disconnected(self): + G = nx.Graph([(0, 1, {"weight": 1}), (2, 3, {"weight": 2})]) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(list(T), list(range(4))) + assert edges_equal(list(T.edges()), [(0, 1), (2, 3)]) + + def test_empty_graph(self): + G = nx.empty_graph(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert nodes_equal(sorted(T), list(range(3))) + assert T.number_of_edges() == 0 + + def test_attributes(self): + G = nx.Graph() + G.add_edge(1, 2, weight=1, color="red", distance=7) + G.add_edge(2, 3, weight=1, color="green", distance=2) + G.add_edge(1, 3, weight=10, color="blue", distance=1) + G.graph["foo"] = "bar" + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert T.graph == G.graph + assert nodes_equal(T, G) + for u, v in T.edges(): + assert T.adj[u][v] 
== G.adj[u][v] + + def test_weight_attribute(self): + G = nx.Graph() + G.add_edge(0, 1, weight=1, distance=7) + G.add_edge(0, 2, weight=30, distance=1) + G.add_edge(1, 2, weight=1, distance=1) + G.add_node(3) + T = nx.minimum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 2), (1, 2)]) + T = nx.maximum_spanning_tree(G, algorithm=self.algo, weight="distance") + assert nodes_equal(sorted(T), list(range(4))) + assert edges_equal(sorted(T.edges()), [(0, 1), (0, 2)]) + + +class TestBoruvka(MinimumSpanningTreeTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Borůvka's algorithm. + """ + + algorithm = "boruvka" + + def test_unicode_name(self): + """Tests that using a Unicode string can correctly indicate + Borůvka's algorithm. + """ + edges = nx.minimum_spanning_edges(self.G, algorithm="borůvka") + # Edges from the spanning edges functions don't come in sorted + # orientation, so we need to sort each edge individually. + actual = sorted((min(u, v), max(u, v), d) for u, v, d in edges) + assert edges_equal(actual, self.minimum_spanning_edgelist) + + +class MultigraphMSTTestBase(MinimumSpanningTreeTestBase): + # Abstract class + + def test_multigraph_keys_min(self): + """Tests that the minimum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + min_edges = nx.minimum_spanning_edges + mst_edges = min_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "b")], list(mst_edges)) + + def test_multigraph_keys_max(self): + """Tests that the maximum spanning edges of a multigraph + preserves edge keys. + """ + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + max_edges = nx.maximum_spanning_edges + mst_edges = max_edges(G, algorithm=self.algo, data=False) + assert edges_equal([(0, 1, "a")], list(mst_edges)) + + +class TestKruskal(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Kruskal's algorithm. + """ + + algorithm = "kruskal" + + def test_key_data_bool(self): + """Tests that the keys and data values are included in + MST edges based on whether keys and data parameters are + true or false""" + G = nx.MultiGraph() + G.add_edge(1, 2, key=1, weight=2) + G.add_edge(1, 2, key=2, weight=3) + G.add_edge(3, 2, key=1, weight=2) + G.add_edge(3, 1, key=1, weight=4) + + # keys are included and data is not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=False + ) + assert edges_equal([(1, 2, 1), (2, 3, 1)], list(mst_edges)) + + # keys are not included and data is included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=True + ) + assert edges_equal( + [(1, 2, {"weight": 2}), (2, 3, {"weight": 2})], list(mst_edges) + ) + + # both keys and data are not included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=False, data=False + ) + assert edges_equal([(1, 2), (2, 3)], list(mst_edges)) + + # both keys and data are included + mst_edges = nx.minimum_spanning_edges( + G, algorithm=self.algo, keys=True, data=True + ) + assert edges_equal( + [(1, 2, 1, {"weight": 2}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + +class TestPrim(MultigraphMSTTestBase): + """Unit tests for computing a minimum (or maximum) spanning tree + using Prim's algorithm. 
+ """ + + algorithm = "prim" + + def test_prim_mst_edges_simple_graph(self): + H = nx.Graph() + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, {"key": 2, "weight": 3}), (2, 3, {"key": 1, "weight": 2})], + list(mst_edges), + ) + + def test_ignore_nan(self): + """Tests that the edges with NaN weights are ignored or + raise an Error based on ignore_nan is true or false""" + H = nx.MultiGraph() + H.add_edge(1, 2, key=1, weight=float("nan")) + H.add_edge(1, 2, key=2, weight=3) + H.add_edge(3, 2, key=1, weight=2) + H.add_edge(3, 1, key=1, weight=4) + + # NaN weight edges are ignored when ignore_nan=True + mst_edges = nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=True) + assert edges_equal( + [(1, 2, 2, {"weight": 3}), (2, 3, 1, {"weight": 2})], list(mst_edges) + ) + + # NaN weight edges raise Error when ignore_nan=False + with pytest.raises(ValueError): + list(nx.minimum_spanning_edges(H, algorithm=self.algo, ignore_nan=False)) + + def test_multigraph_keys_tree(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.minimum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 1)], list(T.edges(data="weight"))) + + def test_multigraph_keys_tree_max(self): + G = nx.MultiGraph() + G.add_edge(0, 1, key="a", weight=2) + G.add_edge(0, 1, key="b", weight=1) + T = nx.maximum_spanning_tree(G, algorithm=self.algo) + assert edges_equal([(0, 1, 2)], list(T.edges(data="weight"))) + + +class TestSpanningTreeIterator: + """ + Tests the spanning tree iterator on the example graph in the 2005 Sörensen + and Janssens paper An Algorithm to Generate all Spanning Trees of a Graph in + Order of Increasing Cost + """ + + def setup_method(self): + # Original Graph + edges = [(0, 1, 5), (1, 2, 4), (1, 4, 6), (2, 3, 5), (2, 4, 7), (3, 4, 3)] + self.G = nx.Graph() + self.G.add_weighted_edges_from(edges) + # List of lists of spanning trees in increasing order + self.spanning_trees = [ + # 1, MST, cost = 17 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 2, cost = 18 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (3, 4, {"weight": 3}), + ], + # 3, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (3, 4, {"weight": 3}), + ], + # 4, cost = 19 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 5, cost = 20 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + ], + # 6, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 4, {"weight": 7}), + (3, 4, {"weight": 3}), + ], + # 7, cost = 21 + [ + (0, 1, {"weight": 5}), + (1, 2, {"weight": 4}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + # 8, cost = 23 + [ + (0, 1, {"weight": 5}), + (1, 4, {"weight": 6}), + (2, 3, {"weight": 5}), + (2, 4, {"weight": 7}), + ], + ] + + def test_minimum_spanning_tree_iterator(self): + """ + Tests that the spanning trees are correctly returned in increasing order + """ + tree_index = 0 + for tree in nx.SpanningTreeIterator(self.G): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index += 1 + + def test_maximum_spanning_tree_iterator(self): + """ + Tests that 
the spanning trees are correctly returned in decreasing order + """ + tree_index = 7 + for tree in nx.SpanningTreeIterator(self.G, minimum=False): + actual = sorted(tree.edges(data=True)) + assert edges_equal(actual, self.spanning_trees[tree_index]) + tree_index -= 1 + + +def test_random_spanning_tree_multiplicative_small(): + """ + Using a fixed seed, sample one tree for repeatability. + """ + from math import exp + + pytest.importorskip("scipy") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)] + solution = nx.Graph() + solution.add_edges_from(solution_edges) + + sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42) + + assert nx.utils.edges_equal(solution.edges, sampled_tree.edges) + + +@pytest.mark.slow +def test_random_spanning_tree_multiplicative_large(): + """ + Sample many trees from the distribution created in the last test + """ + from math import exp + from random import Random + + pytest.importorskip("numpy") + stats = pytest.importorskip("scipy.stats") + + gamma = { + (0, 1): -0.6383, + (0, 2): -0.6827, + (0, 5): 0, + (1, 2): -1.0781, + (1, 4): 0, + (2, 3): 0, + (5, 3): -0.2820, + (5, 4): -0.3327, + (4, 3): -0.9927, + } + + # The undirected support of gamma + G = nx.Graph() + for u, v in gamma: + G.add_edge(u, v, lambda_key=exp(gamma[(u, v)])) + + # Find the multiplicative weight for each tree. + total_weight = 0 + tree_expected = {} + for t in nx.SpanningTreeIterator(G): + # Find the multiplicative weight of the spanning tree + weight = 1 + for u, v, d in t.edges(data="lambda_key"): + weight *= d + tree_expected[t] = weight + total_weight += weight + + # Assert that every tree has an entry in the expected distribution + assert len(tree_expected) == 75 + + # Set the sample size and then calculate the expected number of times we + # expect to see each tree. This test uses a near minimum sample size where + # the most unlikely tree has an expected frequency of 5.15. + # (Minimum required is 5) + # + # Here we also initialize the tree_actual dict so that we know the keys + # match between the two. We will later take advantage of the fact that since + # python 3.7 dict order is guaranteed so the expected and actual data will + # have the same order. + sample_size = 1200 + tree_actual = {} + for t in tree_expected: + tree_expected[t] = (tree_expected[t] / total_weight) * sample_size + tree_actual[t] = 0 + + # Sample the spanning trees + # + # Assert that they are actually trees and record which of the 75 trees we + # have sampled. + # + # For repeatability, we want to take advantage of the decorators in NetworkX + # to randomly sample the same sample each time. However, if we pass in a + # constant seed to sample_spanning_tree we will get the same tree each time. + # Instead, we can create our own random number generator with a fixed seed + # and pass those into sample_spanning_tree. 
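+    # A single Random(37) instance is shared by all draws below; its state
+    # advances between calls, so the 1200 samples can differ from one another
+    # while the run as a whole stays reproducible.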
+    rng = Random(37)
+    for _ in range(sample_size):
+        sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng)
+        assert nx.is_tree(sampled_tree)
+
+        for t in tree_expected:
+            if nx.utils.edges_equal(t.edges, sampled_tree.edges):
+                tree_actual[t] += 1
+                break
+
+    # Conduct a Chi squared test to see if the actual distribution matches the
+    # expected one at an alpha = 0.05 significance level.
+    #
+    # H_0: The distribution of trees in tree_actual matches the normalized product
+    # of the edge weights in the tree.
+    #
+    # H_a: The distribution of trees in tree_actual follows some other
+    # distribution of spanning trees.
+    _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))
+
+    # Assert that p is greater than the significance level so that we do not
+    # reject the null hypothesis
+    assert not p < 0.05
+
+
+def test_random_spanning_tree_additive_small():
+    """
+    Sample a single spanning tree from the additive method.
+    """
+    pytest.importorskip("scipy")
+
+    edges = {
+        (0, 1): 1,
+        (0, 2): 1,
+        (0, 5): 3,
+        (1, 2): 2,
+        (1, 4): 3,
+        (2, 3): 3,
+        (5, 3): 4,
+        (5, 4): 5,
+        (4, 3): 4,
+    }
+
+    # Build the graph
+    G = nx.Graph()
+    for u, v in edges:
+        G.add_edge(u, v, weight=edges[(u, v)])
+
+    solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]
+    solution = nx.Graph()
+    solution.add_edges_from(solution_edges)
+
+    sampled_tree = nx.random_spanning_tree(
+        G, weight="weight", multiplicative=False, seed=37
+    )
+
+    assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)
+
+
+@pytest.mark.slow
+def test_random_spanning_tree_additive_large():
+    """
+    Sample many spanning trees from the additive method.
+    """
+    from random import Random
+
+    pytest.importorskip("numpy")
+    stats = pytest.importorskip("scipy.stats")
+
+    edges = {
+        (0, 1): 1,
+        (0, 2): 1,
+        (0, 5): 3,
+        (1, 2): 2,
+        (1, 4): 3,
+        (2, 3): 3,
+        (5, 3): 4,
+        (5, 4): 5,
+        (4, 3): 4,
+    }
+
+    # Build the graph
+    G = nx.Graph()
+    for u, v in edges:
+        G.add_edge(u, v, weight=edges[(u, v)])
+
+    # Find the additive weight for each tree.
+    total_weight = 0
+    tree_expected = {}
+    for t in nx.SpanningTreeIterator(G):
+        # Find the additive weight (sum of edge weights) of the spanning tree
+        weight = 0
+        for u, v, d in t.edges(data="weight"):
+            weight += d
+        tree_expected[t] = weight
+        total_weight += weight
+
+    # Assert that every tree has an entry in the expected distribution
+    assert len(tree_expected) == 75
+
+    # Set the sample size and then calculate the expected number of times we
+    # expect to see each tree. This test uses a near minimum sample size where
+    # the most unlikely tree has an expected frequency of 5.07.
+    # (Minimum required is 5)
+    #
+    # Here we also initialize the tree_actual dict so that we know the keys
+    # match between the two. We will later take advantage of the fact that since
+    # python 3.7 dict order is guaranteed so the expected and actual data will
+    # have the same order.
+    sample_size = 500
+    tree_actual = {}
+    for t in tree_expected:
+        tree_expected[t] = (tree_expected[t] / total_weight) * sample_size
+        tree_actual[t] = 0
+
+    # Sample the spanning trees
+    #
+    # Assert that they are actually trees and record which of the 75 trees we
+    # have sampled.
+    #
+    # For repeatability, we want to take advantage of the decorators in NetworkX
+    # to randomly sample the same sample each time. However, if we pass in a
+    # constant seed to sample_spanning_tree we will get the same tree each time.
+    # Instead, we can create our own random number generator with a fixed seed
+    # and pass those into sample_spanning_tree.
+    rng = Random(37)
+    for _ in range(sample_size):
+        sampled_tree = nx.random_spanning_tree(
+            G, "weight", multiplicative=False, seed=rng
+        )
+        assert nx.is_tree(sampled_tree)
+
+        for t in tree_expected:
+            if nx.utils.edges_equal(t.edges, sampled_tree.edges):
+                tree_actual[t] += 1
+                break
+
+    # Conduct a Chi squared test to see if the actual distribution matches the
+    # expected one at an alpha = 0.05 significance level.
+    #
+    # H_0: The distribution of trees in tree_actual matches the normalized sum
+    # of the edge weights in the tree.
+    #
+    # H_a: The distribution of trees in tree_actual follows some other
+    # distribution of spanning trees.
+    _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))
+
+    # Assert that p is greater than the significance level so that we do not
+    # reject the null hypothesis
+    assert not p < 0.05
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py
new file mode 100644
index 0000000000000000000000000000000000000000..284d94e2e5059de267b5ea47f6012a42c6ac4639
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_operations.py
@@ -0,0 +1,53 @@
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import edges_equal, nodes_equal
+
+
+def _check_custom_label_attribute(input_trees, res_tree, label_attribute):
+    res_attr_dict = nx.get_node_attributes(res_tree, label_attribute)
+    res_attr_set = set(res_attr_dict.values())
+    input_label = (tree for tree, root in input_trees)
+    input_label_set = set(chain.from_iterable(input_label))
+    return res_attr_set == input_label_set
+
+
+def test_empty_sequence():
+    """Joining the empty sequence results in the tree with one node."""
+    T = nx.join_trees([])
+    assert len(T) == 1
+    assert T.number_of_edges() == 0
+
+
+def test_single():
+    """Joining just one tree yields a tree with one more node."""
+    T = nx.empty_graph(1)
+    trees = [(T, 0)]
+    actual_with_label = nx.join_trees(trees, label_attribute="custom_label")
+    expected = nx.path_graph(2)
+    assert nodes_equal(list(expected), list(actual_with_label))
+    assert edges_equal(list(expected.edges()), list(actual_with_label.edges()))
+
+
+def test_basic():
+    """Joining multiple subtrees at a root node."""
+    trees = [(nx.full_rary_tree(2, 2**2 - 1), 0) for i in range(2)]
+    expected = nx.full_rary_tree(2, 2**3 - 1)
+    actual = nx.join_trees(trees, label_attribute="old_labels")
+    assert nx.is_isomorphic(actual, expected)
+    assert _check_custom_label_attribute(trees, actual, "old_labels")
+
+    actual_without_label = nx.join_trees(trees)
+    assert nx.is_isomorphic(actual_without_label, expected)
+    # check that no labels were stored
+    assert all(not data for _, data in actual_without_label.nodes(data=True))
+
+
+def test_first_label():
+    """Test the functionality of the first_label argument."""
+    T1 = nx.path_graph(3)
+    T2 = nx.path_graph(2)
+    actual = nx.join_trees([(T1, 0), (T2, 0)], first_label=10)
+    expected_nodes = set(range(10, 16))
+    assert set(actual.nodes()) == expected_nodes
+    assert set(actual.neighbors(10)) == {11, 14}
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_recognition.py b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_recognition.py
new file mode 100644 index
0000000000000000000000000000000000000000..a9c6c5aade9f1b3a317541c68affd4b5c2f21752 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/tree/tests/test_recognition.py @@ -0,0 +1,162 @@ +import pytest + +import networkx as nx + + +class TestTreeRecognition: + graph = nx.Graph + multigraph = nx.MultiGraph + + @classmethod + def setup_class(cls): + cls.T1 = cls.graph() + + cls.T2 = cls.graph() + cls.T2.add_node(1) + + cls.T3 = cls.graph() + cls.T3.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T3.add_edges_from(edges) + + cls.T5 = cls.multigraph() + cls.T5.add_nodes_from(range(5)) + edges = [(i, i + 1) for i in range(4)] + cls.T5.add_edges_from(edges) + + cls.T6 = cls.graph() + cls.T6.add_nodes_from([6, 7]) + cls.T6.add_edge(6, 7) + + cls.F1 = nx.compose(cls.T6, cls.T3) + + cls.N4 = cls.graph() + cls.N4.add_node(1) + cls.N4.add_edge(1, 1) + + cls.N5 = cls.graph() + cls.N5.add_nodes_from(range(5)) + + cls.N6 = cls.graph() + cls.N6.add_nodes_from(range(3)) + cls.N6.add_edges_from([(0, 1), (1, 2), (2, 0)]) + + cls.NF1 = nx.compose(cls.T6, cls.N6) + + def test_null_tree(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.graph()) + + def test_null_tree2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_tree(self.multigraph()) + + def test_null_forest(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.graph()) + + def test_null_forest2(self): + with pytest.raises(nx.NetworkXPointlessConcept): + nx.is_forest(self.multigraph()) + + def test_is_tree(self): + assert nx.is_tree(self.T2) + assert nx.is_tree(self.T3) + assert nx.is_tree(self.T5) + + def test_is_not_tree(self): + assert not nx.is_tree(self.N4) + assert not nx.is_tree(self.N5) + assert not nx.is_tree(self.N6) + + def test_is_forest(self): + assert nx.is_forest(self.T2) + assert nx.is_forest(self.T3) + assert nx.is_forest(self.T5) + assert nx.is_forest(self.F1) + assert nx.is_forest(self.N5) + + def test_is_not_forest(self): + assert not nx.is_forest(self.N4) + assert not nx.is_forest(self.N6) + assert not nx.is_forest(self.NF1) + + +class TestDirectedTreeRecognition(TestTreeRecognition): + graph = nx.DiGraph + multigraph = nx.MultiDiGraph + + +def test_disconnected_graph(): + # https://github.com/networkx/networkx/issues/1144 + G = nx.Graph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)]) + assert not nx.is_tree(G) + + +def test_dag_nontree(): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (0, 2), (1, 2)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_multicycle(): + G = nx.MultiDiGraph() + G.add_edges_from([(0, 1), (0, 1)]) + assert not nx.is_tree(G) + assert nx.is_directed_acyclic_graph(G) + + +def test_emptybranch(): + G = nx.DiGraph() + G.add_nodes_from(range(10)) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_path(): + G = nx.DiGraph() + nx.add_path(G, range(5)) + assert nx.is_branching(G) + assert nx.is_arborescence(G) + + +def test_notbranching1(): + # Acyclic violation. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (1, 0)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notbranching2(): + # In-degree violation. 
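+    # Node 2 gets two incoming edges, (0, 2) and (3, 2), so its in-degree
+    # exceeds one and the graph cannot be a branching.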
+ G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (3, 2)]) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence1(): + # Not an arborescence due to not spanning. + G = nx.MultiDiGraph() + G.add_nodes_from(range(10)) + G.add_edges_from([(0, 1), (0, 2), (1, 3), (5, 6)]) + assert nx.is_branching(G) + assert not nx.is_arborescence(G) + + +def test_notarborescence2(): + # Not an arborescence due to in-degree violation. + G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + G.add_edge(6, 4) + assert not nx.is_branching(G) + assert not nx.is_arborescence(G) diff --git a/MLPY/Lib/site-packages/networkx/algorithms/triads.py b/MLPY/Lib/site-packages/networkx/algorithms/triads.py new file mode 100644 index 0000000000000000000000000000000000000000..0041b83d82ada9ddd5c79428168a571cbcc13fd4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/triads.py @@ -0,0 +1,565 @@ +# See https://github.com/networkx/networkx/pull/1474 +# Copyright 2011 Reya Group +# Copyright 2011 Alex Levenson +# Copyright 2011 Diederik van Liere +"""Functions for analyzing triads of a graph.""" + +from collections import defaultdict +from itertools import combinations, permutations + +import networkx as nx +from networkx.utils import not_implemented_for, py_random_state + +__all__ = [ + "triadic_census", + "is_triad", + "all_triplets", + "all_triads", + "triads_by_type", + "triad_type", + "random_triad", +] + +#: The integer codes representing each type of triad. +#: +#: Triads that are the same up to symmetry have the same code. +TRICODES = ( + 1, + 2, + 2, + 3, + 2, + 4, + 6, + 8, + 2, + 6, + 5, + 7, + 3, + 8, + 7, + 11, + 2, + 6, + 4, + 8, + 5, + 9, + 9, + 13, + 6, + 10, + 9, + 14, + 7, + 14, + 12, + 15, + 2, + 5, + 6, + 7, + 6, + 9, + 10, + 14, + 4, + 9, + 9, + 12, + 8, + 13, + 14, + 15, + 3, + 7, + 8, + 11, + 7, + 12, + 14, + 15, + 8, + 14, + 13, + 15, + 11, + 15, + 15, + 16, +) + +#: The names of each type of triad. The order of the elements is +#: important: it corresponds to the tricodes given in :data:`TRICODES`. +TRIAD_NAMES = ( + "003", + "012", + "102", + "021D", + "021U", + "021C", + "111D", + "111U", + "030T", + "030C", + "201", + "120D", + "120U", + "120C", + "210", + "300", +) + + +#: A dictionary mapping triad code to triad name. +TRICODE_TO_NAME = {i: TRIAD_NAMES[code - 1] for i, code in enumerate(TRICODES)} + + +def _tricode(G, v, u, w): + """Returns the integer code of the given triad. + + This is some fancy magic that comes from Batagelj and Mrvar's paper. It + treats each edge joining a pair of `v`, `u`, and `w` as a bit in + the binary representation of an integer. + + """ + combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32)) + return sum(x for u, v, x in combos if v in G[u]) + + +@not_implemented_for("undirected") +@nx._dispatch +def triadic_census(G, nodelist=None): + """Determines the triadic census of a directed graph. + + The triadic census is a count of how many of the 16 possible types of + triads are present in a directed graph. If a list of nodes is passed, then + only those triads are taken into account which have elements of nodelist in them. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + nodelist : list + List of nodes for which you want to calculate triadic census + + Returns + ------- + census : dict + Dictionary with triad type as keys and number of occurrences as values. 
+ + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> triadic_census = nx.triadic_census(G) + >>> for key, value in triadic_census.items(): + ... print(f"{key}: {value}") + ... + 003: 0 + 012: 0 + 102: 0 + 021D: 0 + 021U: 0 + 021C: 0 + 111D: 0 + 111U: 0 + 030T: 2 + 030C: 2 + 201: 0 + 120D: 0 + 120U: 0 + 120C: 0 + 210: 0 + 300: 0 + + Notes + ----- + This algorithm has complexity $O(m)$ where $m$ is the number of edges in + the graph. + + Raises + ------ + ValueError + If `nodelist` contains duplicate nodes or nodes not in `G`. + If you want to ignore this you can preprocess with `set(nodelist) & G.nodes` + + See also + -------- + triad_graph + + References + ---------- + .. [1] Vladimir Batagelj and Andrej Mrvar, A subquadratic triad census + algorithm for large sparse networks with small maximum degree, + University of Ljubljana, + http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf + + """ + nodeset = set(G.nbunch_iter(nodelist)) + if nodelist is not None and len(nodelist) != len(nodeset): + raise ValueError("nodelist includes duplicate nodes or nodes not in G") + + N = len(G) + Nnot = N - len(nodeset) # can signal special counting for subset of nodes + + # create an ordering of nodes with nodeset nodes first + m = {n: i for i, n in enumerate(nodeset)} + if Nnot: + # add non-nodeset nodes later in the ordering + not_nodeset = G.nodes - nodeset + m.update((n, i + N) for i, n in enumerate(not_nodeset)) + + # build all_neighbor dicts for easy counting + # After Python 3.8 can leave off these keys(). Speedup also using G._pred + # nbrs = {n: G._pred[n].keys() | G._succ[n].keys() for n in G} + nbrs = {n: G.pred[n].keys() | G.succ[n].keys() for n in G} + dbl_nbrs = {n: G.pred[n].keys() & G.succ[n].keys() for n in G} + + if Nnot: + sgl_nbrs = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in not_nodeset} + # find number of edges not incident to nodes in nodeset + sgl = sum(1 for n in not_nodeset for nbr in sgl_nbrs[n] if nbr not in nodeset) + sgl_edges_outside = sgl // 2 + dbl = sum(1 for n in not_nodeset for nbr in dbl_nbrs[n] if nbr not in nodeset) + dbl_edges_outside = dbl // 2 + + # Initialize the count for each triad to be zero. + census = {name: 0 for name in TRIAD_NAMES} + # Main loop over nodes + for v in nodeset: + vnbrs = nbrs[v] + dbl_vnbrs = dbl_nbrs[v] + if Nnot: + # set up counts of edges attached to v. + sgl_unbrs_bdy = sgl_unbrs_out = dbl_unbrs_bdy = dbl_unbrs_out = 0 + for u in vnbrs: + if m[u] <= m[v]: + continue + unbrs = nbrs[u] + neighbors = (vnbrs | unbrs) - {u, v} + # Count connected triads. + for w in neighbors: + if m[u] < m[w] or (m[v] < m[w] < m[u] and v not in nbrs[w]): + code = _tricode(G, v, u, w) + census[TRICODE_TO_NAME[code]] += 1 + + # Use a formula for dyadic triads with edge incident to v + if u in dbl_vnbrs: + census["102"] += N - len(neighbors) - 2 + else: + census["012"] += N - len(neighbors) - 2 + + # Count edges attached to v. Subtract later to get triads with v isolated + # _out are (u,unbr) for unbrs outside boundary of nodeset + # _bdy are (u,unbr) for unbrs on boundary of nodeset (get double counted) + if Nnot and u not in nodeset: + sgl_unbrs = sgl_nbrs[u] + sgl_unbrs_bdy += len(sgl_unbrs & vnbrs - nodeset) + sgl_unbrs_out += len(sgl_unbrs - vnbrs - nodeset) + dbl_unbrs = dbl_nbrs[u] + dbl_unbrs_bdy += len(dbl_unbrs & vnbrs - nodeset) + dbl_unbrs_out += len(dbl_unbrs - vnbrs - nodeset) + # if nodeset == G.nodes, skip this b/c we will find the edge later. 
+ if Nnot: + # Count edges outside nodeset not connected with v (v isolated triads) + census["012"] += sgl_edges_outside - (sgl_unbrs_out + sgl_unbrs_bdy // 2) + census["102"] += dbl_edges_outside - (dbl_unbrs_out + dbl_unbrs_bdy // 2) + + # calculate null triads: "003" + # null triads = total number of possible triads - all found triads + total_triangles = (N * (N - 1) * (N - 2)) // 6 + triangles_without_nodeset = (Nnot * (Nnot - 1) * (Nnot - 2)) // 6 + total_census = total_triangles - triangles_without_nodeset + census["003"] = total_census - sum(census.values()) + + return census + + +@nx._dispatch +def is_triad(G): + """Returns True if the graph G is a triad, else False. + + Parameters + ---------- + G : graph + A NetworkX Graph + + Returns + ------- + istriad : boolean + Whether G is a valid triad + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)]) + >>> nx.is_triad(G) + True + >>> G.add_edge(0, 1) + >>> nx.is_triad(G) + False + """ + if isinstance(G, nx.Graph): + if G.order() == 3 and nx.is_directed(G): + if not any((n, n) in G.edges() for n in G.nodes()): + return True + return False + + +@not_implemented_for("undirected") +@nx._dispatch +def all_triplets(G): + """Returns a generator of all possible sets of 3 nodes in a DiGraph. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + triplets : generator of 3-tuples + Generator of tuples of 3 nodes + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + >>> list(nx.all_triplets(G)) + [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)] + + """ + triplets = combinations(G.nodes(), 3) + return triplets + + +@not_implemented_for("undirected") +@nx._dispatch +def all_triads(G): + """A generator of all possible triads in G. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + all_triads : generator of DiGraphs + Generator of triads (order-3 DiGraphs) + + Examples + -------- + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)]) + >>> for triad in nx.all_triads(G): + ... print(triad.edges) + [(1, 2), (2, 3), (3, 1)] + [(1, 2), (4, 1), (4, 2)] + [(3, 1), (3, 4), (4, 1)] + [(2, 3), (3, 4), (4, 2)] + + """ + triplets = combinations(G.nodes(), 3) + for triplet in triplets: + yield G.subgraph(triplet).copy() + + +@not_implemented_for("undirected") +@nx._dispatch +def triads_by_type(G): + """Returns a list of all triads for each triad type in a directed graph. + There are exactly 16 different types of triads possible. Suppose 1, 2, 3 are three + nodes, they will be classified as a particular triad type if their connections + are as follows: + + - 003: 1, 2, 3 + - 012: 1 -> 2, 3 + - 102: 1 <-> 2, 3 + - 021D: 1 <- 2 -> 3 + - 021U: 1 -> 2 <- 3 + - 021C: 1 -> 2 -> 3 + - 111D: 1 <-> 2 <- 3 + - 111U: 1 <-> 2 -> 3 + - 030T: 1 -> 2 -> 3, 1 -> 3 + - 030C: 1 <- 2 <- 3, 1 -> 3 + - 201: 1 <-> 2 <-> 3 + - 120D: 1 <- 2 -> 3, 1 <-> 3 + - 120U: 1 -> 2 <- 3, 1 <-> 3 + - 120C: 1 -> 2 -> 3, 1 <-> 3 + - 210: 1 -> 2 <-> 3, 1 <-> 3 + - 300: 1 <-> 2 <-> 3, 1 <-> 3 + + Refer to the :doc:`example gallery ` + for visual examples of the triad types. + + Parameters + ---------- + G : digraph + A NetworkX DiGraph + + Returns + ------- + tri_by_type : dict + Dictionary with triad types as keys and lists of triads as values. 
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
+    >>> triads = nx.triads_by_type(G)
+    >>> triads['120C'][0].edges()
+    OutEdgeView([(1, 2), (1, 3), (2, 3), (3, 1)])
+    >>> triads['012'][0].edges()
+    OutEdgeView([(1, 2)])
+
+    References
+    ----------
+    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+        Oxford.
+        https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
+    """
+    # num_triads = o * (o - 1) * (o - 2) // 6
+    # if num_triads > TRIAD_LIMIT: print(WARNING)
+    all_tri = all_triads(G)
+    tri_by_type = defaultdict(list)
+    for triad in all_tri:
+        name = triad_type(triad)
+        tri_by_type[name].append(triad)
+    return tri_by_type
+
+
+@not_implemented_for("undirected")
+@nx._dispatch
+def triad_type(G):
+    """Returns the sociological triad type for a triad.
+
+    Parameters
+    ----------
+    G : digraph
+       A NetworkX DiGraph with 3 nodes
+
+    Returns
+    -------
+    triad_type : str
+       A string identifying the triad type
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
+    >>> nx.triad_type(G)
+    '030C'
+    >>> G.add_edge(1, 3)
+    >>> nx.triad_type(G)
+    '120C'
+
+    Notes
+    -----
+    There can be 6 unique edges in a triad (order-3 DiGraph), so there are
+    2**6 = 64 unique triads given 3 nodes. These 64 triads each display exactly
+    1 of 16 topologies of triads (topologies can be permuted). These topologies
+    are identified by the following notation:
+
+    {m}{a}{n}{type} (for example: 111D, 210, 102)
+
+    Here:
+
+    {m}     = number of mutual ties (takes 0, 1, 2, 3); a mutual tie is (0,1)
+              AND (1,0)
+    {a}     = number of asymmetric ties (takes 0, 1, 2, 3); an asymmetric tie
+              is (0,1) BUT NOT (1,0) or vice versa
+    {n}     = number of null ties (takes 0, 1, 2, 3); a null tie is NEITHER
+              (0,1) NOR (1,0)
+    {type}  = a letter (takes U, D, C, T) corresponding to up, down, cyclical
+              and transitive. This is only used for topologies that can have
+              more than one form (e.g., 021D and 021U).
+
+    References
+    ----------
+    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
+        Oxford.
+        https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
+    """
+    if not is_triad(G):
+        raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)")
+    num_edges = len(G.edges())
+    if num_edges == 0:
+        return "003"
+    elif num_edges == 1:
+        return "012"
+    elif num_edges == 2:
+        e1, e2 = G.edges()
+        if set(e1) == set(e2):
+            return "102"
+        elif e1[0] == e2[0]:
+            return "021D"
+        elif e1[1] == e2[1]:
+            return "021U"
+        elif e1[1] == e2[0] or e2[1] == e1[0]:
+            return "021C"
+    elif num_edges == 3:
+        for e1, e2, e3 in permutations(G.edges(), 3):
+            if set(e1) == set(e2):
+                if e3[0] in e1:
+                    return "111U"
+                # e3[1] in e1:
+                return "111D"
+            elif set(e1).symmetric_difference(set(e2)) == set(e3):
+                if {e1[0], e2[0], e3[0]} == set(G.nodes()):
+                    return "030C"
+                # e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]):
+                return "030T"
+    elif num_edges == 4:
+        for e1, e2, e3, e4 in permutations(G.edges(), 4):
+            if set(e1) == set(e2):
+                # identify pair of symmetric edges (which necessarily exists)
+                if set(e3) == set(e4):
+                    return "201"
+                if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)):
+                    return "120D"
+                if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)):
+                    return "120U"
+                if e3[1] == e4[0]:
+                    return "120C"
+    elif num_edges == 5:
+        return "210"
+    elif num_edges == 6:
+        return "300"
+
+
+@not_implemented_for("undirected")
+@py_random_state(1)
+@nx._dispatch
+def random_triad(G, seed=None):
+    """Returns a random triad from a directed graph.
+
+    Parameters
+    ----------
+    G : digraph
+       A NetworkX DiGraph
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    G2 : subgraph
+       A randomly selected triad (order-3 NetworkX DiGraph)
+
+    Raises
+    ------
+    NetworkXError
+        If the input graph has fewer than 3 nodes.
+
+    Examples
+    --------
+    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
+    >>> triad = nx.random_triad(G, seed=1)
+    >>> triad.edges
+    OutEdgeView([(1, 2)])
+
+    """
+    if len(G) < 3:
+        raise nx.NetworkXError(
+            f"G needs at least 3 nodes to form a triad (it has {len(G)} nodes)"
+        )
+    nodes = seed.sample(list(G.nodes()), 3)
+    G2 = G.subgraph(nodes)
+    return G2
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/vitality.py b/MLPY/Lib/site-packages/networkx/algorithms/vitality.py
new file mode 100644
index 0000000000000000000000000000000000000000..c41efd13f2cf40758f374fcf6ee1de18302d83a2
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/vitality.py
@@ -0,0 +1,76 @@
+"""
+Vitality measures.
+"""
+from functools import partial
+
+import networkx as nx
+
+__all__ = ["closeness_vitality"]
+
+
+@nx._dispatch(edge_attrs="weight")
+def closeness_vitality(G, node=None, weight=None, wiener_index=None):
+    """Returns the closeness vitality for nodes in the graph.
+
+    The *closeness vitality* of a node, defined in Section 3.6.2 of [1],
+    is the change in the sum of distances between all node pairs when
+    excluding that node.
+
+    Parameters
+    ----------
+    G : NetworkX graph
+        A strongly-connected graph.
+
+    weight : string
+        The name of the edge attribute used as weight. This is passed
+        directly to the :func:`~networkx.wiener_index` function.
+
+    node : object
+        If specified, only the closeness vitality for this node will be
+        returned. Otherwise, a dictionary mapping each node to its
+        closeness vitality will be returned.
+
+ Other Parameters
+ ----------------
+ wiener_index : number
+ If you have already computed the Wiener index of the graph
+ `G`, you can provide that value here. Otherwise, it will be
+ computed for you.
+
+ Returns
+ -------
+ dictionary or float
+ If `node` is None, this function returns a dictionary
+ with nodes as keys and closeness vitality as the
+ value. Otherwise, it returns only the closeness vitality for the
+ specified `node`.
+
+ The closeness vitality of a node may be negative infinity if
+ removing that node would disconnect the graph.
+
+ Examples
+ --------
+ >>> G = nx.cycle_graph(3)
+ >>> nx.closeness_vitality(G)
+ {0: 2.0, 1: 2.0, 2: 2.0}
+
+ See Also
+ --------
+ closeness_centrality
+
+ References
+ ----------
+ .. [1] Ulrik Brandes, Thomas Erlebach (eds.).
+ *Network Analysis: Methodological Foundations*.
+ Springer, 2005.
+
+ """
+ if wiener_index is None:
+ wiener_index = nx.wiener_index(G, weight=weight)
+ if node is not None:
+ after = nx.wiener_index(G.subgraph(set(G) - {node}), weight=weight)
+ return wiener_index - after
+ vitality = partial(closeness_vitality, G, weight=weight, wiener_index=wiener_index)
+ # TODO This can be trivially parallelized.
+ return {v: vitality(node=v) for v in G}
diff --git a/MLPY/Lib/site-packages/networkx/algorithms/voronoi.py b/MLPY/Lib/site-packages/networkx/algorithms/voronoi.py
new file mode 100644
index 0000000000000000000000000000000000000000..af17f013ec89a0c59f01cc6753cacaef47eb6d97
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/algorithms/voronoi.py
@@ -0,0 +1,85 @@
+"""Functions for computing the Voronoi cells of a graph."""
+import networkx as nx
+from networkx.utils import groups
+
+__all__ = ["voronoi_cells"]
+
+
+@nx._dispatch(edge_attrs="weight")
+def voronoi_cells(G, center_nodes, weight="weight"):
+ """Returns the Voronoi cells centered at `center_nodes` with respect
+ to the shortest-path distance metric.
+
+ If $C$ is a set of nodes in the graph and $c$ is an element of $C$,
+ the *Voronoi cell* centered at a node $c$ is the set of all nodes
+ $v$ that are closer to $c$ than to any other center node in $C$ with
+ respect to the shortest-path distance metric. [1]_
+
+ For directed graphs, this will compute the "outward" Voronoi cells,
+ as defined in [1]_, in which distance is measured from the center
+ nodes to the target node. For the "inward" Voronoi cells, use the
+ :meth:`DiGraph.reverse` method to reverse the orientation of the
+ edges before invoking this function on the directed graph.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ center_nodes : set
+ A nonempty set of nodes in the graph `G` that represent the
+ centers of the Voronoi cells.
+
+ weight : string or function
+ The edge attribute (or an arbitrary function) representing the
+ weight of an edge. This keyword argument is as described in the
+ documentation for :func:`~networkx.multi_source_dijkstra_path`,
+ for example.
+
+ Returns
+ -------
+ dictionary
+ A mapping from center node to set of all nodes in the graph
+ closer to that center node than to any other center node. The
+ keys of the dictionary are the elements of `center_nodes`, and
+ the values of the dictionary form a partition of the nodes of
+ `G`.
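Looking back at `closeness_vitality` above: the implementation is the definition made executable, since the vitality of `v` is the Wiener index of `G` minus the Wiener index of `G` with `v` removed, and the `partial` call reuses one precomputed index for every node. A minimal editorial sketch (not library code) checking both points:

```python
import networkx as nx

G = nx.cycle_graph(3)
W = nx.wiener_index(G)  # 3 pairs of nodes, each at distance 1
for v in G:
    after = nx.wiener_index(G.subgraph(set(G) - {v}))
    assert nx.closeness_vitality(G, node=v) == W - after

# Passing the precomputed index avoids recomputing W once per node.
assert nx.closeness_vitality(G, wiener_index=W) == {0: 2.0, 1: 2.0, 2: 2.0}
```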
+ + Examples + -------- + To get only the partition of the graph induced by the Voronoi cells, + take the collection of all values in the returned dictionary:: + + >>> G = nx.path_graph(6) + >>> center_nodes = {0, 3} + >>> cells = nx.voronoi_cells(G, center_nodes) + >>> partition = set(map(frozenset, cells.values())) + >>> sorted(map(sorted, partition)) + [[0, 1], [2, 3, 4, 5]] + + Raises + ------ + ValueError + If `center_nodes` is empty. + + References + ---------- + .. [1] Erwig, Martin. (2000),"The graph Voronoi diagram with applications." + *Networks*, 36: 156--163. + https://doi.org/10.1002/1097-0037(200010)36:3<156::AID-NET2>3.0.CO;2-L + + """ + # Determine the shortest paths from any one of the center nodes to + # every node in the graph. + # + # This raises `ValueError` if `center_nodes` is an empty set. + paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight) + # Determine the center node from which the shortest path originates. + nearest = {v: p[0] for v, p in paths.items()} + # Get the mapping from center node to all nodes closer to it than to + # any other center node. + cells = groups(nearest) + # We collect all unreachable nodes under a special key, if there are any. + unreachable = set(G) - set(nearest) + if unreachable: + cells["unreachable"] = unreachable + return cells diff --git a/MLPY/Lib/site-packages/networkx/algorithms/walks.py b/MLPY/Lib/site-packages/networkx/algorithms/walks.py new file mode 100644 index 0000000000000000000000000000000000000000..6f357ce1d42c81939f10a14a4fadd5139f6aab9b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/walks.py @@ -0,0 +1,80 @@ +"""Function for computing walks in a graph. +""" + +import networkx as nx + +__all__ = ["number_of_walks"] + + +@nx._dispatch +def number_of_walks(G, walk_length): + """Returns the number of walks connecting each pair of nodes in `G` + + A *walk* is a sequence of nodes in which each adjacent pair of nodes + in the sequence is adjacent in the graph. A walk can repeat the same + edge and go in the opposite direction just as people can walk on a + set of paths, but standing still is not counted as part of the walk. + + This function only counts the walks with `walk_length` edges. Note that + the number of nodes in the walk sequence is one more than `walk_length`. + The number of walks can grow very quickly on a larger graph + and with a larger walk length. + + Parameters + ---------- + G : NetworkX graph + + walk_length : int + A nonnegative integer representing the length of a walk. + + Returns + ------- + dict + A dictionary of dictionaries in which outer keys are source + nodes, inner keys are target nodes, and inner values are the + number of walks of length `walk_length` connecting those nodes. + + Raises + ------ + ValueError + If `walk_length` is negative + + Examples + -------- + + >>> G = nx.Graph([(0, 1), (1, 2)]) + >>> walks = nx.number_of_walks(G, 2) + >>> walks + {0: {0: 1, 1: 0, 2: 1}, 1: {0: 0, 1: 2, 2: 0}, 2: {0: 1, 1: 0, 2: 1}} + >>> total_walks = sum(sum(tgts.values()) for _, tgts in walks.items()) + + You can also get the number of walks from a specific source node using the + returned dictionary. 
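The dict-of-dicts described above is a readable view of a matrix identity: the number of length-`k` walks from `u` to `v` is the `(u, v)` entry of the `k`-th power of the adjacency matrix, which is exactly how the implementation below computes it. A quick editorial sketch (assumes `numpy` and `scipy` are installed) cross-checking the two:

```python
import networkx as nx
import numpy as np

G = nx.Graph([(0, 1), (1, 2)])
k = 2
walks = nx.number_of_walks(G, k)

A = nx.adjacency_matrix(G, weight=None).toarray()
Ak = np.linalg.matrix_power(A, k)  # entry (i, j) counts length-k walks
nodes = list(G)
for i, u in enumerate(nodes):
    for j, v in enumerate(nodes):
        assert walks[u][v] == Ak[i, j]
```

Per-source and per-target lookups, as the docstring's own example continues below, simply index into this same returned structure.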
For example, number of walks of length 1 from node 0 + can be found as follows: + + >>> walks = nx.number_of_walks(G, 1) + >>> walks[0] + {0: 0, 1: 1, 2: 0} + >>> sum(walks[0].values()) # walks from 0 of length 1 + 1 + + Similarly, a target node can also be specified: + + >>> walks[0][1] + 1 + + """ + import numpy as np + + if walk_length < 0: + raise ValueError(f"`walk_length` cannot be negative: {walk_length}") + + A = nx.adjacency_matrix(G, weight=None) + # TODO: Use matrix_power from scipy.sparse when available + # power = sp.sparse.linalg.matrix_power(A, walk_length) + power = np.linalg.matrix_power(A.toarray(), walk_length) + result = { + u: {v: power[u_idx, v_idx] for v_idx, v in enumerate(G)} + for u_idx, u in enumerate(G) + } + return result diff --git a/MLPY/Lib/site-packages/networkx/algorithms/wiener.py b/MLPY/Lib/site-packages/networkx/algorithms/wiener.py new file mode 100644 index 0000000000000000000000000000000000000000..9e81cdc72ca61870c3eb818c6d2bb82315694f37 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/algorithms/wiener.py @@ -0,0 +1,79 @@ +"""Functions related to the Wiener index of a graph.""" + +from itertools import chain + +import networkx as nx + +from .components import is_connected, is_strongly_connected +from .shortest_paths import shortest_path_length as spl + +__all__ = ["wiener_index"] + +#: Rename the :func:`chain.from_iterable` function for the sake of +#: brevity. +chaini = chain.from_iterable + + +@nx._dispatch(edge_attrs="weight") +def wiener_index(G, weight=None): + """Returns the Wiener index of the given graph. + + The *Wiener index* of a graph is the sum of the shortest-path + distances between each pair of reachable nodes. For pairs of nodes + in undirected graphs, only one orientation of the pair is counted. + + Parameters + ---------- + G : NetworkX graph + + weight : object + The edge attribute to use as distance when computing + shortest-path distances. This is passed directly to the + :func:`networkx.shortest_path_length` function. + + Returns + ------- + float + The Wiener index of the graph `G`. + + Raises + ------ + NetworkXError + If the graph `G` is not connected. + + Notes + ----- + If a pair of nodes is not reachable, the distance is assumed to be + infinity. This means that for graphs that are not + strongly-connected, this function returns ``inf``. + + The Wiener index is not usually defined for directed graphs, however + this function uses the natural generalization of the Wiener index to + directed graphs. + + Examples + -------- + The Wiener index of the (unweighted) complete graph on *n* nodes + equals the number of pairs of the *n* nodes, since each pair of + nodes is at distance one:: + + >>> n = 10 + >>> G = nx.complete_graph(n) + >>> nx.wiener_index(G) == n * (n - 1) / 2 + True + + Graphs that are not strongly-connected have infinite Wiener index:: + + >>> G = nx.empty_graph(2) + >>> nx.wiener_index(G) + inf + + """ + is_directed = G.is_directed() + if (is_directed and not is_strongly_connected(G)) or ( + not is_directed and not is_connected(G) + ): + return float("inf") + total = sum(chaini(p.values() for v, p in spl(G, weight=weight))) + # Need to account for double counting pairs of nodes in undirected graphs. 
+ return total if is_directed else total / 2 diff --git a/MLPY/Lib/site-packages/networkx/classes/__init__.py b/MLPY/Lib/site-packages/networkx/classes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..721fa8b4767233bc2b624f6b2ce4d10533a4d66c --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/__init__.py @@ -0,0 +1,13 @@ +from .graph import Graph +from .digraph import DiGraph +from .multigraph import MultiGraph +from .multidigraph import MultiDiGraph + +from .function import * +from .graphviews import subgraph_view, reverse_view + +from networkx.classes import filters + +from networkx.classes import coreviews +from networkx.classes import graphviews +from networkx.classes import reportviews diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c74ac23e6324824a490a0c2273f3d3a94ae30b0 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/coreviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/coreviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b67aedde0867b457ed0006852485a9bc8e2154c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/coreviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/digraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/digraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65850a6347ed8cccb3d5ea9d2f10d51c79601407 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/digraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/filters.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77d0f808f14c1aa7c1b344d2fd583dc652d22f74 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/function.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/function.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..191475f054a5ac1553246cecd28010eb22b3722d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/function.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/graph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51edd71499a5c94ffc037e5a8223e308f47cb54c Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/graph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/graphviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/graphviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a97e68e7062900543d58a02109c2ef8ad88133ac Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/graphviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/multidigraph.cpython-39.pyc 
b/MLPY/Lib/site-packages/networkx/classes/__pycache__/multidigraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec4d157f77955505126341a70f476ac1c01d3247 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/multidigraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/multigraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/multigraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..551b503c4a27c7da9d0f6966d37fe8a78e99fb4f Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/multigraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/__pycache__/reportviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/__pycache__/reportviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fba586666a5774d61f97c86302829e5c1bdeb047 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/__pycache__/reportviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/coreviews.py b/MLPY/Lib/site-packages/networkx/classes/coreviews.py new file mode 100644 index 0000000000000000000000000000000000000000..5c4defe94aa14ddbbae4a8b4b796032063fe6df7 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/coreviews.py @@ -0,0 +1,367 @@ +"""Views of core data structures such as nested Mappings (e.g. dict-of-dicts). +These ``Views`` often restrict element access, with either the entire view or +layers of nested mappings being read-only. +""" +from collections.abc import Mapping + +__all__ = [ + "AtlasView", + "AdjacencyView", + "MultiAdjacencyView", + "UnionAtlas", + "UnionAdjacency", + "UnionMultiInner", + "UnionMultiAdjacency", + "FilterAtlas", + "FilterAdjacency", + "FilterMultiInner", + "FilterMultiAdjacency", +] + + +class AtlasView(Mapping): + """An AtlasView is a Read-only Mapping of Mappings. + + It is a View into a dict-of-dict data structure. + The inner level of dict is read-write. But the + outer level is read-only. + + See Also + ======== + AdjacencyView: View into dict-of-dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_atlas",) + + def __getstate__(self): + return {"_atlas": self._atlas} + + def __setstate__(self, state): + self._atlas = state["_atlas"] + + def __init__(self, d): + self._atlas = d + + def __len__(self): + return len(self._atlas) + + def __iter__(self): + return iter(self._atlas) + + def __getitem__(self, key): + return self._atlas[key] + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + def __str__(self): + return str(self._atlas) # {nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r})" + + +class AdjacencyView(AtlasView): + """An AdjacencyView is a Read-only Map of Maps of Maps. + + It is a View into a dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AtlasView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class MultiAdjacencyView(AdjacencyView): + """An MultiAdjacencyView is a Read-only Map of Maps of Maps of Maps. 
+ + It is a View into a dict-of-dict-of-dict-of-dict data structure. + The inner level of dict is read-write. But the + outer levels are read-only. + + See Also + ======== + AtlasView: View into dict-of-dict + AdjacencyView: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses AtlasView slots names _atlas + + def __getitem__(self, name): + return AdjacencyView(self._atlas[name]) + + def copy(self): + return {n: self[n].copy() for n in self._atlas} + + +class UnionAtlas(Mapping): + """A read-only union of two atlases (dict-of-dict). + + The two dict-of-dicts represent the inner dict of + an Adjacency: `G.succ[node]` and `G.pred[node]`. + The inner level of dict of both hold attribute key:value + pairs and is read-write. But the outer level is read-only. + + See Also + ======== + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ.keys() | self._pred.keys()) + + def __iter__(self): + return iter(set(self._succ.keys()) | set(self._pred.keys())) + + def __getitem__(self, key): + try: + return self._succ[key] + except KeyError: + return self._pred[key] + + def copy(self): + result = {nbr: dd.copy() for nbr, dd in self._succ.items()} + for nbr, dd in self._pred.items(): + if nbr in result: + result[nbr].update(dd) + else: + result[nbr] = dd.copy() + return result + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionAdjacency(Mapping): + """A read-only union of dict Adjacencies as a Map of Maps of Maps. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred`. Return values are UnionAtlas + The inner level of dict is read-write. But the + middle and outer levels are read-only. + + succ : a dict-of-dict-of-dict {node: nbrdict} + pred : a dict-of-dict-of-dict {node: nbrdict} + The keys for the two dicts should be the same + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = ("_succ", "_pred") + + def __getstate__(self): + return {"_succ": self._succ, "_pred": self._pred} + + def __setstate__(self, state): + self._succ = state["_succ"] + self._pred = state["_pred"] + + def __init__(self, succ, pred): + # keys must be the same for two input dicts + assert len(set(succ.keys()) ^ set(pred.keys())) == 0 + self._succ = succ + self._pred = pred + + def __len__(self): + return len(self._succ) # length of each dict should be the same + + def __iter__(self): + return iter(self._succ) + + def __getitem__(self, nbr): + return UnionAtlas(self._succ[nbr], self._pred[nbr]) + + def copy(self): + return {n: self[n].copy() for n in self._succ} + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})" + + +class UnionMultiInner(UnionAtlas): + """A read-only union of two inner dicts of MultiAdjacencies. + + The two input dict-of-dict-of-dicts represent the union of + `G.succ[node]` and `G.pred[node]` for MultiDiGraphs. + Return values are UnionAtlas. 
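A brief editorial sketch (not part of the sources) of the read-only contract these view classes enforce, seen through the public `G.adj` wrapper; the exact `TypeError` message may vary across Python versions:

```python
import networkx as nx

G = nx.Graph([(1, 2, {"weight": 3})])
adj = G.adj                  # AdjacencyView over G's dict-of-dict-of-dict
assert adj[1][2]["weight"] == 3

adj[1][2]["weight"] = 7      # the innermost attribute dict is read-write
assert G[1][2]["weight"] == 7

try:
    adj[1] = {}              # the outer mapping layers are read-only
except TypeError as err:
    print(err)               # e.g. object does not support item assignment
```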
+ The inner level of dict is read-write. But the outer levels are read-only. + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionAdjacency: View into dict-of-dict-of-dict + UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAtlas slots names _succ, _pred + + def __getitem__(self, node): + in_succ = node in self._succ + in_pred = node in self._pred + if in_succ: + if in_pred: + return UnionAtlas(self._succ[node], self._pred[node]) + return UnionAtlas(self._succ[node], {}) + return UnionAtlas({}, self._pred[node]) + + def copy(self): + nodes = set(self._succ.keys()) | set(self._pred.keys()) + return {n: self[n].copy() for n in nodes} + + +class UnionMultiAdjacency(UnionAdjacency): + """A read-only union of two dict MultiAdjacencies. + + The two input dict-of-dict-of-dict-of-dicts represent the union of + `G.succ` and `G.pred` for MultiDiGraphs. Return values are UnionAdjacency. + The inner level of dict is read-write. But the outer levels are read-only. + + See Also + ======== + UnionAtlas: View into dict-of-dict + UnionMultiInner: View into dict-of-dict-of-dict + """ + + __slots__ = () # Still uses UnionAdjacency slots names _succ, _pred + + def __getitem__(self, node): + return UnionMultiInner(self._succ[node], self._pred[node]) + + +class FilterAtlas(Mapping): # nodedict, nbrdict, keydict + def __init__(self, d, NODE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + + def __len__(self): + return sum(1 for n in self) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, key): + if key in self._atlas and self.NODE_OK(key): + return self._atlas[key] + raise KeyError(f"Key {key} not found") + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + return f"{self.__class__.__name__}({self._atlas!r}, {self.NODE_OK!r})" + + +class FilterAdjacency(Mapping): # edgedict + def __init__(self, d, NODE_OK, EDGE_OK): + self._atlas = d + self.NODE_OK = NODE_OK + self.EDGE_OK = EDGE_OK + + def __len__(self): + return sum(1 for n in self) + + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + return (n for n in self.NODE_OK.nodes if n in self._atlas) + return (n for n in self._atlas if self.NODE_OK(n)) + + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def new_node_ok(nbr): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr) + + return FilterAtlas(self._atlas[node], new_node_ok) + raise KeyError(f"Key {node} not found") + + def __str__(self): + return str({nbr: self[nbr] for nbr in self}) + + def __repr__(self): + name = self.__class__.__name__ + return f"{name}({self._atlas!r}, {self.NODE_OK!r}, {self.EDGE_OK!r})" + + +class FilterMultiInner(FilterAdjacency): # muliedge_seconddict + def __iter__(self): + try: # check that NODE_OK has attr 'nodes' + node_ok_shorter = 2 * len(self.NODE_OK.nodes) < len(self._atlas) + except AttributeError: + node_ok_shorter = False + if node_ok_shorter: + my_nodes = (n for n in self.NODE_OK.nodes if n in self._atlas) + else: + my_nodes = (n for n in self._atlas if self.NODE_OK(n)) + for n in 
my_nodes: + some_keys_ok = False + for key in self._atlas[n]: + if self.EDGE_OK(n, key): + some_keys_ok = True + break + if some_keys_ok is True: + yield n + + def __getitem__(self, nbr): + if nbr in self._atlas and self.NODE_OK(nbr): + + def new_node_ok(key): + return self.EDGE_OK(nbr, key) + + return FilterAtlas(self._atlas[nbr], new_node_ok) + raise KeyError(f"Key {nbr} not found") + + +class FilterMultiAdjacency(FilterAdjacency): # multiedgedict + def __getitem__(self, node): + if node in self._atlas and self.NODE_OK(node): + + def edge_ok(nbr, key): + return self.NODE_OK(nbr) and self.EDGE_OK(node, nbr, key) + + return FilterMultiInner(self._atlas[node], self.NODE_OK, edge_ok) + raise KeyError(f"Key {node} not found") diff --git a/MLPY/Lib/site-packages/networkx/classes/digraph.py b/MLPY/Lib/site-packages/networkx/classes/digraph.py new file mode 100644 index 0000000000000000000000000000000000000000..988ed599892f0991582e222824731b4d46e27ee5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/digraph.py @@ -0,0 +1,1323 @@ +"""Base class for directed graphs.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import ( + DiDegreeView, + InDegreeView, + InEdgeView, + OutDegreeView, + OutEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["DiGraph"] + + +class _CachedPropertyResetterAdjAndSucc: + """Data Descriptor class that syncs and resets cached properties adj and succ + + The cached properties `adj` and `succ` are reset whenever `_adj` or `_succ` + are set to new objects. In addition, the attributes `_succ` and `_adj` + are synced so these two names point to the same object. + + This object sits on a class and ensures that any instance of that + class clears its cached properties "succ" and "adj" whenever the + underlying instance attributes "_succ" or "_adj" are set to a new object. + It only affects the set process of the obj._adj and obj._succ attribute. + All get/del operations act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + od["_succ"] = value + # reset cached properties + if "adj" in od: + del od["adj"] + if "succ" in od: + del od["succ"] + + +class _CachedPropertyResetterPred: + """Data Descriptor class for _pred that resets ``pred`` cached_property when needed + + This assumes that the ``cached_property`` ``G.pred`` should be reset whenever + ``G._pred`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "pred" whenever the underlying + instance attribute "_pred" is set to a new object. It only affects + the set process of the obj._pred attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_pred"] = value + if "pred" in od: + del od["pred"] + + +class DiGraph(Graph): + """ + Base class for directed graphs. + + A DiGraph stores nodes and edges with optional data, or attributes. + + DiGraphs hold directed edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. 
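The `Filter*` classes above are the machinery behind `networkx.subgraph_view`: nodes and edges are hidden lazily and nothing is copied. A short editorial sketch (not part of the sources) of that behavior:

```python
import networkx as nx

G = nx.path_graph(5)  # 0-1-2-3-4
view = nx.subgraph_view(G, filter_node=lambda n: n != 2)
assert sorted(view.nodes) == [0, 1, 3, 4]
assert sorted(view.edges) == [(0, 1), (3, 4)]  # edges touching 2 are hidden

G.remove_edge(0, 1)   # the view tracks the underlying graph live
assert sorted(view.edges) == [(3, 4)]
```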
By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.DiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.DiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, 2]` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. 
+ The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges reporting object is often more convenient: + + >>> for u, v, weight in G.edges(data="weight"): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. + + adjlist_inner_dict_factory : function, optional (default: dict) + Factory function to be used to create the adjacency list + dict which holds edge data keyed by neighbor. + It should require no arguments and return a dict-like object + + edge_attr_dict_factory : function, optional (default: dict) + Factory function to be used to create the edge attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. + + graph_attr_dict_factory : function, (default: dict) + Factory function to be used to create the graph attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object. 
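Before the library's own `ThinGraph` example below, a hedged editorial sketch of the factory hooks just listed; `LoggingDict` and `ChattyGraph` are illustrative names, not part of the package. Note that the C-level `dict.update` bypasses `__setitem__`, so the sketch overrides both:

```python
import networkx as nx

class LoggingDict(dict):
    """Illustrative node-attribute dict that reports every assignment."""

    def __setitem__(self, key, value):
        print(f"setting {key!r} = {value!r}")
        super().__setitem__(key, value)

    def update(self, *args, **kwargs):
        # dict.update would skip __setitem__, so route through it explicitly.
        for k, v in dict(*args, **kwargs).items():
            self[k] = v

class ChattyGraph(nx.DiGraph):
    node_attr_dict_factory = LoggingDict

G = ChattyGraph()
G.add_node(1, color="red")  # prints: setting 'color' = 'red'
assert G.nodes[1]["color"] == "red"
```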
+ + Typically, if your extension doesn't impact the data structure all + methods will inherited without issue except: `to_directed/to_undirected`. + By default these methods create a DiGraph/Graph class and you probably + want them to create your extension of a DiGraph/Graph. To facilitate + this we define two class variables that you can set in your subclass. + + to_directed_class : callable, (default: DiGraph or MultiDiGraph) + Class to create a new graph structure in the `to_directed` method. + If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used. + + to_undirected_class : callable, (default: Graph or MultiGraph) + Class to create a new graph structure in the `to_undirected` method. + If `None`, a NetworkX class (Graph or MultiGraph) is used. + + **Subclassing Example** + + Create a low memory graph class that effectively disallows edge + attributes by using a single attribute dict for all edges. + This reduces the memory used, but you lose edge attributes. + + >>> class ThinGraph(nx.Graph): + ... all_edge_dict = {"weight": 1} + ... + ... def single_edge_dict(self): + ... return self.all_edge_dict + ... + ... edge_attr_dict_factory = single_edge_dict + >>> G = ThinGraph() + >>> G.add_edge(2, 1) + >>> G[2][1] + {'weight': 1} + >>> G.add_edge(2, 2) + >>> G[2][1] is G[2][2] + True + """ + + _adj = _CachedPropertyResetterAdjAndSucc() # type: ignore[assignment] + _succ = _adj # type: ignore[has-type] + _pred = _CachedPropertyResetterPred() + + def __init__(self, incoming_graph_data=None, **attr): + """Initialize a graph with edges, name, or graph attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a 2D NumPy array, a + SciPy sparse array, or a PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes + self._node = self.node_dict_factory() # dictionary for node attr + # We store two adjacency lists: + # the predecessors of node n are stored in the dict self._pred + # the successors of node n are stored in the dict self._succ=self._adj + self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict successor + self._pred = self.adjlist_outer_dict_factory() # predecessor + # Note: self._succ = self._adj # successor + + # attempt to load graph with data + if incoming_graph_data is not None: + convert.to_networkx_graph(incoming_graph_data, create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. 
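A small editorial sketch (not part of the sources) of the invariant the descriptors above maintain: `adj` and `succ` expose the same successor structure, while incoming edges are reported through `pred`:

```python
import networkx as nx

G = nx.DiGraph([(1, 2), (3, 1)])
assert G.adj[1] == G.succ[1]   # adj is the successor (out-edge) structure
assert list(G.adj[1]) == [2]   # out-neighbors of 1
assert list(G.pred[1]) == [3]  # in-neighbors of 1
assert G.has_successor(3, 1) and G.has_predecessor(1, 3)
```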
+ + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return AdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.succ[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.succ behaves like a dict. Useful idioms include + `for nbr, datadict in G.succ[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.succ[node].data('foo'):` + and a default can be set via a `default` argument to the `data` method. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` is identical to `G.succ`. + """ + return AdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.pred[2][3]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.pred behaves like a dict. Useful idioms include + `for nbr, datadict in G.pred[n].items():`. A data-view not provided + by dicts also exists: `for nbr, foovalue in G.pred[node].data('foo'):` + A default can be set via a `default` argument to the `data` method. + """ + return AdjacencyView(self._pred) + + def add_node(self, node_for_adding, **attr): + """Add a single node `node_for_adding` and update node attributes. + + Parameters + ---------- + node_for_adding : node + A node can be any hashable Python object except None. + attr : keyword arguments, optional + Set or change node attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords set/change node attributes: + + >>> G.add_node(1, size=10) + >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + if node_for_adding not in self._succ: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._succ[node_for_adding] = self.adjlist_inner_dict_factory() + self._pred[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else: # update attr even if node already exists + self._node[node_for_adding].update(attr) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. 
+ + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Notes + ----- + When adding nodes from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.add_nodes_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) + >>> # wrong way - will raise RuntimeError + >>> # G.add_nodes_from(n + 1 for n in G.nodes) + >>> # correct way + >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._succ[n] = self.adjlist_inner_dict_factory() + self._pred[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a nonexistent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + try: + nbrs = self._succ[n] + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the digraph.") from err + for u in nbrs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # remove node from succ + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # remove node from pred + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently ignored. 
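A quick editorial sketch (not part of the sources) of the bookkeeping `remove_node` above performs: both the out-edges (tracked in `_succ`) and the in-edges (tracked in `_pred`) of the removed node disappear:

```python
import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (3, 2)])
G.remove_node(2)               # drops 1->2, 2->3 and 3->2 in one step
assert list(G.nodes) == [1, 3]
assert list(G.edges) == []
```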
+ + See Also + -------- + remove_node + + Notes + ----- + When removing nodes from an iterator over the graph you are changing, + a `RuntimeError` will be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.remove_nodes_from`. + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)]) + >>> # this command will fail, as the graph's dict is modified during iteration + >>> # G.remove_nodes_from(n for n in G.nodes if n < 2) + >>> # this command will work, since the dictionary underlying graph is not modified + >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2)) + """ + for n in nodes: + try: + succs = self._succ[n] + del self._node[n] + for u in succs: + del self._pred[u][n] # remove all edges n-u in digraph + del self._succ[n] # now remove node + for u in self._pred[n]: + del self._succ[u][n] # remove all edges n-u in digraph + del self._pred[n] # now remove node + except KeyError: + pass # silent failure on remove + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. 
+ + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # right way - note that there will be no self-edge for node 5 + >>> G.add_edges_from(list((5, n) for n in G.nodes)) + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._succ[u][v] = datadict + self._pred[v][u] = datadict + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, etc + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + >>> e = (2, 3, {"weight": 7}) # an edge with attribute data + >>> G.remove_edge(*e[:2]) # select first part of edge tuple + """ + try: + del self._succ[u][v] + del self._pred[v][u] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} not in graph.") from err + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) edge between u and v. + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + for e in ebunch: + u, v = e[:2] # ignore edge data + if u in self._succ and v in self._succ[u]: + del self._succ[u][v] + del self._pred[v][u] + + def has_successor(self, u, v): + """Returns True if node u has successor v. + + This is true if graph has the edge u->v. + """ + return u in self._succ and v in self._succ[u] + + def has_predecessor(self, u, v): + """Returns True if node u has predecessor v. + + This is true if graph has the edge u<-v. + """ + return u in self._pred and v in self._pred[u] + + def successors(self, n): + """Returns an iterator over successor nodes of n. + + A successor of n is a node m such that there exists a directed + edge from n to m. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + predecessors + + Notes + ----- + neighbors() and successors() are the same. 
+ """ + try: + return iter(self._succ[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + # digraph definitions + neighbors = successors + + def predecessors(self, n): + """Returns an iterator over predecessor nodes of n. + + A predecessor of n is a node m such that there exists a directed + edge from m to n. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + successors + """ + try: + return iter(self._pred[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the digraph.") from err + + @cached_property + def edges(self): + """An OutEdgeView of the DiGraph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, default=None) + + The OutEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, `G.edges[u, v]['color']` provides the value of the color + attribute for edge `(u, v)` while + `for (u, v, c) in G.edges.data('color', default='red'):` + iterates through all the edges yielding the color attribute + with default `'red'` if no color attribute exists. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutEdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + See Also + -------- + in_edges, out_edges + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph, etc + >>> nx.add_path(G, [0, 1, 2]) + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + OutEdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + OutEdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 2]) # only edges originating from these nodes + OutEdgeDataView([(0, 1), (2, 3)]) + >>> G.edges(0) # only edges from node 0 + OutEdgeDataView([(0, 1)]) + + """ + return OutEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """A view of the in edges of the graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, default=None): + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). 
+ default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InEdgeView or InEdgeDataView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2, color='blue') + >>> G.in_edges() + InEdgeView([(1, 2)]) + >>> G.in_edges(nbunch=2) + InEdgeDataView([(1, 2)]) + + See Also + -------- + edges + """ + return InEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiDegreeView or int + If multiple nodes are requested (the default), returns a `DiDegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. + + See Also + -------- + in_degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() # or MultiDiGraph + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + + """ + return DiDegreeView(self) + + @cached_property + def in_degree(self): + """An InDegreeView for (node, in_degree) or in_degree for single node. + + The node in_degree is the number of edges pointing to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iteration over (node, in_degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + In-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + + """ + return InDegreeView(self) + + @cached_property + def out_degree(self): + """An OutDegreeView for (node, out_degree) + + The node out_degree is the number of edges pointing out of the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator over (node, out_degree) as well as + lookup for the degree for a single node. 
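The degree views return a plain int for a single node and an iterable view for several; with `weight=` they sum edge weights instead of counting edges. A small sketch:

```python
import networkx as nx

G = nx.DiGraph()
nx.add_path(G, [0, 1, 2], weight=2.0)
assert G.degree(1) == 2        # in-degree + out-degree
assert G.in_degree(0) == 0
assert G.out_degree(0) == 1
# weighted degree sums the "weight" attribute of incident edges
assert dict(G.degree(weight="weight"))[1] == 4.0
```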
+ + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Out-degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.DiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + + """ + return OutDegreeView(self) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear() + >>> list(G.nodes) + [] + >>> list(G.edges) + [] + + """ + self._succ.clear() + self._pred.clear() + self._node.clear() + self.graph.clear() + + def clear_edges(self): + """Remove all edges from the graph without altering nodes. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear_edges() + >>> list(G.nodes) + [0, 1, 2, 3] + >>> list(G.edges) + [] + + """ + for predecessor_dict in self._pred.values(): + predecessor_dict.clear() + for successor_dict in self._succ.values(): + successor_dict.clear() + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return False + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. + + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : Graph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + Graph, copy, add_edge, add_edges_from + + Notes + ----- + If edges in both directions (u, v) and (v, u) exist in the + graph, attributes for the new undirected edge will be a combination of + the attributes of the directed edges. The edge data is updated + in the (arbitrary) order that the edges are encountered. For + more customized control of the edge attributes use add_edge(). + + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar G=DiGraph(D) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. 
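Two behaviors described above are worth seeing concretely: `to_undirected(reciprocal=True)` keeps only edges present in both directions, and `clear_edges` leaves the node set intact. An illustrative sketch:

```python
import networkx as nx

G = nx.DiGraph([(0, 1), (1, 0), (1, 2)])
H = G.to_undirected(reciprocal=True)   # only 0<->1 is reciprocal
assert list(H.edges) == [(0, 1)]

G.clear_edges()                        # nodes survive, edges do not
assert list(G.nodes) == [0, 1, 2]
assert list(G.edges) == []
```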
+ + Warning: If you have subclassed DiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + Graph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + if v in self._pred[u] + ) + else: + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. + """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items()) + H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True)) + return H + return nx.reverse_view(self) diff --git a/MLPY/Lib/site-packages/networkx/classes/filters.py b/MLPY/Lib/site-packages/networkx/classes/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..aefcbdf3f53fd6a5c2a5c6ad82595a4701136282 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/filters.py @@ -0,0 +1,75 @@ +"""Filter factories to hide or show sets of nodes and edges. + +These filters return the function used when creating `SubGraph`. 
+""" +__all__ = [ + "no_filter", + "hide_nodes", + "hide_edges", + "hide_multiedges", + "hide_diedges", + "hide_multidiedges", + "show_nodes", + "show_edges", + "show_multiedges", + "show_diedges", + "show_multidiedges", +] + + +def no_filter(*items): + return True + + +def hide_nodes(nodes): + nodes = set(nodes) + return lambda node: node not in nodes + + +def hide_diedges(edges): + edges = {(u, v) for u, v in edges} + return lambda u, v: (u, v) not in edges + + +def hide_edges(edges): + alledges = set(edges) | {(v, u) for (u, v) in edges} + return lambda u, v: (u, v) not in alledges + + +def hide_multidiedges(edges): + edges = {(u, v, k) for u, v, k in edges} + return lambda u, v, k: (u, v, k) not in edges + + +def hide_multiedges(edges): + alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} + return lambda u, v, k: (u, v, k) not in alledges + + +# write show_nodes as a class to make SubGraph pickleable +class show_nodes: + def __init__(self, nodes): + self.nodes = set(nodes) + + def __call__(self, node): + return node in self.nodes + + +def show_diedges(edges): + edges = {(u, v) for u, v in edges} + return lambda u, v: (u, v) in edges + + +def show_edges(edges): + alledges = set(edges) | {(v, u) for (u, v) in edges} + return lambda u, v: (u, v) in alledges + + +def show_multidiedges(edges): + edges = {(u, v, k) for u, v, k in edges} + return lambda u, v, k: (u, v, k) in edges + + +def show_multiedges(edges): + alledges = set(edges) | {(v, u, k) for (u, v, k) in edges} + return lambda u, v, k: (u, v, k) in alledges diff --git a/MLPY/Lib/site-packages/networkx/classes/function.py b/MLPY/Lib/site-packages/networkx/classes/function.py new file mode 100644 index 0000000000000000000000000000000000000000..4890044f45649697aae0f2ff929527f18719f038 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/function.py @@ -0,0 +1,1313 @@ +"""Functional interface to graph methods and assorted utilities. +""" + +from collections import Counter +from itertools import chain + +import networkx as nx +from networkx.utils import not_implemented_for, pairwise + +__all__ = [ + "nodes", + "edges", + "degree", + "degree_histogram", + "neighbors", + "number_of_nodes", + "number_of_edges", + "density", + "is_directed", + "freeze", + "is_frozen", + "subgraph", + "induced_subgraph", + "edge_subgraph", + "restricted_view", + "to_directed", + "to_undirected", + "add_star", + "add_path", + "add_cycle", + "create_empty_copy", + "set_node_attributes", + "get_node_attributes", + "set_edge_attributes", + "get_edge_attributes", + "all_neighbors", + "non_neighbors", + "non_edges", + "common_neighbors", + "is_weighted", + "is_negatively_weighted", + "is_empty", + "selfloop_edges", + "nodes_with_selfloops", + "number_of_selfloops", + "path_weight", + "is_path", +] + + +def nodes(G): + """Returns an iterator over the graph nodes.""" + return G.nodes() + + +def edges(G, nbunch=None): + """Returns an edge view of edges incident to nodes in nbunch. + + Return all edges if nbunch is unspecified or nbunch=None. + + For digraphs, edges=out_edges + """ + return G.edges(nbunch) + + +def degree(G, nbunch=None, weight=None): + """Returns a degree view of single node or of nbunch of nodes. + If nbunch is omitted, then return degrees of *all* nodes. 
+ """ + return G.degree(nbunch, weight) + + +def neighbors(G, n): + """Returns a list of nodes connected to node n.""" + return G.neighbors(n) + + +def number_of_nodes(G): + """Returns the number of nodes in the graph.""" + return G.number_of_nodes() + + +def number_of_edges(G): + """Returns the number of edges in the graph.""" + return G.number_of_edges() + + +def density(G): + r"""Returns the density of a graph. + + The density for undirected graphs is + + .. math:: + + d = \frac{2m}{n(n-1)}, + + and for directed graphs is + + .. math:: + + d = \frac{m}{n(n-1)}, + + where `n` is the number of nodes and `m` is the number of edges in `G`. + + Notes + ----- + The density is 0 for a graph without edges and 1 for a complete graph. + The density of multigraphs can be higher than 1. + + Self loops are counted in the total number of edges so graphs with self + loops can have density higher than 1. + """ + n = number_of_nodes(G) + m = number_of_edges(G) + if m == 0 or n <= 1: + return 0 + d = m / (n * (n - 1)) + if not G.is_directed(): + d *= 2 + return d + + +def degree_histogram(G): + """Returns a list of the frequency of each degree value. + + Parameters + ---------- + G : Networkx graph + A graph + + Returns + ------- + hist : list + A list of frequencies of degrees. + The degree values are the index in the list. + + Notes + ----- + Note: the bins are width one, hence len(list) can be large + (Order(number_of_edges)) + """ + counts = Counter(d for n, d in G.degree()) + return [counts.get(i, 0) for i in range(max(counts) + 1)] + + +def is_directed(G): + """Return True if graph is directed.""" + return G.is_directed() + + +def frozen(*args, **kwargs): + """Dummy method for raising errors when trying to modify frozen graphs""" + raise nx.NetworkXError("Frozen graph can't be modified") + + +def freeze(G): + """Modify graph to prevent further change by adding or removing + nodes or edges. + + Node and edge data can still be modified. + + Parameters + ---------- + G : graph + A NetworkX graph + + Examples + -------- + >>> G = nx.path_graph(4) + >>> G = nx.freeze(G) + >>> try: + ... G.add_edge(4, 5) + ... except nx.NetworkXError as err: + ... print(str(err)) + Frozen graph can't be modified + + Notes + ----- + To "unfreeze" a graph you must make a copy by creating a new graph object: + + >>> graph = nx.path_graph(4) + >>> frozen_graph = nx.freeze(graph) + >>> unfrozen_graph = nx.Graph(frozen_graph) + >>> nx.is_frozen(unfrozen_graph) + False + + See Also + -------- + is_frozen + """ + G.add_node = frozen + G.add_nodes_from = frozen + G.remove_node = frozen + G.remove_nodes_from = frozen + G.add_edge = frozen + G.add_edges_from = frozen + G.add_weighted_edges_from = frozen + G.remove_edge = frozen + G.remove_edges_from = frozen + G.clear = frozen + G.clear_edges = frozen + G.frozen = True + return G + + +def is_frozen(G): + """Returns True if graph is frozen. + + Parameters + ---------- + G : graph + A NetworkX graph + + See Also + -------- + freeze + """ + try: + return G.frozen + except AttributeError: + return False + + +def add_star(G_to_add_to, nodes_for_star, **attr): + """Add a star to Graph G_to_add_to. + + The first node in `nodes_for_star` is the middle of the star. + It is connected to all other nodes. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_star : iterable container + A container of nodes. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in star. 
+ + See Also + -------- + add_path, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_star(G, [0, 1, 2, 3]) + >>> nx.add_star(G, [10, 11, 12], weight=2) + """ + nlist = iter(nodes_for_star) + try: + v = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(v) + edges = ((v, n) for n in nlist) + G_to_add_to.add_edges_from(edges, **attr) + + +def add_path(G_to_add_to, nodes_for_path, **attr): + """Add a path to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_path : iterable container + A container of nodes. A path will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in path. + + See Also + -------- + add_star, add_cycle + + Examples + -------- + >>> G = nx.Graph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> nx.add_path(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_path) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from(pairwise(chain((first_node,), nlist)), **attr) + + +def add_cycle(G_to_add_to, nodes_for_cycle, **attr): + """Add a cycle to the Graph G_to_add_to. + + Parameters + ---------- + G_to_add_to : graph + A NetworkX graph + nodes_for_cycle: iterable container + A container of nodes. A cycle will be constructed from + the nodes (in order) and added to the graph. + attr : keyword arguments, optional (default= no attributes) + Attributes to add to every edge in cycle. + + See Also + -------- + add_path, add_star + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> nx.add_cycle(G, [0, 1, 2, 3]) + >>> nx.add_cycle(G, [10, 11, 12], weight=7) + """ + nlist = iter(nodes_for_cycle) + try: + first_node = next(nlist) + except StopIteration: + return + G_to_add_to.add_node(first_node) + G_to_add_to.add_edges_from( + pairwise(chain((first_node,), nlist), cyclic=True), **attr + ) + + +def subgraph(G, nbunch): + """Returns the subgraph induced on nodes in nbunch. + + Parameters + ---------- + G : graph + A NetworkX graph + + nbunch : list, iterable + A container of nodes that will be iterated through once (thus + it should be an iterator or be iterable). Each element of the + container should be a valid node type: any hashable type except + None. If nbunch is None, return all edges data in the graph. + Nodes in nbunch that are not in the graph will be (quietly) + ignored. + + Notes + ----- + subgraph(G) calls G.subgraph() + """ + return G.subgraph(nbunch) + + +def induced_subgraph(G, nbunch): + """Returns a SubGraph view of `G` showing only nodes in nbunch. + + The induced subgraph of a graph on a set of nodes N is the + graph with nodes N and edges from G which have both ends in N. + + Parameters + ---------- + G : NetworkX Graph + nbunch : node, container of nodes or None (for all nodes) + + Returns + ------- + subgraph : SubGraph View + A read-only view of the subgraph in `G` induced by the nodes. + Changes to the graph `G` will be reflected in the view. 
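`add_star`, `add_path`, and `add_cycle` share the same shape: the first node anchors the structure and keyword attributes go on every new edge. An illustrative sketch:

```python
import networkx as nx

G = nx.Graph()
nx.add_star(G, [0, 1, 2, 3])         # node 0 is the hub
nx.add_path(G, [3, 4, 5], weight=7)
nx.add_cycle(G, [5, 6, 7])
assert sorted(G.edges(0)) == [(0, 1), (0, 2), (0, 3)]
assert G[3][4]["weight"] == 7
assert (5, 7) in G.edges             # the cycle closes back on 5
```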
+ + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + For an inplace reduction of a graph to a subgraph you can remove nodes: + `G.remove_nodes_from(n in G if n not in set(nbunch))` + + If you are going to compute subgraphs of your subgraphs you could + end up with a chain of views that can be very slow once the chain + has about 15 views in it. If they are all induced subgraphs, you + can short-cut the chain by making them all subgraphs of the original + graph. The graph class method `G.subgraph` does this when `G` is + a subgraph. In contrast, this function allows you to choose to build + chains or not, as you wish. The returned subgraph is a view on `G`. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = nx.induced_subgraph(G, [0, 1, 3]) + >>> list(H.edges) + [(0, 1)] + >>> list(H.nodes) + [0, 1, 3] + """ + induced_nodes = nx.filters.show_nodes(G.nbunch_iter(nbunch)) + return nx.subgraph_view(G, filter_node=induced_nodes) + + +def edge_subgraph(G, edges): + """Returns a view of the subgraph induced by the specified edges. + + The induced subgraph contains each edge in `edges` and each + node incident to any of those edges. + + Parameters + ---------- + G : NetworkX Graph + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only edge-induced subgraph of `G`. + Changes to `G` are reflected in the view. + + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you can end up + with a chain of subgraphs that becomes very slow with about 15 + nested subgraph views. Luckily the edge_subgraph filter nests + nicely so you can use the original graph as G in this function + to avoid chains. We do not rule out chains programmatically so + that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + """ + nxf = nx.filters + edges = set(edges) + nodes = set() + for e in edges: + nodes.update(e[:2]) + induced_nodes = nxf.show_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + induced_edges = nxf.show_multidiedges(edges) + else: + induced_edges = nxf.show_multiedges(edges) + else: + if G.is_directed(): + induced_edges = nxf.show_diedges(edges) + else: + induced_edges = nxf.show_edges(edges) + return nx.subgraph_view(G, filter_node=induced_nodes, filter_edge=induced_edges) + + +def restricted_view(G, nodes, edges): + """Returns a view of `G` with hidden nodes and edges. + + The resulting subgraph filters out node `nodes` and edges `edges`. + Filtered out nodes also filter out any of their edges. + + Parameters + ---------- + G : NetworkX Graph + nodes : iterable + An iterable of nodes. Nodes not present in `G` are ignored. + edges : iterable + An iterable of edges. Edges not present in `G` are ignored. + + Returns + ------- + subgraph : SubGraph View + A read-only restricted view of `G` filtering out nodes and edges. + Changes to `G` are reflected in the view. 
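Both subgraph helpers return read-only views, so later changes to `G` show through them. A short sketch:

```python
import networkx as nx

G = nx.path_graph(5)
H = nx.induced_subgraph(G, [0, 1, 3])
assert list(H.edges) == [(0, 1)]

E = nx.edge_subgraph(G, [(0, 1), (3, 4)])
assert sorted(E.nodes) == [0, 1, 3, 4]

G.add_edge(0, 3)            # views track the underlying graph
assert (0, 3) in H.edges    # both endpoints are in H's node set
```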
+ + Notes + ----- + To create a mutable subgraph with its own copies of nodes + edges and attributes use `subgraph.copy()` or `Graph(subgraph)` + + If you create a subgraph of a subgraph recursively you may end up + with a chain of subgraph views. Such chains can get quite slow + for lengths near 15. To avoid long chains, try to make your subgraph + based on the original graph. We do not rule out chains programmatically + so that odd cases like an `edge_subgraph` of a `restricted_view` + can be created. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = nx.restricted_view(G, [0], [(1, 2), (3, 4)]) + >>> list(H.nodes) + [1, 2, 3, 4] + >>> list(H.edges) + [(2, 3)] + """ + nxf = nx.filters + hide_nodes = nxf.hide_nodes(nodes) + if G.is_multigraph(): + if G.is_directed(): + hide_edges = nxf.hide_multidiedges(edges) + else: + hide_edges = nxf.hide_multiedges(edges) + else: + if G.is_directed(): + hide_edges = nxf.hide_diedges(edges) + else: + hide_edges = nxf.hide_edges(edges) + return nx.subgraph_view(G, filter_node=hide_nodes, filter_edge=hide_edges) + + +def to_directed(graph): + """Returns a directed view of the graph `graph`. + + Identical to graph.to_directed(as_view=True) + Note that graph.to_directed defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_directed(as_view=True) + + +def to_undirected(graph): + """Returns an undirected view of the graph `graph`. + + Identical to graph.to_undirected(as_view=True) + Note that graph.to_undirected defaults to `as_view=False` + while this function always provides a view. + """ + return graph.to_undirected(as_view=True) + + +def create_empty_copy(G, with_data=True): + """Returns a copy of the graph G with all of the edges removed. + + Parameters + ---------- + G : graph + A NetworkX graph + + with_data : bool (default=True) + Propagate Graph and Nodes data to the new graph. + + See Also + -------- + empty_graph + + """ + H = G.__class__() + H.add_nodes_from(G.nodes(data=with_data)) + if with_data: + H.graph.update(G.graph) + return H + + +def set_node_attributes(G, values, name=None): + """Sets node attributes from a given value or dictionary of values. + + .. Warning:: The call order of arguments `values` and `name` + switched between v1.x & v2.x. + + Parameters + ---------- + G : NetworkX Graph + + values : scalar value, dict-like + What the node attribute should be set to. If `values` is + not a dictionary, then it is treated as a single attribute value + that is then applied to every node in `G`. This means that if + you provide a mutable object, like a list, updates to that object + will be reflected in the node attribute for every node. + The attribute name will be `name`. + + If `values` is a dict or a dict of dict, it should be keyed + by node to either an attribute value or a dict of attribute key/value + pairs used to update the node's attributes. + + name : string (optional, default=None) + Name of the node attribute to set if values is a scalar. 
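`restricted_view` is the complement of the induced and edge subgraphs: it hides the given nodes and edges instead of showing them. `create_empty_copy` keeps nodes (and, optionally, their data) while dropping every edge. Sketch:

```python
import networkx as nx

G = nx.path_graph(5)
H = nx.restricted_view(G, [0], [(1, 2), (3, 4)])
assert list(H.nodes) == [1, 2, 3, 4]
assert list(H.edges) == [(2, 3)]

E = nx.create_empty_copy(G)
assert len(E.nodes) == 5 and len(E.edges) == 0
```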
+ + Examples + -------- + After computing some property of the nodes of a graph, you may want + to assign a node attribute to store the value of that property for + each node:: + + >>> G = nx.path_graph(3) + >>> bb = nx.betweenness_centrality(G) + >>> isinstance(bb, dict) + True + >>> nx.set_node_attributes(G, bb, "betweenness") + >>> G.nodes[1]["betweenness"] + 1.0 + + If you provide a list as the second argument, updates to the list + will be reflected in the node attribute for each node:: + + >>> G = nx.path_graph(3) + >>> labels = [] + >>> nx.set_node_attributes(G, labels, "labels") + >>> labels.append("foo") + >>> G.nodes[0]["labels"] + ['foo'] + >>> G.nodes[1]["labels"] + ['foo'] + >>> G.nodes[2]["labels"] + ['foo'] + + If you provide a dictionary of dictionaries as the second argument, + the outer dictionary is assumed to be keyed by node to an inner + dictionary of node attributes for that node:: + + >>> G = nx.path_graph(3) + >>> attrs = {0: {"attr1": 20, "attr2": "nothing"}, 1: {"attr2": 3}} + >>> nx.set_node_attributes(G, attrs) + >>> G.nodes[0]["attr1"] + 20 + >>> G.nodes[0]["attr2"] + 'nothing' + >>> G.nodes[1]["attr2"] + 3 + >>> G.nodes[2] + {} + + Note that if the dictionary contains nodes that are not in `G`, the + values are silently ignored:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> nx.set_node_attributes(G, {0: "red", 1: "blue"}, name="color") + >>> G.nodes[0]["color"] + 'red' + >>> 1 in G.nodes + False + + """ + # Set node attributes based on type of `values` + if name is not None: # `values` must not be a dict of dict + try: # `values` is a dict + for n, v in values.items(): + try: + G.nodes[n][name] = values[n] + except KeyError: + pass + except AttributeError: # `values` is a constant + for n in G: + G.nodes[n][name] = values + else: # `values` must be dict of dict + for n, d in values.items(): + try: + G.nodes[n].update(d) + except KeyError: + pass + + +def get_node_attributes(G, name, default=None): + """Get node attributes from graph + + Parameters + ---------- + G : NetworkX Graph + + name : string + Attribute name + + default: object (default=None) + Default value of the node attribute if there is no value set for that + node in graph. If `None` then nodes without this attribute are not + included in the returned dict. + + Returns + ------- + Dictionary of attributes keyed by node. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([1, 2, 3], color="red") + >>> color = nx.get_node_attributes(G, "color") + >>> color[1] + 'red' + >>> G.add_node(4) + >>> color = nx.get_node_attributes(G, "color", default="yellow") + >>> color[4] + 'yellow' + """ + if default is not None: + return {n: d.get(name, default) for n, d in G.nodes.items()} + return {n: d[name] for n, d in G.nodes.items() if name in d} + + +def set_edge_attributes(G, values, name=None): + """Sets edge attributes from a given value or dictionary of values. + + .. Warning:: The call order of arguments `values` and `name` + switched between v1.x & v2.x. + + Parameters + ---------- + G : NetworkX Graph + + values : scalar value, dict-like + What the edge attribute should be set to. If `values` is + not a dictionary, then it is treated as a single attribute value + that is then applied to every edge in `G`. This means that if + you provide a mutable object, like a list, updates to that object + will be reflected in the edge attribute for each edge. The attribute + name will be `name`. 
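The scalar-versus-dict dispatch described above can be exercised directly; note that the dict-of-dict form silently skips nodes missing from `G`:

```python
import networkx as nx

G = nx.path_graph(3)
nx.set_node_attributes(G, 1.0, name="mass")      # scalar broadcast to all nodes
nx.set_node_attributes(G, {0: {"mass": 2.5}})    # dict-of-dict, updates node 0 only
assert G.nodes[0]["mass"] == 2.5
assert nx.get_node_attributes(G, "mass") == {0: 2.5, 1: 1.0, 2: 1.0}
```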
+ + If `values` is a dict or a dict of dict, it should be keyed + by edge tuple to either an attribute value or a dict of attribute + key/value pairs used to update the edge's attributes. + For multigraphs, the edge tuples must be of the form ``(u, v, key)``, + where `u` and `v` are nodes and `key` is the edge key. + For non-multigraphs, the keys must be tuples of the form ``(u, v)``. + + name : string (optional, default=None) + Name of the edge attribute to set if values is a scalar. + + Examples + -------- + After computing some property of the edges of a graph, you may want + to assign a edge attribute to store the value of that property for + each edge:: + + >>> G = nx.path_graph(3) + >>> bb = nx.edge_betweenness_centrality(G, normalized=False) + >>> nx.set_edge_attributes(G, bb, "betweenness") + >>> G.edges[1, 2]["betweenness"] + 2.0 + + If you provide a list as the second argument, updates to the list + will be reflected in the edge attribute for each edge:: + + >>> labels = [] + >>> nx.set_edge_attributes(G, labels, "labels") + >>> labels.append("foo") + >>> G.edges[0, 1]["labels"] + ['foo'] + >>> G.edges[1, 2]["labels"] + ['foo'] + + If you provide a dictionary of dictionaries as the second argument, + the entire dictionary will be used to update edge attributes:: + + >>> G = nx.path_graph(3) + >>> attrs = {(0, 1): {"attr1": 20, "attr2": "nothing"}, (1, 2): {"attr2": 3}} + >>> nx.set_edge_attributes(G, attrs) + >>> G[0][1]["attr1"] + 20 + >>> G[0][1]["attr2"] + 'nothing' + >>> G[1][2]["attr2"] + 3 + + The attributes of one Graph can be used to set those of another. + + >>> H = nx.path_graph(3) + >>> nx.set_edge_attributes(H, G.edges) + + Note that if the dict contains edges that are not in `G`, they are + silently ignored:: + + >>> G = nx.Graph([(0, 1)]) + >>> nx.set_edge_attributes(G, {(1, 2): {"weight": 2.0}}) + >>> (1, 2) in G.edges() + False + + For multigraphs, the `values` dict is expected to be keyed by 3-tuples + including the edge key:: + + >>> MG = nx.MultiGraph() + >>> edges = [(0, 1), (0, 1)] + >>> MG.add_edges_from(edges) # Returns list of edge keys + [0, 1] + >>> attributes = {(0, 1, 0): {"cost": 21}, (0, 1, 1): {"cost": 7}} + >>> nx.set_edge_attributes(MG, attributes) + >>> MG[0][1][0]["cost"] + 21 + >>> MG[0][1][1]["cost"] + 7 + + If MultiGraph attributes are desired for a Graph, you must convert the 3-tuple + multiedge to a 2-tuple edge and the last multiedge's attribute value will + overwrite the previous values. 
Continuing from the previous case we get::
+
+ >>> H = nx.path_graph([0, 1, 2])
+ >>> nx.set_edge_attributes(H, {(u, v): ed for u, v, ed in MG.edges.data()})
+ >>> nx.get_edge_attributes(H, "cost")
+ {(0, 1): 7}
+
+ """
+ if name is not None:
+ # `values` does not contain attribute names
+ try:
+ # if `values` is a dict using `.items()` => {edge: value}
+ if G.is_multigraph():
+ for (u, v, key), value in values.items():
+ try:
+ G[u][v][key][name] = value
+ except KeyError:
+ pass
+ else:
+ for (u, v), value in values.items():
+ try:
+ G[u][v][name] = value
+ except KeyError:
+ pass
+ except AttributeError:
+ # treat `values` as a constant
+ for u, v, data in G.edges(data=True):
+ data[name] = values
+ else:
+ # `values` consists of dict-of-dict {edge: {attr: value}} shape
+ if G.is_multigraph():
+ for (u, v, key), d in values.items():
+ try:
+ G[u][v][key].update(d)
+ except KeyError:
+ pass
+ else:
+ for (u, v), d in values.items():
+ try:
+ G[u][v].update(d)
+ except KeyError:
+ pass
+
+
+def get_edge_attributes(G, name, default=None):
+ """Get edge attributes from graph
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+
+ name : string
+ Attribute name
+
+ default: object (default=None)
+ Default value of the edge attribute if there is no value set for that
+ edge in graph. If `None` then edges without this attribute are not
+ included in the returned dict.
+
+ Returns
+ -------
+ Dictionary of attributes keyed by edge. For (di)graphs, the keys are
+ 2-tuples of the form: (u, v). For multi(di)graphs, the keys are 3-tuples of
+ the form: (u, v, key).
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> nx.add_path(G, [1, 2, 3], color="red")
+ >>> color = nx.get_edge_attributes(G, "color")
+ >>> color[(1, 2)]
+ 'red'
+ >>> G.add_edge(3, 4)
+ >>> color = nx.get_edge_attributes(G, "color", default="yellow")
+ >>> color[(3, 4)]
+ 'yellow'
+ """
+ if G.is_multigraph():
+ edges = G.edges(keys=True, data=True)
+ else:
+ edges = G.edges(data=True)
+ if default is not None:
+ return {x[:-1]: x[-1].get(name, default) for x in edges}
+ return {x[:-1]: x[-1][name] for x in edges if name in x[-1]}
+
+
+def all_neighbors(graph, node):
+ """Returns all of the neighbors of a node in the graph.
+
+ If the graph is directed returns predecessors as well as successors.
+
+ Parameters
+ ----------
+ graph : NetworkX graph
+ Graph to find neighbors.
+
+ node : node
+ The node whose neighbors will be returned.
+
+ Returns
+ -------
+ neighbors : iterator
+ Iterator of neighbors
+ """
+ if graph.is_directed():
+ values = chain(graph.predecessors(node), graph.successors(node))
+ else:
+ values = graph.neighbors(node)
+ return values
+
+
+def non_neighbors(graph, node):
+ """Returns the non-neighbors of the node in the graph.
+
+ Parameters
+ ----------
+ graph : NetworkX graph
+ Graph to find non-neighbors.
+
+ node : node
+ The node whose non-neighbors will be returned.
+
+ Returns
+ -------
+ non_neighbors : iterator
+ Iterator of nodes in the graph that are not neighbors of the node.
+ """
+ nbors = set(neighbors(graph, node)) | {node}
+ return (nnode for nnode in graph if nnode not in nbors)
+
+
+def non_edges(graph):
+ """Returns the nonexistent edges in the graph.
+
+ Parameters
+ ----------
+ graph : NetworkX graph.
+ Graph to find nonexistent edges.
+
+ Returns
+ -------
+ non_edges : iterator
+ Iterator of edges that are not in the graph.
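`set_edge_attributes` and `get_edge_attributes` mirror the node versions, keyed by edge tuple. A minimal non-multigraph sketch:

```python
import networkx as nx

G = nx.path_graph(3)
nx.set_edge_attributes(G, 1.0, name="capacity")          # scalar broadcast
nx.set_edge_attributes(G, {(0, 1): {"capacity": 3.0}})   # dict-of-dict update
assert G[0][1]["capacity"] == 3.0
assert nx.get_edge_attributes(G, "capacity") == {(0, 1): 3.0, (1, 2): 1.0}
```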
+ """ + if graph.is_directed(): + for u in graph: + for v in non_neighbors(graph, u): + yield (u, v) + else: + nodes = set(graph) + while nodes: + u = nodes.pop() + for v in nodes - set(graph[u]): + yield (u, v) + + +@not_implemented_for("directed") +def common_neighbors(G, u, v): + """Returns the common neighbors of two nodes in a graph. + + Parameters + ---------- + G : graph + A NetworkX undirected graph. + + u, v : nodes + Nodes in the graph. + + Returns + ------- + cnbors : iterator + Iterator of common neighbors of u and v in the graph. + + Raises + ------ + NetworkXError + If u or v is not a node in the graph. + + Examples + -------- + >>> G = nx.complete_graph(5) + >>> sorted(nx.common_neighbors(G, 0, 1)) + [2, 3, 4] + """ + if u not in G: + raise nx.NetworkXError("u is not in the graph.") + if v not in G: + raise nx.NetworkXError("v is not in the graph.") + + # Return a generator explicitly instead of yielding so that the above + # checks are executed eagerly. + return (w for w in G[u] if w in G[v] and w not in (u, v)) + + +def is_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. + + Examples + -------- + >>> G = nx.path_graph(4) + >>> nx.is_weighted(G) + False + >>> nx.is_weighted(G, (2, 3)) + False + + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2, weight=1) + >>> nx.is_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data + + if is_empty(G): + # Special handling required since: all([]) == True + return False + + return all(weight in data for u, v, data in G.edges(data=True)) + + +def is_negatively_weighted(G, edge=None, weight="weight"): + """Returns True if `G` has negatively weighted edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + edge : tuple, optional + A 2-tuple specifying the only edge in `G` that will be tested. If + None, then every edge in `G` is tested. + + weight: string, optional + The attribute name used to query for edge weights. + + Returns + ------- + bool + A boolean signifying if `G`, or the specified edge, is negatively + weighted. + + Raises + ------ + NetworkXError + If the specified edge does not exist. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([(1, 3), (2, 4), (2, 6)]) + >>> G.add_edge(1, 2, weight=4) + >>> nx.is_negatively_weighted(G, (1, 2)) + False + >>> G[2][4]["weight"] = -2 + >>> nx.is_negatively_weighted(G) + True + >>> G = nx.DiGraph() + >>> edges = [("0", "3", 3), ("0", "1", -5), ("1", "0", -2)] + >>> G.add_weighted_edges_from(edges) + >>> nx.is_negatively_weighted(G) + True + + """ + if edge is not None: + data = G.get_edge_data(*edge) + if data is None: + msg = f"Edge {edge!r} does not exist." + raise nx.NetworkXError(msg) + return weight in data and data[weight] < 0 + + return any(weight in data and data[weight] < 0 for u, v, data in G.edges(data=True)) + + +def is_empty(G): + """Returns True if `G` has no edges. + + Parameters + ---------- + G : graph + A NetworkX graph. 
+ + Returns + ------- + bool + True if `G` has no edges, and False otherwise. + + Notes + ----- + An empty graph can have nodes but not edges. The empty graph with zero + nodes is known as the null graph. This is an $O(n)$ operation where n + is the number of nodes in the graph. + + """ + return not any(G.adj.values()) + + +def nodes_with_selfloops(G): + """Returns an iterator over nodes with self loops. + + A node with a self loop has an edge with both ends adjacent + to that node. + + Returns + ------- + nodelist : iterator + A iterator over nodes with self loops. + + See Also + -------- + selfloop_edges, number_of_selfloops + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> list(nx.nodes_with_selfloops(G)) + [1] + + """ + return (n for n, nbrs in G.adj.items() if n in nbrs) + + +def selfloop_edges(G, data=False, keys=False, default=None): + """Returns an iterator over selfloop edges. + + A selfloop edge has the same node at both ends. + + Parameters + ---------- + G : graph + A NetworkX graph. + data : string or bool, optional (default=False) + Return selfloop edges as two tuples (u, v) (data=False) + or three-tuples (u, v, datadict) (data=True) + or three-tuples (u, v, datavalue) (data='attrname') + keys : bool, optional (default=False) + If True, return edge keys with each edge. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edgeiter : iterator over edge tuples + An iterator over all selfloop edges. + + See Also + -------- + nodes_with_selfloops, number_of_selfloops + + Examples + -------- + >>> G = nx.MultiGraph() # or Graph, DiGraph, MultiDiGraph, etc + >>> ekey = G.add_edge(1, 1) + >>> ekey = G.add_edge(1, 2) + >>> list(nx.selfloop_edges(G)) + [(1, 1)] + >>> list(nx.selfloop_edges(G, data=True)) + [(1, 1, {})] + >>> list(nx.selfloop_edges(G, keys=True)) + [(1, 1, 0)] + >>> list(nx.selfloop_edges(G, keys=True, data=True)) + [(1, 1, 0, {})] + """ + if data is True: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d) + for n, nbrs in G.adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d) + for n, nbrs in G.adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ((n, n, nbrs[n]) for n, nbrs in G.adj.items() if n in nbrs) + elif data is not False: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k, d.get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + for k, d in nbrs[n].items() + ) + else: + return ( + (n, n, d.get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + for d in nbrs[n].values() + ) + else: + return ( + (n, n, nbrs[n].get(data, default)) + for n, nbrs in G.adj.items() + if n in nbrs + ) + else: + if G.is_multigraph(): + if keys is True: + return ( + (n, n, k) for n, nbrs in G.adj.items() if n in nbrs for k in nbrs[n] + ) + else: + return ( + (n, n) + for n, nbrs in G.adj.items() + if n in nbrs + for i in range(len(nbrs[n])) # for easy edge removal (#4068) + ) + else: + return ((n, n) for n, nbrs in G.adj.items() if n in nbrs) + + +def number_of_selfloops(G): + """Returns the number of selfloop edges. + + A selfloop edge has the same node at both ends. + + Returns + ------- + nloops : int + The number of selfloops. 
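The self-loop helpers compose nicely; in particular, feeding `selfloop_edges(G)` straight to `remove_edges_from` is safe because removal only mutates the inner adjacency dicts, not the node dict being iterated. Sketch:

```python
import networkx as nx

G = nx.Graph([(1, 1), (1, 2), (3, 3)])
assert sorted(nx.nodes_with_selfloops(G)) == [1, 3]
assert sorted(nx.selfloop_edges(G)) == [(1, 1), (3, 3)]
assert nx.number_of_selfloops(G) == 2

G.remove_edges_from(nx.selfloop_edges(G))  # drop every self-loop in place
assert nx.number_of_selfloops(G) == 0
```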
+ + See Also + -------- + nodes_with_selfloops, selfloop_edges + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge(1, 1) + >>> G.add_edge(1, 2) + >>> nx.number_of_selfloops(G) + 1 + """ + return sum(1 for _ in nx.selfloop_edges(G)) + + +def is_path(G, path): + """Returns whether or not the specified path exists. + + For it to return True, every node on the path must exist and + each consecutive pair must be connected via one or more edges. + + Parameters + ---------- + G : graph + A NetworkX graph. + + path : list + A list of nodes which defines the path to traverse + + Returns + ------- + bool + True if `path` is a valid path in `G` + + """ + return all((node in G and nbr in G[node]) for node, nbr in nx.utils.pairwise(path)) + + +def path_weight(G, path, weight): + """Returns total cost associated with specified path and weight + + Parameters + ---------- + G : graph + A NetworkX graph. + + path: list + A list of node labels which defines the path to traverse + + weight: string + A string indicating which edge attribute to use for path cost + + Returns + ------- + cost: int or float + An integer or a float representing the total cost with respect to the + specified weight of the specified path + + Raises + ------ + NetworkXNoPath + If the specified edge does not exist. + """ + multigraph = G.is_multigraph() + cost = 0 + + if not nx.is_path(G, path): + raise nx.NetworkXNoPath("path does not exist") + for node, nbr in nx.utils.pairwise(path): + if multigraph: + cost += min(v[weight] for v in G[node][nbr].values()) + else: + cost += G[node][nbr][weight] + return cost diff --git a/MLPY/Lib/site-packages/networkx/classes/graph.py b/MLPY/Lib/site-packages/networkx/classes/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..5bbf079a47b231fc8a3c79d4f6d545dc27f51327 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/graph.py @@ -0,0 +1,2030 @@ +"""Base class for undirected graphs. + +The Graph class allows any hashable object as a node +and can associate key/value attribute pairs with each undirected edge. + +Self-loops are allowed but multiple edges are not (see MultiGraph). + +For directed graphs see DiGraph and MultiDiGraph. +""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import AdjacencyView +from networkx.classes.reportviews import DegreeView, EdgeView, NodeView +from networkx.exception import NetworkXError + +__all__ = ["Graph"] + + +class _CachedPropertyResetterAdj: + """Data Descriptor class for _adj that resets ``adj`` cached_property when needed + + This assumes that the ``cached_property`` ``G.adj`` should be reset whenever + ``G._adj`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "adj" whenever the underlying + instance attribute "_adj" is set to a new object. It only affects + the set process of the obj._adj attribute. All get/del operations + act as they normally would. 
+ + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_adj"] = value + if "adj" in od: + del od["adj"] + + +class _CachedPropertyResetterNode: + """Data Descriptor class for _node that resets ``nodes`` cached_property when needed + + This assumes that the ``cached_property`` ``G.node`` should be reset whenever + ``G._node`` is set to a new value. + + This object sits on a class and ensures that any instance of that + class clears its cached property "nodes" whenever the underlying + instance attribute "_node" is set to a new object. It only affects + the set process of the obj._adj attribute. All get/del operations + act as they normally would. + + For info on Data Descriptors see: https://docs.python.org/3/howto/descriptor.html + """ + + def __set__(self, obj, value): + od = obj.__dict__ + od["_node"] = value + if "nodes" in od: + del od["nodes"] + + +class Graph: + """ + Base class for undirected graphs. + + A Graph stores nodes and edges with optional data, or attributes. + + Graphs hold undirected edges. Self loops are allowed but multiple + (parallel) edges are not. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes, except that `None` is not allowed as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + DiGraph + MultiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.Graph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> G.add_edge(1, 2) + + a list of edges, + + >>> G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. There are no errors when adding + nodes or edges that already exist. + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. 
+ + >>> G = nx.Graph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 # node must exist already to use G.nodes + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> G.add_edge(1, 2, weight=4.7) + >>> G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2]["weight"] = 4.7 + >>> G.edges[1, 2]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges` a + read-only dict-like structure. However, you can assign to attributes + in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change + data attributes: `G.edges[1, 2]['weight'] = 4` + (For multigraphs: `MG.edges[u, v, key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()` + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, eattr in nbrsdict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, weight in G.edges.data("weight"): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using object-attributes and methods. + Reporting typically provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The Graph class uses a dict-of-dict-of-dict data structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information and holds + edge data keyed by neighbor. The inner dict (edge_attr_dict) represents + the edge data and holds edge attribute values keyed by attribute names. + + Each of these three dicts can be replaced in a subclass by a user defined + dict-like object. In general, the dict-like features should be + maintained but extra features can be added. To replace one of the + dicts create a new graph class by changing the class(!) variable + holding the factory for that dict-like structure. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. 
It should require no arguments and return a dict-like object
+
+ node_attr_dict_factory: function, (default: dict)
+ Factory function to be used to create the node attribute
+ dict which holds attribute values keyed by attribute name.
+ It should require no arguments and return a dict-like object
+
+ adjlist_outer_dict_factory : function, (default: dict)
+ Factory function to be used to create the outer-most dict
+ in the data structure that holds adjacency info keyed by node.
+ It should require no arguments and return a dict-like object.
+
+ adjlist_inner_dict_factory : function, (default: dict)
+ Factory function to be used to create the adjacency list
+ dict which holds edge data keyed by neighbor.
+ It should require no arguments and return a dict-like object
+
+ edge_attr_dict_factory : function, (default: dict)
+ Factory function to be used to create the edge attribute
+ dict which holds attribute values keyed by attribute name.
+ It should require no arguments and return a dict-like object.
+
+ graph_attr_dict_factory : function, (default: dict)
+ Factory function to be used to create the graph attribute
+ dict which holds attribute values keyed by attribute name.
+ It should require no arguments and return a dict-like object.
+
+ Typically, if your extension doesn't impact the data structure all
+ methods will inherit without issue except: `to_directed/to_undirected`.
+ By default these methods create a DiGraph/Graph class and you probably
+ want them to create your extension of a DiGraph/Graph. To facilitate
+ this we define two class variables that you can set in your subclass.
+
+ to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+ Class to create a new graph structure in the `to_directed` method.
+ If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+ to_undirected_class : callable, (default: Graph or MultiGraph)
+ Class to create a new graph structure in the `to_undirected` method.
+ If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+ **Subclassing Example**
+
+ Create a low memory graph class that effectively disallows edge
+ attributes by using a single attribute dict for all edges.
+ This reduces the memory used, but you lose edge attributes.
+
+ >>> class ThinGraph(nx.Graph):
+ ... all_edge_dict = {"weight": 1}
+ ...
+ ... def single_edge_dict(self):
+ ... return self.all_edge_dict
+ ...
+ ... edge_attr_dict_factory = single_edge_dict
+ >>> G = ThinGraph()
+ >>> G.add_edge(2, 1)
+ >>> G[2][1]
+ {'weight': 1}
+ >>> G.add_edge(2, 2)
+ >>> G[2][1] is G[2][2]
+ True
+ """
+
+ _adj = _CachedPropertyResetterAdj()
+ _node = _CachedPropertyResetterNode()
+
+ node_dict_factory = dict
+ node_attr_dict_factory = dict
+ adjlist_outer_dict_factory = dict
+ adjlist_inner_dict_factory = dict
+ edge_attr_dict_factory = dict
+ graph_attr_dict_factory = dict
+
+ def to_directed_class(self):
+ """Returns the class to use for empty directed copies.
+
+ If you subclass the base classes, use this to designate
+ what directed class to use for `to_directed()` copies.
+ """
+ return nx.DiGraph
+
+ def to_undirected_class(self):
+ """Returns the class to use for empty undirected copies.
+
+ If you subclass the base classes, use this to designate
+ what undirected class to use for `to_undirected()` copies.
+ """
+ return Graph
+
+ def __init__(self, incoming_graph_data=None, **attr):
+ """Initialize a graph with edges, name, or graph attributes.
+
+ Parameters
+ ----------
+ incoming_graph_data : input graph (optional, default: None)
+ Data to initialize graph.
If None (default) an empty + graph is created. The data can be an edge list, or any + NetworkX graph object. If the corresponding optional Python + packages are installed the data can also be a 2D NumPy array, a + SciPy sparse array, or a PyGraphviz graph. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + self.graph = self.graph_attr_dict_factory() # dictionary for graph attributes + self._node = self.node_dict_factory() # empty node attribute dict + self._adj = self.adjlist_outer_dict_factory() # empty adjacency dict + # attempt to load graph with data + if incoming_graph_data is not None: + convert.to_networkx_graph(incoming_graph_data, create_using=self) + # load graph attributes (must be after convert) + self.graph.update(attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edge-data-dict. So `G.adj[3][2]['color'] = 'blue'` sets + the color of the edge `(3, 2)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return AdjacencyView(self._adj) + + @property + def name(self): + """String identifier of the graph. + + This graph attribute appears in the attribute dict G.graph + keyed by the string `"name"`. as well as an attribute (technically + a property) `G.name`. This is entirely user controlled. + """ + return self.graph.get("name", "") + + @name.setter + def name(self, s): + self.graph["name"] = s + + def __str__(self): + """Returns a short summary of the graph. + + Returns + ------- + info : string + Graph information including the graph name (if any), graph type, and the + number of nodes and edges. + + Examples + -------- + >>> G = nx.Graph(name="foo") + >>> str(G) + "Graph named 'foo' with 0 nodes and 0 edges" + + >>> G = nx.path_graph(3) + >>> str(G) + 'Graph with 3 nodes and 2 edges' + + """ + return "".join( + [ + type(self).__name__, + f" named {self.name!r}" if self.name else "", + f" with {self.number_of_nodes()} nodes and {self.number_of_edges()} edges", + ] + ) + + def __iter__(self): + """Iterate over the nodes. Use: 'for n in G'. + + Returns + ------- + niter : iterator + An iterator over all nodes in the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [n for n in G] + [0, 1, 2, 3] + >>> list(G) + [0, 1, 2, 3] + """ + return iter(self._node) + + def __contains__(self, n): + """Returns True if n is a node, False otherwise. Use: 'n in G'. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> 1 in G + True + """ + try: + return n in self._node + except TypeError: + return False + + def __len__(self): + """Returns the number of nodes in the graph. 
Use: 'len(G)'. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes: identical method + order: identical method + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> len(G) + 4 + + """ + return len(self._node) + + def __getitem__(self, n): + """Returns a dict of neighbors of node n. Use: 'G[n]'. + + Parameters + ---------- + n : node + A node in the graph. + + Returns + ------- + adj_dict : dictionary + The adjacency dictionary for nodes connected to n. + + Notes + ----- + G[n] is the same as G.adj[n] and similar to G.neighbors(n) + (which is an iterator over G.adj[n]) + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G[0] + AtlasView({1: {}}) + """ + return self.adj[n] + + def add_node(self, node_for_adding, **attr): + """Add a single node `node_for_adding` and update node attributes. + + Parameters + ---------- + node_for_adding : node + A node can be any hashable Python object except None. + attr : keyword arguments, optional + Set or change node attributes using key=value. + + See Also + -------- + add_nodes_from + + Examples + -------- + >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_node(1) + >>> G.add_node("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_node(K3) + >>> G.number_of_nodes() + 3 + + Use keywords to set or change node attributes: + + >>> G.add_node(1, size=10) + >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649)) + + Notes + ----- + A hashable object is one that can be used as a key in a Python + dictionary. This includes strings, numbers, tuples of strings + and numbers, etc. + + On many platforms hashable items also include mutables such as + NetworkX Graphs, though one should be careful that the hash + doesn't change on mutables. + """ + if node_for_adding not in self._node: + if node_for_adding is None: + raise ValueError("None cannot be a node") + self._adj[node_for_adding] = self.adjlist_inner_dict_factory() + attr_dict = self._node[node_for_adding] = self.node_attr_dict_factory() + attr_dict.update(attr) + else:  # update attr even if node already exists + self._node[node_for_adding].update(attr) + + def add_nodes_from(self, nodes_for_adding, **attr): + """Add multiple nodes. + + Parameters + ---------- + nodes_for_adding : iterable container + A container of nodes (list, dict, set, etc.). + OR + A container of (node, attribute dict) tuples. + Node attributes are updated using the attribute dict. + attr : keyword arguments, optional (default= no attributes) + Update attributes for all nodes in nodes. + Node attributes specified in nodes as a tuple take + precedence over attributes specified via keyword arguments. + + See Also + -------- + add_node + + Notes + ----- + When adding nodes from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.add_nodes_from`.
+ + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_nodes_from("Hello") + >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)]) + >>> G.add_nodes_from(K3) + >>> sorted(G.nodes(), key=str) + [0, 1, 2, 'H', 'e', 'l', 'o'] + + Use keywords to update specific node attributes for every node. + + >>> G.add_nodes_from([1, 2], size=10) + >>> G.add_nodes_from([3, 4], weight=0.4) + + Use (node, attrdict) tuples to update attributes for specific nodes. + + >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})]) + >>> G.nodes[1]["size"] + 11 + >>> H = nx.Graph() + >>> H.add_nodes_from(G.nodes(data=True)) + >>> H.nodes[1]["size"] + 11 + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(0, 1), (1, 2), (3, 4)]) + >>> # wrong way - will raise RuntimeError + >>> # G.add_nodes_from(n + 1 for n in G.nodes) + >>> # correct way + >>> G.add_nodes_from(list(n + 1 for n in G.nodes)) + """ + for n in nodes_for_adding: + try: + newnode = n not in self._node + newdict = attr + except TypeError: + n, ndict = n + newnode = n not in self._node + newdict = attr.copy() + newdict.update(ndict) + if newnode: + if n is None: + raise ValueError("None cannot be a node") + self._adj[n] = self.adjlist_inner_dict_factory() + self._node[n] = self.node_attr_dict_factory() + self._node[n].update(newdict) + + def remove_node(self, n): + """Remove node n. + + Removes the node n and all adjacent edges. + Attempting to remove a nonexistent node will raise an exception. + + Parameters + ---------- + n : node + A node in the graph + + Raises + ------ + NetworkXError + If n is not in the graph. + + See Also + -------- + remove_nodes_from + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> list(G.edges) + [(0, 1), (1, 2)] + >>> G.remove_node(1) + >>> list(G.edges) + [] + + """ + adj = self._adj + try: + nbrs = list(adj[n]) # list handles self-loops (allows mutation) + del self._node[n] + except KeyError as err: # NetworkXError if n not in self + raise NetworkXError(f"The node {n} is not in the graph.") from err + for u in nbrs: + del adj[u][n] # remove all edges n-u in graph + del adj[n] # now remove node + + def remove_nodes_from(self, nodes): + """Remove multiple nodes. + + Parameters + ---------- + nodes : iterable container + A container of nodes (list, dict, set, etc.). If a node + in the container is not in the graph it is silently + ignored. + + See Also + -------- + remove_node + + Notes + ----- + When removing nodes from an iterator over the graph you are changing, + a `RuntimeError` will be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_nodes)`, and pass this + object to `G.remove_nodes_from`. 
+ + Examples + -------- + >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = list(G.nodes) + >>> e + [0, 1, 2] + >>> G.remove_nodes_from(e) + >>> list(G.nodes) + [] + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(0, 1), (1, 2), (3, 4)]) + >>> # this command will fail, as the graph's dict is modified during iteration + >>> # G.remove_nodes_from(n for n in G.nodes if n < 2) + >>> # this command will work, since the dictionary underlying the graph is not modified + >>> G.remove_nodes_from(list(n for n in G.nodes if n < 2)) + """ + adj = self._adj + for n in nodes: + try: + del self._node[n] + for u in list(adj[n]):  # list handles self-loops + del adj[u][n]  # (allows mutation of dict in loop) + del adj[n] + except KeyError: + pass + + @cached_property + def nodes(self): + """A NodeView of the Graph as G.nodes or G.nodes(). + + Can be used as `G.nodes` for data lookup and for set-like operations. + Can also be used as `G.nodes(data='color', default=None)` to return a + NodeDataView which reports specific node data but no set operations. + It presents a dict-like interface as well with `G.nodes.items()` + iterating over `(node, nodedata)` 2-tuples and `G.nodes[3]['foo']` + providing the value of the `foo` attribute for node `3`. In addition, + a view `G.nodes.data('foo')` provides a dict-like interface to the + `foo` attribute of each node. `G.nodes.data('foo', default=1)` + provides a default for nodes that do not have attribute `foo`. + + Parameters + ---------- + data : string or bool, optional (default=False) + The node attribute returned in 2-tuple (n, ddict[data]). + If True, return entire node attribute dict as (n, ddict). + If False, return just the nodes n. + + default : value, optional (default=None) + Value used for nodes that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + NodeView + Allows set-like operations over the nodes as well as node + attribute dict lookup and calling to get a NodeDataView. + A NodeDataView iterates over `(n, data)` and has no set operations. + A NodeView iterates over `n` and includes set operations. + + When called, if data is False, an iterator over nodes. + Otherwise an iterator of 2-tuples (node, attribute value) + where the attribute is specified in `data`. + If data is True then the attribute becomes the + entire data dictionary. + + Notes + ----- + If your node data is not needed, it is simpler and equivalent + to use the expression ``for n in G``, or ``list(G)``.
+ + Examples + -------- + There are two simple ways of getting a list of all nodes in the graph: + + >>> G = nx.path_graph(3) + >>> list(G.nodes) + [0, 1, 2] + >>> list(G) + [0, 1, 2] + + To get the node data along with the nodes: + + >>> G.add_node(1, time="5pm") + >>> G.nodes[0]["foo"] = "bar" + >>> list(G.nodes(data=True)) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + >>> list(G.nodes.data()) + [(0, {'foo': 'bar'}), (1, {'time': '5pm'}), (2, {})] + + >>> list(G.nodes(data="foo")) + [(0, 'bar'), (1, None), (2, None)] + >>> list(G.nodes.data("foo")) + [(0, 'bar'), (1, None), (2, None)] + + >>> list(G.nodes(data="time")) + [(0, None), (1, '5pm'), (2, None)] + >>> list(G.nodes.data("time")) + [(0, None), (1, '5pm'), (2, None)] + + >>> list(G.nodes(data="time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + >>> list(G.nodes.data("time", default="Not Available")) + [(0, 'Not Available'), (1, '5pm'), (2, 'Not Available')] + + If some of your nodes have an attribute and the rest are assumed + to have a default attribute value you can create a dictionary + from node/attribute pairs using the `default` keyword argument + to guarantee the value is never None:: + + >>> G = nx.Graph() + >>> G.add_node(0) + >>> G.add_node(1, weight=2) + >>> G.add_node(2, weight=3) + >>> dict(G.nodes(data="weight", default=1)) + {0: 1, 1: 2, 2: 3} + + """ + return NodeView(self) + + def number_of_nodes(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + order: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.number_of_nodes() + 3 + """ + return len(self._node) + + def order(self): + """Returns the number of nodes in the graph. + + Returns + ------- + nnodes : int + The number of nodes in the graph. + + See Also + -------- + number_of_nodes: identical method + __len__: identical method + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.order() + 3 + """ + return len(self._node) + + def has_node(self, n): + """Returns True if the graph contains the node n. + + Identical to `n in G` + + Parameters + ---------- + n : node + + Examples + -------- + >>> G = nx.path_graph(3) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.has_node(0) + True + + It is more readable and simpler to use + + >>> 0 in G + True + + """ + try: + return n in self._node + except TypeError: + return False + + def add_edge(self, u_of_edge, v_of_edge, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_of_edge, v_of_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + Adding an edge that already exists updates the edge data. + + Many NetworkX algorithms designed for weighted graphs use + an edge attribute (by default `weight`) to hold a numerical value. 
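+ + For instance, one illustrative sketch (the choice of `nx.shortest_path` here is ours; any weighted algorithm behaves similarly): with the weights below, the cheaper two-hop path wins over the direct edge. + + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(2, 3, weight=1) + >>> G.add_edge(1, 3, weight=10) + >>> nx.shortest_path(G, 1, 3, weight="weight") + [1, 2, 3]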
+ + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> e = (1, 2) + >>> G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + + Associate data to edges using keywords: + + >>> G.add_edge(1, 2, weight=3) + >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> G.add_edge(1, 2) + >>> G[1][2].update({0: 5}) + >>> G.edges[1, 2].update({0: 5}) + """ + u, v = u_of_edge, v_of_edge + # add nodes + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + # add the edge + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges must be given as 2-tuples (u, v) or + 3-tuples (u, v, d) where d is a dictionary containing edge data. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # correct way - note that there will be no self-edge for node 5 + >>> G.add_edges_from(list((5, n) for n in G.nodes)) + """ + for e in ebunch_to_add: + ne = len(e) + if ne == 3: + u, v, dd = e + elif ne == 2: + u, v = e + dd = {} # doesn't need edge_attr_dict_factory + else: + raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.") + if u not in self._node: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._node: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + datadict = self._adj[u].get(v, self.edge_attr_dict_factory()) + datadict.update(attr) + datadict.update(dd) + self._adj[u][v] = datadict + self._adj[v][u] = datadict + + def add_weighted_edges_from(self, ebunch_to_add, weight="weight", **attr): + """Add weighted edges in `ebunch_to_add` with specified weight attr + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the list or container will be added + to the graph. The edges must be given as 3-tuples (u, v, w) + where w is a number. + weight : string, optional (default= 'weight') + The attribute name for the edge weights to be added. + attr : keyword arguments, optional (default= no attributes) + Edge attributes to add/update for all edges. + + See Also + -------- + add_edge : add a single edge + add_edges_from : add multiple edges + + Notes + ----- + Adding the same edge twice for Graph/DiGraph simply updates + the edge data. For MultiGraph/MultiDiGraph, duplicate edges + are stored. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_weighted_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_weighted_edges_from([(0, 1, 3.0), (1, 2, 7.5)]) + + Evaluate an iterator over edges before passing it + + >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) + >>> weight = 0.1 + >>> # Grow graph by one new node, adding edges to all existing nodes. + >>> # wrong way - will raise RuntimeError + >>> # G.add_weighted_edges_from(((5, n, weight) for n in G.nodes)) + >>> # correct way - note that there will be no self-edge for node 5 + >>> G.add_weighted_edges_from(list((5, n, weight) for n in G.nodes)) + """ + self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch_to_add), **attr) + + def remove_edge(self, u, v): + """Remove the edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove the edge between nodes u and v. + + Raises + ------ + NetworkXError + If there is not an edge between u and v. 
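+ + For example, attempting to remove an absent edge raises the error with the message built in the method body below (illustrative doctest): + + >>> G = nx.path_graph(4) + >>> G.remove_edge(0, 2) + Traceback (most recent call last): + ... + networkx.exception.NetworkXError: The edge 0-2 is not in the graph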
+ + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, etc + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e)  # unpacks e from an edge tuple + >>> e = (2, 3, {"weight": 7})  # an edge with attribute data + >>> G.remove_edge(*e[:2])  # select first part of edge tuple + """ + try: + del self._adj[u][v] + if u != v:  # self-loop needs only one entry removed + del self._adj[v][u] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph") from err + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) edge between u and v. + - 3-tuples (u, v, k) where k is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + """ + adj = self._adj + for e in ebunch: + u, v = e[:2]  # ignore edge data if present + if u in adj and v in adj[u]: + del adj[u][v] + if u != v:  # self loop needs only one entry removed + del adj[v][u] + + def update(self, edges=None, nodes=None): + """Update the graph using nodes/edges/graphs as input. + + Like dict.update, this method takes a graph as input, adding the + graph's nodes and edges to this graph. It can also take two inputs: + edges and nodes. Finally, it can take either edges or nodes. + To specify only nodes the keyword `nodes` must be used. + + The collections of edges and nodes are treated similarly to + the add_edges_from/add_nodes_from methods. When iterated, they + should yield 2-tuples (u, v) or 3-tuples (u, v, datadict). + + Parameters + ---------- + edges : Graph object, collection of edges, or None + The first parameter can be a graph or some edges. If it has + attributes `nodes` and `edges`, then it is taken to be a + Graph-like object and those attributes are used as collections + of nodes and edges to be added to the graph. + If the first parameter does not have those attributes, it is + treated as a collection of edges and added to the graph. + If the first argument is None, no edges are added. + nodes : collection of nodes, or None + The second parameter is treated as a collection of nodes + to be added to the graph unless it is None. + If `edges is None` and `nodes is None` an exception is raised. + If the first parameter is a Graph, then `nodes` is ignored. + + Examples + -------- + >>> G = nx.path_graph(5) + >>> G.update(nx.complete_graph(range(4, 10))) + >>> from itertools import combinations + >>> edges = ( + ...     (u, v, {"power": u * v}) + ...     for u, v in combinations(range(10, 20), 2) + ...     if u * v < 225 + ... ) + >>> nodes = [1000]  # for singleton, use a container + >>> G.update(edges, nodes) + + Notes + ----- + If you want to update the graph using an adjacency structure + it is straightforward to obtain the edges/nodes from adjacency.
+ The following examples provide common cases; your adjacency may + be slightly different and require tweaks of these examples:: + + >>> # dict-of-set/list/tuple + >>> adj = {1: {2, 3}, 2: {1, 3}, 3: {1, 2}} + >>> e = [(u, v) for u, nbrs in adj.items() for v in nbrs] + >>> G.update(edges=e, nodes=adj) + + >>> DG = nx.DiGraph() + >>> # dict-of-dict-of-attribute + >>> adj = {1: {2: 1.3, 3: 0.7}, 2: {1: 1.4}, 3: {1: 0.7}} + >>> e = [ + ...     (u, v, {"weight": d}) + ...     for u, nbrs in adj.items() + ...     for v, d in nbrs.items() + ... ] + >>> DG.update(edges=e, nodes=adj) + + >>> # dict-of-dict-of-dict (d is already the edge data dict) + >>> adj = {1: {2: {"weight": 1.3}, 3: {"color": 0.7, "weight": 1.2}}} + >>> e = [ + ...     (u, v, d) + ...     for u, nbrs in adj.items() + ...     for v, d in nbrs.items() + ... ] + >>> DG.update(edges=e, nodes=adj) + + >>> # predecessor adjacency (dict-of-set) + >>> pred = {1: {2, 3}, 2: {3}, 3: {3}} + >>> e = [(v, u) for u, nbrs in pred.items() for v in nbrs] + + >>> # MultiGraph dict-of-dict-of-dict-of-attribute + >>> MDG = nx.MultiDiGraph() + >>> adj = { + ...     1: {2: {0: {"weight": 1.3}, 1: {"weight": 1.2}}}, + ...     3: {2: {0: {"weight": 0.7}}}, + ... } + >>> e = [ + ...     (u, v, ekey, d) + ...     for u, nbrs in adj.items() + ...     for v, keydict in nbrs.items() + ...     for ekey, d in keydict.items() + ... ] + >>> MDG.update(edges=e) + + See Also + -------- + add_edges_from: add multiple edges to a graph + add_nodes_from: add multiple nodes to a graph + """ + if edges is not None: + if nodes is not None: + self.add_nodes_from(nodes) + self.add_edges_from(edges) + else: + # check if edges is a Graph object + try: + graph_nodes = edges.nodes + graph_edges = edges.edges + except AttributeError: + # edges is not Graph-like + self.add_edges_from(edges) + else:  # edges is Graph-like + self.add_nodes_from(graph_nodes.data()) + self.add_edges_from(graph_edges.data()) + self.graph.update(edges.graph) + elif nodes is not None: + self.add_nodes_from(nodes) + else: + raise NetworkXError("update needs nodes or edges input") + + def has_edge(self, u, v): + """Returns True if the edge (u, v) is in the graph. + + This is the same as `v in G[u]` without KeyError exceptions. + + Parameters + ---------- + u, v : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + + Returns + ------- + edge_ind : bool + True if edge is in the graph, False otherwise. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.has_edge(0, 1)  # using two nodes + True + >>> e = (0, 1) + >>> G.has_edge(*e)  # e is a 2-tuple (u, v) + True + >>> e = (0, 1, {"weight": 7}) + >>> G.has_edge(*e[:2])  # e is a 3-tuple (u, v, data_dictionary) + True + + The following are equivalent: + + >>> G.has_edge(0, 1) + True + >>> 1 in G[0]  # though this gives KeyError if 0 not in G + True + + """ + try: + return v in self._adj[u] + except KeyError: + return False + + def neighbors(self, n): + """Returns an iterator over all neighbors of node n. + + This is identical to `iter(G[n])` + + Parameters + ---------- + n : node + A node in the graph + + Returns + ------- + neighbors : iterator + An iterator over all neighbors of node n + + Raises + ------ + NetworkXError + If the node n is not in the graph.
+ + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [n for n in G.neighbors(0)] + [1] + + Notes + ----- + Alternate ways to access the neighbors are ``G.adj[n]`` or ``G[n]``: + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge("a", "b", weight=7) + >>> G["a"] + AtlasView({'b': {'weight': 7}}) + >>> G = nx.path_graph(4) + >>> [n for n in G[0]] + [1] + """ + try: + return iter(self._adj[n]) + except KeyError as err: + raise NetworkXError(f"The node {n} is not in the graph.") from err + + @cached_property + def edges(self): + """An EdgeView of the Graph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, default=None) + + The EdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, `G.edges[u, v]['color']` provides the value of the color + attribute for edge `(u, v)` while + `for (u, v, c) in G.edges.data('color', default='red'):` + iterates through all the edges yielding the color attribute + with default `'red'` if no color attribute exists. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : EdgeView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, d) tuples of edges, but can also be used for + attribute lookup as `edges[u, v]['foo']`. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.path_graph(3) # or MultiGraph, etc + >>> G.add_edge(2, 3, weight=5) + >>> [e for e in G.edges] + [(0, 1), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + EdgeDataView([(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + EdgeDataView([(0, 1, 1), (1, 2, 1), (2, 3, 5)]) + >>> G.edges([0, 3]) # only edges from these nodes + EdgeDataView([(0, 1), (3, 2)]) + >>> G.edges(0) # only edges from node 0 + EdgeDataView([(0, 1)]) + """ + return EdgeView(self) + + def get_edge_data(self, u, v, default=None): + """Returns the attribute dictionary associated with edge (u, v). + + This is identical to `G[u][v]` except the default is returned + instead of an exception if the edge doesn't exist. + + Parameters + ---------- + u, v : nodes + default: any Python object (default=None) + Value to return if the edge (u, v) is not found. + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G[0][1] + {} + + Warning: Assigning to `G[u][v]` is not permitted. 
+ But it is safe to assign to attributes `G[u][v]['foo']` + + >>> G[0][1]["weight"] = 7 + >>> G[0][1]["weight"] + 7 + >>> G[1][0]["weight"] + 7 + + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.get_edge_data(0, 1)  # default edge data is {} + {} + >>> e = (0, 1) + >>> G.get_edge_data(*e)  # tuple form + {} + >>> G.get_edge_data("a", "b", default=0)  # edge not in graph, return 0 + 0 + """ + try: + return self._adj[u][v] + except KeyError: + return default + + def adjacency(self): + """Returns an iterator over (node, adjacency dict) tuples for all nodes. + + For directed graphs, only outgoing neighbors/adjacencies are included. + + Returns + ------- + adj_iter : iterator + An iterator over (node, adjacency dictionary) for all nodes in + the graph. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> [(n, nbrdict) for n, nbrdict in G.adjacency()] + [(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})] + + """ + return iter(self._adj.items()) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DegreeView or int + If multiple nodes are requested (the default), returns a `DegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.degree[0]  # node 0 has degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + """ + return DegreeView(self) + + def clear(self): + """Remove all nodes and edges from the graph. + + This also removes the name, and all graph, node, and edge attributes. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear() + >>> list(G.nodes) + [] + >>> list(G.edges) + [] + + """ + self._adj.clear() + self._node.clear() + self.graph.clear() + + def clear_edges(self): + """Remove all edges from the graph without altering nodes. + + Examples + -------- + >>> G = nx.path_graph(4)  # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.clear_edges() + >>> list(G.nodes) + [0, 1, 2, 3] + >>> list(G.edges) + [] + """ + for neighbours_dict in self._adj.values(): + neighbours_dict.clear() + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return False + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return False + + def copy(self, as_view=False): + """Returns a copy of the graph. + + The copy method by default returns an independent shallow copy + of the graph and attributes. That is, if an attribute is a + container, that container is shared by the original and the copy. + Use Python's `copy.deepcopy` for new containers.
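+ + For instance, the shared container can be observed directly (a small illustrative doctest): + + >>> G = nx.Graph() + >>> G.add_node(0, langs=["en"]) + >>> H = G.copy() + >>> H.nodes[0]["langs"] is G.nodes[0]["langs"]  # the list object is shared + True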
+ + If `as_view` is True then a view is returned instead of a copy. + + Notes + ----- + All copies reproduce the graph structure, but data attributes + may be handled in different ways. There are four types of copies + of a graph that people might want. + + Deepcopy -- A "deepcopy" copies the graph structure as well as + all data attributes and any objects they might contain. + The entire graph object is new so that changes in the copy + do not affect the original object. (see Python's copy.deepcopy) + + Data Reference (Shallow) -- For a shallow copy the graph structure + is copied but the edge, node and graph attribute dicts are + references to those in the original graph. This saves + time and memory but could cause confusion if you change an attribute + in one graph and it changes the attribute in the other. + NetworkX does not provide this level of shallow copy. + + Independent Shallow -- This copy creates new independent attribute + dicts and then does a shallow copy of the attributes. That is, any + attributes that are containers are shared between the new graph + and the original. This is exactly what `dict.copy()` provides. + You can obtain this style copy using: + + >>> G = nx.path_graph(5) + >>> H = G.copy() + >>> H = G.copy(as_view=False) + >>> H = nx.Graph(G) + >>> H = G.__class__(G) + + Fresh Data -- For fresh data, the graph structure is copied while + new empty data attribute dicts are created. The resulting graph + is independent of the original and it has no edge, node or graph + attributes. Fresh copies are not enabled. Instead use: + + >>> H = G.__class__() + >>> H.add_nodes_from(G) + >>> H.add_edges_from(G.edges) + + View -- Inspired by dict-views, graph-views act like read-only + versions of the original graph, providing a copy of the original + structure without requiring any memory for copying the information. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Parameters + ---------- + as_view : bool, optional (default=False) + If True, the returned graph-view provides a read-only view + of the original graph without actually copying any data. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, datadict.copy()) + for u, nbrs in self._adj.items() + for v, datadict in nbrs.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : DiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, data) replaced by two directed edges + (u, v, data) and (v, u, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=DiGraph(G) which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. 
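+ + For instance, because the data is deep-copied, edits to the directed copy do not show up in the original (illustrative): + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, weight=1) + >>> D = G.to_directed() + >>> D[0][1]["weight"] = 7  # deep-copied data, so G is unchanged + >>> G[0][1]["weight"] + 1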
+ + Warning: If you have subclassed Graph to use dict-like objects + in the data structure, those changes do not transfer to the + DiGraph created by this method. + + Examples + -------- + >>> G = nx.Graph()  # or MultiGraph, etc + >>> G.add_edge(0, 1) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + + If already directed, return a (deep) copy + + >>> G = nx.DiGraph()  # or MultiDiGraph, etc + >>> G.add_edge(0, 1) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1)] + """ + graph_class = self.to_directed_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, data in nbrs.items() + ) + return G + + def to_undirected(self, as_view=False): + """Returns an undirected copy of the graph. + + Parameters + ---------- + as_view : bool (optional, default=False) + If True return a view of the original undirected graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + Graph, copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar `G = nx.DiGraph(D)` which returns a + shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed DiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + Graph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2)  # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, deepcopy(d)) + for u, nbrs in self._adj.items() + for v, d in nbrs.items() + ) + return G + + def subgraph(self, nodes): + """Returns a SubGraph view of the subgraph induced on `nodes`. + + The induced subgraph of the graph contains the nodes in `nodes` + and the edges between those nodes. + + Parameters + ---------- + nodes : list, iterable + A container of nodes which will be iterated through once. + + Returns + ------- + G : SubGraph View + A subgraph view of the graph. The graph structure cannot be + changed but node/edge attributes can and are shared with the + original graph. + + Notes + ----- + The graph, edge and node attributes are shared with the original graph. + Changes to the graph structure are ruled out by the view, but changes + to attributes are reflected in the original graph. + + To create a subgraph with its own copy of the edge/node attributes use: + G.subgraph(nodes).copy() + + For an in-place reduction of a graph to a subgraph you can remove nodes: + G.remove_nodes_from([n for n in G if n not in set(nodes)]) + + Subgraph views are sometimes NOT what you want.
In most cases where + you want to do more than simply look at the induced edges, it makes + more sense to just create the subgraph as its own graph with code like: + + :: + + # Create a subgraph SG based on a (possibly multigraph) G + SG = G.__class__() + SG.add_nodes_from((n, G.nodes[n]) for n in largest_wcc) + if SG.is_multigraph(): + SG.add_edges_from((n, nbr, key, d) + for n, nbrs in G.adj.items() if n in largest_wcc + for nbr, keydict in nbrs.items() if nbr in largest_wcc + for key, d in keydict.items()) + else: + SG.add_edges_from((n, nbr, d) + for n, nbrs in G.adj.items() if n in largest_wcc + for nbr, d in nbrs.items() if nbr in largest_wcc) + SG.graph.update(G.graph) + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.subgraph([0, 1, 2]) + >>> list(H.edges) + [(0, 1), (1, 2)] + """ + induced_nodes = nx.filters.show_nodes(self.nbunch_iter(nodes)) + # if already a subgraph, don't make a chain + subgraph = nx.subgraph_view + if hasattr(self, "_NODE_OK"): + return subgraph( + self._graph, filter_node=induced_nodes, filter_edge=self._EDGE_OK + ) + return subgraph(self, filter_node=induced_nodes) + + def edge_subgraph(self, edges): + """Returns the subgraph induced by the specified edges. + + The induced subgraph contains each edge in `edges` and each + node incident to any one of those edges. + + Parameters + ---------- + edges : iterable + An iterable of edges in this graph. + + Returns + ------- + G : Graph + An edge-induced subgraph of this graph with the same edge + attributes. + + Notes + ----- + The graph, edge, and node attributes in the returned subgraph + view are references to the corresponding attributes in the original + graph. The view is read-only. + + To create a full graph version of the subgraph with its own copy + of the edge or node attributes, use:: + + G.edge_subgraph(edges).copy() + + Examples + -------- + >>> G = nx.path_graph(5) + >>> H = G.edge_subgraph([(0, 1), (3, 4)]) + >>> list(H.nodes) + [0, 1, 3, 4] + >>> list(H.edges) + [(0, 1), (3, 4)] + + """ + return nx.edge_subgraph(self, edges) + + def size(self, weight=None): + """Returns the number of edges or total of all edge weights. + + Parameters + ---------- + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + + Returns + ------- + size : numeric + The number of edges or + (if weight keyword is provided) the total weight sum. + + If weight is None, returns an int. Otherwise a float + (or more general numeric if the weights are more general). + + See Also + -------- + number_of_edges + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.size() + 3 + + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edge("a", "b", weight=2) + >>> G.add_edge("b", "c", weight=4) + >>> G.size() + 2 + >>> G.size(weight="weight") + 6.0 + """ + s = sum(d for v, d in self.degree(weight=weight)) + # If `weight` is None, the sum of the degrees is guaranteed to be + # even, so we can perform integer division and hence return an + # integer. Otherwise, the sum of the weighted degrees is not + # guaranteed to be an integer, so we perform "real" division. + return s // 2 if weight is None else s / 2 + + def number_of_edges(self, u=None, v=None): + """Returns the number of edges between two nodes. 
+ + Parameters + ---------- + u, v : nodes, optional (default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes `u` and `v` are + specified return the number of edges between those nodes. If + the graph is directed, this only returns the number of edges + from `u` to `v`. + + See Also + -------- + size + + Examples + -------- + For undirected graphs, this method counts the total number of + edges in the graph: + + >>> G = nx.path_graph(4) + >>> G.number_of_edges() + 3 + + If you specify two nodes, this counts the total number of edges + joining the two nodes: + + >>> G.number_of_edges(0, 1) + 1 + + For directed graphs, this method can count the total number of + directed edges from `u` to `v`: + + >>> G = nx.DiGraph() + >>> G.add_edge(0, 1) + >>> G.add_edge(1, 0) + >>> G.number_of_edges(0, 1) + 1 + + """ + if u is None: + return int(self.size()) + if v in self._adj[u]: + return 1 + return 0 + + def nbunch_iter(self, nbunch=None): + """Returns an iterator over nodes contained in nbunch that are + also in the graph. + + The nodes in nbunch are checked for membership in the graph + and if not are silently ignored. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The iterator reports only nodes from `nbunch` that are in the graph. + + Returns + ------- + niter : iterator + An iterator over nodes in nbunch that are also in the graph. + If nbunch is None, iterate over all nodes in the graph. + + Raises + ------ + NetworkXError + If nbunch is not a node or sequence of nodes. + If a node in nbunch is not hashable. + + See Also + -------- + Graph.__iter__ + + Notes + ----- + When nbunch is an iterator, the returned iterator yields values + directly from nbunch, becoming exhausted when nbunch is exhausted. + + To test whether nbunch is a single node, one can use + "if nbunch in self:", even after processing with this routine. + + If nbunch is not a node or a (possibly empty) sequence/iterator + or None, a :exc:`NetworkXError` is raised. Also, if any object in + nbunch is not hashable, a :exc:`NetworkXError` is raised. + """ + if nbunch is None:  # include all nodes via iterator + bunch = iter(self._adj) + elif nbunch in self:  # if nbunch is a single node + bunch = iter([nbunch]) + else:  # if nbunch is a sequence of nodes + + def bunch_iter(nlist, adj): + try: + for n in nlist: + if n in adj: + yield n + except TypeError as err: + exc, message = err, err.args[0] + # capture error for non-sequence/iterator nbunch. + if "iter" in message: + exc = NetworkXError( + "nbunch is not a node or a sequence of nodes." + ) + # capture error for unhashable node. + if "hashable" in message: + exc = NetworkXError( + f"Node {n} in sequence nbunch is not a valid node." + ) + raise exc + + bunch = bunch_iter(nbunch, self._adj) + return bunch diff --git a/MLPY/Lib/site-packages/networkx/classes/graphviews.py b/MLPY/Lib/site-packages/networkx/classes/graphviews.py new file mode 100644 index 0000000000000000000000000000000000000000..3fb08df08e384498cc29f03f60db0274c2eb2d1d --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/graphviews.py @@ -0,0 +1,267 @@ +"""View of Graphs as SubGraph, Reverse, Directed, Undirected. + +In some algorithms it is convenient to temporarily morph +a graph to exclude some nodes or edges. It is usually better +to do that via a view than to remove and then re-add.
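+ +For instance, a short sketch using `subgraph_view` (defined below) +to mask a node instead of removing it: + +>>> import networkx as nx +>>> G = nx.path_graph(4) +>>> H = nx.subgraph_view(G, filter_node=lambda n: n != 1) +>>> list(H.edges) +[(2, 3)]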
+In other algorithms it is convenient to temporarily morph +a graph to reverse directed edges, or treat a directed graph +as undirected, etc. This module provides those graph views. + +The resulting views are essentially read-only graphs that +report data from the original graph object. We provide an +attribute G._graph which points to the underlying graph object. + +Note: Since graphviews look like graphs, one can end up with +view-of-view-of-view chains. Be careful with chains because +they become very slow with about 15 nested views. +For the common simple case of node induced subgraphs created +from the graph class, we short-cut the chain by returning a +subgraph of the original graph directly rather than a subgraph +of a subgraph. We are careful not to disrupt any edge filter in +the middle subgraph. In general, determining how to short-cut +the chain is tricky and much harder with restricted_views than +with induced subgraphs. +Often it is easiest to use .copy() to avoid chains. +""" +import networkx as nx +from networkx.classes.coreviews import ( + FilterAdjacency, + FilterAtlas, + FilterMultiAdjacency, + UnionAdjacency, + UnionMultiAdjacency, +) +from networkx.classes.filters import no_filter +from networkx.exception import NetworkXError +from networkx.utils import deprecate_positional_args, not_implemented_for + +__all__ = ["generic_graph_view", "subgraph_view", "reverse_view"] + + +def generic_graph_view(G, create_using=None): + """Returns a read-only view of `G`. + + The graph `G` and its attributes are not copied but viewed through the new graph object + of the same class as `G` (or of the class specified in `create_using`). + + Parameters + ---------- + G : graph + A directed/undirected graph/multigraph. + + create_using : NetworkX graph constructor, optional (default=None) + Graph type to create. If graph instance, then cleared before populated. + If `None`, then the appropriate Graph type is inferred from `G`. + + Returns + ------- + newG : graph + A view of the input graph `G` and its attributes as viewed through + the `create_using` class. + + Raises + ------ + NetworkXError + If `G` is a multigraph (or multidigraph) but `create_using` is not, or vice versa. + + Notes + ----- + The returned graph view is read-only (cannot modify the graph). + Yet the view reflects any changes in `G`. The intent is to mimic dict views. + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edge(1, 2, weight=0.3) + >>> G.add_edge(2, 3, weight=0.5) + >>> G.edges(data=True) + EdgeDataView([(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.5})]) + + The view exposes the attributes from the original graph. + + >>> viewG = nx.graphviews.generic_graph_view(G) + >>> viewG.edges(data=True) + EdgeDataView([(1, 2, {'weight': 0.3}), (2, 3, {'weight': 0.5})]) + + Changes to `G` are reflected in `viewG`. + + >>> G.remove_edge(2, 3) + >>> G.edges(data=True) + EdgeDataView([(1, 2, {'weight': 0.3})]) + + >>> viewG.edges(data=True) + EdgeDataView([(1, 2, {'weight': 0.3})]) + + We can change the graph type with the `create_using` parameter. 
+ + >>> type(G) + <class 'networkx.classes.graph.Graph'> + >>> viewDG = nx.graphviews.generic_graph_view(G, create_using=nx.DiGraph) + >>> type(viewDG) + <class 'networkx.classes.digraph.DiGraph'> + """ + if create_using is None: + newG = G.__class__() + else: + newG = nx.empty_graph(0, create_using) + if G.is_multigraph() != newG.is_multigraph(): + raise NetworkXError("Multigraph for G must agree with create_using") + newG = nx.freeze(newG) + + # create view by assigning attributes from G + newG._graph = G + newG.graph = G.graph + + newG._node = G._node + if newG.is_directed(): + if G.is_directed(): + newG._succ = G._succ + newG._pred = G._pred + # newG._adj is synced with _succ + else: + newG._succ = G._adj + newG._pred = G._adj + # newG._adj is synced with _succ + elif G.is_directed(): + if G.is_multigraph(): + newG._adj = UnionMultiAdjacency(G._succ, G._pred) + else: + newG._adj = UnionAdjacency(G._succ, G._pred) + else: + newG._adj = G._adj + return newG + + +@deprecate_positional_args(version="3.4") +def subgraph_view(G, *, filter_node=no_filter, filter_edge=no_filter): + """View of `G` applying a filter on nodes and edges. + + `subgraph_view` provides a read-only view of the input graph that excludes + nodes and edges based on the outcome of two filter functions `filter_node` + and `filter_edge`. + + The `filter_node` function takes one argument --- the node --- and returns + `True` if the node should be included in the subgraph, and `False` if it + should not be included. + + The `filter_edge` function takes two arguments (or three if `G` is a + multigraph) --- the nodes describing an edge, plus the edge-key if + parallel edges are possible --- and returns `True` if the edge should be + included in the subgraph, and `False` if it should not be included. + + Both node and edge filter functions are called on graph elements as they + are queried, meaning there is no up-front cost to creating the view. + + Parameters + ---------- + G : networkx.Graph + A directed/undirected graph/multigraph + + filter_node : callable, optional + A function taking a node as input, which returns `True` if the node + should appear in the view. + + filter_edge : callable, optional + A function taking as input the two nodes describing an edge (plus the + edge-key if `G` is a multi-graph), which returns `True` if the edge + should appear in the view. + + Returns + ------- + graph : networkx.Graph + A read-only graph view of the input graph. + + Examples + -------- + >>> G = nx.path_graph(6) + + Filter functions operate on the node, and return `True` if the node should + appear in the view: + + >>> def filter_node(n1): + ...     return n1 != 5 + ... + >>> view = nx.subgraph_view(G, filter_node=filter_node) + >>> view.nodes() + NodeView((0, 1, 2, 3, 4)) + + We can use a closure pattern to filter graph elements based on additional + data --- for example, filtering on edge data attached to the graph: + + >>> G[3][4]["cross_me"] = False + >>> def filter_edge(n1, n2): + ...     return G[n1][n2].get("cross_me", True) + ...
+ >>> view = nx.subgraph_view(G, filter_edge=filter_edge) + >>> view.edges() + EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)]) + + >>> view = nx.subgraph_view(G, filter_node=filter_node, filter_edge=filter_edge,) + >>> view.nodes() + NodeView((0, 1, 2, 3, 4)) + >>> view.edges() + EdgeView([(0, 1), (1, 2), (2, 3)]) + """ + newG = nx.freeze(G.__class__()) + newG._NODE_OK = filter_node + newG._EDGE_OK = filter_edge + + # create view by assigning attributes from G + newG._graph = G + newG.graph = G.graph + + newG._node = FilterAtlas(G._node, filter_node) + if G.is_multigraph(): + Adj = FilterMultiAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u, k) + + else: + Adj = FilterAdjacency + + def reverse_edge(u, v, k=None): + return filter_edge(v, u) + + if G.is_directed(): + newG._succ = Adj(G._succ, filter_node, filter_edge) + newG._pred = Adj(G._pred, filter_node, reverse_edge) + # newG._adj is synced with _succ + else: + newG._adj = Adj(G._adj, filter_node, filter_edge) + return newG + + +@not_implemented_for("undirected") +def reverse_view(G): + """View of `G` with edge directions reversed + + `reverse_view` returns a read-only view of the input graph where + edge directions are reversed. + + Identical to digraph.reverse(copy=False) + + Parameters + ---------- + G : networkx.DiGraph + + Returns + ------- + graph : networkx.DiGraph + + Examples + -------- + >>> G = nx.DiGraph() + >>> G.add_edge(1, 2) + >>> G.add_edge(2, 3) + >>> G.edges() + OutEdgeView([(1, 2), (2, 3)]) + + >>> view = nx.reverse_view(G) + >>> view.edges() + OutEdgeView([(2, 1), (3, 2)]) + """ + newG = generic_graph_view(G) + newG._succ, newG._pred = G._pred, G._succ + # newG._adj is synced with _succ + return newG diff --git a/MLPY/Lib/site-packages/networkx/classes/multidigraph.py b/MLPY/Lib/site-packages/networkx/classes/multidigraph.py new file mode 100644 index 0000000000000000000000000000000000000000..fb8b1a35c401ae9371fd0eec7a3d437b9b917269 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/multidigraph.py @@ -0,0 +1,963 @@ +"""Base class for MultiDiGraph.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import convert +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.digraph import DiGraph +from networkx.classes.multigraph import MultiGraph +from networkx.classes.reportviews import ( + DiMultiDegreeView, + InMultiDegreeView, + InMultiEdgeView, + OutMultiDegreeView, + OutMultiEdgeView, +) +from networkx.exception import NetworkXError + +__all__ = ["MultiDiGraph"] + + +class MultiDiGraph(MultiGraph, DiGraph): + """A directed graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiDiGraph holds directed edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, SciPy + sparse matrix, or PyGraphviz graph. 
+ + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. + If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiDiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. + + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, dict(route=282)), (4, 5, dict(route=37))]) + >>> G[4] + AdjacencyView({5: {0: {}, 1: {'route': 282}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiDiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. 
Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4` + (for multigraphs the edge key is required: `MG.edges[u, v, + key][name] = value`). + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are available as an adjacency-view `G.adj` object or via + the method `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. + + For details on these and other miscellaneous methods, see below. + + **Subclasses (Advanced):** + + The MultiDiGraph class uses a dict-of-dict-of-dict-of-dict structure. + The outer dict (node_dict) holds adjacency information keyed by node. + The next dict (adjlist_dict) represents the adjacency information + and holds edge_key dicts keyed by neighbor. The edge_key dict holds + each edge_attr dict keyed by edge key. The inner dict + (edge_attr_dict) represents the edge data and holds edge attribute + values keyed by attribute names. + + Each of these four dicts in the dict-of-dict-of-dict-of-dict + structure can be replaced by a user defined dict-like object. + In general, the dict-like features should be maintained but + extra features can be added. To replace one of the dicts create + a new graph class by changing the class(!) variable holding the + factory for that dict-like structure. The variable names are + node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory, + adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory + and graph_attr_dict_factory. + + node_dict_factory : function, (default: dict) + Factory function to be used to create the dict containing node + attributes, keyed by node id. + It should require no arguments and return a dict-like object + + node_attr_dict_factory: function, (default: dict) + Factory function to be used to create the node attribute + dict which holds attribute values keyed by attribute name. + It should require no arguments and return a dict-like object + + adjlist_outer_dict_factory : function, (default: dict) + Factory function to be used to create the outer-most dict + in the data structure that holds adjacency info keyed by node. + It should require no arguments and return a dict-like object. 
+
+    adjlist_inner_dict_factory : function, (default: dict)
+        Factory function to be used to create the adjacency list
+        dict which holds multiedge key dicts keyed by neighbor.
+        It should require no arguments and return a dict-like object.
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+    **Subclassing Example**
+
+    Create a low memory graph class that effectively disallows edge
+    attributes by using a single attribute dict for all edges.
+    This reduces the memory used, but you lose edge attributes.
+
+    >>> class ThinGraph(nx.Graph):
+    ...     all_edge_dict = {"weight": 1}
+    ...
+    ...     def single_edge_dict(self):
+    ...         return self.all_edge_dict
+    ...
+    ...     edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse array, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+ If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + convert + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G = nx.Graph(name="my graph") + >>> e = [(1, 2), (2, 3), (3, 4)] # list of edges + >>> G = nx.Graph(e) + + Arbitrary graph attribute pairs (key=value) may be assigned + + >>> G = nx.Graph(e, day="Friday") + >>> G.graph + {'day': 'Friday'} + + """ + # multigraph_input can be None/True/False. So check "is not False" + if isinstance(incoming_graph_data, dict) and multigraph_input is not False: + DiGraph.__init__(self) + try: + convert.from_dict_of_dicts( + incoming_graph_data, create_using=self, multigraph_input=True + ) + self.graph.update(attr) + except Exception as err: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err)}: {err}" + ) + DiGraph.__init__(self, incoming_graph_data, **attr) + else: + DiGraph.__init__(self, incoming_graph_data, **attr) + + @cached_property + def adj(self): + """Graph adjacency object holding the neighbors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def succ(self): + """Graph adjacency object holding the successors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + So `for nbr, foovalue in G[node].data('foo', default=1):` works. + + For directed graphs, `G.succ` is identical to `G.adj`. + """ + return MultiAdjacencyView(self._succ) + + @cached_property + def pred(self): + """Graph adjacency object holding the predecessors of each node. + + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, datadict in G.adj[n].items():`. + """ + return MultiAdjacencyView(self._pred) + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. 
+ Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following all add the edge e=(1, 2) to graph G: + + >>> G = nx.MultiDiGraph() + >>> e = (1, 2) + >>> key = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> key = G.add_edge(1, 2, weight=3) + >>> key = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> key = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. + + >>> ekey = G.add_edge(1, 2) + >>> G[1][2][0].update({0: 5}) + >>> G.edges[1, 2, 0].update({0: 5}) + """ + u, v = u_for_edge, v_for_edge + # add nodes + if u not in self._succ: + if u is None: + raise ValueError("None cannot be a node") + self._succ[u] = self.adjlist_inner_dict_factory() + self._pred[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._succ: + if v is None: + raise ValueError("None cannot be a node") + self._succ[v] = self.adjlist_inner_dict_factory() + self._pred[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + if key is None: + key = self.new_edge_key(u, v) + if v in self._succ[u]: + keydict = self._adj[u][v] + datadict = keydict.get(key, self.edge_attr_dict_factory()) + datadict.update(attr) + keydict[key] = datadict + else: + # selfloops work this way without special treatment + datadict = self.edge_attr_dict_factory() + datadict.update(attr) + keydict = self.edge_key_dict_factory() + keydict[key] = datadict + self._succ[u][v] = keydict + self._pred[v][u] = keydict + return key + + def remove_edge(self, u, v, key=None): + """Remove an edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove an edge between nodes u and v. + key : hashable identifier, optional (default=None) + Used to distinguish multiple edges between a pair of nodes. + If None, remove a single edge between u and v. If there are + multiple edges, removes the last edge added in terms of + insertion order. + + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. 
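A minimal sketch of the key semantics described for `add_edge` above, since the key-based update-vs-append behaviour is the main difference from `DiGraph.add_edge` (standard networkx API only; the node names are arbitrary):

```python
import networkx as nx

G = nx.MultiDiGraph()
k1 = G.add_edge("a", "b", weight=1)  # lowest unused integer key: 0
k2 = G.add_edge("a", "b", weight=2)  # parallel edge gets key 1
assert (k1, k2) == (0, 1)

# Reusing an existing key updates that edge instead of adding a new one.
G.add_edge("a", "b", key=k1, weight=10)
assert G["a"]["b"][k1] == {"weight": 10}
assert G.number_of_edges("a", "b") == 2  # still two parallel edges
```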
+ + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiDiGraph() + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 0), (1, 2, 1)]) + + For edges with keys + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + OutMultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._succ[u][v] + del self._pred[v][u] + + @cached_property + def edges(self): + """An OutMultiEdgeView of the Graph as G.edges or G.edges(). + + edges(self, nbunch=None, data=False, keys=False, default=None) + + The OutMultiEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, ``G.edges[u, v, k]['color']`` provides the value of the color + attribute for the edge from ``u`` to ``v`` with key ``k`` while + ``for (u, v, k, c) in G.edges(data='color', default='red', keys=True):`` + iterates through all the edges yielding the color attribute with + default `'red'` if no color attribute exists. + + Edges are returned as tuples with optional data and keys + in the order (node, neighbor, key, data). If ``keys=True`` is not + provided, the tuples will just be (node, neighbor, data), but + multiple tuples with the same node and neighbor will be + generated when multiple edges between two nodes exist. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating (u, v, k, + d) tuples when data is also requested (the default) and (u, + v, k) tuples when data is not requested. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : OutMultiEdgeView + A view of edge attributes, usually it iterates over (u, v) + (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as ``edges[u, v, k]['foo']``. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. 
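One pattern the `remove_edge` documentation above implies but does not show: because `key=None` removes only the most recently added parallel edge, clearing *all* edges between two nodes takes a loop. A short sketch under the same API:

```python
import networkx as nx

G = nx.MultiDiGraph()
G.add_edges_from([(1, 2), (1, 2), (1, 2, "x", {"w": 5})])

# Each remove_edge(u, v) call pops one parallel edge (last added first).
while G.has_edge(1, 2):
    G.remove_edge(1, 2)

assert G.number_of_edges(1, 2) == 0
assert 1 in G and 2 in G  # the endpoint nodes themselves are kept
```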
+ + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2]) + >>> key = G.add_edge(2, 3, weight=5) + >>> key2 = G.add_edge(1, 2) # second edge between these nodes + >>> [e for e in G.edges()] + [(0, 1), (1, 2), (1, 2), (2, 3)] + >>> list(G.edges(data=True)) # default data is {} (empty dict) + [(0, 1, {}), (1, 2, {}), (1, 2, {}), (2, 3, {'weight': 5})] + >>> list(G.edges(data="weight", default=1)) + [(0, 1, 1), (1, 2, 1), (1, 2, 1), (2, 3, 5)] + >>> list(G.edges(keys=True)) # default keys are integers + [(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)] + >>> list(G.edges(data=True, keys=True)) + [(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {}), (2, 3, 0, {'weight': 5})] + >>> list(G.edges(data="weight", default=1, keys=True)) + [(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 1), (2, 3, 0, 5)] + >>> list(G.edges([0, 2])) + [(0, 1), (2, 3)] + >>> list(G.edges(0)) + [(0, 1)] + >>> list(G.edges(1)) + [(1, 2), (1, 2)] + + See Also + -------- + in_edges, out_edges + """ + return OutMultiEdgeView(self) + + # alias out_edges to edges + @cached_property + def out_edges(self): + return OutMultiEdgeView(self) + + out_edges.__doc__ = edges.__doc__ + + @cached_property + def in_edges(self): + """A view of the in edges of the graph as G.in_edges or G.in_edges(). + + in_edges(self, nbunch=None, data=False, keys=False, default=None) + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating 3-tuples + (u, v, k) or with data, 4-tuples (u, v, k, d). + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + in_edges : InMultiEdgeView or InMultiEdgeDataView + A view of edge attributes, usually it iterates over (u, v) + or (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as `edges[u, v, k]['foo']`. + + See Also + -------- + edges + """ + return InMultiEdgeView(self) + + @cached_property + def degree(self): + """A DegreeView for the Graph as G.degree or G.degree(). + + The node degree is the number of edges adjacent to the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The name of an edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + DiMultiDegreeView or int + If multiple nodes are requested (the default), returns a `DiMultiDegreeView` + mapping nodes to their degree. + If a single node is requested, returns the degree of the node as an integer. 
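To make the relationship between the `edges`, `out_edges` and `in_edges` views above concrete, a small runnable sketch (plain networkx API, default integer keys):

```python
import networkx as nx

G = nx.MultiDiGraph([("a", "b"), ("a", "b"), ("c", "b")])

# edges/out_edges report edges leaving the given nodes; in_edges those entering.
assert list(G.out_edges("a", keys=True)) == [("a", "b", 0), ("a", "b", 1)]
assert list(G.in_edges("b", keys=True)) == [
    ("a", "b", 0),
    ("a", "b", 1),
    ("c", "b", 0),
]
assert list(G.edges("c")) == [("c", "b")]  # edges is an alias of out_edges
```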
+ + See Also + -------- + out_degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.degree(0) # node 0 with degree 1 + 1 + >>> list(G.degree([0, 1, 2])) + [(0, 1), (1, 2), (2, 2)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.degree([0, 1, 2])) # parallel edges are counted + [(0, 2), (1, 3), (2, 2)] + + """ + return DiMultiDegreeView(self) + + @cached_property + def in_degree(self): + """A DegreeView for (node, in_degree) or in_degree for single node. + + The node in-degree is the number of edges pointing into the node. + The weighted node degree is the sum of the edge weights for + edges incident to that node. + + This object provides an iterator for (node, degree) as well as + lookup for the degree for a single node. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights adjacent to the node. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, in-degree). + + See Also + -------- + degree, out_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.in_degree(0) # node 0 with degree 0 + 0 + >>> list(G.in_degree([0, 1, 2])) + [(0, 0), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.in_degree([0, 1, 2])) # parallel edges counted + [(0, 0), (1, 2), (2, 1)] + + """ + return InMultiDegreeView(self) + + @cached_property + def out_degree(self): + """Returns an iterator for (node, out-degree) or out-degree for single node. + + out_degree(self, nbunch=None, weight=None) + + The node out-degree is the number of edges pointing out of the node. + This function returns the out-degree for a single node or an iterator + for a bunch of nodes or if nothing is passed as argument. + + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. + + weight : string or None, optional (default=None) + The edge attribute that holds the numerical value used + as a weight. If None, then each edge has weight 1. + The degree is the sum of the edge weights. + + Returns + ------- + If a single node is requested + deg : int + Degree of the node + + OR if multiple nodes are requested + nd_iter : iterator + The iterator returns two-tuples of (node, out-degree). + + See Also + -------- + degree, in_degree + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.out_degree(0) # node 0 with degree 1 + 1 + >>> list(G.out_degree([0, 1, 2])) + [(0, 1), (1, 1), (2, 1)] + >>> G.add_edge(0, 1) # parallel edge + 1 + >>> list(G.out_degree([0, 1, 2])) # counts parallel edges + [(0, 2), (1, 1), (2, 1)] + + """ + return OutMultiDegreeView(self) + + def is_multigraph(self): + """Returns True if graph is a multigraph, False otherwise.""" + return True + + def is_directed(self): + """Returns True if graph is directed, False otherwise.""" + return True + + def to_undirected(self, reciprocal=False, as_view=False): + """Returns an undirected representation of the digraph. 
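The three degree views above differ only in which incident edges they count; a compact sketch of the weighted case (the attribute name `weight` is just the usual convention, not required):

```python
import networkx as nx

G = nx.MultiDiGraph()
G.add_edge(0, 1, weight=2)
G.add_edge(0, 1, weight=3)  # parallel edge, counted separately
G.add_edge(1, 0, weight=10)

assert G.out_degree(0, weight="weight") == 5   # 2 + 3
assert G.in_degree(0, weight="weight") == 10
assert G.degree(0, weight="weight") == 15      # in + out
assert G.degree(0) == 3                        # unweighted: edge count
```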
+ + Parameters + ---------- + reciprocal : bool (optional) + If True only keep edges that appear in both directions + in the original digraph. + as_view : bool (optional, default=False) + If True return an undirected view of the original directed graph. + + Returns + ------- + G : MultiGraph + An undirected graph with the same name and nodes and + with edge (u, v, data) if either (u, v, data) or (v, u, data) + is in the digraph. If both edges exist in digraph and + their edge data is different, only one edge is created + with an arbitrary choice of which edge data to use. + You must check and correct for this manually if desired. + + See Also + -------- + MultiGraph, copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiDiGraph to use dict-like + objects in the data structure, those changes do not transfer + to the MultiGraph created by this method. + + Examples + -------- + >>> G = nx.path_graph(2) # or MultiGraph, etc + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1), (1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + if reciprocal is True: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + if v in self._pred[u] and key in self._pred[u][v] + ) + else: + G.add_edges_from( + (u, v, key, deepcopy(data)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, data in keydict.items() + ) + return G + + def reverse(self, copy=True): + """Returns the reverse of the graph. + + The reverse is a graph with the same nodes and edges + but with the directions of the edges reversed. + + Parameters + ---------- + copy : bool optional (default=True) + If True, return a new DiGraph holding the reversed edges. + If False, the reverse graph is created using a view of + the original graph. 
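The `reciprocal` flag above keeps an edge only when the *same key* also exists in the opposite direction (see the `key in self._pred[u][v]` test in the implementation), which is easy to miss. A minimal sketch:

```python
import networkx as nx

D = nx.MultiDiGraph()
D.add_edge(1, 2, key=0)
D.add_edge(2, 1, key=0)  # reciprocal pair: same key in both directions
D.add_edge(2, 3, key=0)  # one-way only

H = D.to_undirected(reciprocal=True)
assert list(H.edges(keys=True)) == [(1, 2, 0)]  # the one-way edge is dropped
```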
+ """ + if copy: + H = self.__class__() + H.graph.update(deepcopy(self.graph)) + H.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + H.add_edges_from( + (v, u, k, deepcopy(d)) + for u, v, k, d in self.edges(keys=True, data=True) + ) + return H + return nx.reverse_view(self) diff --git a/MLPY/Lib/site-packages/networkx/classes/multigraph.py b/MLPY/Lib/site-packages/networkx/classes/multigraph.py new file mode 100644 index 0000000000000000000000000000000000000000..bbf2ce241360d1108f98be8cb14e7598388fc472 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/multigraph.py @@ -0,0 +1,1278 @@ +"""Base class for MultiGraph.""" +from copy import deepcopy +from functools import cached_property + +import networkx as nx +from networkx import NetworkXError, convert +from networkx.classes.coreviews import MultiAdjacencyView +from networkx.classes.graph import Graph +from networkx.classes.reportviews import MultiDegreeView, MultiEdgeView + +__all__ = ["MultiGraph"] + + +class MultiGraph(Graph): + """ + An undirected graph class that can store multiedges. + + Multiedges are multiple edges between two nodes. Each edge + can hold optional data or attributes. + + A MultiGraph holds undirected edges. Self loops are allowed. + + Nodes can be arbitrary (hashable) Python objects with optional + key/value attributes. By convention `None` is not used as a node. + + Edges are represented as links between nodes with optional + key/value attributes, in a MultiGraph each edge has a key to + distinguish between multiple edges that have the same source and + destination nodes. + + Parameters + ---------- + incoming_graph_data : input graph (optional, default: None) + Data to initialize graph. If None (default) an empty + graph is created. The data can be any format that is supported + by the to_networkx_graph() function, currently including edge list, + dict of dicts, dict of lists, NetworkX graph, 2D NumPy array, + SciPy sparse array, or PyGraphviz graph. + + multigraph_input : bool or None (default None) + Note: Only used when `incoming_graph_data` is a dict. + If True, `incoming_graph_data` is assumed to be a + dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + A NetworkXError is raised if this is not the case. + If False, :func:`to_networkx_graph` is used to try to determine + the dict's graph data structure as either a dict-of-dict-of-dict + keyed by node to neighbor to edge data, or a dict-of-iterable + keyed by node to neighbors. + If None, the treatment for True is tried, but if it fails, + the treatment for False is tried. + + attr : keyword arguments, optional (default= no attributes) + Attributes to add to graph as key=value pairs. + + See Also + -------- + Graph + DiGraph + MultiDiGraph + + Examples + -------- + Create an empty graph structure (a "null graph") with no nodes and + no edges. + + >>> G = nx.MultiGraph() + + G can be grown in several ways. + + **Nodes:** + + Add one node at a time: + + >>> G.add_node(1) + + Add the nodes from any container (a list, dict, set or + even the lines from a file or the nodes from another graph). + + >>> G.add_nodes_from([2, 3]) + >>> G.add_nodes_from(range(100, 110)) + >>> H = nx.path_graph(10) + >>> G.add_nodes_from(H) + + In addition to strings and integers any hashable Python object + (except None) can represent a node, e.g. a customized node object, + or even another Graph. + + >>> G.add_node(H) + + **Edges:** + + G can also be grown by adding edges. 
+ + Add one edge, + + >>> key = G.add_edge(1, 2) + + a list of edges, + + >>> keys = G.add_edges_from([(1, 2), (1, 3)]) + + or a collection of edges, + + >>> keys = G.add_edges_from(H.edges) + + If some edges connect nodes not yet in the graph, the nodes + are added automatically. If an edge already exists, an additional + edge is created and stored using a key to identify the edge. + By default the key is the lowest unused integer. + + >>> keys = G.add_edges_from([(4, 5, {"route": 28}), (4, 5, {"route": 37})]) + >>> G[4] + AdjacencyView({3: {0: {}}, 5: {0: {}, 1: {'route': 28}, 2: {'route': 37}}}) + + **Attributes:** + + Each graph, node, and edge can hold key/value attribute pairs + in an associated attribute dictionary (the keys must be hashable). + By default these are empty, but can be added or changed using + add_edge, add_node or direct manipulation of the attribute + dictionaries named graph, node and edge respectively. + + >>> G = nx.MultiGraph(day="Friday") + >>> G.graph + {'day': 'Friday'} + + Add node attributes using add_node(), add_nodes_from() or G.nodes + + >>> G.add_node(1, time="5pm") + >>> G.add_nodes_from([3], time="2pm") + >>> G.nodes[1] + {'time': '5pm'} + >>> G.nodes[1]["room"] = 714 + >>> del G.nodes[1]["room"] # remove attribute + >>> list(G.nodes(data=True)) + [(1, {'time': '5pm'}), (3, {'time': '2pm'})] + + Add edge attributes using add_edge(), add_edges_from(), subscript + notation, or G.edges. + + >>> key = G.add_edge(1, 2, weight=4.7) + >>> keys = G.add_edges_from([(3, 4), (4, 5)], color="red") + >>> keys = G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})]) + >>> G[1][2][0]["weight"] = 4.7 + >>> G.edges[1, 2, 0]["weight"] = 4 + + Warning: we protect the graph data structure by making `G.edges[1, + 2, 0]` a read-only dict-like structure. However, you can assign to + attributes in e.g. `G.edges[1, 2, 0]`. Thus, use 2 sets of brackets + to add/change data attributes: `G.edges[1, 2, 0]['weight'] = 4`. + + **Shortcuts:** + + Many common graph features allow python syntax to speed reporting. + + >>> 1 in G # check if node in graph + True + >>> [n for n in G if n < 3] # iterate through nodes + [1, 2] + >>> len(G) # number of nodes in graph + 5 + >>> G[1] # adjacency dict-like view mapping neighbor -> edge key -> edge attributes + AdjacencyView({2: {0: {'weight': 4}, 1: {'color': 'blue'}}}) + + Often the best way to traverse all edges of a graph is via the neighbors. + The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`. + + >>> for n, nbrsdict in G.adjacency(): + ... for nbr, keydict in nbrsdict.items(): + ... for key, eattr in keydict.items(): + ... if "weight" in eattr: + ... # Do something useful with the edges + ... pass + + But the edges() method is often more convenient: + + >>> for u, v, keys, weight in G.edges(data="weight", keys=True): + ... if weight is not None: + ... # Do something useful with the edges + ... pass + + **Reporting:** + + Simple graph information is obtained using methods and object-attributes. + Reporting usually provides views instead of containers to reduce memory + usage. The views update as the graph is updated similarly to dict-views. + The objects `nodes`, `edges` and `adj` provide access to data attributes + via lookup (e.g. `nodes[n]`, `edges[u, v, k]`, `adj[u][v]`) and iteration + (e.g. `nodes.items()`, `nodes.data('color')`, + `nodes.data('color', default='blue')` and similarly for `edges`) + Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`. 
+
+    For details on these and other miscellaneous methods, see below.
+
+    **Subclasses (Advanced):**
+
+    The MultiGraph class uses a dict-of-dict-of-dict-of-dict data structure.
+    The outer dict (node_dict) holds adjacency information keyed by node.
+    The next dict (adjlist_dict) represents the adjacency information
+    and holds edge_key dicts keyed by neighbor. The edge_key dict holds
+    each edge_attr dict keyed by edge key. The inner dict
+    (edge_attr_dict) represents the edge data and holds edge attribute
+    values keyed by attribute names.
+
+    Each of these four dicts in the dict-of-dict-of-dict-of-dict
+    structure can be replaced by a user defined dict-like object.
+    In general, the dict-like features should be maintained but
+    extra features can be added. To replace one of the dicts create
+    a new graph class by changing the class(!) variable holding the
+    factory for that dict-like structure. The variable names are
+    node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
+    adjlist_outer_dict_factory, edge_key_dict_factory, edge_attr_dict_factory
+    and graph_attr_dict_factory.
+
+    node_dict_factory : function, (default: dict)
+        Factory function to be used to create the dict containing node
+        attributes, keyed by node id.
+        It should require no arguments and return a dict-like object
+
+    node_attr_dict_factory: function, (default: dict)
+        Factory function to be used to create the node attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object
+
+    adjlist_outer_dict_factory : function, (default: dict)
+        Factory function to be used to create the outer-most dict
+        in the data structure that holds adjacency info keyed by node.
+        It should require no arguments and return a dict-like object.
+
+    adjlist_inner_dict_factory : function, (default: dict)
+        Factory function to be used to create the adjacency list
+        dict which holds multiedge key dicts keyed by neighbor.
+        It should require no arguments and return a dict-like object.
+
+    edge_key_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge key dict
+        which holds edge data keyed by edge key.
+        It should require no arguments and return a dict-like object.
+
+    edge_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the edge attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    graph_attr_dict_factory : function, (default: dict)
+        Factory function to be used to create the graph attribute
+        dict which holds attribute values keyed by attribute name.
+        It should require no arguments and return a dict-like object.
+
+    Typically, if your extension doesn't impact the data structure all
+    methods will be inherited without issue except: `to_directed/to_undirected`.
+    By default these methods create a DiGraph/Graph class and you probably
+    want them to create your extension of a DiGraph/Graph. To facilitate
+    this we define two class variables that you can set in your subclass.
+
+    to_directed_class : callable, (default: DiGraph or MultiDiGraph)
+        Class to create a new graph structure in the `to_directed` method.
+        If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
+
+    to_undirected_class : callable, (default: Graph or MultiGraph)
+        Class to create a new graph structure in the `to_undirected` method.
+        If `None`, a NetworkX class (Graph or MultiGraph) is used.
+
+    **Subclassing Example**
+
+    Create a low memory graph class that effectively disallows edge
+    attributes by using a single attribute dict for all edges.
+    This reduces the memory used, but you lose edge attributes.
+
+    >>> class ThinGraph(nx.Graph):
+    ...     all_edge_dict = {"weight": 1}
+    ...
+    ...     def single_edge_dict(self):
+    ...         return self.all_edge_dict
+    ...
+    ...     edge_attr_dict_factory = single_edge_dict
+    >>> G = ThinGraph()
+    >>> G.add_edge(2, 1)
+    >>> G[2][1]
+    {'weight': 1}
+    >>> G.add_edge(2, 2)
+    >>> G[2][1] is G[2][2]
+    True
+    """
+
+    # node_dict_factory = dict  # already assigned in Graph
+    # adjlist_outer_dict_factory = dict
+    # adjlist_inner_dict_factory = dict
+    edge_key_dict_factory = dict
+    # edge_attr_dict_factory = dict
+
+    def to_directed_class(self):
+        """Returns the class to use for empty directed copies.
+
+        If you subclass the base classes, use this to designate
+        what directed class to use for `to_directed()` copies.
+        """
+        return nx.MultiDiGraph
+
+    def to_undirected_class(self):
+        """Returns the class to use for empty undirected copies.
+
+        If you subclass the base classes, use this to designate
+        what undirected class to use for `to_undirected()` copies.
+        """
+        return MultiGraph
+
+    def __init__(self, incoming_graph_data=None, multigraph_input=None, **attr):
+        """Initialize a graph with edges, name, or graph attributes.
+
+        Parameters
+        ----------
+        incoming_graph_data : input graph
+            Data to initialize graph. If incoming_graph_data=None (default)
+            an empty graph is created. The data can be an edge list, or any
+            NetworkX graph object. If the corresponding optional Python
+            packages are installed the data can also be a 2D NumPy array, a
+            SciPy sparse array, or a PyGraphviz graph.
+
+        multigraph_input : bool or None (default None)
+            Note: Only used when `incoming_graph_data` is a dict.
+            If True, `incoming_graph_data` is assumed to be a
+            dict-of-dict-of-dict-of-dict structure keyed by
+            node to neighbor to edge keys to edge data for multi-edges.
+            A NetworkXError is raised if this is not the case.
+            If False, :func:`to_networkx_graph` is used to try to determine
+            the dict's graph data structure as either a dict-of-dict-of-dict
+            keyed by node to neighbor to edge data, or a dict-of-iterable
+            keyed by node to neighbors.
+            If None, the treatment for True is tried, but if it fails,
+            the treatment for False is tried.
+
+        attr : keyword arguments, optional (default= no attributes)
+            Attributes to add to graph as key=value pairs.
+
+        See Also
+        --------
+        convert
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()
+        >>> G = nx.MultiGraph(name="my graph")
+        >>> e = [(1, 2), (1, 2), (2, 3), (3, 4)]  # list of edges
+        >>> G = nx.MultiGraph(e)
+
+        Arbitrary graph attribute pairs (key=value) may be assigned
+
+        >>> G = nx.MultiGraph(e, day="Friday")
+        >>> G.graph
+        {'day': 'Friday'}
+
+        """
+        # multigraph_input can be None/True/False. So check "is not False"
+        if isinstance(incoming_graph_data, dict) and multigraph_input is not False:
+            Graph.__init__(self)
+            try:
+                convert.from_dict_of_dicts(
+                    incoming_graph_data, create_using=self, multigraph_input=True
+                )
+                self.graph.update(attr)
+            except Exception as err:
+                if multigraph_input is True:
+                    raise nx.NetworkXError(
+                        f"converting multigraph_input raised:\n{type(err)}: {err}"
+                    )
+                Graph.__init__(self, incoming_graph_data, **attr)
+        else:
+            Graph.__init__(self, incoming_graph_data, **attr)
+
+    @cached_property
+    def adj(self):
+        """Graph adjacency object holding the neighbors of each node.
+ + This object is a read-only dict-like structure with node keys + and neighbor-dict values. The neighbor-dict is keyed by neighbor + to the edgekey-data-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets + the color of the edge `(3, 2, 0)` to `"blue"`. + + Iterating over G.adj behaves like a dict. Useful idioms include + `for nbr, edgesdict in G.adj[n].items():`. + + The neighbor information is also provided by subscripting the graph. + + Examples + -------- + >>> e = [(1, 2), (1, 2), (1, 3), (3, 4)] # list of edges + >>> G = nx.MultiGraph(e) + >>> G.edges[1, 2, 0]["weight"] = 3 + >>> result = set() + >>> for edgekey, data in G[1][2].items(): + ... result.add(data.get('weight', 1)) + >>> result + {1, 3} + + For directed graphs, `G.adj` holds outgoing (successor) info. + """ + return MultiAdjacencyView(self._adj) + + def new_edge_key(self, u, v): + """Returns an unused key for edges between nodes `u` and `v`. + + The nodes `u` and `v` do not need to be already in the graph. + + Notes + ----- + In the standard MultiGraph class the new key is the number of existing + edges between `u` and `v` (increased if necessary to ensure unused). + The first edge will have key 0, then 1, etc. If an edge is removed + further new_edge_keys may not be in this order. + + Parameters + ---------- + u, v : nodes + + Returns + ------- + key : int + """ + try: + keydict = self._adj[u][v] + except KeyError: + return 0 + key = len(keydict) + while key in keydict: + key += 1 + return key + + def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): + """Add an edge between u and v. + + The nodes u and v will be automatically added if they are + not already in the graph. + + Edge attributes can be specified with keywords or by directly + accessing the edge's attribute dictionary. See examples below. + + Parameters + ---------- + u_for_edge, v_for_edge : nodes + Nodes can be, for example, strings or numbers. + Nodes must be hashable (and not None) Python objects. + key : hashable identifier, optional (default=lowest unused integer) + Used to distinguish multiedges between a pair of nodes. + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + The edge key assigned to the edge. + + See Also + -------- + add_edges_from : add a collection of edges + + Notes + ----- + To replace/update edge data, use the optional key argument + to identify a unique edge. Otherwise a new edge will be created. + + NetworkX algorithms designed for weighted graphs cannot use + multigraphs directly because it is not clear how to handle + multiedge weights. Convert to Graph using edge attribute + 'weight' to enable weighted graph algorithms. + + Default keys are generated using the method `new_edge_key()`. + This method can be overridden by subclassing the base class and + providing a custom `new_edge_key()` method. + + Examples + -------- + The following each add an additional edge e=(1, 2) to graph G: + + >>> G = nx.MultiGraph() + >>> e = (1, 2) + >>> ekey = G.add_edge(1, 2) # explicit two-node form + >>> G.add_edge(*e) # single edge as tuple of two nodes + 1 + >>> G.add_edges_from([(1, 2)]) # add edges from iterable container + [2] + + Associate data to edges using keywords: + + >>> ekey = G.add_edge(1, 2, weight=3) + >>> ekey = G.add_edge(1, 2, key=0, weight=4) # update data for key=0 + >>> ekey = G.add_edge(1, 3, weight=7, capacity=15, length=342.7) + + For non-string attribute keys, use subscript notation. 
+ + >>> ekey = G.add_edge(1, 2) + >>> G[1][2][0].update({0: 5}) + >>> G.edges[1, 2, 0].update({0: 5}) + """ + u, v = u_for_edge, v_for_edge + # add nodes + if u not in self._adj: + if u is None: + raise ValueError("None cannot be a node") + self._adj[u] = self.adjlist_inner_dict_factory() + self._node[u] = self.node_attr_dict_factory() + if v not in self._adj: + if v is None: + raise ValueError("None cannot be a node") + self._adj[v] = self.adjlist_inner_dict_factory() + self._node[v] = self.node_attr_dict_factory() + if key is None: + key = self.new_edge_key(u, v) + if v in self._adj[u]: + keydict = self._adj[u][v] + datadict = keydict.get(key, self.edge_attr_dict_factory()) + datadict.update(attr) + keydict[key] = datadict + else: + # selfloops work this way without special treatment + datadict = self.edge_attr_dict_factory() + datadict.update(attr) + keydict = self.edge_key_dict_factory() + keydict[key] = datadict + self._adj[u][v] = keydict + self._adj[v][u] = keydict + return key + + def add_edges_from(self, ebunch_to_add, **attr): + """Add all the edges in ebunch_to_add. + + Parameters + ---------- + ebunch_to_add : container of edges + Each edge given in the container will be added to the + graph. The edges can be: + + - 2-tuples (u, v) or + - 3-tuples (u, v, d) for an edge data dict d, or + - 3-tuples (u, v, k) for not iterable key k, or + - 4-tuples (u, v, k, d) for an edge with data and key k + + attr : keyword arguments, optional + Edge data (or labels or objects) can be assigned using + keyword arguments. + + Returns + ------- + A list of edge keys assigned to the edges in `ebunch`. + + See Also + -------- + add_edge : add a single edge + add_weighted_edges_from : convenient way to add weighted edges + + Notes + ----- + Adding the same edge twice has no effect but any edge data + will be updated when each duplicate edge is added. + + Edge attributes specified in an ebunch take precedence over + attributes specified via keyword arguments. + + Default keys are generated using the method ``new_edge_key()``. + This method can be overridden by subclassing the base class and + providing a custom ``new_edge_key()`` method. + + When adding edges from an iterator over the graph you are changing, + a `RuntimeError` can be raised with message: + `RuntimeError: dictionary changed size during iteration`. This + happens when the graph's underlying dictionary is modified during + iteration. To avoid this error, evaluate the iterator into a separate + object, e.g. by using `list(iterator_of_edges)`, and pass this + object to `G.add_edges_from`. + + Examples + -------- + >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples + >>> e = zip(range(0, 3), range(1, 4)) + >>> G.add_edges_from(e) # Add the path graph 0-1-2-3 + + Associate data to edges + + >>> G.add_edges_from([(1, 2), (2, 3)], weight=3) + >>> G.add_edges_from([(3, 4), (1, 4)], label="WN2898") + + Evaluate an iterator over a graph if using it to modify the same graph + + >>> G = nx.MultiGraph([(1, 2), (2, 3), (3, 4)]) + >>> # Grow graph by one new node, adding edges to all existing nodes. 
+ >>> # wrong way - will raise RuntimeError + >>> # G.add_edges_from(((5, n) for n in G.nodes)) + >>> # right way - note that there will be no self-edge for node 5 + >>> assigned_keys = G.add_edges_from(list((5, n) for n in G.nodes)) + """ + keylist = [] + for e in ebunch_to_add: + ne = len(e) + if ne == 4: + u, v, key, dd = e + elif ne == 3: + u, v, dd = e + key = None + elif ne == 2: + u, v = e + dd = {} + key = None + else: + msg = f"Edge tuple {e} must be a 2-tuple, 3-tuple or 4-tuple." + raise NetworkXError(msg) + ddd = {} + ddd.update(attr) + try: + ddd.update(dd) + except (TypeError, ValueError): + if ne != 3: + raise + key = dd # ne == 3 with 3rd value not dict, must be a key + key = self.add_edge(u, v, key) + self[u][v][key].update(ddd) + keylist.append(key) + return keylist + + def remove_edge(self, u, v, key=None): + """Remove an edge between u and v. + + Parameters + ---------- + u, v : nodes + Remove an edge between nodes u and v. + key : hashable identifier, optional (default=None) + Used to distinguish multiple edges between a pair of nodes. + If None, remove a single edge between u and v. If there are + multiple edges, removes the last edge added in terms of + insertion order. + + Raises + ------ + NetworkXError + If there is not an edge between u and v, or + if there is no edge with the specified key. + + See Also + -------- + remove_edges_from : remove a collection of edges + + Examples + -------- + >>> G = nx.MultiGraph() + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.remove_edge(0, 1) + >>> e = (1, 2) + >>> G.remove_edge(*e) # unpacks e from an edge tuple + + For multiple edges + + >>> G = nx.MultiGraph() # or MultiDiGraph, etc + >>> G.add_edges_from([(1, 2), (1, 2), (1, 2)]) # key_list returned + [0, 1, 2] + + When ``key=None`` (the default), edges are removed in the opposite + order that they were added: + + >>> G.remove_edge(1, 2) + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0), (1, 2, 1)]) + >>> G.remove_edge(2, 1) # edges are not directed + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 0)]) + + For edges with keys + + >>> G = nx.MultiGraph() + >>> G.add_edge(1, 2, key="first") + 'first' + >>> G.add_edge(1, 2, key="second") + 'second' + >>> G.remove_edge(1, 2, key="first") + >>> G.edges(keys=True) + MultiEdgeView([(1, 2, 'second')]) + + """ + try: + d = self._adj[u][v] + except KeyError as err: + raise NetworkXError(f"The edge {u}-{v} is not in the graph.") from err + # remove the edge with specified data + if key is None: + d.popitem() + else: + try: + del d[key] + except KeyError as err: + msg = f"The edge {u}-{v} with key {key} is not in the graph." + raise NetworkXError(msg) from err + if len(d) == 0: + # remove the key entries if last edge + del self._adj[u][v] + if u != v: # check for selfloop + del self._adj[v][u] + + def remove_edges_from(self, ebunch): + """Remove all edges specified in ebunch. + + Parameters + ---------- + ebunch: list or container of edge tuples + Each edge given in the list or container will be removed + from the graph. The edges can be: + + - 2-tuples (u, v) A single edge between u and v is removed. + - 3-tuples (u, v, key) The edge identified by key is removed. + - 4-tuples (u, v, key, data) where data is ignored. + + See Also + -------- + remove_edge : remove a single edge + + Notes + ----- + Will fail silently if an edge in ebunch is not in the graph. 
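The four tuple formats accepted by `add_edges_from` above can be mixed freely in a single call; a runnable sketch (the key `"named"` is an arbitrary illustration):

```python
import networkx as nx

G = nx.MultiGraph()
keys = G.add_edges_from(
    [
        (1, 2),                          # 2-tuple: auto-assigned integer key
        (1, 2, {"weight": 3}),           # 3-tuple with an edge data dict
        (1, 2, "named"),                 # 3-tuple with a non-dict key
        (1, 2, "named", {"weight": 9}),  # 4-tuple: key and data
    ]
)
assert keys == [0, 1, "named", "named"]      # last tuple updated the keyed edge
assert G.edges[1, 2, "named"] == {"weight": 9}
```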
+ + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> ebunch = [(1, 2), (2, 3)] + >>> G.remove_edges_from(ebunch) + + Removing multiple copies of edges + + >>> G = nx.MultiGraph() + >>> keys = G.add_edges_from([(1, 2), (1, 2), (1, 2)]) + >>> G.remove_edges_from([(1, 2), (2, 1)]) # edges aren't directed + >>> list(G.edges()) + [(1, 2)] + >>> G.remove_edges_from([(1, 2), (1, 2)]) # silently ignore extra copy + >>> list(G.edges) # now empty graph + [] + + When the edge is a 2-tuple ``(u, v)`` but there are multiple edges between + u and v in the graph, the most recent edge (in terms of insertion + order) is removed. + + >>> G = nx.MultiGraph() + >>> for key in ("x", "y", "a"): + ... k = G.add_edge(0, 1, key=key) + >>> G.edges(keys=True) + MultiEdgeView([(0, 1, 'x'), (0, 1, 'y'), (0, 1, 'a')]) + >>> G.remove_edges_from([(0, 1)]) + >>> G.edges(keys=True) + MultiEdgeView([(0, 1, 'x'), (0, 1, 'y')]) + + """ + for e in ebunch: + try: + self.remove_edge(*e[:3]) + except NetworkXError: + pass + + def has_edge(self, u, v, key=None): + """Returns True if the graph has an edge between nodes u and v. + + This is the same as `v in G[u] or key in G[u][v]` + without KeyError exceptions. + + Parameters + ---------- + u, v : nodes + Nodes can be, for example, strings or numbers. + + key : hashable identifier, optional (default=None) + If specified return True only if the edge with + key is found. + + Returns + ------- + edge_ind : bool + True if edge is in the graph, False otherwise. + + Examples + -------- + Can be called either using two nodes u, v, an edge tuple (u, v), + or an edge tuple (u, v, key). + + >>> G = nx.MultiGraph() # or MultiDiGraph + >>> nx.add_path(G, [0, 1, 2, 3]) + >>> G.has_edge(0, 1) # using two nodes + True + >>> e = (0, 1) + >>> G.has_edge(*e) # e is a 2-tuple (u, v) + True + >>> G.add_edge(0, 1, key="a") + 'a' + >>> G.has_edge(0, 1, key="a") # specify key + True + >>> G.has_edge(1, 0, key="a") # edges aren't directed + True + >>> e = (0, 1, "a") + >>> G.has_edge(*e) # e is a 3-tuple (u, v, 'a') + True + + The following syntax are equivalent: + + >>> G.has_edge(0, 1) + True + >>> 1 in G[0] # though this gives :exc:`KeyError` if 0 not in G + True + >>> 0 in G[1] # other order; also gives :exc:`KeyError` if 0 not in G + True + + """ + try: + if key is None: + return v in self._adj[u] + else: + return key in self._adj[u][v] + except KeyError: + return False + + @cached_property + def edges(self): + """Returns an iterator over the edges. + + edges(self, nbunch=None, data=False, keys=False, default=None) + + The MultiEdgeView provides set-like operations on the edge-tuples + as well as edge attribute lookup. When called, it also provides + an EdgeDataView object which allows control of access to edge + attributes (but does not provide set-like operations). + Hence, ``G.edges[u, v, k]['color']`` provides the value of the color + attribute for the edge from ``u`` to ``v`` with key ``k`` while + ``for (u, v, k, c) in G.edges(data='color', keys=True, default="red"):`` + iterates through all the edges yielding the color attribute with + default `'red'` if no color attribute exists. + + Edges are returned as tuples with optional data and keys + in the order (node, neighbor, key, data). If ``keys=True`` is not + provided, the tuples will just be (node, neighbor, data), but + multiple tuples with the same node and neighbor will be generated + when multiple edges exist between two nodes. 
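As a complement to the `has_edge` examples above, the key-specific form is the safe way to test for one particular parallel edge, since it never raises `KeyError` even when the nodes are absent; a short sketch:

```python
import networkx as nx

G = nx.MultiGraph()
G.add_edge(1, 2, key="a", color="red")

assert G.has_edge(1, 2)          # any edge between 1 and 2
assert G.has_edge(1, 2, "a")     # this specific key
assert not G.has_edge(1, 2, 0)   # no integer-keyed edge exists
assert not G.has_edge(7, 8)      # missing nodes: False, not KeyError
```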
+ + Parameters + ---------- + nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. + data : string or bool, optional (default=False) + The edge attribute returned in 3-tuple (u, v, ddict[data]). + If True, return edge attribute dict in 3-tuple (u, v, ddict). + If False, return 2-tuple (u, v). + keys : bool, optional (default=False) + If True, return edge keys with each edge, creating (u, v, k) + tuples or (u, v, k, d) tuples if data is also requested. + default : value, optional (default=None) + Value used for edges that don't have the requested attribute. + Only relevant if data is not True or False. + + Returns + ------- + edges : MultiEdgeView + A view of edge attributes, usually it iterates over (u, v) + (u, v, k) or (u, v, k, d) tuples of edges, but can also be + used for attribute lookup as ``edges[u, v, k]['foo']``. + + Notes + ----- + Nodes in nbunch that are not in the graph will be (quietly) ignored. + For directed graphs this returns the out-edges. + + Examples + -------- + >>> G = nx.MultiGraph() + >>> nx.add_path(G, [0, 1, 2]) + >>> key = G.add_edge(2, 3, weight=5) + >>> key2 = G.add_edge(2, 1, weight=2) # multi-edge + >>> [e for e in G.edges()] + [(0, 1), (1, 2), (1, 2), (2, 3)] + >>> G.edges.data() # default data is {} (empty dict) + MultiEdgeDataView([(0, 1, {}), (1, 2, {}), (1, 2, {'weight': 2}), (2, 3, {'weight': 5})]) + >>> G.edges.data("weight", default=1) + MultiEdgeDataView([(0, 1, 1), (1, 2, 1), (1, 2, 2), (2, 3, 5)]) + >>> G.edges(keys=True) # default keys are integers + MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 1), (2, 3, 0)]) + >>> G.edges.data(keys=True) + MultiEdgeDataView([(0, 1, 0, {}), (1, 2, 0, {}), (1, 2, 1, {'weight': 2}), (2, 3, 0, {'weight': 5})]) + >>> G.edges.data("weight", default=1, keys=True) + MultiEdgeDataView([(0, 1, 0, 1), (1, 2, 0, 1), (1, 2, 1, 2), (2, 3, 0, 5)]) + >>> G.edges([0, 3]) # Note ordering of tuples from listed sources + MultiEdgeDataView([(0, 1), (3, 2)]) + >>> G.edges([0, 3, 2, 1]) # Note ordering of tuples + MultiEdgeDataView([(0, 1), (3, 2), (2, 1), (2, 1)]) + >>> G.edges(0) + MultiEdgeDataView([(0, 1)]) + """ + return MultiEdgeView(self) + + def get_edge_data(self, u, v, key=None, default=None): + """Returns the attribute dictionary associated with edge (u, v, + key). + + If a key is not provided, returns a dictionary mapping edge keys + to attribute dictionaries for each edge between u and v. + + This is identical to `G[u][v][key]` except the default is returned + instead of an exception is the edge doesn't exist. + + Parameters + ---------- + u, v : nodes + + default : any Python object (default=None) + Value to return if the specific edge (u, v, key) is not + found, OR if there are no edges between u and v and no key + is specified. + + key : hashable identifier, optional (default=None) + Return data only for the edge with specified key, as an + attribute dictionary (rather than a dictionary mapping keys + to attribute dictionaries). + + Returns + ------- + edge_dict : dictionary + The edge attribute dictionary, OR a dictionary mapping edge + keys to attribute dictionaries for each of those edges if no + specific key is provided (even if there's only one edge + between u and v). 
+
+        Examples
+        --------
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> key = G.add_edge(0, 1, key="a", weight=7)
+        >>> G[0][1]["a"]  # key='a'
+        {'weight': 7}
+        >>> G.edges[0, 1, "a"]  # key='a'
+        {'weight': 7}
+
+        Warning: we protect the graph data structure by making
+        `G.edges` and `G[1][2]` read-only dict-like structures.
+        However, you can assign values to attributes in e.g.
+        `G.edges[1, 2, 'a']` or `G[1][2]['a']` using an additional
+        bracket as shown next. You need to specify all edge info
+        to assign to the edge data associated with an edge.
+
+        >>> G[0][1]["a"]["weight"] = 10
+        >>> G.edges[0, 1, "a"]["weight"] = 10
+        >>> G[0][1]["a"]["weight"]
+        10
+        >>> G.edges[1, 0, "a"]["weight"]
+        10
+
+        >>> G = nx.MultiGraph()  # or MultiDiGraph
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.edges[0, 1, 0]["weight"] = 5
+        >>> G.get_edge_data(0, 1)
+        {0: {'weight': 5}}
+        >>> e = (0, 1)
+        >>> G.get_edge_data(*e)  # tuple form
+        {0: {'weight': 5}}
+        >>> G.get_edge_data(3, 0)  # edge not in graph, returns None
+        >>> G.get_edge_data(3, 0, default=0)  # edge not in graph, return default
+        0
+        >>> G.get_edge_data(1, 0, 0)  # specific key gives back the attribute dict
+        {'weight': 5}
+        """
+        try:
+            if key is None:
+                return self._adj[u][v]
+            else:
+                return self._adj[u][v][key]
+        except KeyError:
+            return default
+
+    @cached_property
+    def degree(self):
+        """A DegreeView for the Graph as G.degree or G.degree().
+
+        The node degree is the number of edges adjacent to the node.
+        The weighted node degree is the sum of the edge weights for
+        edges incident to that node.
+
+        This object provides an iterator for (node, degree) as well as
+        lookup for the degree for a single node.
+
+        Parameters
+        ----------
+        nbunch : single node, container, or all nodes (default= all nodes)
+            The view will only report edges incident to these nodes.
+
+        weight : string or None, optional (default=None)
+            The name of an edge attribute that holds the numerical value used
+            as a weight. If None, then each edge has weight 1.
+            The degree is the sum of the edge weights adjacent to the node.
+
+        Returns
+        -------
+        MultiDegreeView or int
+            If multiple nodes are requested (the default), returns a `MultiDegreeView`
+            mapping nodes to their degree.
+            If a single node is requested, returns the degree of the node as an integer.
+
+        Examples
+        --------
+        >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
+        >>> nx.add_path(G, [0, 1, 2, 3])
+        >>> G.degree(0)  # node 0 with degree 1
+        1
+        >>> list(G.degree([0, 1]))
+        [(0, 1), (1, 2)]
+
+        """
+        return MultiDegreeView(self)
+
+    def is_multigraph(self):
+        """Returns True if graph is a multigraph, False otherwise."""
+        return True
+
+    def is_directed(self):
+        """Returns True if graph is directed, False otherwise."""
+        return False
+
+    def copy(self, as_view=False):
+        """Returns a copy of the graph.
+
+        The copy method by default returns an independent shallow copy
+        of the graph and attributes. That is, if an attribute is a
+        container, that container is shared by the original and the copy.
+        Use Python's `copy.deepcopy` for new containers.
+
+        If `as_view` is True then a view is returned instead of a copy.
+
+        Notes
+        -----
+        All copies reproduce the graph structure, but data attributes
+        may be handled in different ways. There are four types of copies
+        of a graph that people might want.
+
+        Deepcopy -- A "deepcopy" copies the graph structure as well as
+        all data attributes and any objects they might contain.
+        The entire graph object is new so that changes in the copy
+        do not affect the original object.
(see Python's copy.deepcopy) + + Data Reference (Shallow) -- For a shallow copy the graph structure + is copied but the edge, node and graph attribute dicts are + references to those in the original graph. This saves + time and memory but could cause confusion if you change an attribute + in one graph and it changes the attribute in the other. + NetworkX does not provide this level of shallow copy. + + Independent Shallow -- This copy creates new independent attribute + dicts and then does a shallow copy of the attributes. That is, any + attributes that are containers are shared between the new graph + and the original. This is exactly what `dict.copy()` provides. + You can obtain this style copy using: + + >>> G = nx.path_graph(5) + >>> H = G.copy() + >>> H = G.copy(as_view=False) + >>> H = nx.Graph(G) + >>> H = G.__class__(G) + + Fresh Data -- For fresh data, the graph structure is copied while + new empty data attribute dicts are created. The resulting graph + is independent of the original and it has no edge, node or graph + attributes. Fresh copies are not enabled. Instead use: + + >>> H = G.__class__() + >>> H.add_nodes_from(G) + >>> H.add_edges_from(G.edges) + + View -- Inspired by dict-views, graph-views act like read-only + versions of the original graph, providing a copy of the original + structure without requiring any memory for copying the information. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Parameters + ---------- + as_view : bool, optional (default=False) + If True, the returned graph-view provides a read-only view + of the original graph without actually copying any data. + + Returns + ------- + G : Graph + A copy of the graph. + + See Also + -------- + to_directed: return a directed copy of the graph. + + Examples + -------- + >>> G = nx.path_graph(4) # or DiGraph, MultiGraph, MultiDiGraph, etc + >>> H = G.copy() + + """ + if as_view is True: + return nx.graphviews.generic_graph_view(self) + G = self.__class__() + G.graph.update(self.graph) + G.add_nodes_from((n, d.copy()) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, datadict.copy()) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_directed(self, as_view=False): + """Returns a directed representation of the graph. + + Returns + ------- + G : MultiDiGraph + A directed graph with the same name, same nodes, and with + each edge (u, v, k, data) replaced by two directed edges + (u, v, k, data) and (v, u, k, data). + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar D=MultiDiGraph(G) which + returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiGraph to use dict-like objects + in the data structure, those changes do not transfer to the + MultiDiGraph created by this method. 
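Editor's sketch (not part of the vendored file): the Notes above contrast `G.to_directed()` with the shallow `MultiDiGraph(G)` constructor. The following illustration, using a hypothetical `tags` attribute, shows the practical difference.

```python
import networkx as nx

G = nx.MultiGraph()
G.add_edge(0, 1, key=0, tags=["keep"])

deep = G.to_directed()        # attribute containers are deep-copied
shallow = nx.MultiDiGraph(G)  # attribute containers are shared

G[0][1][0]["tags"].append("extra")
assert deep[0][1][0]["tags"] == ["keep"]              # unaffected
assert shallow[0][1][0]["tags"] == ["keep", "extra"]  # shares the list
```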
+ + Examples + -------- + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1) + 0 + >>> G.add_edge(0, 1) + 1 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1)] + + If already directed, return a (deep) copy + + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1) + 0 + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0)] + """ + graph_class = self.to_directed_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, deepcopy(datadict)) + for u, nbrs in self.adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def to_undirected(self, as_view=False): + """Returns an undirected copy of the graph. + + Returns + ------- + G : Graph/MultiGraph + A deepcopy of the graph. + + See Also + -------- + copy, add_edge, add_edges_from + + Notes + ----- + This returns a "deepcopy" of the edge, node, and + graph attributes which attempts to completely copy + all of the data and references. + + This is in contrast to the similar `G = nx.MultiGraph(D)` + which returns a shallow copy of the data. + + See the Python copy module for more information on shallow + and deep copies, https://docs.python.org/3/library/copy.html. + + Warning: If you have subclassed MultiGraph to use dict-like + objects in the data structure, those changes do not transfer + to the MultiGraph created by this method. + + Examples + -------- + >>> G = nx.MultiGraph([(0, 1), (0, 1), (1, 2)]) + >>> H = G.to_directed() + >>> list(H.edges) + [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 2, 0), (2, 1, 0)] + >>> G2 = H.to_undirected() + >>> list(G2.edges) + [(0, 1, 0), (0, 1, 1), (1, 2, 0)] + """ + graph_class = self.to_undirected_class() + if as_view is True: + return nx.graphviews.generic_graph_view(self, graph_class) + # deepcopy when not a view + G = graph_class() + G.graph.update(deepcopy(self.graph)) + G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) + G.add_edges_from( + (u, v, key, deepcopy(datadict)) + for u, nbrs in self._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + return G + + def number_of_edges(self, u=None, v=None): + """Returns the number of edges between two nodes. + + Parameters + ---------- + u, v : nodes, optional (Default=all edges) + If u and v are specified, return the number of edges between + u and v. Otherwise return the total number of all edges. + + Returns + ------- + nedges : int + The number of edges in the graph. If nodes `u` and `v` are + specified return the number of edges between those nodes. If + the graph is directed, this only returns the number of edges + from `u` to `v`. 
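Editor's sketch (not part of the vendored file): a quick confirmation of the deep-copy behaviour that `to_undirected` above documents, so mutating the copy leaves the original untouched.

```python
import networkx as nx

D = nx.MultiDiGraph()
D.add_edge(0, 1, key=0, color="red")

U = D.to_undirected()        # per the Notes above, a deep copy
U[0][1][0]["color"] = "blue"
assert D[0][1][0]["color"] == "red"  # the original is untouched
```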
+
+        See Also
+        --------
+        size
+
+        Examples
+        --------
+        For undirected multigraphs, this method counts the total number
+        of edges in the graph::
+
+            >>> G = nx.MultiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 2)])
+            [0, 1, 0]
+            >>> G.number_of_edges()
+            3
+
+        If you specify two nodes, this counts the total number of edges
+        joining the two nodes::
+
+            >>> G.number_of_edges(0, 1)
+            2
+
+        For directed multigraphs, this method can count the total number
+        of directed edges from `u` to `v`::
+
+            >>> G = nx.MultiDiGraph()
+            >>> G.add_edges_from([(0, 1), (0, 1), (1, 0)])
+            [0, 1, 0]
+            >>> G.number_of_edges(0, 1)
+            2
+            >>> G.number_of_edges(1, 0)
+            1
+
+        """
+        if u is None:
+            return self.size()
+        try:
+            edgedata = self._adj[u][v]
+        except KeyError:
+            return 0  # no such edge
+        return len(edgedata)
diff --git a/MLPY/Lib/site-packages/networkx/classes/reportviews.py b/MLPY/Lib/site-packages/networkx/classes/reportviews.py
new file mode 100644
index 0000000000000000000000000000000000000000..59a16243f9863f6b8981b6b2758348370d0a2b6e
--- /dev/null
+++ b/MLPY/Lib/site-packages/networkx/classes/reportviews.py
@@ -0,0 +1,1431 @@
+"""
+View Classes provide node, edge and degree "views" of a graph.
+
+Views for nodes, edges and degree are provided for all base graph classes.
+A view means a read-only object that is quick to create, automatically
+updated when the graph changes, and provides basic access like `n in V`,
+`for n in V`, `V[n]` and sometimes set operations.
+
+The views are read-only iterable containers that are updated as the
+graph is updated. As with dicts, the graph should not be updated
+while iterating through the view. Views can be iterated multiple times.
+
+Edge and Node views also allow data attribute lookup.
+The resulting attribute dict is writable as `G.edges[3, 4]['color']='red'`
+Degree views allow lookup of degree values for single nodes.
+Weighted degree is supported with the `weight` argument.
+
+NodeView
+========
+
+    `V = G.nodes` (or `V = G.nodes()`) allows `len(V)`, `n in V`, set
+    operations e.g. "G.nodes & H.nodes", and `dd = G.nodes[n]`, where
+    `dd` is the node data dict. Iteration is over the nodes by default.
+
+NodeDataView
+============
+
+    To iterate over (node, data) pairs, use arguments to `G.nodes()`
+    to create a DataView e.g. `DV = G.nodes(data='color', default='red')`.
+    The DataView iterates as `for n, color in DV` and allows
+    `(n, 'red') in DV`. Using `DV = G.nodes(data=True)`, the DataViews
+    use the full datadict in writeable form also allowing containment testing
+    as `(n, {'color': 'red'}) in DV`. DataViews allow set operations when
+    data attributes are hashable.
+
+DegreeView
+==========
+
+    `V = G.degree` allows iteration over (node, degree) pairs as well
+    as lookup: `deg=V[n]`. There are many flavors of DegreeView
+    for In/Out/Directed/Multi. For Directed Graphs, `G.degree`
+    counts both incoming and outgoing edges. `G.out_degree` and
+    `G.in_degree` count only specific directions.
+    Weighted degree using edge data attributes is provided via
+    `V = G.degree(weight='attr_name')` where any string with the
+    attribute name can be used. `weight=None` is the default.
+    No set operations are implemented for degrees, use NodeView.
+
+    The argument `nbunch` restricts iteration to nodes in nbunch.
+    The DegreeView can still lookup any node even if nbunch is specified.
+
+EdgeView
+========
+
+    `V = G.edges` or `V = G.edges()` allows iteration over edges as well as
+    `e in V`, set operations and edge data lookup `dd = G.edges[2, 3]`.
+ Iteration is over 2-tuples `(u, v)` for Graph/DiGraph. For multigraphs + edges 3-tuples `(u, v, key)` are the default but 2-tuples can be obtained + via `V = G.edges(keys=False)`. + + Set operations for directed graphs treat the edges as a set of 2-tuples. + For undirected graphs, 2-tuples are not a unique representation of edges. + So long as the set being compared to contains unique representations + of its edges, the set operations will act as expected. If the other + set contains both `(0, 1)` and `(1, 0)` however, the result of set + operations may contain both representations of the same edge. + +EdgeDataView +============ + + Edge data can be reported using an EdgeDataView typically created + by calling an EdgeView: `DV = G.edges(data='weight', default=1)`. + The EdgeDataView allows iteration over edge tuples, membership checking + but no set operations. + + Iteration depends on `data` and `default` and for multigraph `keys` + If `data is False` (the default) then iterate over 2-tuples `(u, v)`. + If `data is True` iterate over 3-tuples `(u, v, datadict)`. + Otherwise iterate over `(u, v, datadict.get(data, default))`. + For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` + to create 3-tuples and 4-tuples. + + The argument `nbunch` restricts edges to those incident to nodes in nbunch. +""" +from collections.abc import Mapping, Set + +import networkx as nx + +__all__ = [ + "NodeView", + "NodeDataView", + "EdgeView", + "OutEdgeView", + "InEdgeView", + "EdgeDataView", + "OutEdgeDataView", + "InEdgeDataView", + "MultiEdgeView", + "OutMultiEdgeView", + "InMultiEdgeView", + "MultiEdgeDataView", + "OutMultiEdgeDataView", + "InMultiEdgeDataView", + "DegreeView", + "DiDegreeView", + "InDegreeView", + "OutDegreeView", + "MultiDegreeView", + "DiMultiDegreeView", + "InMultiDegreeView", + "OutMultiDegreeView", +] + + +# NodeViews +class NodeView(Mapping, Set): + """A NodeView class to act as G.nodes for a NetworkX Graph + + Set operations act on the nodes without considering data. + Iteration is over nodes. Node data can be looked up like a dict. + Use NodeDataView to iterate over node data or to specify a data + attribute for lookup. NodeDataView is created by calling the NodeView. + + Parameters + ---------- + graph : NetworkX graph-like class + + Examples + -------- + >>> G = nx.path_graph(3) + >>> NV = G.nodes() + >>> 2 in NV + True + >>> for n in NV: + ... print(n) + 0 + 1 + 2 + >>> assert NV & {1, 2, 3} == {1, 2} + + >>> G.add_node(2, color="blue") + >>> NV[2] + {'color': 'blue'} + >>> G.add_node(8, color="red") + >>> NDV = G.nodes(data=True) + >>> (2, NV[2]) in NDV + True + >>> for n, dd in NDV: + ... print((n, dd.get("color", "aqua"))) + (0, 'aqua') + (1, 'aqua') + (2, 'blue') + (8, 'red') + >>> NDV[2] == NV[2] + True + + >>> NVdata = G.nodes(data="color", default="aqua") + >>> (2, NVdata[2]) in NVdata + True + >>> for n, dd in NVdata: + ... 
print((n, dd)) + (0, 'aqua') + (1, 'aqua') + (2, 'blue') + (8, 'red') + >>> NVdata[2] == NV[2] # NVdata gets 'color', NV gets datadict + False + """ + + __slots__ = ("_nodes",) + + def __getstate__(self): + return {"_nodes": self._nodes} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + + def __init__(self, graph): + self._nodes = graph._node + + # Mapping methods + def __len__(self): + return len(self._nodes) + + def __iter__(self): + return iter(self._nodes) + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]" + ) + return self._nodes[n] + + # Set methods + def __contains__(self, n): + return n in self._nodes + + @classmethod + def _from_iterable(cls, it): + return set(it) + + # DataView method + def __call__(self, data=False, default=None): + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def data(self, data=True, default=None): + """ + Return a read-only view of node data. + + Parameters + ---------- + data : bool or node data key, default=True + If ``data=True`` (the default), return a `NodeDataView` object that + maps each node to *all* of its attributes. `data` may also be an + arbitrary key, in which case the `NodeDataView` maps each node to + the value for the keyed attribute. In this case, if a node does + not have the `data` attribute, the `default` value is used. + default : object, default=None + The value used when a node does not have a specific attribute. + + Returns + ------- + NodeDataView + The layout of the returned NodeDataView depends on the value of the + `data` parameter. + + Notes + ----- + If ``data=False``, returns a `NodeView` object without data. + + See Also + -------- + NodeDataView + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_nodes_from([ + ... (0, {"color": "red", "weight": 10}), + ... (1, {"color": "blue"}), + ... (2, {"color": "yellow", "weight": 2}) + ... ]) + + Accessing node data with ``data=True`` (the default) returns a + NodeDataView mapping each node to all of its attributes: + + >>> G.nodes.data() + NodeDataView({0: {'color': 'red', 'weight': 10}, 1: {'color': 'blue'}, 2: {'color': 'yellow', 'weight': 2}}) + + If `data` represents a key in the node attribute dict, a NodeDataView mapping + the nodes to the value for that specific key is returned: + + >>> G.nodes.data("color") + NodeDataView({0: 'red', 1: 'blue', 2: 'yellow'}, data='color') + + If a specific key is not found in an attribute dict, the value specified + by `default` is returned: + + >>> G.nodes.data("weight", default=-999) + NodeDataView({0: 10, 1: -999, 2: 2}, data='weight') + + Note that there is no check that the `data` key is in any of the + node attribute dictionaries: + + >>> G.nodes.data("height") + NodeDataView({0: None, 1: None, 2: None}, data='height') + """ + if data is False: + return self + return NodeDataView(self._nodes, data, default) + + def __str__(self): + return str(list(self)) + + def __repr__(self): + return f"{self.__class__.__name__}({tuple(self)})" + + +class NodeDataView(Set): + """A DataView class for nodes of a NetworkX Graph + + The main use for this class is to iterate through node-data pairs. + The data can be the entire data-dictionary for each node, or it + can be a specific attribute (with default) for each node. + Set operations are enabled with NodeDataView, but don't work in + cases where the data is not hashable. Use with caution. 
+ Typically, set operations on nodes use NodeView, not NodeDataView. + That is, they use `G.nodes` instead of `G.nodes(data='foo')`. + + Parameters + ========== + graph : NetworkX graph-like class + data : bool or string (default=False) + default : object (default=None) + """ + + __slots__ = ("_nodes", "_data", "_default") + + def __getstate__(self): + return {"_nodes": self._nodes, "_data": self._data, "_default": self._default} + + def __setstate__(self, state): + self._nodes = state["_nodes"] + self._data = state["_data"] + self._default = state["_default"] + + def __init__(self, nodedict, data=False, default=None): + self._nodes = nodedict + self._data = data + self._default = default + + @classmethod + def _from_iterable(cls, it): + try: + return set(it) + except TypeError as err: + if "unhashable" in str(err): + msg = " : Could be b/c data=True or your values are unhashable" + raise TypeError(str(err) + msg) from err + raise + + def __len__(self): + return len(self._nodes) + + def __iter__(self): + data = self._data + if data is False: + return iter(self._nodes) + if data is True: + return iter(self._nodes.items()) + return ( + (n, dd[data] if data in dd else self._default) + for n, dd in self._nodes.items() + ) + + def __contains__(self, n): + try: + node_in = n in self._nodes + except TypeError: + n, d = n + return n in self._nodes and self[n] == d + if node_in is True: + return node_in + try: + n, d = n + except (TypeError, ValueError): + return False + return n in self._nodes and self[n] == d + + def __getitem__(self, n): + if isinstance(n, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.nodes.data())[{n.start}:{n.stop}:{n.step}]" + ) + ddict = self._nodes[n] + data = self._data + if data is False or data is True: + return ddict + return ddict[data] if data in ddict else self._default + + def __str__(self): + return str(list(self)) + + def __repr__(self): + name = self.__class__.__name__ + if self._data is False: + return f"{name}({tuple(self)})" + if self._data is True: + return f"{name}({dict(self)})" + return f"{name}({dict(self)}, data={self._data!r})" + + +# DegreeViews +class DiDegreeView: + """A View class for degree of nodes in a NetworkX Graph + + The functionality is like dict.items() with (node, degree) pairs. + Additional functionality includes read-only lookup of node degree, + and calling with optional features nbunch (for only a subset of nodes) + and weight (use edge weights to compute degree). + + Parameters + ========== + graph : NetworkX graph-like class + nbunch : node, container of nodes, or None meaning all nodes (default=None) + weight : bool or string (default=None) + + Notes + ----- + DegreeView can still lookup any node even if nbunch is specified. 
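Editor's sketch (not part of the vendored file): the `_from_iterable` hook defined above augments the TypeError raised when set operations hit unhashable node data, as this illustration shows.

```python
import networkx as nx

G = nx.Graph()
G.add_node(0, color="red")
G.add_node(1, color="blue")

# Hashable attribute values: set operations work as expected.
assert G.nodes(data="color") & {(0, "red")} == {(0, "red")}

# data=True yields (node, dict) pairs; dicts are unhashable, so
# _from_iterable raises the augmented TypeError defined above.
try:
    G.nodes(data=True) & [(0, {"color": "red"})]
except TypeError as err:
    assert "unhashable" in str(err)
```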
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> DV = G.degree()
+    >>> assert DV[2] == 1
+    >>> assert sum(deg for n, deg in DV) == 4
+
+    >>> DVweight = G.degree(weight="span")
+    >>> G.add_edge(1, 2, span=34)
+    >>> DVweight[2]
+    34
+    >>> DVweight[0]  # default edge weight is 1
+    1
+    >>> sum(span for n, span in DVweight)  # sum weighted degrees
+    70
+
+    >>> DVnbunch = G.degree(nbunch=(1, 2))
+    >>> assert len(list(DVnbunch)) == 2  # iteration over nbunch only
+    """
+
+    def __init__(self, G, nbunch=None, weight=None):
+        self._graph = G
+        self._succ = G._succ if hasattr(G, "_succ") else G._adj
+        self._pred = G._pred if hasattr(G, "_pred") else G._adj
+        self._nodes = self._succ if nbunch is None else list(G.nbunch_iter(nbunch))
+        self._weight = weight
+
+    def __call__(self, nbunch=None, weight=None):
+        if nbunch is None:
+            if weight == self._weight:
+                return self
+            return self.__class__(self._graph, None, weight)
+        try:
+            if nbunch in self._nodes:
+                if weight == self._weight:
+                    return self[nbunch]
+                return self.__class__(self._graph, None, weight)[nbunch]
+        except TypeError:
+            pass
+        return self.__class__(self._graph, nbunch, weight)
+
+    def __getitem__(self, n):
+        weight = self._weight
+        succs = self._succ[n]
+        preds = self._pred[n]
+        if weight is None:
+            return len(succs) + len(preds)
+        return sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+            dd.get(weight, 1) for dd in preds.values()
+        )
+
+    def __iter__(self):
+        weight = self._weight
+        if weight is None:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                yield (n, len(succs) + len(preds))
+        else:
+            for n in self._nodes:
+                succs = self._succ[n]
+                preds = self._pred[n]
+                deg = sum(dd.get(weight, 1) for dd in succs.values()) + sum(
+                    dd.get(weight, 1) for dd in preds.values()
+                )
+                yield (n, deg)
+
+    def __len__(self):
+        return len(self._nodes)
+
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({dict(self)})"
+
+
+class DegreeView(DiDegreeView):
+    """A DegreeView class to act as G.degree for a NetworkX Graph
+
+    Typical usage focuses on iteration over `(node, degree)` pairs.
+    The degree is by default the number of edges incident to the node.
+    Optional argument `weight` enables weighted degree using the edge
+    attribute named in the `weight` argument. Reporting and iteration
+    can also be restricted to a subset of nodes using `nbunch`.
+
+    Additional functionality includes node lookup so that `G.degree[n]`
+    reports the (possibly weighted) degree of node `n`. Calling the
+    view creates a view with different arguments `nbunch` or `weight`.
+
+    Parameters
+    ==========
+    graph : NetworkX graph-like class
+    nbunch : node, container of nodes, or None meaning all nodes (default=None)
+    weight : string or None (default=None)
+
+    Notes
+    -----
+    DegreeView can still lookup any node even if nbunch is specified.
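Editor's sketch (not part of the vendored file): the `DiDegreeView` implementation above sums successor and predecessor contributions, so total degree decomposes into out-degree plus in-degree, with a hypothetical weight attribute `w`.

```python
import networkx as nx

D = nx.DiGraph()
D.add_edge(0, 1, w=2.0)
D.add_edge(1, 0, w=3.0)
D.add_edge(1, 2, w=5.0)

# DiDegreeView sums the successor and predecessor contributions,
# so total degree splits into out_degree plus in_degree.
for n in D:
    total = D.degree(weight="w")[n]
    assert total == D.out_degree(weight="w")[n] + D.in_degree(weight="w")[n]

assert D.degree(weight="w")[1] == 10.0  # 2.0 in + (3.0 + 5.0) out
```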
+ + Examples + -------- + >>> G = nx.path_graph(3) + >>> DV = G.degree() + >>> assert DV[2] == 1 + >>> assert G.degree[2] == 1 + >>> assert sum(deg for n, deg in DV) == 4 + + >>> DVweight = G.degree(weight="span") + >>> G.add_edge(1, 2, span=34) + >>> DVweight[2] + 34 + >>> DVweight[0] # default edge weight is 1 + 1 + >>> sum(span for n, span in DVweight) # sum weighted degrees + 70 + + >>> DVnbunch = G.degree(nbunch=(1, 2)) + >>> assert len(list(DVnbunch)) == 2 # iteration over nbunch only + """ + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return len(nbrs) + (n in nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + yield (n, len(nbrs) + (n in nbrs)) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in nbrs.values()) + ( + n in nbrs and nbrs[n].get(weight, 1) + ) + yield (n, deg) + + +class OutDegreeView(DiDegreeView): + """A DegreeView class to report out_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if self._weight is None: + return len(nbrs) + return sum(dd.get(self._weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + yield (n, len(succs)) + else: + for n in self._nodes: + succs = self._succ[n] + deg = sum(dd.get(weight, 1) for dd in succs.values()) + yield (n, deg) + + +class InDegreeView(DiDegreeView): + """A DegreeView class to report in_degree for a DiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return len(nbrs) + return sum(dd.get(weight, 1) for dd in nbrs.values()) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + preds = self._pred[n] + yield (n, len(preds)) + else: + for n in self._nodes: + preds = self._pred[n] + deg = sum(dd.get(weight, 1) for dd in preds.values()) + yield (n, deg) + + +class MultiDegreeView(DiDegreeView): + """A DegreeView class for undirected multigraphs; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + # edge weighted graph - degree is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(keys) for keys in nbrs.values()) + ( + n in nbrs and len(nbrs[n]) + ) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + if n in nbrs: + deg += sum(d.get(weight, 1) for d in nbrs[n].values()) + yield (n, deg) + + +class DiMultiDegreeView(DiDegreeView): + """A DegreeView class for MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + succs = self._succ[n] + preds = self._pred[n] + if weight is None: + return sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + # edge weighted graph - degree 
is sum of nbr edge weights + deg = sum( + d.get(weight, 1) for key_dict in succs.values() for d in key_dict.values() + ) + sum( + d.get(weight, 1) for key_dict in preds.values() for d in key_dict.values() + ) + return deg + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum(len(keys) for keys in succs.values()) + sum( + len(keys) for keys in preds.values() + ) + yield (n, deg) + else: + for n in self._nodes: + succs = self._succ[n] + preds = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in succs.values() + for d in key_dict.values() + ) + sum( + d.get(weight, 1) + for key_dict in preds.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class InMultiDegreeView(DiDegreeView): + """A DegreeView class for inward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._pred[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._pred[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +class OutMultiDegreeView(DiDegreeView): + """A DegreeView class for outward degree of MultiDiGraph; See DegreeView""" + + def __getitem__(self, n): + weight = self._weight + nbrs = self._succ[n] + if weight is None: + return sum(len(data) for data in nbrs.values()) + # edge weighted graph - degree is sum of nbr edge weights + return sum( + d.get(weight, 1) for key_dict in nbrs.values() for d in key_dict.values() + ) + + def __iter__(self): + weight = self._weight + if weight is None: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum(len(data) for data in nbrs.values()) + yield (n, deg) + else: + for n in self._nodes: + nbrs = self._succ[n] + deg = sum( + d.get(weight, 1) + for key_dict in nbrs.values() + for d in key_dict.values() + ) + yield (n, deg) + + +# EdgeDataViews +class OutEdgeDataView: + """EdgeDataView for outward edges of DiGraph; See EdgeDataView""" + + __slots__ = ( + "_viewer", + "_nbunch", + "_data", + "_default", + "_adjdict", + "_nodes_nbrs", + "_report", + ) + + def __getstate__(self): + return { + "viewer": self._viewer, + "nbunch": self._nbunch, + "data": self._data, + "default": self._default, + } + + def __setstate__(self, state): + self.__init__(**state) + + def __init__(self, viewer, nbunch=None, data=False, *, default=None): + self._viewer = viewer + adjdict = self._adjdict = viewer._adjdict + if nbunch is None: + self._nodes_nbrs = adjdict.items + else: + # dict retains order of nodes but acts like a set + nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch)) + self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch] + self._nbunch = nbunch + self._data = data + self._default = default + # Set _report based on data and default + if data is True: + self._report = lambda n, nbr, dd: (n, nbr, dd) + elif data is False: + self._report = lambda n, nbr, dd: (n, nbr) + else: # data is attribute name + self._report = ( + lambda n, nbr, dd: (n, nbr, dd[data]) + if data in dd + else (n, nbr, default) + ) + + def __len__(self): + return 
sum(len(nbrs) for n, nbrs in self._nodes_nbrs())
+
+    def __iter__(self):
+        return (
+            self._report(n, nbr, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, dd in nbrs.items()
+        )
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and u not in self._nbunch:
+            return False  # this edge doesn't start in nbunch
+        try:
+            ddict = self._adjdict[u][v]
+        except KeyError:
+            return False
+        return e == self._report(u, v, ddict)
+
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({list(self)})"
+
+
+class EdgeDataView(OutEdgeDataView):
+    """A EdgeDataView class for edges of Graph
+
+    This view is primarily used to iterate over the edges reporting
+    edges as node-tuples with edge data optionally reported. The
+    argument `nbunch` allows restriction to edges incident to nodes
+    in that container/singleton. The default (nbunch=None)
+    reports all edges. The arguments `data` and `default` control
+    what edge data is reported. The default `data is False` reports
+    only node-tuples for each edge. If `data is True` the entire edge
+    data dict is returned. Otherwise `data` is assumed to hold the name
+    of the edge attribute to report with default `default` if that
+    edge attribute is not present.
+
+    Parameters
+    ----------
+    nbunch : container of nodes, node or None (default None)
+    data : False, True or string (default False)
+    default : default value (default None)
+
+    Examples
+    --------
+    >>> G = nx.path_graph(3)
+    >>> G.add_edge(1, 2, foo="bar")
+    >>> list(G.edges(data="foo", default="biz"))
+    [(0, 1, 'biz'), (1, 2, 'bar')]
+    >>> assert (0, 1, "biz") in G.edges(data="foo", default="biz")
+    """
+
+    __slots__ = ()
+
+    def __len__(self):
+        return sum(1 for e in self)
+
+    def __iter__(self):
+        seen = {}
+        for n, nbrs in self._nodes_nbrs():
+            for nbr, dd in nbrs.items():
+                if nbr not in seen:
+                    yield self._report(n, nbr, dd)
+            seen[n] = 1
+        del seen
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch:
+            return False  # this edge doesn't start and it doesn't end in nbunch
+        try:
+            ddict = self._adjdict[u][v]
+        except KeyError:
+            return False
+        return e == self._report(u, v, ddict)
+
+
+class InEdgeDataView(OutEdgeDataView):
+    """An EdgeDataView class for inward edges of DiGraph; See EdgeDataView"""
+
+    __slots__ = ()
+
+    def __iter__(self):
+        return (
+            self._report(nbr, n, dd)
+            for n, nbrs in self._nodes_nbrs()
+            for nbr, dd in nbrs.items()
+        )
+
+    def __contains__(self, e):
+        u, v = e[:2]
+        if self._nbunch is not None and v not in self._nbunch:
+            return False  # this edge doesn't end in nbunch
+        try:
+            ddict = self._adjdict[v][u]
+        except KeyError:
+            return False
+        return e == self._report(u, v, ddict)
+
+
+class OutMultiEdgeDataView(OutEdgeDataView):
+    """An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView"""
+
+    __slots__ = ("keys",)
+
+    def __getstate__(self):
+        return {
+            "viewer": self._viewer,
+            "nbunch": self._nbunch,
+            "keys": self.keys,
+            "data": self._data,
+            "default": self._default,
+        }
+
+    def __setstate__(self, state):
+        self.__init__(**state)
+
+    def __init__(self, viewer, nbunch=None, data=False, *, default=None, keys=False):
+        self._viewer = viewer
+        adjdict = self._adjdict = viewer._adjdict
+        self.keys = keys
+        if nbunch is None:
+            self._nodes_nbrs = adjdict.items
+        else:
+            # dict retains order of nodes but acts like a set
+            nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch))
+            self._nodes_nbrs = lambda: [(n,
adjdict[n]) for n in nbunch] + self._nbunch = nbunch + self._data = data + self._default = default + # Set _report based on data and default + if data is True: + if keys is True: + self._report = lambda n, nbr, k, dd: (n, nbr, k, dd) + else: + self._report = lambda n, nbr, k, dd: (n, nbr, dd) + elif data is False: + if keys is True: + self._report = lambda n, nbr, k, dd: (n, nbr, k) + else: + self._report = lambda n, nbr, k, dd: (n, nbr) + else: # data is attribute name + if keys is True: + self._report = ( + lambda n, nbr, k, dd: (n, nbr, k, dd[data]) + if data in dd + else (n, nbr, k, default) + ) + else: + self._report = ( + lambda n, nbr, k, dd: (n, nbr, dd[data]) + if data in dd + else (n, nbr, default) + ) + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + return ( + self._report(n, nbr, k, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, kd in nbrs.items() + for k, dd in kd.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch: + return False # this edge doesn't start in nbunch + try: + kdict = self._adjdict[u][v] + except KeyError: + return False + if self.keys is True: + k = e[2] + try: + dd = kdict[k] + except KeyError: + return False + return e == self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +class MultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView class for edges of MultiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield self._report(n, nbr, k, dd) + seen[n] = 1 + del seen + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and u not in self._nbunch and v not in self._nbunch: + return False # this edge doesn't start and doesn't end in nbunch + try: + kdict = self._adjdict[u][v] + except KeyError: + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + try: + dd = kdict[k] + except KeyError: + return False + return e == self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +class InMultiEdgeDataView(OutMultiEdgeDataView): + """An EdgeDataView for inward edges of MultiDiGraph; See EdgeDataView""" + + __slots__ = () + + def __iter__(self): + return ( + self._report(nbr, n, k, dd) + for n, nbrs in self._nodes_nbrs() + for nbr, kd in nbrs.items() + for k, dd in kd.items() + ) + + def __contains__(self, e): + u, v = e[:2] + if self._nbunch is not None and v not in self._nbunch: + return False # this edge doesn't end in nbunch + try: + kdict = self._adjdict[v][u] + except KeyError: + return False + if self.keys is True: + k = e[2] + dd = kdict[k] + return e == self._report(u, v, k, dd) + return any(e == self._report(u, v, k, dd) for k, dd in kdict.items()) + + +# EdgeViews have set operations and no data reported +class OutEdgeView(Set, Mapping): + """A EdgeView class for outward edges of a DiGraph""" + + __slots__ = ("_adjdict", "_graph", "_nodes_nbrs") + + def __getstate__(self): + return {"_graph": self._graph, "_adjdict": self._adjdict} + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + @classmethod + def _from_iterable(cls, it): + return set(it) + + dataview = OutEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._succ if 
hasattr(G, "succ") else G._adj + self._nodes_nbrs = self._adjdict.items + + # Set methods + def __len__(self): + return sum(len(nbrs) for n, nbrs in self._nodes_nbrs()) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (n, nbr) + + def __contains__(self, e): + try: + u, v = e + return v in self._adjdict[u] + except KeyError: + return False + + # Mapping Methods + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + return self._adjdict[u][v] + + # EdgeDataView methods + def __call__(self, nbunch=None, data=False, *, default=None): + if nbunch is None and data is False: + return self + return self.dataview(self, nbunch, data, default=default) + + def data(self, data=True, default=None, nbunch=None): + """ + Return a read-only view of edge data. + + Parameters + ---------- + data : bool or edge attribute key + If ``data=True``, then the data view maps each edge to a dictionary + containing all of its attributes. If `data` is a key in the edge + dictionary, then the data view maps each edge to its value for + the keyed attribute. In this case, if the edge doesn't have the + attribute, the `default` value is returned. + default : object, default=None + The value used when an edge does not have a specific attribute + nbunch : container of nodes, optional (default=None) + Allows restriction to edges only involving certain nodes. All edges + are considered by default. + + Returns + ------- + dataview + Returns an `EdgeDataView` for undirected Graphs, `OutEdgeDataView` + for DiGraphs, `MultiEdgeDataView` for MultiGraphs and + `OutMultiEdgeDataView` for MultiDiGraphs. + + Notes + ----- + If ``data=False``, returns an `EdgeView` without any edge data. + + See Also + -------- + EdgeDataView + OutEdgeDataView + MultiEdgeDataView + OutMultiEdgeDataView + + Examples + -------- + >>> G = nx.Graph() + >>> G.add_edges_from([ + ... (0, 1, {"dist": 3, "capacity": 20}), + ... (1, 2, {"dist": 4}), + ... (2, 0, {"dist": 5}) + ... 
])
+
+        Accessing edge data with ``data=True`` (the default) returns an
+        edge data view object listing each edge with all of its attributes:
+
+        >>> G.edges.data()
+        EdgeDataView([(0, 1, {'dist': 3, 'capacity': 20}), (0, 2, {'dist': 5}), (1, 2, {'dist': 4})])
+
+        If `data` represents a key in the edge attribute dict, a dataview listing
+        each edge with its value for that specific key is returned:
+
+        >>> G.edges.data("dist")
+        EdgeDataView([(0, 1, 3), (0, 2, 5), (1, 2, 4)])
+
+        `nbunch` can be used to limit the edges:
+
+        >>> G.edges.data("dist", nbunch=[0])
+        EdgeDataView([(0, 1, 3), (0, 2, 5)])
+
+        If a specific key is not found in an edge attribute dict, the value
+        specified by `default` is used:
+
+        >>> G.edges.data("capacity")
+        EdgeDataView([(0, 1, 20), (0, 2, None), (1, 2, None)])
+
+        Note that there is no check that the `data` key is present in any of
+        the edge attribute dictionaries:
+
+        >>> G.edges.data("speed")
+        EdgeDataView([(0, 1, None), (0, 2, None), (1, 2, None)])
+        """
+        if nbunch is None and data is False:
+            return self
+        return self.dataview(self, nbunch, data, default=default)
+
+    # String Methods
+    def __str__(self):
+        return str(list(self))
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({list(self)})"
+
+
+class EdgeView(OutEdgeView):
+    """A EdgeView class for edges of a Graph
+
+    This densely packed View allows iteration over edges, data lookup
+    like a dict and set operations on edges represented by node-tuples.
+    In addition, edge data can be controlled by calling this object
+    possibly creating an EdgeDataView. Typically edges are iterated over
+    and reported as `(u, v)` node tuples or `(u, v, key)` node/key tuples
+    for multigraphs. Those edge representations can also be used to
+    look up the data dict for any edge. Set operations are also available
+    where those tuples are the elements of the set.
+    Calling this object with optional arguments `data`, `default` and `keys`
+    controls the form of the tuple (see EdgeDataView). Optional argument
+    `nbunch` allows restriction to edges only involving certain nodes.
+
+    If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
+    If `data is True` iterate over 3-tuples `(u, v, datadict)`.
+    Otherwise iterate over `(u, v, datadict.get(data, default))`.
+    For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` above.
+
+    Parameters
+    ==========
+    graph : NetworkX graph-like class
+    nbunch : (default= all nodes in graph) only report edges with these nodes
+    keys : (only for MultiGraph. default=False) report edge key in tuple
+    data : bool or string (default=False) see above
+    default : object (default=None)
+
+    Examples
+    ========
+    >>> G = nx.path_graph(4)
+    >>> EV = G.edges()
+    >>> (2, 3) in EV
+    True
+    >>> for u, v in EV:
+    ...     print((u, v))
+    (0, 1)
+    (1, 2)
+    (2, 3)
+    >>> assert EV & {(1, 2), (3, 4)} == {(1, 2)}
+
+    >>> EVdata = G.edges(data="color", default="aqua")
+    >>> G.add_edge(2, 3, color="blue")
+    >>> assert (2, 3, "blue") in EVdata
+    >>> for u, v, c in EVdata:
+    ...     print(f"({u}, {v}) has color: {c}")
+    (0, 1) has color: aqua
+    (1, 2) has color: aqua
+    (2, 3) has color: blue
+
+    >>> EVnbunch = G.edges(nbunch=2)
+    >>> assert (2, 3) in EVnbunch
+    >>> assert (0, 1) not in EVnbunch
+    >>> for u, v in EVnbunch:
+    ...
assert u == 2 or v == 2 + + >>> MG = nx.path_graph(4, create_using=nx.MultiGraph) + >>> EVmulti = MG.edges(keys=True) + >>> (2, 3, 0) in EVmulti + True + >>> (2, 3) in EVmulti # 2-tuples work even when keys is True + True + >>> key = MG.add_edge(2, 3) + >>> for u, v, k in EVmulti: + ... print((u, v, k)) + (0, 1, 0) + (1, 2, 0) + (2, 3, 0) + (2, 3, 1) + """ + + __slots__ = () + + dataview = EdgeDataView + + def __len__(self): + num_nbrs = (len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs()) + return sum(num_nbrs) // 2 + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr in list(nbrs): + if nbr not in seen: + yield (n, nbr) + seen[n] = 1 + del seen + + def __contains__(self, e): + try: + u, v = e[:2] + return v in self._adjdict[u] or u in self._adjdict[v] + except (KeyError, ValueError): + return False + + +class InEdgeView(OutEdgeView): + """A EdgeView class for inward edges of a DiGraph""" + + __slots__ = () + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr in nbrs: + yield (nbr, n) + + def __contains__(self, e): + try: + u, v = e + return u in self._adjdict[v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v = e + return self._adjdict[v][u] + + +class OutMultiEdgeView(OutEdgeView): + """A EdgeView class for outward edges of a MultiDiGraph""" + + __slots__ = () + + dataview = OutMultiEdgeDataView + + def __len__(self): + return sum( + len(kdict) for n, nbrs in self._nodes_nbrs() for nbr, kdict in nbrs.items() + ) + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (n, nbr, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[u][v] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[u][v][k] + + def __call__(self, nbunch=None, data=False, *, default=None, keys=False): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, default=default, keys=keys) + + def data(self, data=True, default=None, nbunch=None, keys=False): + if nbunch is None and data is False and keys is True: + return self + return self.dataview(self, nbunch, data, default=default, keys=keys) + + +class MultiEdgeView(OutMultiEdgeView): + """A EdgeView class for edges of a MultiGraph""" + + __slots__ = () + + dataview = MultiEdgeDataView + + def __len__(self): + return sum(1 for e in self) + + def __iter__(self): + seen = {} + for n, nbrs in self._nodes_nbrs(): + for nbr, kd in nbrs.items(): + if nbr not in seen: + for k, dd in kd.items(): + yield (n, nbr, k) + seen[n] = 1 + del seen + + +class InMultiEdgeView(OutMultiEdgeView): + """A EdgeView class for inward edges of a 
MultiDiGraph""" + + __slots__ = () + + def __setstate__(self, state): + self._graph = state["_graph"] + self._adjdict = state["_adjdict"] + self._nodes_nbrs = self._adjdict.items + + dataview = InMultiEdgeDataView + + def __init__(self, G): + self._graph = G + self._adjdict = G._pred if hasattr(G, "pred") else G._adj + self._nodes_nbrs = self._adjdict.items + + def __iter__(self): + for n, nbrs in self._nodes_nbrs(): + for nbr, kdict in nbrs.items(): + for key in kdict: + yield (nbr, n, key) + + def __contains__(self, e): + N = len(e) + if N == 3: + u, v, k = e + elif N == 2: + u, v = e + k = 0 + else: + raise ValueError("MultiEdge must have length 2 or 3") + try: + return k in self._adjdict[v][u] + except KeyError: + return False + + def __getitem__(self, e): + if isinstance(e, slice): + raise nx.NetworkXError( + f"{type(self).__name__} does not support slicing, " + f"try list(G.in_edges)[{e.start}:{e.stop}:{e.step}]" + ) + u, v, k = e + return self._adjdict[v][u][k] diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__init__.py b/MLPY/Lib/site-packages/networkx/classes/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..919122ccce50636a78997cd0d6da62f2b492434a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3471f1a9f759a4a69fe855fef23f053996886f1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/dispatch_interface.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b7a51adbca4f579c8d8a726b4e73274264826d2 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/historical_tests.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_backends.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_backends.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aac0217d2c9aa212f9c55e89e59f9c34493e13c8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_backends.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ac75845e52d9eec8527862835982897fa382978 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_coreviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-39.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..109f54be7b3acbf43e13e2e0bc36abfa9e5f73e8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0adc1798679db8b71c0d0b5e128fe9685c7d5e40 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_digraph_historical.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f77f0c3d115c8b7402276d5f3ebe68948709f2db Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_filters.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d790e03306146daef9fef894ff218b47ad090cc1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_function.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51a723b2e430d60769b1102c3c774981713dc83d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77b4cfda1aab67e174022b8dde8d6221f1db93f3 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graph_historical.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bddb256a1c33691cc73270e5b0ec5cfa8056767 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_graphviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52eb0ee868061dc3a9501b48511ff270b5e64b4a Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multidigraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47fafbedd7eef6ef6ab24ce836dbddf85a9256af Binary files /dev/null and 
b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_multigraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9065f7296f58a8e4fe2ba8b9c7514e128753b61 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_reportviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83b482a9fdb4b82c9c9d32ba3a6848a26f44b80d Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_special.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6291dc874c08ab9f810185d702314e0c6b8c7b18 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/classes/tests/__pycache__/test_subgraphviews.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/dispatch_interface.py b/MLPY/Lib/site-packages/networkx/classes/tests/dispatch_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..5cef755241b312d0a3fa8e4364297dd457653790 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/dispatch_interface.py @@ -0,0 +1,194 @@ +# This file contains utilities for testing the dispatching feature + +# A full test of all dispatchable algorithms is performed by +# modifying the pytest invocation and setting an environment variable +# NETWORKX_TEST_BACKEND=nx-loopback pytest +# This is comprehensive, but only tests the `test_override_dispatch` +# function in networkx.classes.backends. + +# To test the `_dispatch` function directly, several tests scattered throughout +# NetworkX have been augmented to test normal and dispatch mode. +# Searching for `dispatch_interface` should locate the specific tests. 
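Editor's sketch (not part of the vendored file): the comments above describe how the loopback backend exercises dispatching. The minimal shape of a dispatchable graph type is shown below; the "nx-loopback" name is only resolved because this test package registers it through an entry point elsewhere, so this snippet alone does not enable dispatching.

```python
import networkx as nx

class SketchGraph(nx.Graph):
    # NetworkX inspects this attribute to decide which backend should
    # handle algorithm calls that receive instances of this class.
    __networkx_backend__ = "nx-loopback"

G = SketchGraph([(0, 1), (1, 2)])
# With NETWORKX_TEST_BACKEND=nx-loopback set (see the comments above),
# algorithm calls on G are routed through the LoopbackDispatcher below.
```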
+ +import networkx as nx +from networkx import DiGraph, Graph, MultiDiGraph, MultiGraph, PlanarEmbedding +from networkx.classes.reportviews import NodeView + + +class LoopbackGraph(Graph): + __networkx_backend__ = "nx-loopback" + + +class LoopbackDiGraph(DiGraph): + __networkx_backend__ = "nx-loopback" + + +class LoopbackMultiGraph(MultiGraph): + __networkx_backend__ = "nx-loopback" + + +class LoopbackMultiDiGraph(MultiDiGraph): + __networkx_backend__ = "nx-loopback" + + +class LoopbackPlanarEmbedding(PlanarEmbedding): + __networkx_backend__ = "nx-loopback" + + +def convert(graph): + if isinstance(graph, PlanarEmbedding): + return LoopbackPlanarEmbedding(graph) + if isinstance(graph, MultiDiGraph): + return LoopbackMultiDiGraph(graph) + if isinstance(graph, MultiGraph): + return LoopbackMultiGraph(graph) + if isinstance(graph, DiGraph): + return LoopbackDiGraph(graph) + if isinstance(graph, Graph): + return LoopbackGraph(graph) + raise TypeError(f"Unsupported type of graph: {type(graph)}") + + +class LoopbackDispatcher: + def __getattr__(self, item): + try: + return nx.utils.backends._registered_algorithms[item].orig_func + except KeyError: + raise AttributeError(item) from None + + @staticmethod + def convert_from_nx( + graph, + *, + edge_attrs=None, + node_attrs=None, + preserve_edge_attrs=None, + preserve_node_attrs=None, + preserve_graph_attrs=None, + name=None, + graph_name=None, + ): + if name in { + # Raise if input graph changes + "lexicographical_topological_sort", + "topological_generations", + "topological_sort", + # Sensitive tests (iteration order matters) + "dfs_labeled_edges", + }: + return graph + if isinstance(graph, NodeView): + # Convert to a Graph with only nodes (no edges) + new_graph = Graph() + new_graph.add_nodes_from(graph.items()) + graph = new_graph + G = LoopbackGraph() + elif not isinstance(graph, Graph): + raise TypeError( + f"Bad type for graph argument {graph_name} in {name}: {type(graph)}" + ) + elif graph.__class__ in {Graph, LoopbackGraph}: + G = LoopbackGraph() + elif graph.__class__ in {DiGraph, LoopbackDiGraph}: + G = LoopbackDiGraph() + elif graph.__class__ in {MultiGraph, LoopbackMultiGraph}: + G = LoopbackMultiGraph() + elif graph.__class__ in {MultiDiGraph, LoopbackMultiDiGraph}: + G = LoopbackMultiDiGraph() + elif graph.__class__ in {PlanarEmbedding, LoopbackPlanarEmbedding}: + G = LoopbackDiGraph() # or LoopbackPlanarEmbedding + else: + # It would be nice to be able to convert _AntiGraph to a regular Graph + # nx.algorithms.approximation.kcomponents._AntiGraph + # nx.algorithms.tree.branchings.MultiDiGraph_EdgeKey + # nx.classes.tests.test_multidigraph.MultiDiGraphSubClass + # nx.classes.tests.test_multigraph.MultiGraphSubClass + G = graph.__class__() + + if preserve_graph_attrs: + G.graph.update(graph.graph) + + if preserve_node_attrs: + G.add_nodes_from(graph.nodes(data=True)) + elif node_attrs: + G.add_nodes_from( + ( + node, + { + k: datadict.get(k, default) + for k, default in node_attrs.items() + if default is not None or k in datadict + }, + ) + for node, datadict in graph.nodes(data=True) + ) + else: + G.add_nodes_from(graph) + + if graph.is_multigraph(): + if preserve_edge_attrs: + G.add_edges_from( + (u, v, key, datadict) + for u, nbrs in graph._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + elif edge_attrs: + G.add_edges_from( + ( + u, + v, + key, + { + k: datadict.get(k, default) + for k, default in edge_attrs.items() + if default is not None or k in datadict + }, + ) + for u, nbrs in 
graph._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + else: + G.add_edges_from( + (u, v, key, {}) + for u, nbrs in graph._adj.items() + for v, keydict in nbrs.items() + for key, datadict in keydict.items() + ) + elif preserve_edge_attrs: + G.add_edges_from(graph.edges(data=True)) + elif edge_attrs: + G.add_edges_from( + ( + u, + v, + { + k: datadict.get(k, default) + for k, default in edge_attrs.items() + if default is not None or k in datadict + }, + ) + for u, v, datadict in graph.edges(data=True) + ) + else: + G.add_edges_from(graph.edges) + return G + + @staticmethod + def convert_to_nx(obj, *, name=None): + return obj + + @staticmethod + def on_start_tests(items): + # Verify that items can be xfailed + for item in items: + assert hasattr(item, "add_marker") + + def can_run(self, name, args, kwargs): + # It is unnecessary to define this function if algorithms are fully supported. + # We include it for illustration purposes. + return hasattr(self, name) + + +dispatcher = LoopbackDispatcher() diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/historical_tests.py b/MLPY/Lib/site-packages/networkx/classes/tests/historical_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..68089ba203eecb16d887bfb7934b38cb5b971a46 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/historical_tests.py @@ -0,0 +1,474 @@ +"""Original NetworkX graph tests""" +import pytest + +import networkx as nx +from networkx import convert_node_labels_to_integers as cnlti +from networkx.utils import edges_equal, nodes_equal + + +class HistoricalTests: + @classmethod + def setup_class(cls): + cls.null = nx.null_graph() + cls.P1 = cnlti(nx.path_graph(1), first_label=1) + cls.P3 = cnlti(nx.path_graph(3), first_label=1) + cls.P10 = cnlti(nx.path_graph(10), first_label=1) + cls.K1 = cnlti(nx.complete_graph(1), first_label=1) + cls.K3 = cnlti(nx.complete_graph(3), first_label=1) + cls.K4 = cnlti(nx.complete_graph(4), first_label=1) + cls.K5 = cnlti(nx.complete_graph(5), first_label=1) + cls.K10 = cnlti(nx.complete_graph(10), first_label=1) + cls.G = nx.Graph + + def test_name(self): + G = self.G(name="test") + assert G.name == "test" + H = self.G() + assert H.name == "" + + # Nodes + + def test_add_remove_node(self): + G = self.G() + G.add_node("A") + assert G.has_node("A") + G.remove_node("A") + assert not G.has_node("A") + + def test_nonhashable_node(self): + # Test if a non-hashable object is in the Graph. A python dict will + # raise a TypeError, but for a Graph class a simple False should be + # returned (see Graph __contains__). If it cannot be a node then it is + # not a node. 
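+        # Illustrative sketch of that contract (commentary only, not part of
+        # the original assertions; assumes a fresh Graph):
+        #     ["A"] in {"A": 1}   # plain dict lookup raises TypeError (unhashable)
+        #     ["A"] in nx.Graph() # Graph.__contains__ returns False instead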
+        G = self.G()
+        assert not G.has_node(["A"])
+        assert not G.has_node({"A": 1})
+
+    def test_add_nodes_from(self):
+        G = self.G()
+        G.add_nodes_from(list("ABCDEFGHIJKL"))
+        assert G.has_node("L")
+        G.remove_nodes_from(["H", "I", "J", "K", "L"])
+        G.add_nodes_from([1, 2, 3, 4])
+        assert sorted(G.nodes(), key=str) == [
+            1,
+            2,
+            3,
+            4,
+            "A",
+            "B",
+            "C",
+            "D",
+            "E",
+            "F",
+            "G",
+        ]
+        # test __iter__
+        assert sorted(G, key=str) == [1, 2, 3, 4, "A", "B", "C", "D", "E", "F", "G"]
+
+    def test_contains(self):
+        G = self.G()
+        G.add_node("A")
+        assert "A" in G
+        assert [] not in G  # never raise a Key or TypeError in this test
+        assert {1: 1} not in G
+
+    def test_add_remove(self):
+        # Test add_node and remove_node for present, duplicate, and missing nodes
+        G = self.G()
+        G.add_node("m")
+        assert G.has_node("m")
+        G.add_node("m")  # no complaints
+        pytest.raises(nx.NetworkXError, G.remove_node, "j")
+        G.remove_node("m")
+        assert list(G) == []
+
+    def test_nbunch_is_list(self):
+        G = self.G()
+        G.add_nodes_from(list("ABCD"))
+        G.add_nodes_from(self.P3)  # add nbunch of nodes (nbunch=Graph)
+        assert sorted(G.nodes(), key=str) == [1, 2, 3, "A", "B", "C", "D"]
+        G.remove_nodes_from(self.P3)  # remove nbunch of nodes (nbunch=Graph)
+        assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D"]
+
+    def test_nbunch_is_set(self):
+        G = self.G()
+        nbunch = set("ABCDEFGHIJKL")
+        G.add_nodes_from(nbunch)
+        assert G.has_node("L")
+
+    def test_nbunch_dict(self):
+        # nbunch is a dict with nodes as keys
+        G = self.G()
+        nbunch = set("ABCDEFGHIJKL")
+        G.add_nodes_from(nbunch)
+        nbunch = {"I": "foo", "J": 2, "K": True, "L": "spam"}
+        G.remove_nodes_from(nbunch)
+        assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D", "E", "F", "G", "H"]
+
+    def test_nbunch_iterator(self):
+        G = self.G()
+        G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"])
+        n_iter = self.P3.nodes()
+        G.add_nodes_from(n_iter)
+        assert sorted(G.nodes(), key=str) == [
+            1,
+            2,
+            3,
+            "A",
+            "B",
+            "C",
+            "D",
+            "E",
+            "F",
+            "G",
+            "H",
+        ]
+        n_iter = self.P3.nodes()  # rebuild same iterator
+        G.remove_nodes_from(n_iter)  # remove nbunch of nodes (nbunch=iterator)
+        assert sorted(G.nodes(), key=str) == ["A", "B", "C", "D", "E", "F", "G", "H"]
+
+    def test_nbunch_graph(self):
+        G = self.G()
+        G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G", "H"])
+        nbunch = self.K3
+        G.add_nodes_from(nbunch)
+        assert sorted(G.nodes(), key=str) == [
+            1,
+            2,
+            3,
+            "A",
+            "B",
+            "C",
+            "D",
+            "E",
+            "F",
+            "G",
+            "H",
+        ]
+
+    # Edges
+
+    def test_add_edge(self):
+        G = self.G()
+        pytest.raises(TypeError, G.add_edge, "A")
+
+        G.add_edge("A", "B")  # testing add_edge()
+        G.add_edge("A", "B")  # adding a duplicate edge is silently ignored
+        assert G.has_edge("A", "B")
+        assert not G.has_edge("A", "C")
+        assert G.has_edge(*("A", "B"))
+        if G.is_directed():
+            assert not G.has_edge("B", "A")
+        else:
+            # G is undirected, so B->A is an edge
+            assert G.has_edge("B", "A")
+
+        G.add_edge("A", "C")  # test directedness
+        G.add_edge("C", "A")
+        G.remove_edge("C", "A")
+        if G.is_directed():
+            assert G.has_edge("A", "C")
+        else:
+            assert not G.has_edge("A", "C")
+        assert not G.has_edge("C", "A")
+
+    def test_self_loop(self):
+        G = self.G()
+        G.add_edge("A", "A")  # test self loops
+        assert G.has_edge("A", "A")
+        G.remove_edge("A", "A")
+        G.add_edge("X", "X")
+        assert G.has_node("X")
+        G.remove_node("X")
+        G.add_edge("A", "Z")  # should add the node silently
+        assert G.has_node("Z")
+
+    def test_add_edges_from(self):
+        G = self.G()
+        G.add_edges_from([("B", "C")])  # test add_edges_from()
+        assert G.has_edge("B",
"C") + if G.is_directed(): + assert not G.has_edge("C", "B") + else: + assert G.has_edge("C", "B") # undirected + + G.add_edges_from([("D", "F"), ("B", "D")]) + assert G.has_edge("D", "F") + assert G.has_edge("B", "D") + + if G.is_directed(): + assert not G.has_edge("D", "B") + else: + assert G.has_edge("D", "B") # undirected + + def test_add_edges_from2(self): + G = self.G() + # after failing silently, should add 2nd edge + G.add_edges_from([tuple("IJ"), list("KK"), tuple("JK")]) + assert G.has_edge(*("I", "J")) + assert G.has_edge(*("K", "K")) + assert G.has_edge(*("J", "K")) + if G.is_directed(): + assert not G.has_edge(*("K", "J")) + else: + assert G.has_edge(*("K", "J")) + + def test_add_edges_from3(self): + G = self.G() + G.add_edges_from(zip(list("ACD"), list("CDE"))) + assert G.has_edge("D", "E") + assert not G.has_edge("E", "C") + + def test_remove_edge(self): + G = self.G() + G.add_nodes_from([1, 2, 3, "A", "B", "C", "D", "E", "F", "G", "H"]) + + G.add_edges_from(zip(list("MNOP"), list("NOPM"))) + assert G.has_edge("O", "P") + assert G.has_edge("P", "M") + G.remove_node("P") # tests remove_node()'s handling of edges. + assert not G.has_edge("P", "M") + pytest.raises(TypeError, G.remove_edge, "M") + + G.add_edge("N", "M") + assert G.has_edge("M", "N") + G.remove_edge("M", "N") + assert not G.has_edge("M", "N") + + # self loop fails silently + G.remove_edges_from([list("HI"), list("DF"), tuple("KK"), tuple("JK")]) + assert not G.has_edge("H", "I") + assert not G.has_edge("J", "K") + G.remove_edges_from([list("IJ"), list("KK"), list("JK")]) + assert not G.has_edge("I", "J") + G.remove_nodes_from(set("ZEFHIMNO")) + G.add_edge("J", "K") + + def test_edges_nbunch(self): + # Test G.edges(nbunch) with various forms of nbunch + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + # node not in nbunch should be quietly ignored + pytest.raises(nx.NetworkXError, G.edges, 6) + assert list(G.edges("Z")) == [] # iterable non-node + # nbunch can be an empty list + assert list(G.edges([])) == [] + if G.is_directed(): + elist = [("A", "B"), ("A", "C"), ("B", "D")] + else: + elist = [("A", "B"), ("A", "C"), ("B", "C"), ("B", "D")] + # nbunch can be a list + assert edges_equal(list(G.edges(["A", "B"])), elist) + # nbunch can be a set + assert edges_equal(G.edges({"A", "B"}), elist) + # nbunch can be a graph + G1 = self.G() + G1.add_nodes_from("AB") + assert edges_equal(G.edges(G1), elist) + # nbunch can be a dict with nodes as keys + ndict = {"A": "thing1", "B": "thing2"} + assert edges_equal(G.edges(ndict), elist) + # nbunch can be a single node + assert edges_equal(list(G.edges("A")), [("A", "B"), ("A", "C")]) + assert nodes_equal(sorted(G), ["A", "B", "C", "D"]) + + # nbunch can be nothing (whole graph) + assert edges_equal( + list(G.edges()), + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")], + ) + + def test_degree(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.degree("A") == 2 + + # degree of single node in iterable container must return dict + assert list(G.degree(["A"])) == [("A", 2)] + assert sorted(d for n, d in G.degree(["A", "B"])) == [2, 3] + assert sorted(d for n, d in G.degree()) == [2, 2, 3, 3] + + def test_degree2(self): + H = self.G() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_degree_graph(self): + P3 = nx.path_graph(3) + P5 = nx.path_graph(5) + # silently ignore nodes not in P3 + assert dict(d 
for n, d in P3.degree(["A", "B"])) == {} + # nbunch can be a graph + assert sorted(d for n, d in P5.degree(P3)) == [1, 2, 2] + # nbunch can be a graph that's way too big + assert sorted(d for n, d in P3.degree(P5)) == [1, 1, 2] + assert list(P5.degree([])) == [] + assert dict(P5.degree([])) == {} + + def test_null(self): + null = nx.null_graph() + assert list(null.degree()) == [] + assert dict(null.degree()) == {} + + def test_order_size(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + assert G.order() == 4 + assert G.size() == 5 + assert G.number_of_edges() == 5 + assert G.number_of_edges("A", "B") == 1 + assert G.number_of_edges("A", "D") == 0 + + def test_copy(self): + G = self.G() + H = G.copy() # copy + assert H.adj == G.adj + assert H.name == G.name + assert H is not G + + def test_subgraph(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + SG = G.subgraph(["A", "B", "D"]) + assert nodes_equal(list(SG), ["A", "B", "D"]) + assert edges_equal(list(SG.edges()), [("A", "B"), ("B", "D")]) + + def test_to_directed(self): + G = self.G() + if not G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + DG = G.to_directed() + assert DG is not G # directed copy or copy + + assert DG.is_directed() + assert DG.name == G.name + assert DG.adj == G.adj + assert sorted(DG.out_edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "A"), + ("B", "C"), + ("B", "D"), + ] + DG.remove_edge("A", "B") + assert DG.has_edge("B", "A") # this removes B-A but not A-B + assert not DG.has_edge("A", "B") + + def test_to_undirected(self): + G = self.G() + if G.is_directed(): + G.add_edges_from( + [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + UG = G.to_undirected() # to_undirected + assert UG is not G + assert not UG.is_directed() + assert G.is_directed() + assert UG.name == G.name + assert UG.adj != G.adj + assert sorted(UG.edges(list("AB"))) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + assert sorted(UG.edges(["A", "B"])) == [ + ("A", "B"), + ("A", "C"), + ("B", "C"), + ("B", "D"), + ] + UG.remove_edge("A", "B") + assert not UG.has_edge("B", "A") + assert not UG.has_edge("A", "B") + + def test_neighbors(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G["A"]) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("A")) == ["B", "C"] + assert sorted(G.neighbors("G")) == [] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_iterators(self): + G = self.G() + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")]) + G.add_nodes_from("GJK") + assert sorted(G.nodes()) == ["A", "B", "C", "D", "G", "J", "K"] + assert edges_equal( + G.edges(), [("A", "B"), ("A", "C"), ("B", "D"), ("C", "B"), ("C", "D")] + ) + + assert sorted(v for k, v in G.degree()) == [0, 0, 0, 2, 2, 3, 3] + assert sorted(G.degree(), key=str) == [ + ("A", 2), + ("B", 3), + ("C", 3), + ("D", 2), + ("G", 0), + ("J", 0), + ("K", 0), + ] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "X") + G.clear() + assert nx.number_of_nodes(G) == 0 + assert nx.number_of_edges(G) == 0 + + def test_null_subgraph(self): + # Subgraph of a null graph is a null graph + nullgraph = nx.null_graph() + G = nx.null_graph() + H = G.subgraph([]) + assert 
nx.is_isomorphic(H, nullgraph) + + def test_empty_subgraph(self): + # Subgraph of an empty graph is an empty graph. test 1 + nullgraph = nx.null_graph() + E5 = nx.empty_graph(5) + E10 = nx.empty_graph(10) + H = E10.subgraph([]) + assert nx.is_isomorphic(H, nullgraph) + H = E10.subgraph([1, 2, 3, 4, 5]) + assert nx.is_isomorphic(H, E5) + + def test_complete_subgraph(self): + # Subgraph of a complete graph is a complete graph + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + H = K5.subgraph([1, 2, 3]) + assert nx.is_isomorphic(H, K3) + + def test_subgraph_nbunch(self): + nullgraph = nx.null_graph() + K1 = nx.complete_graph(1) + K3 = nx.complete_graph(3) + K5 = nx.complete_graph(5) + # Test G.subgraph(nbunch), where nbunch is a single node + H = K5.subgraph(1) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is a set + H = K5.subgraph({1}) + assert nx.is_isomorphic(H, K1) + # Test G.subgraph(nbunch), where nbunch is an iterator + H = K5.subgraph(iter(K3)) + assert nx.is_isomorphic(H, K3) + # Test G.subgraph(nbunch), where nbunch is another graph + H = K5.subgraph(K3) + assert nx.is_isomorphic(H, K3) + H = K5.subgraph([9]) + assert nx.is_isomorphic(H, nullgraph) + + def test_node_tuple_issue(self): + H = self.G() + # Test error handling of tuple as a node + pytest.raises(nx.NetworkXError, H.remove_node, (1, 2)) + H.remove_nodes_from([(1, 2)]) # no error + pytest.raises(nx.NetworkXError, H.neighbors, (1, 2)) diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_backends.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_backends.py new file mode 100644 index 0000000000000000000000000000000000000000..2a3f72a8737bd2a4c3f823ee5c4860ca0535b651 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_backends.py @@ -0,0 +1,76 @@ +import pickle + +import pytest + +import networkx as nx + +sp = pytest.importorskip("scipy") +pytest.importorskip("numpy") + + +def test_dispatch_kwds_vs_args(): + G = nx.path_graph(4) + nx.pagerank(G) + nx.pagerank(G=G) + with pytest.raises(TypeError): + nx.pagerank() + + +def test_pickle(): + for name, func in nx.utils.backends._registered_algorithms.items(): + assert pickle.loads(pickle.dumps(func)) is func + assert pickle.loads(pickle.dumps(nx.inverse_line_graph)) is nx.inverse_line_graph + + +@pytest.mark.skipif( + "not nx._dispatch._automatic_backends " + "or nx._dispatch._automatic_backends[0] != 'nx-loopback'" +) +def test_graph_converter_needs_backend(): + # When testing, `nx.from_scipy_sparse_array` will *always* call the backend + # implementation if it's implemented. If `backend=` isn't given, then the result + # will be converted back to NetworkX via `convert_to_nx`. + # If not testing, then calling `nx.from_scipy_sparse_array` w/o `backend=` will + # always call the original version. `backend=` is *required* to call the backend. 
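+    # Hedged summary of the two call styles asserted below (under the
+    # nx-loopback test backend described above):
+    #     nx.from_scipy_sparse_array(A)                         # backend runs, result converted back to nx.Graph
+    #     nx.from_scipy_sparse_array(A, backend="nx-loopback")  # result stays a LoopbackGraph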
+ from networkx.classes.tests.dispatch_interface import ( + LoopbackDispatcher, + LoopbackGraph, + ) + + A = sp.sparse.coo_array([[0, 3, 2], [3, 0, 1], [2, 1, 0]]) + + side_effects = [] + + def from_scipy_sparse_array(self, *args, **kwargs): + side_effects.append(1) # Just to prove this was called + return self.convert_from_nx( + self.__getattr__("from_scipy_sparse_array")(*args, **kwargs), + preserve_edge_attrs=None, + preserve_node_attrs=None, + preserve_graph_attrs=None, + ) + + @staticmethod + def convert_to_nx(obj, *, name=None): + if type(obj) is nx.Graph: + return obj + return nx.Graph(obj) + + # *This mutates LoopbackDispatcher!* + orig_convert_to_nx = LoopbackDispatcher.convert_to_nx + LoopbackDispatcher.convert_to_nx = convert_to_nx + LoopbackDispatcher.from_scipy_sparse_array = from_scipy_sparse_array + + try: + assert side_effects == [] + assert type(nx.from_scipy_sparse_array(A)) is nx.Graph + assert side_effects == [1] + assert ( + type(nx.from_scipy_sparse_array(A, backend="nx-loopback")) is LoopbackGraph + ) + assert side_effects == [1, 1] + finally: + LoopbackDispatcher.convert_to_nx = staticmethod(orig_convert_to_nx) + del LoopbackDispatcher.from_scipy_sparse_array + with pytest.raises(ImportError, match="Unable to load"): + nx.from_scipy_sparse_array(A, backend="bad-backend-name") diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_coreviews.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_coreviews.py new file mode 100644 index 0000000000000000000000000000000000000000..24de7f2f1115b864682b261daa256eff0deef696 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_coreviews.py @@ -0,0 +1,362 @@ +import pickle + +import pytest + +import networkx as nx + + +class TestAtlasView: + # node->data + def setup_method(self): + self.d = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.av = nx.classes.coreviews.AtlasView(self.d) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + pview = pickle.loads(pickle.dumps(view)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.d) + + def test_iter(self): + assert list(self.av) == list(self.d) + + def test_getitem(self): + assert self.av[1] is self.d[1] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av.__getitem__, 3) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy == self.av + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + assert sorted(self.av.items()) == sorted(self.d.items()) + + def test_str(self): + out = str(self.d) + assert str(self.av) == out + + def test_repr(self): + out = "AtlasView(" + str(self.d) + ")" + assert repr(self.av) == out + + +class TestAdjacencyView: + # node->nbr->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.adj = {3: self.nd, 0: {3: dd}, 1: {}, 2: {3: {"color": 1}}} + self.adjview = nx.classes.coreviews.AdjacencyView(self.adj) + + def test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == 
pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.adj) + + def test_iter(self): + assert list(self.adjview) == list(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_items(self): + view_items = sorted((n, dict(d)) for n, d in self.adjview.items()) + assert view_items == sorted(self.adj.items()) + + def test_str(self): + out = str(dict(self.adj)) + assert str(self.adjview) == out + + def test_repr(self): + out = self.adjview.__class__.__name__ + "(" + str(self.adj) + ")" + assert repr(self.adjview) == out + + +class TestMultiAdjacencyView(TestAdjacencyView): + # node->nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {0: dd, 1: {}, 2: {"color": 1}} + self.nd = {3: self.kd, 0: {3: dd}, 1: {0: {}}, 2: {3: {"color": 1}}} + self.adj = {3: self.nd, 0: {3: {3: dd}}, 1: {}, 2: {3: {8: {}}}} + self.adjview = nx.classes.coreviews.MultiAdjacencyView(self.adj) + + def test_getitem(self): + assert self.adjview[1] is not self.adj[1] + assert self.adjview[3][0][3] is self.adjview[0][3][3] + assert self.adjview[3][2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + +class TestUnionAtlas: + # node->data + def setup_method(self): + self.s = {0: {"color": "blue", "weight": 1.2}, 1: {}, 2: {"color": 1}} + self.p = {3: {"color": "blue", "weight": 1.2}, 4: {}, 2: {"watch": 2}} + self.av = nx.classes.coreviews.UnionAtlas(self.s, self.p) + + def test_pickle(self): + view = self.av + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.av) == len(self.s.keys() | self.p.keys()) == 5 + + def test_iter(self): + assert set(self.av) == set(self.s) | set(self.p) + + def test_getitem(self): + assert self.av[0] is self.s[0] + assert self.av[4] is self.p[4] + assert self.av[2]["color"] == 1 + pytest.raises(KeyError, self.av[2].__getitem__, "watch") + pytest.raises(KeyError, self.av.__getitem__, 8) + + def test_copy(self): + avcopy = self.av.copy() + assert avcopy[0] == self.av[0] + assert avcopy[0] is not self.av[0] + assert avcopy is not self.av + avcopy[5] = {} + assert avcopy != self.av + + avcopy[0]["ht"] = 4 + assert avcopy[0] != self.av[0] + self.av[0]["ht"] = 4 + assert avcopy[0] == self.av[0] + del self.av[0]["ht"] + + assert not hasattr(self.av, "__setitem__") + + def test_items(self): + expected = dict(self.p.items()) + expected.update(self.s) + assert sorted(self.av.items()) == sorted(expected.items()) + + def test_str(self): + out = str(dict(self.av)) + assert str(self.av) == out + + def test_repr(self): + out = 
f"{self.av.__class__.__name__}({self.s}, {self.p})" + assert repr(self.av) == out + + +class TestUnionAdjacency: + # node->nbr->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.nd = {0: dd, 1: {}, 2: {"color": 1}} + self.s = {3: self.nd, 0: {}, 1: {}, 2: {3: {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {0: {}}, 2: {1: {"color": 1}}} + self.adjview = nx.classes.coreviews.UnionAdjacency(self.s, self.p) + + def test_pickle(self): + view = self.adjview + pview = pickle.loads(pickle.dumps(view, -1)) + assert view == pview + assert view.__slots__ == pview.__slots__ + + def test_len(self): + assert len(self.adjview) == len(self.s) + + def test_iter(self): + assert sorted(self.adjview) == sorted(self.s) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0] is self.adjview[0][3] + assert self.adjview[2][3]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][3]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + + def test_str(self): + out = str(dict(self.adjview)) + assert str(self.adjview) == out + + def test_repr(self): + clsname = self.adjview.__class__.__name__ + out = f"{clsname}({self.s}, {self.p})" + assert repr(self.adjview) == out + + +class TestUnionMultiInner(TestUnionAdjacency): + # nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, "ekey": {}, 9: {"color": 1}} + self.s = {3: self.kd, 0: {7: dd}, 1: {}, 2: {"key": {"color": 1}}} + self.p = {3: {}, 0: {3: dd}, 1: {}, 2: {1: {"span": 2}}} + self.adjview = nx.classes.coreviews.UnionMultiInner(self.s, self.p) + + def test_len(self): + assert len(self.adjview) == len(self.s.keys() | self.p.keys()) == 4 + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[0][7] is self.adjview[0][3] + assert self.adjview[2]["key"]["color"] == 1 + assert self.adjview[2][1]["span"] == 2 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + pytest.raises(KeyError, self.adjview[1].__getitem__, "key") + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not self.adjview[0] + + avcopy[2][1]["width"] = 8 + assert avcopy[2] != self.adjview[2] + self.adjview[2][1]["width"] = 8 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][1]["width"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestUnionMultiAdjacency(TestUnionAdjacency): + # node->nbr->key->data + def setup_method(self): + dd = {"color": "blue", "weight": 1.2} + self.kd = {7: {}, 8: {}, 9: {"color": 1}} + self.nd = {3: self.kd, 0: {9: dd}, 1: {8: {}}, 2: {9: {"color": 1}}} + self.s = {3: self.nd, 0: {3: {7: dd}}, 1: {}, 2: {3: {8: {}}}} + self.p = {3: {}, 0: {3: {9: dd}}, 1: {}, 2: {1: {8: {}}}} + self.adjview = nx.classes.coreviews.UnionMultiAdjacency(self.s, self.p) + + def test_getitem(self): + assert self.adjview[1] is not self.s[1] + assert self.adjview[3][0][9] is self.adjview[0][3][9] + assert self.adjview[3][2][9]["color"] == 1 + pytest.raises(KeyError, self.adjview.__getitem__, 4) + + def test_copy(self): + avcopy = self.adjview.copy() + assert avcopy[0] == self.adjview[0] + assert avcopy[0] is not 
self.adjview[0] + + avcopy[2][3][8]["ht"] = 4 + assert avcopy[2] != self.adjview[2] + self.adjview[2][3][8]["ht"] = 4 + assert avcopy[2] == self.adjview[2] + del self.adjview[2][3][8]["ht"] + + assert not hasattr(self.adjview, "__setitem__") + assert hasattr(avcopy, "__setitem__") + + +class TestFilteredGraphs: + def setup_method(self): + self.Graphs = [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] + + def test_hide_show_nodes(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + assert SG.nodes == RG.nodes + assert SG.edges == RG.edges + SGC = SG.copy() + RGC = RG.copy() + assert SGC.nodes == RGC.nodes + assert SGC.edges == RGC.edges + + def test_str_repr(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + str(SG.adj) + str(RG.adj) + repr(SG.adj) + repr(RG.adj) + str(SG.adj[2]) + str(RG.adj[2]) + repr(SG.adj[2]) + repr(RG.adj[2]) + + def test_copy(self): + SubGraph = nx.subgraph_view + for Graph in self.Graphs: + G = nx.path_graph(4, Graph) + SG = G.subgraph([2, 3]) + RG = SubGraph(G, filter_node=nx.filters.hide_nodes([0, 1])) + RsG = SubGraph(G, filter_node=nx.filters.show_nodes([2, 3])) + assert G.adj.copy() == G.adj + assert G.adj[2].copy() == G.adj[2] + assert SG.adj.copy() == SG.adj + assert SG.adj[2].copy() == SG.adj[2] + assert RG.adj.copy() == RG.adj + assert RG.adj[2].copy() == RG.adj[2] + assert RsG.adj.copy() == RsG.adj + assert RsG.adj[2].copy() == RsG.adj[2] diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph.py new file mode 100644 index 0000000000000000000000000000000000000000..b9972f9a5f1ab101b9f6f2f9a1584ddafccd2ff3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph.py @@ -0,0 +1,331 @@ +import pytest + +import networkx as nx +from networkx.utils import nodes_equal + +from .test_graph import BaseAttrGraphTester, BaseGraphTester +from .test_graph import TestEdgeSubgraph as _TestGraphEdgeSubgraph +from .test_graph import TestGraph as _TestGraph + + +class BaseDiGraphTester(BaseGraphTester): + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.successors(-1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.predecessors(-1) + + def test_edges(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + assert sorted(G.edges([0, 1])) == [(0, 1), (0, 2), (1, 0), (1, 2)] + with pytest.raises(nx.NetworkXError): + G.edges(-1) + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + with pytest.raises(nx.NetworkXError): + G.out_edges(-1) + + def test_out_edges_dir(self): + G = self.P3 + assert sorted(G.out_edges()) == [(0, 1), (1, 2)] + assert sorted(G.out_edges(0)) == [(0, 1)] + assert 
sorted(G.out_edges(2)) == [] + + def test_out_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.out_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.out_edges(0, data=True)) == [(0, 1, {"data": 0})] + assert sorted(G.out_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.out_edges(0, data="data")) == [(0, 1, 0)] + + def test_in_edges_dir(self): + G = self.P3 + assert sorted(G.in_edges()) == [(0, 1), (1, 2)] + assert sorted(G.in_edges(0)) == [] + assert sorted(G.in_edges(2)) == [(1, 2)] + + def test_in_edges_data(self): + G = nx.DiGraph([(0, 1, {"data": 0}), (1, 0, {})]) + assert sorted(G.in_edges(data=True)) == [(0, 1, {"data": 0}), (1, 0, {})] + assert sorted(G.in_edges(1, data=True)) == [(0, 1, {"data": 0})] + assert sorted(G.in_edges(data="data")) == [(0, 1, 0), (1, 0, None)] + assert sorted(G.in_edges(1, data="data")) == [(0, 1, 0)] + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] # run through iterator + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] # run through iterator + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (2, 1)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(2, 1)] + assert sorted(G.edges()) == [(0, 1), (1, 2)] + + def test_reverse_nocopy(self): + G = nx.DiGraph([(0, 1), (1, 2)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (2, 1)] + with pytest.raises(nx.NetworkXError): + R.remove_edge(1, 0) + + def test_reverse_hashable(self): + class Foo: + pass + + x = Foo() + y = Foo() + G = nx.DiGraph() + G.add_edge(x, y) + assert nodes_equal(G.nodes(), G.reverse().nodes()) + assert [(y, x)] == list(G.reverse().edges()) + + def test_di_cache_reset(self): + G = self.K3.copy() + old_succ = G.succ + assert id(G.succ) == id(old_succ) + old_adj = G.adj + assert id(G.adj) == id(old_adj) + + G._succ = {} + assert id(G.succ) != id(old_succ) + assert id(G.adj) != id(old_adj) + + old_pred = G.pred + assert id(G.pred) == id(old_pred) + G._pred = {} + assert id(G.pred) != id(old_pred) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester): + def test_edges_data(self): + G = self.K3 + all_edges = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, 
{}), + (2, 1, {}), + ] + assert sorted(G.edges(data=True)) == all_edges + assert sorted(G.edges(0, data=True)) == all_edges[:2] + assert sorted(G.edges([0, 1], data=True)) == all_edges[:4] + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_in_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.in_degree(weight="weight")) == [(0, 2), (1, 1.3), (2, 2)] + assert dict(G.in_degree(weight="weight")) == {0: 2, 1: 1.3, 2: 2} + assert G.in_degree(1, weight="weight") == 1.3 + assert sorted(G.in_degree(weight="other")) == [(0, 2), (1, 2.2), (2, 2)] + assert dict(G.in_degree(weight="other")) == {0: 2, 1: 2.2, 2: 2} + assert G.in_degree(1, weight="other") == 2.2 + assert list(G.in_degree(iter([1]), weight="other")) == [(1, 2.2)] + + def test_out_degree_weighted(self): + G = self.K3.copy() + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.out_degree(weight="weight")) == [(0, 1.3), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="weight")) == {0: 1.3, 1: 2, 2: 2} + assert G.out_degree(0, weight="weight") == 1.3 + assert sorted(G.out_degree(weight="other")) == [(0, 2.2), (1, 2), (2, 2)] + assert dict(G.out_degree(weight="other")) == {0: 2.2, 1: 2, 2: 2} + assert G.out_degree(0, weight="other") == 2.2 + assert list(G.out_degree(iter([0]), weight="other")) == [(0, 2.2)] + + +class TestDiGraph(BaseAttrDiGraphTester, _TestGraph): + """Tests specific to dict-of-dict-of-dict digraph data structure""" + + def setup_method(self): + self.Graph = nx.DiGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = ({}, {}) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + # P3._adj is synced with P3._succ + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.succ.items()) == [(1, {2: {}}), (2, {1: {}})] + assert sorted(G.pred.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {}} + assert G.succ == {0: {1: {}}, 1: {}} + assert G.pred == {0: {}, 1: {0: {}}} + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edge(None, 3) + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"data": 3})], data=2) + assert G.adj == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.succ == {0: {1: {"data": 2}, 2: {"data": 3}}, 1: {}, 2: {}} + assert G.pred == {0: {}, 1: {0: {"data": 2}}, 2: {0: {"data": 3}}} + + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple 
+ with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edges_from([(None, 3), (3, 2)]) + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.succ == {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}} + assert G.pred == {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3 + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.succ == {} + assert G.pred == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3 + G.graph["name"] = "K3" + nodes = list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + expected = {0: {}, 1: {}, 2: {}} + assert G.succ == expected + assert G.pred == expected + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + +class TestEdgeSubgraph(_TestGraphEdgeSubgraph): + """Unit tests for the :meth:`DiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. + G = nx.DiGraph(nx.path_graph(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_pred_succ(self): + """Test that nodes are added to predecessors and successors. + + For more information, see GitHub issue #2370. 
+ + """ + G = nx.DiGraph() + G.add_edge(0, 1) + H = G.edge_subgraph([(0, 1)]) + assert list(H.predecessors(0)) == [] + assert list(H.successors(0)) == [1] + assert list(H.predecessors(1)) == [0] + assert list(H.successors(1)) == [] diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph_historical.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph_historical.py new file mode 100644 index 0000000000000000000000000000000000000000..665fc267f5fb253fbc875604a251a9f93e0d3836 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_digraph_historical.py @@ -0,0 +1,110 @@ +"""Original NetworkX graph tests""" +import pytest + +import networkx +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestDiGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.DiGraph + + def test_in_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(d for n, d in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.in_degree()) == { + "A": 0, + "C": 2, + "B": 1, + "D": 2, + "G": 0, + "K": 0, + "J": 0, + } + + def test_out_degree(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(v for k, v in G.in_degree()) == [0, 0, 0, 0, 1, 2, 2] + assert dict(G.out_degree()) == { + "A": 2, + "C": 1, + "B": 2, + "D": 0, + "G": 0, + "K": 0, + "J": 0, + } + + def test_degree_digraph(self): + H = nx.DiGraph() + H.add_edges_from([(1, 24), (1, 2)]) + assert sorted(d for n, d in H.in_degree([1, 24])) == [0, 1] + assert sorted(d for n, d in H.out_degree([1, 24])) == [0, 2] + assert sorted(d for n, d in H.degree([1, 24])) == [1, 2] + + def test_neighbors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + + assert sorted(G.neighbors("C")) == ["D"] + assert sorted(G["C"]) == ["D"] + assert sorted(G.neighbors("A")) == ["B", "C"] + pytest.raises(nx.NetworkXError, G.neighbors, "j") + pytest.raises(nx.NetworkXError, G.neighbors, "j") + + def test_successors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("A")) == ["B", "C"] + assert sorted(G.successors("G")) == [] + assert sorted(G.successors("D")) == [] + assert sorted(G.successors("G")) == [] + pytest.raises(nx.NetworkXError, G.successors, "j") + pytest.raises(nx.NetworkXError, G.successors, "j") + + def test_predecessors(self): + G = self.G() + G.add_nodes_from("GJK") + G.add_edges_from([("A", "B"), ("A", "C"), ("B", "D"), ("B", "C"), ("C", "D")]) + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("C")) == ["A", "B"] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.predecessors("G")) == [] + assert sorted(G.predecessors("A")) == [] + assert sorted(G.successors("D")) == [] + + pytest.raises(nx.NetworkXError, G.predecessors, "j") + pytest.raises(nx.NetworkXError, G.predecessors, "j") + + def test_reverse(self): + G = nx.complete_graph(10) + H = G.to_directed() + HR = H.reverse() + assert nx.is_isomorphic(H, HR) + assert sorted(H.edges()) == sorted(HR.edges()) + + def test_reverse2(self): + H = nx.DiGraph() + foo = [H.add_edge(u, u + 1) for u in 
range(5)] + HR = H.reverse() + for u in range(5): + assert HR.has_edge(u + 1, u) + + def test_reverse3(self): + H = nx.DiGraph() + H.add_nodes_from([1, 2, 3, 4]) + HR = H.reverse() + assert sorted(HR.nodes()) == [1, 2, 3, 4] diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_filters.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_filters.py new file mode 100644 index 0000000000000000000000000000000000000000..2da59117cad0d72d5830b53c8d19c6e0ca988d54 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_filters.py @@ -0,0 +1,177 @@ +import pytest + +import networkx as nx + + +class TestFilterFactory: + def test_no_filter(self): + nf = nx.filters.no_filter + assert nf() + assert nf(1) + assert nf(2, 1) + + def test_hide_nodes(self): + f = nx.classes.filters.hide_nodes([1, 2, 3]) + assert not f(1) + assert not f(2) + assert not f(3) + assert f(4) + assert f(0) + assert f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_show_nodes(self): + f = nx.classes.filters.show_nodes([1, 2, 3]) + assert f(1) + assert f(2) + assert f(3) + assert not f(4) + assert not f(0) + assert not f("a") + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f) + + def test_hide_edges(self): + factory = nx.classes.filters.hide_edges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert not f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_edges(self): + factory = nx.classes.filters.show_edges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert f(4, 3) + assert not f(2, 3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_diedges(self): + factory = nx.classes.filters.hide_diedges + f = factory([(1, 2), (3, 4)]) + assert not f(1, 2) + assert not f(3, 4) + assert f(4, 3) + assert f(2, 3) + assert f(0, -1) + assert f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_show_diedges(self): + factory = nx.classes.filters.show_diedges + f = factory([(1, 2), (3, 4)]) + assert f(1, 2) + assert f(3, 4) + assert not f(4, 3) + assert not f(2, 3) + assert not f(0, -1) + assert not f("a", "b") + pytest.raises(TypeError, f, 1, 2, 3) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2, 3)]) + + def test_hide_multiedges(self): + factory = nx.classes.filters.hide_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert not f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, 
factory, [(1, 2, 3, 4)]) + + def test_show_multiedges(self): + factory = nx.classes.filters.show_multiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_hide_multidiedges(self): + factory = nx.classes.filters.hide_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert not f(1, 2, 0) + assert not f(1, 2, 1) + assert f(1, 2, 2) + assert f(3, 4, 0) + assert not f(3, 4, 1) + assert f(4, 3, 1) + assert f(4, 3, 0) + assert f(2, 3, 0) + assert f(0, -1, 0) + assert f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) + + def test_show_multidiedges(self): + factory = nx.classes.filters.show_multidiedges + f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)]) + assert f(1, 2, 0) + assert f(1, 2, 1) + assert not f(1, 2, 2) + assert not f(3, 4, 0) + assert f(3, 4, 1) + assert not f(4, 3, 1) + assert not f(4, 3, 0) + assert not f(2, 3, 0) + assert not f(0, -1, 0) + assert not f("a", "b", 0) + pytest.raises(TypeError, f, 1, 2, 3, 4) + pytest.raises(TypeError, f, 1, 2) + pytest.raises(TypeError, f, 1) + pytest.raises(TypeError, f) + pytest.raises(TypeError, factory, [1, 2, 3]) + pytest.raises(ValueError, factory, [(1, 2)]) + pytest.raises(ValueError, factory, [(1, 2, 3, 4)]) diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_function.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_function.py new file mode 100644 index 0000000000000000000000000000000000000000..61b73c2d256064ba4c34f9a9d95160bd272c8e3e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_function.py @@ -0,0 +1,782 @@ +import random + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + + +class TestFunction: + def setup_method(self): + self.G = nx.Graph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}, name="Test") + self.Gdegree = {0: 3, 1: 2, 2: 2, 3: 1, 4: 0} + self.Gnodes = list(range(5)) + self.Gedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + self.DG = nx.DiGraph({0: [1, 2, 3], 1: [1, 2, 0], 4: []}) + self.DGin_degree = {0: 1, 1: 2, 2: 2, 3: 1, 4: 0} + self.DGout_degree = {0: 3, 1: 3, 2: 0, 3: 0, 4: 0} + self.DGnodes = list(range(5)) + self.DGedges = [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)] + + def test_nodes(self): + assert nodes_equal(self.G.nodes(), list(nx.nodes(self.G))) + assert nodes_equal(self.DG.nodes(), list(nx.nodes(self.DG))) + + def test_edges(self): + assert edges_equal(self.G.edges(), list(nx.edges(self.G))) + assert sorted(self.DG.edges()) == sorted(nx.edges(self.DG)) + assert edges_equal( + self.G.edges(nbunch=[0, 1, 3]), list(nx.edges(self.G, nbunch=[0, 1, 3])) + ) + assert sorted(self.DG.edges(nbunch=[0, 1, 3])) == sorted( + nx.edges(self.DG, nbunch=[0, 1, 3]) + ) + + def test_degree(self): + assert edges_equal(self.G.degree(), list(nx.degree(self.G))) 
+ assert sorted(self.DG.degree()) == sorted(nx.degree(self.DG)) + assert edges_equal( + self.G.degree(nbunch=[0, 1]), list(nx.degree(self.G, nbunch=[0, 1])) + ) + assert sorted(self.DG.degree(nbunch=[0, 1])) == sorted( + nx.degree(self.DG, nbunch=[0, 1]) + ) + assert edges_equal( + self.G.degree(weight="weight"), list(nx.degree(self.G, weight="weight")) + ) + assert sorted(self.DG.degree(weight="weight")) == sorted( + nx.degree(self.DG, weight="weight") + ) + + def test_neighbors(self): + assert list(self.G.neighbors(1)) == list(nx.neighbors(self.G, 1)) + assert list(self.DG.neighbors(1)) == list(nx.neighbors(self.DG, 1)) + + def test_number_of_nodes(self): + assert self.G.number_of_nodes() == nx.number_of_nodes(self.G) + assert self.DG.number_of_nodes() == nx.number_of_nodes(self.DG) + + def test_number_of_edges(self): + assert self.G.number_of_edges() == nx.number_of_edges(self.G) + assert self.DG.number_of_edges() == nx.number_of_edges(self.DG) + + def test_is_directed(self): + assert self.G.is_directed() == nx.is_directed(self.G) + assert self.DG.is_directed() == nx.is_directed(self.DG) + + def test_add_star(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_star(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (12, 14), (12, 15)]) + + G = self.G.copy() + nx.add_star(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (12, 14, {"weight": 2.0}), + (12, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = [12] + nx.add_star(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_star(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_add_path(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), [(12, 13), (13, 14), (14, 15)]) + G = self.G.copy() + nx.add_path(G, nlist, weight=2.0) + assert edges_equal( + G.edges(nlist, data=True), + [ + (12, 13, {"weight": 2.0}), + (13, 14, {"weight": 2.0}), + (14, 15, {"weight": 2.0}), + ], + ) + + G = self.G.copy() + nlist = ["node"] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = iter(["node"]) + nx.add_path(G, nlist) + assert edges_equal(G.edges(["node"]), []) + assert nodes_equal(G, list(self.G) + ["node"]) + + G = self.G.copy() + nlist = [12] + nx.add_path(G, nlist) + assert edges_equal(G.edges(nlist), []) + assert nodes_equal(G, list(self.G) + [12]) + + G = self.G.copy() + nlist = iter([12]) + nx.add_path(G, nlist) + assert edges_equal(G.edges([12]), []) + assert nodes_equal(G, list(self.G) + [12]) + + G = self.G.copy() + nlist = [] + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + G = self.G.copy() + nlist = iter([]) + nx.add_path(G, nlist) + assert edges_equal(G.edges, self.G.edges) + assert nodes_equal(G, list(self.G)) + + def test_add_cycle(self): + G = self.G.copy() + nlist = [12, 13, 14, 15] + oklists = [ + [(12, 13), (12, 15), (13, 14), (14, 15)], + [(12, 13), (13, 14), (14, 15), (15, 12)], + ] + nx.add_cycle(G, nlist) + assert sorted(G.edges(nlist)) in oklists + G = self.G.copy() + oklists = [ + [ + (12, 13, {"weight": 1.0}), + (12, 15, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + ], + [ + (12, 13, {"weight": 1.0}), + (13, 14, {"weight": 1.0}), + (14, 15, {"weight": 1.0}), + (15, 12, {"weight": 1.0}), + 
], + ] + nx.add_cycle(G, nlist, weight=1.0) + assert sorted(G.edges(nlist, data=True)) in oklists + + G = self.G.copy() + nlist = [12] + nx.add_cycle(G, nlist) + assert nodes_equal(G, list(self.G) + nlist) + + G = self.G.copy() + nlist = [] + nx.add_cycle(G, nlist) + assert nodes_equal(G.nodes, self.Gnodes) + assert edges_equal(G.edges, self.G.edges) + + def test_subgraph(self): + assert ( + self.G.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj == nx.subgraph(self.DG, [0, 1, 2, 4]).adj + ) + assert ( + self.G.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.G, [0, 1, 2, 4]).adj + ) + assert ( + self.DG.subgraph([0, 1, 2, 4]).adj + == nx.induced_subgraph(self.DG, [0, 1, 2, 4]).adj + ) + # subgraph-subgraph chain is allowed in function interface + H = nx.induced_subgraph(self.G.subgraph([0, 1, 2, 4]), [0, 1, 4]) + assert H._graph is not self.G + assert H.adj == self.G.subgraph([0, 1, 4]).adj + + def test_edge_subgraph(self): + assert ( + self.G.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.G, [(1, 2), (0, 3)]).adj + ) + assert ( + self.DG.edge_subgraph([(1, 2), (0, 3)]).adj + == nx.edge_subgraph(self.DG, [(1, 2), (0, 3)]).adj + ) + + def test_create_empty_copy(self): + G = nx.create_empty_copy(self.G, with_data=False) + assert nodes_equal(G, list(self.G)) + assert G.graph == {} + assert G._node == {}.fromkeys(self.G.nodes(), {}) + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + G = nx.create_empty_copy(self.G) + assert nodes_equal(G, list(self.G)) + assert G.graph == self.G.graph + assert G._node == self.G._node + assert G._adj == {}.fromkeys(self.G.nodes(), {}) + + def test_degree_histogram(self): + assert nx.degree_histogram(self.G) == [1, 1, 1, 1, 1] + + def test_density(self): + assert nx.density(self.G) == 0.5 + assert nx.density(self.DG) == 0.3 + G = nx.Graph() + G.add_node(1) + assert nx.density(G) == 0.0 + + def test_density_selfloop(self): + G = nx.Graph() + G.add_edge(1, 1) + assert nx.density(G) == 0.0 + G.add_edge(1, 2) + assert nx.density(G) == 2.0 + + def test_freeze(self): + G = nx.freeze(self.G) + assert G.frozen + pytest.raises(nx.NetworkXError, G.add_node, 1) + pytest.raises(nx.NetworkXError, G.add_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.remove_node, 1) + pytest.raises(nx.NetworkXError, G.remove_nodes_from, [1]) + pytest.raises(nx.NetworkXError, G.add_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.add_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.remove_edge, 1, 2) + pytest.raises(nx.NetworkXError, G.remove_edges_from, [(1, 2)]) + pytest.raises(nx.NetworkXError, G.clear_edges) + pytest.raises(nx.NetworkXError, G.clear) + + def test_is_frozen(self): + assert not nx.is_frozen(self.G) + G = nx.freeze(self.G) + assert G.frozen == nx.is_frozen(self.G) + assert G.frozen + + def test_node_attributes_are_still_mutable_on_frozen_graph(self): + G = nx.freeze(nx.path_graph(3)) + node = G.nodes[0] + node["node_attribute"] = True + assert node["node_attribute"] == True + + def test_edge_attributes_are_still_mutable_on_frozen_graph(self): + G = nx.freeze(nx.path_graph(3)) + edge = G.edges[(0, 1)] + edge["edge_attribute"] = True + assert edge["edge_attribute"] == True + + def test_neighbors_complete_graph(self): + graph = nx.complete_graph(100) + pop = random.sample(list(graph), 1) + nbors = list(nx.neighbors(graph, pop[0])) + # should be all the other vertices in the graph + assert len(nbors) == len(graph) - 1 + + graph = nx.path_graph(100) + node = 
random.sample(list(graph), 1)[0] + nbors = list(nx.neighbors(graph, node)) + # 2 neighbors for interior path nodes, 1 for the endpoints + if node != 0 and node != 99: + assert len(nbors) == 2 + else: + assert len(nbors) == 1 + + # create a star graph with 99 outer nodes + graph = nx.star_graph(99) + nbors = list(nx.neighbors(graph, 0)) + assert len(nbors) == 99 + + def test_non_neighbors(self): + graph = nx.complete_graph(100) + pop = random.sample(list(graph), 1) + nbors = list(nx.non_neighbors(graph, pop[0])) + # a complete graph has no non-neighbors + assert len(nbors) == 0 + + graph = nx.path_graph(100) + node = random.sample(list(graph), 1)[0] + nbors = list(nx.non_neighbors(graph, node)) + # all other vertices except the node itself and its neighbors + if node != 0 and node != 99: + assert len(nbors) == 97 + else: + assert len(nbors) == 98 + + # create a star graph with 99 outer nodes + graph = nx.star_graph(99) + nbors = list(nx.non_neighbors(graph, 0)) + assert len(nbors) == 0 + + # disconnected graph + graph = nx.Graph() + graph.add_nodes_from(range(10)) + nbors = list(nx.non_neighbors(graph, 0)) + assert len(nbors) == 9 + + def test_non_edges(self): + # All possible edges exist + graph = nx.complete_graph(5) + nedges = list(nx.non_edges(graph)) + assert len(nedges) == 0 + + graph = nx.path_graph(4) + expected = [(0, 2), (0, 3), (1, 3)] + nedges = list(nx.non_edges(graph)) + for u, v in expected: + assert (u, v) in nedges or (v, u) in nedges + + graph = nx.star_graph(4) + expected = [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + nedges = list(nx.non_edges(graph)) + for u, v in expected: + assert (u, v) in nedges or (v, u) in nedges + + # Directed graphs + graph = nx.DiGraph() + graph.add_edges_from([(0, 2), (2, 0), (2, 1)]) + expected = [(0, 1), (1, 0), (1, 2)] + nedges = list(nx.non_edges(graph)) + for e in expected: + assert e in nedges + + def test_is_weighted(self): + G = nx.Graph() + assert not nx.is_weighted(G) + + G = nx.path_graph(4) + assert not nx.is_weighted(G) + assert not nx.is_weighted(G, (2, 3)) + + G.add_node(4) + G.add_edge(3, 4, weight=4) + assert not nx.is_weighted(G) + assert nx.is_weighted(G, (3, 4)) + + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -5), + ("0", "2", 2), + ("1", "2", 4), + ("2", "3", 1), + ] + ) + assert nx.is_weighted(G) + assert nx.is_weighted(G, ("1", "0")) + + G = G.to_undirected() + assert nx.is_weighted(G) + assert nx.is_weighted(G, ("1", "0")) + + pytest.raises(nx.NetworkXError, nx.is_weighted, G, (1, 2)) + + def test_is_negatively_weighted(self): + G = nx.Graph() + assert not nx.is_negatively_weighted(G) + + G.add_node(1) + G.add_nodes_from([2, 3, 4, 5]) + assert not nx.is_negatively_weighted(G) + + G.add_edge(1, 2, weight=4) + assert not nx.is_negatively_weighted(G, (1, 2)) + + G.add_edges_from([(1, 3), (2, 4), (2, 6)]) + G[1][3]["color"] = "blue" + assert not nx.is_negatively_weighted(G) + assert not nx.is_negatively_weighted(G, (1, 3)) + + G[2][4]["weight"] = -2 + assert nx.is_negatively_weighted(G, (2, 4)) + assert nx.is_negatively_weighted(G) + + G = nx.DiGraph() + G.add_weighted_edges_from( + [ + ("0", "3", 3), + ("0", "1", -5), + ("1", "0", -2), + ("0", "2", 2), + ("1", "2", -3), + ("2", "3", 1), + ] + ) + assert nx.is_negatively_weighted(G) + assert not nx.is_negatively_weighted(G, ("0", "3")) + assert nx.is_negatively_weighted(G, ("1", "0")) + + pytest.raises(nx.NetworkXError, nx.is_negatively_weighted, G, (1, 4)) + + +class TestCommonNeighbors: + @classmethod + def setup_class(cls): + 
cls.func = staticmethod(nx.common_neighbors) + + def test_func(G, u, v, expected): + result = sorted(cls.func(G, u, v)) + assert result == expected + + cls.test = staticmethod(test_func) + + def test_K5(self): + G = nx.complete_graph(5) + self.test(G, 0, 1, [2, 3, 4]) + + def test_P3(self): + G = nx.path_graph(3) + self.test(G, 0, 2, [1]) + + def test_S4(self): + G = nx.star_graph(4) + self.test(G, 1, 2, [0]) + + def test_digraph(self): + with pytest.raises(nx.NetworkXNotImplemented): + G = nx.DiGraph() + G.add_edges_from([(0, 1), (1, 2)]) + self.func(G, 0, 2) + + def test_nonexistent_nodes(self): + G = nx.complete_graph(5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 4) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 4, 5) + pytest.raises(nx.NetworkXError, nx.common_neighbors, G, 5, 6) + + def test_custom1(self): + """Case of no common neighbors.""" + G = nx.Graph() + G.add_nodes_from([0, 1]) + self.test(G, 0, 1, []) + + def test_custom2(self): + """Case of equal nodes.""" + G = nx.complete_graph(4) + self.test(G, 0, 0, [1, 2, 3]) + + +@pytest.mark.parametrize( + "graph_type", (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph) +) +def test_set_node_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + vals = 100 + attr = "hello" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == vals + assert G.nodes[1][attr] == vals + assert G.nodes[2][attr] == vals + + # Test dictionary + G = nx.path_graph(3, create_using=graph_type) + vals = dict(zip(sorted(G.nodes()), range(len(G)))) + attr = "hi" + nx.set_node_attributes(G, vals, attr) + assert G.nodes[0][attr] == 0 + assert G.nodes[1][attr] == 1 + assert G.nodes[2][attr] == 2 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + vals = dict.fromkeys(G.nodes(), d) + vals.pop(0) + nx.set_node_attributes(G, vals) + assert G.nodes[0] == {} + assert G.nodes[1]["hi"] == 0 + assert G.nodes[2]["hello"] == 200 + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({0: "red", 1: "blue"}, "color"), # values dictionary + ({0: {"color": "red"}, 1: {"color": "blue"}}, None), # dict-of-dict + ), +) +def test_set_node_attributes_ignores_extra_nodes(values, name): + """ + When `values` is a dict or dict-of-dict keyed by nodes, ensure that keys + that correspond to nodes not in G are ignored. 
+ """ + G = nx.Graph() + G.add_node(0) + nx.set_node_attributes(G, values, name) + assert G.nodes[0]["color"] == "red" + assert 1 not in G.nodes + + +@pytest.mark.parametrize("graph_type", (nx.Graph, nx.DiGraph)) +def test_set_edge_attributes(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == vals + assert G[1][2][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1), (1, 2)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][attr] == 0 + assert G[1][2][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1]["hi"] == 0 + assert G[0][1]["hello"] == 200 + assert G[1][2] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1): 1.0, (0, 2): 2.0}, "weight"), # values dict + ({(0, 1): {"weight": 1.0}, (0, 2): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. + """ + G = nx.Graph([(0, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1]["weight"] == 1.0 + assert (0, 2) not in G.edges + + +@pytest.mark.parametrize("graph_type", (nx.MultiGraph, nx.MultiDiGraph)) +def test_set_edge_attributes_multi(graph_type): + # Test single value + G = nx.path_graph(3, create_using=graph_type) + attr = "hello" + vals = 3 + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == vals + assert G[1][2][0][attr] == vals + + # Test multiple values + G = nx.path_graph(3, create_using=graph_type) + attr = "hi" + edges = [(0, 1, 0), (1, 2, 0)] + vals = dict(zip(edges, range(len(edges)))) + nx.set_edge_attributes(G, vals, attr) + assert G[0][1][0][attr] == 0 + assert G[1][2][0][attr] == 1 + + # Test dictionary of dictionaries + G = nx.path_graph(3, create_using=graph_type) + d = {"hi": 0, "hello": 200} + edges = [(0, 1, 0)] + vals = dict.fromkeys(edges, d) + nx.set_edge_attributes(G, vals) + assert G[0][1][0]["hi"] == 0 + assert G[0][1][0]["hello"] == 200 + assert G[1][2][0] == {} + + +@pytest.mark.parametrize( + ("values", "name"), + ( + ({(0, 1, 0): 1.0, (0, 2, 0): 2.0}, "weight"), # values dict + ({(0, 1, 0): {"weight": 1.0}, (0, 2, 0): {"weight": 2.0}}, None), # values dod + ), +) +def test_set_edge_attributes_multi_ignores_extra_edges(values, name): + """If `values` is a dict or dict-of-dicts containing edges that are not in + G, data associate with these edges should be ignored. 
+ """ + G = nx.MultiGraph([(0, 1, 0), (0, 1, 1)]) + nx.set_edge_attributes(G, values, name) + assert G[0][1][0]["weight"] == 1.0 + assert G[0][1][1] == {} + assert (0, 2) not in G.edges() + + +def test_get_node_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_node_attributes(G, vals, attr) + attrs = nx.get_node_attributes(G, attr) + assert attrs[0] == vals + assert attrs[1] == vals + assert attrs[2] == vals + default_val = 1 + G.add_node(4) + attrs = nx.get_node_attributes(G, attr, default=default_val) + assert attrs[4] == default_val + + +def test_get_edge_attributes(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + G = nx.path_graph(3, create_using=G) + attr = "hello" + vals = 100 + nx.set_edge_attributes(G, vals, attr) + attrs = nx.get_edge_attributes(G, attr) + assert len(attrs) == 2 + + for edge in G.edges: + assert attrs[edge] == vals + + default_val = vals + G.add_edge(4, 5) + deafult_attrs = nx.get_edge_attributes(G, attr, default=default_val) + assert len(deafult_attrs) == 3 + + for edge in G.edges: + assert deafult_attrs[edge] == vals + + +def test_is_empty(): + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + for G in graphs: + assert nx.is_empty(G) + G.add_nodes_from(range(5)) + assert nx.is_empty(G) + G.add_edges_from([(1, 2), (3, 4)]) + assert not nx.is_empty(G) + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_selfloops(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + assert nodes_equal(nx.nodes_with_selfloops(G), [0]) + assert edges_equal(nx.selfloop_edges(G), [(0, 0)]) + assert edges_equal(nx.selfloop_edges(G, data=True), [(0, 0, {})]) + assert nx.number_of_selfloops(G) == 1 + + +@pytest.mark.parametrize( + "graph_type", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_selfloop_edges_attr(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal(nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)]) + + +def test_selfloop_edges_multi_with_data_and_keys(): + G = nx.complete_graph(3, create_using=nx.MultiGraph) + G.add_edge(0, 0, weight=10) + G.add_edge(0, 0, weight=100) + assert edges_equal( + nx.selfloop_edges(G, data="weight", keys=True), [(0, 0, 0, 10), (0, 0, 1, 100)] + ) + + +@pytest.mark.parametrize("graph_type", [nx.Graph, nx.DiGraph]) +def test_selfloops_removal(graph_type): + G = nx.complete_graph(3, create_using=graph_type) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G, keys=True, data=True)) + + +@pytest.mark.parametrize("graph_type", [nx.MultiGraph, nx.MultiDiGraph]) +def test_selfloops_removal_multi(graph_type): + """test removing selfloops behavior vis-a-vis altering a dict while iterating. + cf. 
gh-4068""" + G = nx.complete_graph(3, create_using=graph_type) + # Defaults - see gh-4080 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from(nx.selfloop_edges(G)) + assert (0, 0) not in G.edges() + # With keys + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, keys=True)) + # With data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(TypeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True)) + # With keys and data + G.add_edge(0, 0) + G.add_edge(0, 0) + with pytest.raises(RuntimeError): + G.remove_edges_from(nx.selfloop_edges(G, data=True, keys=True)) + + +def test_pathweight(): + valid_path = [1, 2, 3] + invalid_path = [1, 3, 2] + graphs = [nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()] + edges = [ + (1, 2, {"cost": 5, "dist": 6}), + (2, 3, {"cost": 3, "dist": 4}), + (1, 2, {"cost": 1, "dist": 2}), + ] + for graph in graphs: + graph.add_edges_from(edges) + assert nx.path_weight(graph, valid_path, "cost") == 4 + assert nx.path_weight(graph, valid_path, "dist") == 6 + pytest.raises(nx.NetworkXNoPath, nx.path_weight, graph, invalid_path, "cost") + + +@pytest.mark.parametrize( + "G", (nx.Graph(), nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()) +) +def test_ispath(G): + G.add_edges_from([(1, 2), (2, 3), (1, 2), (3, 4)]) + valid_path = [1, 2, 3, 4] + invalid_path = [1, 2, 4, 3] # wrong node order + another_invalid_path = [1, 2, 3, 4, 5] # contains node not in G + assert nx.is_path(G, valid_path) + assert not nx.is_path(G, invalid_path) + assert not nx.is_path(G, another_invalid_path) + + +@pytest.mark.parametrize("G", (nx.Graph(), nx.DiGraph())) +def test_restricted_view(G): + G.add_edges_from([(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2)]) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2), (3, 4)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} + + +@pytest.mark.parametrize("G", (nx.MultiGraph(), nx.MultiDiGraph())) +def test_restricted_view_multi(G): + G.add_edges_from( + [(0, 1, 0), (0, 2, 0), (0, 3, 0), (0, 1, 1), (1, 0, 0), (1, 1, 0), (1, 2, 0)] + ) + G.add_node(4) + H = nx.restricted_view(G, [0, 2, 5], [(1, 2, 0), (3, 4, 0)]) + assert set(H.nodes()) == {1, 3, 4} + assert set(H.edges()) == {(1, 1)} diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_graph.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..b0048a31f04eb583f4bcfc32385c2335b94467ad --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_graph.py @@ -0,0 +1,920 @@ +import gc +import pickle +import platform +import weakref + +import pytest + +import networkx as nx +from networkx.utils import edges_equal, graphs_equal, nodes_equal + + +class BaseGraphTester: + """Tests for data-structure independent graph class features.""" + + def test_contains(self): + G = self.K3 + assert 1 in G + assert 4 not in G + assert "b" not in G + assert [] not in G # no exception for nonhashable + assert {1: 1} not in G # no exception for nonhashable + + def test_order(self): + G = self.K3 + assert len(G) == 3 + assert G.order() == 3 + assert G.number_of_nodes() == 3 + + def test_nodes(self): + G = self.K3 + assert isinstance(G._node, G.node_dict_factory) + assert isinstance(G._adj, G.adjlist_outer_dict_factory) + assert all( + isinstance(adj, G.adjlist_inner_dict_factory) for adj in G._adj.values() + ) + assert sorted(G.nodes()) == self.k3nodes + assert sorted(G.nodes(data=True)) == [(0, {}), 
(1, {}), (2, {})] + + def test_none_node(self): + G = self.Graph() + with pytest.raises(ValueError): + G.add_node(None) + with pytest.raises(ValueError): + G.add_nodes_from([None]) + with pytest.raises(ValueError): + G.add_edge(0, None) + with pytest.raises(ValueError): + G.add_edges_from([(0, None)]) + + def test_has_node(self): + G = self.K3 + assert G.has_node(1) + assert not G.has_node(4) + assert not G.has_node([]) # no exception for nonhashable + assert not G.has_node({1: 1}) # no exception for nonhashable + + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + + def test_neighbors(self): + G = self.K3 + assert sorted(G.neighbors(0)) == [1, 2] + with pytest.raises(nx.NetworkXError): + G.neighbors(-1) + + @pytest.mark.skipif( + platform.python_implementation() == "PyPy", reason="PyPy gc is different" + ) + def test_memory_leak(self): + G = self.Graph() + + def count_objects_of_type(_type): + # Iterating over all objects tracked by gc can include weak references + # whose weakly-referenced objects may no longer exist. Calling `isinstance` + # on such a weak reference will raise ReferenceError. There are at least + # three workarounds for this: one is to compare type names instead of using + # `isinstance` such as `type(obj).__name__ == typename`, another is to use + # `type(obj) == _type`, and the last is to ignore ProxyTypes as we do below. + # NOTE: even if this safeguard is deemed unnecessary to pass NetworkX tests, + # we should still keep it for maximum safety for other NetworkX backends. + return sum( + 1 + for obj in gc.get_objects() + if not isinstance(obj, weakref.ProxyTypes) and isinstance(obj, _type) + ) + + gc.collect() + before = count_objects_of_type(self.Graph) + G.copy() + gc.collect() + after = count_objects_of_type(self.Graph) + assert before == after + + # test a subclass of the base class + class MyGraph(self.Graph): + pass + + gc.collect() + G = MyGraph() + before = count_objects_of_type(MyGraph) + G.copy() + gc.collect() + after = count_objects_of_type(MyGraph) + assert before == after + + def test_edges(self): + G = self.K3 + assert isinstance(G._adj, G.adjlist_outer_dict_factory) + assert edges_equal(G.edges(), [(0, 1), (0, 2), (1, 2)]) + assert edges_equal(G.edges(0), [(0, 1), (0, 2)]) + assert edges_equal(G.edges([0, 1]), [(0, 1), (0, 2), (1, 2)]) + with pytest.raises(nx.NetworkXError): + G.edges(-1) + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.degree()) == {0: 2, 1: 2, 2: 2} + assert G.degree(0) == 2 + with pytest.raises(nx.NetworkXError): + G.degree(-1) # node not in graph + + def test_size(self): + G = self.K3 + assert G.size() == 3 + assert G.number_of_edges() == 3 + + def test_nbunch_iter(self): + G = self.K3 + assert nodes_equal(G.nbunch_iter(), self.k3nodes) # all nodes + assert nodes_equal(G.nbunch_iter(0), [0]) # single node + assert nodes_equal(G.nbunch_iter([0, 1]), [0, 1]) # sequence + # sequence with none in graph + assert nodes_equal(G.nbunch_iter([-1]), []) + # string sequence with none in graph + assert nodes_equal(G.nbunch_iter("foo"), []) + # node not in graph doesn't get caught upon creation of iterator + bunch = G.nbunch_iter(-1) + # but gets caught when iterator used + with pytest.raises(nx.NetworkXError, match="is not a node or a sequence"): + list(bunch) + # unhashable doesn't get caught upon creation of iterator + bunch = G.nbunch_iter([0, 1, 2, {}]) + # but gets caught when iterator hits the unhashable + with pytest.raises( 
+ nx.NetworkXError, match="in sequence nbunch is not a valid node" + ): + list(bunch) + + def test_nbunch_iter_node_format_raise(self): + # Tests that a node that would have failed string formatting + # doesn't cause an error when attempting to raise a + # :exc:`nx.NetworkXError`. + + # For more information, see pull request #1813. + G = self.Graph() + nbunch = [("x", set())] + with pytest.raises(nx.NetworkXError): + list(G.nbunch_iter(nbunch)) + + def test_selfloop_degree(self): + G = self.Graph() + G.add_edge(1, 1) + assert sorted(G.degree()) == [(1, 2)] + assert dict(G.degree()) == {1: 2} + assert G.degree(1) == 2 + assert sorted(G.degree([1])) == [(1, 2)] + assert G.degree(1, weight="weight") == 2 + + def test_selfloops(self): + G = self.K3.copy() + G.add_edge(0, 0) + assert nodes_equal(nx.nodes_with_selfloops(G), [0]) + assert edges_equal(nx.selfloop_edges(G), [(0, 0)]) + assert nx.number_of_selfloops(G) == 1 + G.remove_edge(0, 0) + G.add_edge(0, 0) + G.remove_edges_from([(0, 0)]) + G.add_edge(1, 1) + G.remove_node(1) + G.add_edge(0, 0) + G.add_edge(1, 1) + G.remove_nodes_from([0, 1]) + + def test_cache_reset(self): + G = self.K3.copy() + old_adj = G.adj + assert id(G.adj) == id(old_adj) + G._adj = {} + assert id(G.adj) != id(old_adj) + + old_nodes = G.nodes + assert id(G.nodes) == id(old_nodes) + G._node = {} + assert id(G.nodes) != id(old_nodes) + + def test_attributes_cached(self): + G = self.K3.copy() + assert id(G.nodes) == id(G.nodes) + assert id(G.edges) == id(G.edges) + assert id(G.degree) == id(G.degree) + assert id(G.adj) == id(G.adj) + + +class BaseAttrGraphTester(BaseGraphTester): + """Tests of graph class attribute features.""" + + def test_weighted_degree(self): + G = self.Graph() + G.add_edge(1, 2, weight=2, other=3) + G.add_edge(2, 3, weight=3, other=4) + assert sorted(d for n, d in G.degree(weight="weight")) == [2, 3, 5] + assert dict(G.degree(weight="weight")) == {1: 2, 2: 5, 3: 3} + assert G.degree(1, weight="weight") == 2 + assert nodes_equal((G.degree([1], weight="weight")), [(1, 2)]) + + assert nodes_equal((d for n, d in G.degree(weight="other")), [3, 7, 4]) + assert dict(G.degree(weight="other")) == {1: 3, 2: 7, 3: 4} + assert G.degree(1, weight="other") == 3 + assert edges_equal((G.degree([1], weight="other")), [(1, 3)]) + + def add_attributes(self, G): + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_name(self): + G = self.Graph(name="") + assert G.name == "" + G = self.Graph(name="test") + assert G.name == "test" + + def test_str_unnamed(self): + G = self.Graph() + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} with 3 nodes and 2 edges" + + def test_str_named(self): + G = self.Graph(name="foo") + G.add_edges_from([(1, 2), (2, 3)]) + assert str(G) == f"{type(G).__name__} named 'foo' with 3 nodes and 2 edges" + + def test_graph_chain(self): + G = self.Graph([(0, 1), (1, 2)]) + DG = G.to_directed(as_view=True) + SDG = DG.subgraph([0, 1]) + RSDG = SDG.reverse(copy=False) + assert G is DG._graph + assert DG is SDG._graph + assert SDG is RSDG._graph + + def test_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict but any container attr are same + H = G.copy() + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_class_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy edge datadict 
but any container attr are same + H = G.__class__(G) + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + def test_fresh_copy(self): + G = self.Graph() + G.add_node(0) + G.add_edge(1, 2) + self.add_attributes(G) + # copy graph structure but use fresh datadict + H = G.__class__() + H.add_nodes_from(G) + H.add_edges_from(G.edges()) + assert len(G.nodes[0]) == 1 + ddict = G.adj[1][2][0] if G.is_multigraph() else G.adj[1][2] + assert len(ddict) == 1 + assert len(H.nodes[0]) == 0 + ddict = H.adj[1][2][0] if H.is_multigraph() else H.adj[1][2] + assert len(ddict) == 0 + + def is_deepcopy(self, H, G): + self.graphs_equal(H, G) + self.different_attrdict(H, G) + self.deep_copy_attrdict(H, G) + + def deep_copy_attrdict(self, H, G): + self.deepcopy_graph_attr(H, G) + self.deepcopy_node_attr(H, G) + self.deepcopy_edge_attr(H, G) + + def deepcopy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + + def deepcopy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + + def deepcopy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] != H[1][2]["foo"] + + def is_shallow_copy(self, H, G): + self.graphs_equal(H, G) + self.shallow_copy_attrdict(H, G) + + def shallow_copy_attrdict(self, H, G): + self.shallow_copy_graph_attr(H, G) + self.shallow_copy_node_attr(H, G) + self.shallow_copy_edge_attr(H, G) + + def shallow_copy_graph_attr(self, H, G): + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + + def shallow_copy_node_attr(self, H, G): + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2]["foo"] == H[1][2]["foo"] + G[1][2]["foo"].append(1) + assert G[1][2]["foo"] == H[1][2]["foo"] + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G.edges == H.edges + H.adj[1][2]["foo"] = old_foo + assert G.edges == H.edges + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def different_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.adj[1][2]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + def test_graph_attr(self): + G = self.K3.copy() + G.graph["foo"] = "bar" + assert isinstance(G.graph, G.graph_attr_dict_factory) + assert G.graph["foo"] == "bar" + del G.graph["foo"] + assert G.graph == {} + H = self.Graph(foo="bar") + assert H.graph["foo"] == 
"bar" + + def test_node_attr(self): + G = self.K3.copy() + G.add_node(1, foo="bar") + assert all( + isinstance(d, G.node_attr_dict_factory) for u, d in G.nodes(data=True) + ) + assert nodes_equal(G.nodes(), [0, 1, 2]) + assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "bar"}), (2, {})]) + G.nodes[1]["foo"] = "baz" + assert nodes_equal(G.nodes(data=True), [(0, {}), (1, {"foo": "baz"}), (2, {})]) + assert nodes_equal(G.nodes(data="foo"), [(0, None), (1, "baz"), (2, None)]) + assert nodes_equal( + G.nodes(data="foo", default="bar"), [(0, "bar"), (1, "baz"), (2, "bar")] + ) + + def test_node_attr2(self): + G = self.K3.copy() + a = {"foo": "bar"} + G.add_node(3, **a) + assert nodes_equal(G.nodes(), [0, 1, 2, 3]) + assert nodes_equal( + G.nodes(data=True), [(0, {}), (1, {}), (2, {}), (3, {"foo": "bar"})] + ) + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert edges_equal(G.edges[1, 2], {"foo": "bar"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal(G.edges(data=True), [(1, 2, {"foo": "bar"})]) + assert edges_equal(G.edges(data="foo"), [(1, 2, "bar")]) + + def test_edge_attr2(self): + G = self.Graph() + G.add_edges_from([(1, 2), (3, 4)], foo="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"foo": "foo"}), (3, 4, {"foo": "foo"})] + ) + assert edges_equal(G.edges(data="foo"), [(1, 2, "foo"), (3, 4, "foo")]) + + def test_edge_attr3(self): + G = self.Graph() + G.add_edges_from([(1, 2, {"weight": 32}), (3, 4, {"weight": 64})], foo="foo") + assert edges_equal( + G.edges(data=True), + [ + (1, 2, {"foo": "foo", "weight": 32}), + (3, 4, {"foo": "foo", "weight": 64}), + ], + ) + + G.remove_edges_from([(1, 2), (3, 4)]) + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2]["listdata"] = [20, 200] + G.adj[1][2]["weight"] = 20 + dd = { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + "weight": 20, + } + assert edges_equal(G.edges(data=True), [(1, 2, dd)]) + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.Graph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 = H.to_directed(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) or H.is_directed() + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) or H.is_directed() + + def test_to_undirected_as_view(self): + H = nx.path_graph(2, create_using=self.Graph) + H2 
= H.to_undirected(as_view=True) + assert H is H2._graph + assert H2.has_edge(0, 1) + assert H2.has_edge(1, 0) + pytest.raises(nx.NetworkXError, H2.add_node, -1) + pytest.raises(nx.NetworkXError, H2.add_edge, 1, 2) + H.add_edge(1, 2) + assert H2.has_edge(1, 2) + assert H2.has_edge(2, 1) + + def test_directed_class(self): + G = self.Graph() + + class newGraph(G.to_undirected_class()): + def to_directed_class(self): + return newDiGraph + + def to_undirected_class(self): + return newGraph + + class newDiGraph(G.to_directed_class()): + def to_directed_class(self): + return newDiGraph + + def to_undirected_class(self): + return newGraph + + G = newDiGraph() if G.is_directed() else newGraph() + H = G.to_directed() + assert isinstance(H, newDiGraph) + H = G.to_undirected() + assert isinstance(H, newGraph) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.DiGraph(G) + self.is_shallow_copy(H, G) + self.different_attrdict(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_subgraph(self): + G = self.K3 + self.add_attributes(G) + H = G.subgraph([0, 1, 2, 5]) + self.graphs_equal(H, G) + self.same_attrdict(H, G) + self.shallow_copy_attrdict(H, G) + + H = G.subgraph(0) + assert H.adj == {0: {}} + H = G.subgraph([]) + assert H.adj == {} + assert G.adj != {} + + def test_selfloops_attr(self): + G = self.K3.copy() + G.add_edge(0, 0) + G.add_edge(1, 1, weight=2) + assert edges_equal( + nx.selfloop_edges(G, data=True), [(0, 0, {}), (1, 1, {"weight": 2})] + ) + assert edges_equal( + nx.selfloop_edges(G, data="weight"), [(0, 0, None), (1, 1, 2)] + ) + + +class TestGraph(BaseAttrGraphTester): + """Tests specific to dict-of-dict-of-dict graph data structure""" + + def setup_method(self): + self.Graph = nx.Graph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = ({}, {}, {}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_pickle(self): + G = self.K3 + pg = pickle.loads(pickle.dumps(G, -1)) + self.graphs_equal(pg, G) + pg = pickle.loads(pickle.dumps(G)) + self.graphs_equal(pg, G) + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + assert sorted(G.adj.items()) == [(1, {2: {}}), (2, {1: {}})] + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {}, 2: {}}, + 1: {0: {}, 2: {}}, + 2: {0: {}, 1: {}}, + } + + def test_getitem(self): + G = self.K3 + assert G.adj[0] == {1: {}, 2: {}} + assert G[0] == {1: {}, 2: {}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_add_node(self): + G = self.Graph() + G.add_node(0) + assert G.adj == {0: {}} + # test add attributes + G.add_node(1, c="red") + G.add_node(2, c="blue") + G.add_node(3, c="red") + assert G.nodes[1]["c"] == "red" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[3]["c"] == "red" + # test updating attributes + G.add_node(1, c="blue") + G.add_node(2, c="red") + G.add_node(3, c="blue") + assert G.nodes[1]["c"] == "blue" + assert G.nodes[2]["c"] == "red" + assert G.nodes[3]["c"] == "blue" + + def test_add_nodes_from(self): + G = self.Graph() + G.add_nodes_from([0, 1, 2]) + assert G.adj == {0: {}, 1: {}, 2: {}} + # test add attributes + G.add_nodes_from([0, 1, 2], c="red") + assert G.nodes[0]["c"] == "red" + 
assert G.nodes[2]["c"] == "red" + # test that attribute dicts are not the same + assert G.nodes[0] is not G.nodes[1] + # test updating attributes + G.add_nodes_from([0, 1, 2], c="blue") + assert G.nodes[0]["c"] == "blue" + assert G.nodes[2]["c"] == "blue" + assert G.nodes[0] is not G.nodes[1] + # test tuple input + H = self.Graph() + H.add_nodes_from(G.nodes(data=True)) + assert H.nodes[0]["c"] == "blue" + assert H.nodes[2]["c"] == "blue" + assert H.nodes[0] is not H.nodes[1] + # specific overrides general + H.add_nodes_from([0, (1, {"c": "green"}), (3, {"c": "cyan"})], c="red") + assert H.nodes[0]["c"] == "red" + assert H.nodes[1]["c"] == "green" + assert H.nodes[2]["c"] == "blue" + assert H.nodes[3]["c"] == "cyan" + + def test_remove_node(self): + G = self.K3.copy() + G.remove_node(0) + assert G.adj == {1: {2: {}}, 2: {1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + # generator here to implement list,set,string... + + def test_remove_nodes_from(self): + G = self.K3.copy() + G.remove_nodes_from([0, 1]) + assert G.adj == {2: {}} + G.remove_nodes_from([-1]) # silent fail + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {}}, 1: {0: {}}} + G = self.Graph() + with pytest.raises(ValueError): + G.add_edge(None, "anything") + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3})]) + assert G.adj == { + 0: {1: {}, 2: {"weight": 3}}, + 1: {0: {}}, + 2: {0: {"weight": 3}}, + } + G = self.Graph() + G.add_edges_from([(0, 1), (0, 2, {"weight": 3}), (1, 2, {"data": 4})], data=2) + assert G.adj == { + 0: {1: {"data": 2}, 2: {"weight": 3, "data": 2}}, + 1: {0: {"data": 2}, 2: {"data": 4}}, + 2: {0: {"weight": 3, "data": 2}, 1: {"data": 4}}, + } + + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3)]) # too many in tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) # not a tuple + with pytest.raises(ValueError): + G.add_edges_from([(None, 3), (3, 2)]) # None cannot be a node + + def test_remove_edge(self): + G = self.K3.copy() + G.remove_edge(0, 1) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + assert G.adj == {0: {2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}} + G.remove_edges_from([(0, 0)]) # silent fail + + def test_clear(self): + G = self.K3.copy() + G.graph["name"] = "K3" + G.clear() + assert list(G.nodes) == [] + assert G.adj == {} + assert G.graph == {} + + def test_clear_edges(self): + G = self.K3.copy() + G.graph["name"] = "K3" + nodes = list(G.nodes) + G.clear_edges() + assert list(G.nodes) == nodes + assert G.adj == {0: {}, 1: {}, 2: {}} + assert list(G.edges) == [] + assert G.graph["name"] == "K3" + + def test_edges_data(self): + G = self.K3 + all_edges = [(0, 1, {}), (0, 2, {}), (1, 2, {})] + assert edges_equal(G.edges(data=True), all_edges) + assert edges_equal(G.edges(0, data=True), [(0, 1, {}), (0, 2, {})]) + assert edges_equal(G.edges([0, 1], data=True), all_edges) + with pytest.raises(nx.NetworkXError): + G.edges(-1, True) + + def test_get_edge_data(self): + G = self.K3.copy() + assert G.get_edge_data(0, 1) == {} + assert G[0][1] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(-1, 0) is None + 
assert G.get_edge_data(-1, 0, default=1) == 1 + + def test_update(self): + # specify both edges and nodes + G = self.K3.copy() + G.update(nodes=[3, (4, {"size": 2})], edges=[(4, 5), (6, 7, {"weight": 2})]) + nlist = [ + (0, {}), + (1, {}), + (2, {}), + (3, {}), + (4, {"size": 2}), + (5, {}), + (6, {}), + (7, {}), + ] + assert sorted(G.nodes.data()) == nlist + if G.is_directed(): + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + (4, 5, {}), + (6, 7, {"weight": 2}), + ] + else: + elist = [ + (0, 1, {}), + (0, 2, {}), + (1, 2, {}), + (4, 5, {}), + (6, 7, {"weight": 2}), + ] + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # no keywords -- order is edges, nodes + G = self.K3.copy() + G.update([(4, 5), (6, 7, {"weight": 2})], [3, (4, {"size": 2})]) + assert sorted(G.nodes.data()) == nlist + assert sorted(G.edges.data()) == elist + assert G.graph == {} + + # update using only a graph + G = self.Graph() + G.graph["foo"] = "bar" + G.add_node(2, data=4) + G.add_edge(0, 1, weight=0.5) + GG = G.copy() + H = self.Graph() + GG.update(H) + assert graphs_equal(G, GG) + H.update(G) + assert graphs_equal(H, G) + + # update nodes only + H = self.Graph() + H.update(nodes=[3, 4]) + assert H.nodes ^ {3, 4} == set() + assert H.size() == 0 + + # update edges only + H = self.Graph() + H.update(edges=[(3, 4)]) + assert sorted(H.edges.data()) == [(3, 4, {})] + assert H.size() == 1 + + # No inputs -> exception + with pytest.raises(nx.NetworkXError): + nx.Graph().update() + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`Graph.edge_subgraph` method.""" + + def setup_method(self): + # Create a path graph on five nodes. + G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, "edge01"), (3, 4, "edge34")] == sorted(self.H.edges(data="name")) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v in self.H.edges(): + assert self.G.edges[u, v] == self.H.edges[u, v] + # Making a change to G should make a change in H and vice versa. 
+ self.G.edges[0, 1]["name"] = "foo" + assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"] + self.H.edges[3, 4]["name"] = "bar" + assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. + + """ + assert self.G.graph is self.H.graph diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_graph_historical.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_graph_historical.py new file mode 100644 index 0000000000000000000000000000000000000000..7af081c4871e1e3435668cd62200080c72719955 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_graph_historical.py @@ -0,0 +1,12 @@ +"""Original NetworkX graph tests""" +import networkx +import networkx as nx + +from .historical_tests import HistoricalTests + + +class TestGraphHistorical(HistoricalTests): + @classmethod + def setup_class(cls): + HistoricalTests.setup_class() + cls.G = nx.Graph diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_graphviews.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_graphviews.py new file mode 100644 index 0000000000000000000000000000000000000000..591c760c9bdfa61984dd7b87e938b1c15c04be0e --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_graphviews.py @@ -0,0 +1,350 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal, nodes_equal + +# Note: SubGraph views are not tested here. They have their own testing file + + +class TestReverseView: + def setup_method(self): + self.G = nx.path_graph(9, create_using=nx.DiGraph()) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) not in self.G.edges + assert (2, 3) not in self.rv.edges + assert (3, 2) in self.rv.edges + + def test_iter(self): + expected = sorted(tuple(reversed(e)) for e in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + G = nx.Graph() + pytest.raises(nx.NetworkXNotImplemented, nx.reverse_view, G) + + def test_subclass(self): + class MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + M = MyGraph() + M.add_edge(1, 2) + RM = nx.reverse_view(M) + print("RM class", RM.__class__) + RMC = RM.copy() + print("RMC class", RMC.__class__) + print(RMC.edges) + assert RMC.has_edge(2, 1) + assert RMC.my_method() == "me" + + +class TestMultiReverseView: + def setup_method(self): + self.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.G.add_edge(4, 5) + self.rv = nx.reverse_view(self.G) + + def test_pickle(self): + import pickle + + rv = self.rv + prv = pickle.loads(pickle.dumps(rv, -1)) + assert rv._node == prv._node + assert rv._adj == prv._adj + assert rv.graph == prv.graph + + def test_contains(self): + assert (2, 3, 0) in self.G.edges + assert (3, 2, 0) not in self.G.edges + assert (2, 3, 0) not in self.rv.edges + assert (3, 2, 0) in self.rv.edges + assert (5, 4, 1) in self.rv.edges + assert (4, 5, 1) not in self.rv.edges + + def test_iter(self): + expected = sorted((v, u, k) for u, v, k in self.G.edges) + assert sorted(self.rv.edges) == expected + + def test_exceptions(self): + MG = nx.MultiGraph(self.G) + pytest.raises(nx.NetworkXNotImplemented, nx.reverse_view, MG) + + 
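The reverse-view tests above all rest on one property: `nx.reverse_view` returns a frozen view that shares its node and edge data with the base `DiGraph`/`MultiDiGraph`, so every edge appears with its direction flipped and later mutations of the base graph show through. A minimal editorial sketch of that behavior (not part of the vendored test file):

```python
import networkx as nx

# A reverse view flips edge direction but shares all data with the base graph.
G = nx.DiGraph([(1, 2), (2, 3)])
R = nx.reverse_view(G)
assert sorted(R.edges) == [(2, 1), (3, 2)]

# Mutations of the base graph show through the view...
G.add_edge(3, 4)
assert (4, 3) in R.edges

# ...but the view itself is frozen, so direct mutation raises NetworkXError.
try:
    R.add_edge(9, 9)
except nx.NetworkXError:
    print("views are read-only")
```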
+def test_generic_multitype(): + nxg = nx.graphviews + G = nx.DiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.MultiGraph) + G = nx.MultiDiGraph([(1, 2)]) + with pytest.raises(nx.NetworkXError): + nxg.generic_graph_view(G, create_using=nx.DiGraph) + + +class TestToDirected: + def setup_method(self): + self.G = nx.path_graph(9) + self.dv = nx.to_directed(self.G) + self.MG = nx.path_graph(9, create_using=nx.MultiGraph()) + self.Mdv = nx.to_directed(self.MG) + + def test_directed(self): + assert not self.G.is_directed() + assert self.dv.is_directed() + + def test_already_directed(self): + dd = nx.to_directed(self.dv) + Mdd = nx.to_directed(self.Mdv) + assert edges_equal(dd.edges, self.dv.edges) + assert edges_equal(Mdd.edges, self.Mdv.edges) + + def test_pickle(self): + import pickle + + dv = self.dv + pdv = pickle.loads(pickle.dumps(dv, -1)) + assert dv._node == pdv._node + assert dv._succ == pdv._succ + assert dv._pred == pdv._pred + assert dv.graph == pdv.graph + + def test_contains(self): + assert (2, 3) in self.G.edges + assert (3, 2) in self.G.edges + assert (2, 3) in self.dv.edges + assert (3, 2) in self.dv.edges + + def test_iter(self): + revd = [tuple(reversed(e)) for e in self.G.edges] + expected = sorted(list(self.G.edges) + revd) + assert sorted(self.dv.edges) == expected + + +class TestToUndirected: + def setup_method(self): + self.DG = nx.path_graph(9, create_using=nx.DiGraph()) + self.uv = nx.to_undirected(self.DG) + self.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + self.Muv = nx.to_undirected(self.MDG) + + def test_directed(self): + assert self.DG.is_directed() + assert not self.uv.is_directed() + + def test_already_undirected(self): + uu = nx.to_undirected(self.uv) + Muu = nx.to_undirected(self.Muv) + assert edges_equal(uu.edges, self.uv.edges) + assert edges_equal(Muu.edges, self.Muv.edges) + + def test_pickle(self): + import pickle + + uv = self.uv + puv = pickle.loads(pickle.dumps(uv, -1)) + assert uv._node == puv._node + assert uv._adj == puv._adj + assert uv.graph == puv.graph + assert hasattr(uv, "_graph") + + def test_contains(self): + assert (2, 3) in self.DG.edges + assert (3, 2) not in self.DG.edges + assert (2, 3) in self.uv.edges + assert (3, 2) in self.uv.edges + + def test_iter(self): + expected = sorted(self.DG.edges) + assert sorted(self.uv.edges) == expected + + +class TestChainsOfViews: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.DG = nx.path_graph(9, create_using=nx.DiGraph()) + cls.MG = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.Gv = nx.to_undirected(cls.DG) + cls.DGv = nx.to_directed(cls.G) + cls.MGv = nx.to_undirected(cls.MDG) + cls.MDGv = nx.to_directed(cls.MG) + cls.Rv = cls.DG.reverse() + cls.MRv = cls.MDG.reverse() + cls.graphs = [ + cls.G, + cls.DG, + cls.MG, + cls.MDG, + cls.Gv, + cls.DGv, + cls.MGv, + cls.MDGv, + cls.Rv, + cls.MRv, + ] + for G in cls.graphs: + G.edges, G.nodes, G.degree + + def test_pickle(self): + import pickle + + for G in self.graphs: + H = pickle.loads(pickle.dumps(G, -1)) + assert edges_equal(H.edges, G.edges) + assert nodes_equal(H.nodes, G.nodes) + + def test_subgraph_of_subgraph(self): + SGv = nx.subgraph(self.G, range(3, 7)) + SDGv = nx.subgraph(self.DG, range(3, 7)) + SMGv = nx.subgraph(self.MG, range(3, 7)) + SMDGv = nx.subgraph(self.MDG, range(3, 7)) + for G in self.graphs + [SGv, SDGv, SMGv, SMDGv]: + SG = nx.induced_subgraph(G, [4, 5, 6]) + assert list(SG) 
== [4, 5, 6] + SSG = SG.subgraph([6, 7]) + assert list(SSG) == [6] + # subgraph-subgraph chain is short-cut in base class method + assert SSG._graph is G + + def test_restricted_induced_subgraph_chains(self): + """Test subgraph chains that both restrict and show nodes/edges. + + A restricted_view subgraph should allow induced subgraphs via + G.subgraph without building a chain (meaning the result is a + subgraph view of the original graph, not a subgraph-of-subgraph). + """ + hide_nodes = [3, 4, 5] + hide_edges = [(6, 7)] + RG = nx.restricted_view(self.G, hide_nodes, hide_edges) + nodes = [4, 5, 6, 7, 8] + SG = nx.induced_subgraph(RG, nodes) + SSG = RG.subgraph(nodes) + assert RG._graph is self.G + assert SSG._graph is self.G + assert SG._graph is RG + assert edges_equal(SG.edges, SSG.edges) + # should be same as morphing the graph + CG = self.G.copy() + CG.remove_nodes_from(hide_nodes) + CG.remove_edges_from(hide_edges) + assert edges_equal(CG.edges(nodes), SSG.edges) + CG.remove_nodes_from([0, 1, 2, 3]) + assert edges_equal(CG.edges, SSG.edges) + # switch order: subgraph first, then restricted view + SSSG = self.G.subgraph(nodes) + RSG = nx.restricted_view(SSSG, hide_nodes, hide_edges) + assert RSG._graph is not self.G + assert edges_equal(RSG.edges, CG.edges) + + def test_subgraph_copy(self): + for origG in self.graphs: + G = nx.Graph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert type(G) == type(H) + + def test_subgraph_todirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_directed() + assert sorted(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 4), (5, 6), (6, 5)] + + def test_subgraph_toundirected(self): + SG = nx.induced_subgraph(self.G, [4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_subgraph_toundirected(self): + G = self.DG.reverse(copy=False) + SG = G.subgraph([4, 5, 6]) + SSG = SG.to_undirected() + assert list(SSG) == [4, 5, 6] + assert sorted(SSG.edges) == [(4, 5), (5, 6)] + + def test_reverse_reverse_copy(self): + G = self.DG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.DG.nodes + assert H.edges == self.DG.edges + G = self.MDG.reverse(copy=False) + H = G.reverse(copy=True) + assert H.nodes == self.MDG.nodes + assert H.edges == self.MDG.edges + + def test_subgraph_edgesubgraph_toundirected(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + SSG = SG.edge_subgraph([(4, 5), (5, 4)]) + USSG = SSG.to_undirected() + assert list(USSG) == [4, 5] + assert sorted(USSG.edges) == [(4, 5)] + + def test_copy_subgraph(self): + G = self.G.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_disubgraph(self): + G = self.DG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multidisubgraph(self): + G = self.MDG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert not hasattr(DCSG, "_graph") # not a view + + def test_copy_multisubgraph(self): + G = self.MG.copy() + SG = G.subgraph([4, 5, 6]) + CSG = SG.copy(as_view=True) + DCSG = SG.copy(as_view=False) + assert hasattr(CSG, "_graph") # is a view + assert 
not hasattr(DCSG, "_graph") # not a view + + def test_copy_of_view(self): + G = nx.MultiGraph(self.MGv) + assert G.__class__.__name__ == "MultiGraph" + G = G.copy(as_view=True) + assert G.__class__.__name__ == "MultiGraph" + + def test_subclass(self): + class MyGraph(nx.DiGraph): + def my_method(self): + return "me" + + def to_directed_class(self): + return MyGraph() + + for origG in self.graphs: + G = MyGraph(origG) + SG = G.subgraph([4, 5, 6]) + H = SG.copy() + assert SG.my_method() == "me" + assert H.my_method() == "me" + assert 3 not in H or 3 in SG diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_multidigraph.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_multidigraph.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0bd5467d0a62dc8f533af7a6c5bbc0a57fc010 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_multidigraph.py @@ -0,0 +1,459 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_multigraph import BaseMultiGraphTester +from .test_multigraph import TestEdgeSubgraph as _TestMultiGraphEdgeSubgraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +class BaseMultiDiGraphTester(BaseMultiGraphTester): + def test_edges(self): + G = self.K3 + edges = [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges()) == edges + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.edges, -1) + + def test_edges_data(self): + G = self.K3 + edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})] + assert sorted(G.edges(data=True)) == edges + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + pytest.raises((KeyError, nx.NetworkXError), G.neighbors, -1) + + def test_edges_multi(self): + G = self.K3 + assert sorted(G.edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1) + assert sorted(G.edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + pytest.raises((KeyError, nx.NetworkXError), G.out_edges, -1) + assert sorted(G.out_edges(0, keys=True)) == [(0, 1, 0), (0, 2, 0)] + + def test_out_edges_multi(self): + G = self.K3 + assert sorted(G.out_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.out_edges(0)) == [(0, 1), (0, 2)] + G.add_edge(0, 1, 2) + assert sorted(G.out_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + def test_out_edges_data(self): + G = self.K3 + assert sorted(G.edges(0, data=True)) == [(0, 1, {}), (0, 2, {})] + G.remove_edge(0, 1) + G.add_edge(0, 1, data=1) + assert sorted(G.edges(0, data=True)) == [(0, 1, {"data": 1}), (0, 2, {})] + assert sorted(G.edges(0, data="data")) == [(0, 1, 1), (0, 2, None)] + assert sorted(G.edges(0, data="data", default=-1)) == [(0, 1, 1), (0, 2, -1)] + + def test_in_edges(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + pytest.raises((KeyError, nx.NetworkXError), G.in_edges, -1) + G.add_edge(0, 1, 2) + assert sorted(G.in_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + assert sorted(G.in_edges(0, 
keys=True)) == [(1, 0, 0), (2, 0, 0)] + + def test_in_edges_no_keys(self): + G = self.K3 + assert sorted(G.in_edges()) == [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)] + assert sorted(G.in_edges(0)) == [(1, 0), (2, 0)] + G.add_edge(0, 1, 2) + assert sorted(G.in_edges()) == [ + (0, 1), + (0, 1), + (0, 2), + (1, 0), + (1, 2), + (2, 0), + (2, 1), + ] + + assert sorted(G.in_edges(data=True, keys=False)) == [ + (0, 1, {}), + (0, 1, {}), + (0, 2, {}), + (1, 0, {}), + (1, 2, {}), + (2, 0, {}), + (2, 1, {}), + ] + + def test_in_edges_data(self): + G = self.K3 + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {}), (2, 0, {})] + G.remove_edge(1, 0) + G.add_edge(1, 0, data=1) + assert sorted(G.in_edges(0, data=True)) == [(1, 0, {"data": 1}), (2, 0, {})] + assert sorted(G.in_edges(0, data="data")) == [(1, 0, 1), (2, 0, None)] + assert sorted(G.in_edges(0, data="data", default=-1)) == [(1, 0, 1), (2, 0, -1)] + + def is_shallow(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] == H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def is_deep(self, H, G): + # graph + assert G.graph["foo"] == H.graph["foo"] + G.graph["foo"].append(1) + assert G.graph["foo"] != H.graph["foo"] + # node + assert G.nodes[0]["foo"] == H.nodes[0]["foo"] + G.nodes[0]["foo"].append(1) + assert G.nodes[0]["foo"] != H.nodes[0]["foo"] + # edge + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def test_to_undirected(self): + # MultiDiGraph -> MultiGraph changes number of edges so it is + # not a copy operation... use is_shallow, not is_shallow_copy + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + # self.is_shallow(H,G) + # the result is traversal order dependent so we + # can't use the is_shallow() test here. 
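+ # Illustrative sketch (editorial assumption, not upstream code): reciprocal + # directed edges (u, v, key) and (v, u, key) map to the same undirected edge, + # so the six directed K3 edges typically collapse to three, and which data + # dict survives depends on traversal order; older conversions could instead + # keep a reciprocal pair as two parallel edges, hence the fallback below: + # >>> nx.MultiGraph(nx.MultiDiGraph([(0, 1), (1, 0)])).number_of_edges() + # 1  # with current dict-ordered conversion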
+ try: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (2, 0)]) + except AssertionError: + assert edges_equal(H.edges(), [(0, 1), (1, 2), (1, 2), (2, 0)]) + H = G.to_undirected() + self.is_deep(H, G) + + def test_has_successor(self): + G = self.K3 + assert G.has_successor(0, 1) + assert not G.has_successor(0, -1) + + def test_successors(self): + G = self.K3 + assert sorted(G.successors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.successors, -1) + + def test_has_predecessor(self): + G = self.K3 + assert G.has_predecessor(0, 1) + assert not G.has_predecessor(0, -1) + + def test_predecessors(self): + G = self.K3 + assert sorted(G.predecessors(0)) == [1, 2] + pytest.raises((KeyError, nx.NetworkXError), G.predecessors, -1) + + def test_degree(self): + G = self.K3 + assert sorted(G.degree()) == [(0, 4), (1, 4), (2, 4)] + assert dict(G.degree()) == {0: 4, 1: 4, 2: 4} + assert G.degree(0) == 4 + assert list(G.degree(iter([0]))) == [(0, 4)] + G.add_edge(0, 1, weight=0.3, other=1.2) + assert sorted(G.degree(weight="weight")) == [(0, 4.3), (1, 4.3), (2, 4)] + assert sorted(G.degree(weight="other")) == [(0, 5.2), (1, 5.2), (2, 4)] + + def test_in_degree(self): + G = self.K3 + assert sorted(G.in_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.in_degree()) == {0: 2, 1: 2, 2: 2} + assert G.in_degree(0) == 2 + assert list(G.in_degree(iter([0]))) == [(0, 2)] + assert G.in_degree(0, weight="weight") == 2 + + def test_out_degree(self): + G = self.K3 + assert sorted(G.out_degree()) == [(0, 2), (1, 2), (2, 2)] + assert dict(G.out_degree()) == {0: 2, 1: 2, 2: 2} + assert G.out_degree(0) == 2 + assert list(G.out_degree(iter([0]))) == [(0, 2)] + assert G.out_degree(0, weight="weight") == 2 + + def test_size(self): + G = self.K3 + assert G.size() == 6 + assert G.number_of_edges() == 6 + G.add_edge(0, 1, weight=0.3, other=1.2) + assert round(G.size(weight="weight"), 2) == 6.3 + assert round(G.size(weight="other"), 2) == 7.2 + + def test_to_undirected_reciprocal(self): + G = self.Graph() + G.add_edge(1, 2) + assert G.to_undirected().has_edge(1, 2) + assert not G.to_undirected(reciprocal=True).has_edge(1, 2) + G.add_edge(2, 1) + assert G.to_undirected(reciprocal=True).has_edge(1, 2) + + def test_reverse_copy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse() + assert sorted(R.edges()) == [(1, 0), (1, 0)] + R.remove_edge(1, 0) + assert sorted(R.edges()) == [(1, 0)] + assert sorted(G.edges()) == [(0, 1), (0, 1)] + + def test_reverse_nocopy(self): + G = nx.MultiDiGraph([(0, 1), (0, 1)]) + R = G.reverse(copy=False) + assert sorted(R.edges()) == [(1, 0), (1, 0)] + pytest.raises(nx.NetworkXError, R.remove_edge, 1, 0) + + def test_di_attributes_cached(self): + G = self.K3.copy() + assert id(G.in_edges) == id(G.in_edges) + assert id(G.out_edges) == id(G.out_edges) + assert id(G.in_degree) == id(G.in_degree) + assert id(G.out_degree) == id(G.out_degree) + assert id(G.succ) == id(G.succ) + assert id(G.pred) == id(G.pred) + + +class TestMultiDiGraph(BaseMultiDiGraphTester, _TestMultiGraph): + def setup_method(self): + self.Graph = nx.MultiDiGraph + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = {0: {}, 1: {}, 2: {}} + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] 
= {} + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G._adj == {0: {1: {0: {}}}, 1: {}} + assert G._succ == {0: {1: {0: {}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}}}} + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edge(None, 3) + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G._adj == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._succ == {0: {1: {0: {}, 1: {"weight": 3}}}, 1: {}} + assert G._pred == {0: {}, 1: {0: {0: {}, 1: {"weight": 3}}}} + + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G._succ == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {}, + } + assert G._pred == { + 0: {}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._succ == {0: {1: keydict}, 1: {}} + assert G._pred == {1: {0: keydict}, 0: {}} + + # too few in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0,)]) + # too many in tuple + pytest.raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3, 4)]) + # not a tuple + pytest.raises(TypeError, G.add_edges_from, [0]) + with pytest.raises(ValueError, match="None cannot be a node"): + G.add_edges_from([(None, 3), (3, 2)]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, 0, 2, key=1) + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G._adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._succ == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + pytest.raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0) + + def test_remove_edges_from(self): + G = self.K3 + G.remove_edges_from([(0, 1)]) + assert G._succ == { + 0: {2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + assert G._pred == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edges_from([(0, 0)]) # silent fail + + +class TestEdgeSubgraph(_TestMultiGraphEdgeSubgraph): + """Unit tests for the :meth:`MultiDiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a quadruply-linked path graph on five nodes. 
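+ # Note on the construction (assuming standard nx.add_path semantics): each + # call below adds a fresh parallel edge for every adjacent pair, so after two + # forward and two reversed passes each pair (u, v) carries keys 0 and 1 in + # both directions, e.g. sorted(G[0][1]) == [0, 1] == sorted(G[1][0]).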
+ G = nx.MultiDiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + nx.add_path(G, reversed(range(5))) + nx.add_path(G, reversed(range(5))) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + +class CustomDictClass(UserDict): + pass + + +class MultiDiGraphSubClass(nx.MultiDiGraph): + node_dict_factory = CustomDictClass # type: ignore[assignment] + node_attr_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_outer_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_inner_dict_factory = CustomDictClass # type: ignore[assignment] + edge_key_dict_factory = CustomDictClass # type: ignore[assignment] + edge_attr_dict_factory = CustomDictClass # type: ignore[assignment] + graph_attr_dict_factory = CustomDictClass # type: ignore[assignment] + + +class TestMultiDiGraphSubclass(TestMultiDiGraph): + def setup_method(self): + self.Graph = MultiDiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + # K3._adj is synced with K3._succ + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u == v: + continue + d = {0: {}} + self.K3._succ[u][v] = d + self.K3._pred[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_multigraph.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_multigraph.py new file mode 100644 index 0000000000000000000000000000000000000000..cd912d1d7c33c056b3c9808221bf7b72cd10fcac --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_multigraph.py @@ -0,0 +1,528 @@ +from collections import UserDict + +import pytest + +import networkx as nx +from networkx.utils import edges_equal + +from .test_graph import BaseAttrGraphTester +from .test_graph import TestGraph as _TestGraph + + +class BaseMultiGraphTester(BaseAttrGraphTester): + def test_has_edge(self): + G = self.K3 + assert G.has_edge(0, 1) + assert not G.has_edge(0, -1) + assert G.has_edge(0, 1, 0) + assert not G.has_edge(0, 1, 1) + + def test_get_edge_data(self): + G = self.K3 + assert G.get_edge_data(0, 1) == {0: {}} + assert G[0][1] == {0: {}} + assert G[0][1][0] == {} + assert G.get_edge_data(10, 20) is None + assert G.get_edge_data(0, 1, 0) == {} + + def test_adjacency(self): + G = self.K3 + assert dict(G.adjacency()) == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + + def deepcopy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] != H[1][2][0]["foo"] + + def shallow_copy_edge_attr(self, H, G): + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + G[1][2][0]["foo"].append(1) + assert G[1][2][0]["foo"] == H[1][2][0]["foo"] + + def 
graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph == H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2][0] is H._adj[2][1][0] + assert G._adj[1][2][0] is G._adj[2][1][0] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2][0] is H._pred[2][1][0] + assert G._succ[1][2][0] is G._pred[2][1][0] + + def same_attrdict(self, H, G): + # same attrdict in the edgedata + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj == H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node == H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def different_attrdict(self, H, G): + # used by graph_equal_but_different + old_foo = H[1][2][0]["foo"] + H.adj[1][2][0]["foo"] = "baz" + assert G._adj != H._adj + H.adj[1][2][0]["foo"] = old_foo + assert G._adj == H._adj + + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G._node != H._node + H.nodes[0]["foo"] = old_foo + assert G._node == H._node + + def test_to_undirected(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_undirected() + self.is_deepcopy(H, G) + + def test_to_directed(self): + G = self.K3 + self.add_attributes(G) + H = nx.MultiDiGraph(G) + self.is_shallow_copy(H, G) + H = G.to_directed() + self.is_deepcopy(H, G) + + def test_number_of_edges_selfloops(self): + G = self.K3 + G.add_edge(0, 0) + G.add_edge(0, 0) + G.add_edge(0, 0, key="parallel edge") + G.remove_edge(0, 0, key="parallel edge") + assert G.number_of_edges(0, 0) == 2 + G.remove_edge(0, 0) + assert G.number_of_edges(0, 0) == 1 + + def test_edge_lookup(self): + G = self.Graph() + G.add_edge(1, 2, foo="bar") + G.add_edge(1, 2, "key", foo="biz") + assert edges_equal(G.edges[1, 2, 0], {"foo": "bar"}) + assert edges_equal(G.edges[1, 2, "key"], {"foo": "biz"}) + + def test_edge_attr(self): + G = self.Graph() + G.add_edge(1, 2, key="k1", foo="bar") + G.add_edge(1, 2, key="k2", foo="baz") + assert isinstance(G.get_edge_data(1, 2), G.edge_key_dict_factory) + assert all( + isinstance(d, G.edge_attr_dict_factory) for u, v, d in G.edges(data=True) + ) + assert edges_equal( + G.edges(keys=True, data=True), + [(1, 2, "k1", {"foo": "bar"}), (1, 2, "k2", {"foo": "baz"})], + ) + assert edges_equal( + G.edges(keys=True, data="foo"), [(1, 2, "k1", "bar"), (1, 2, "k2", "baz")] + ) + + def test_edge_attr4(self): + G = self.Graph() + G.add_edge(1, 2, key=0, data=7, spam="bar", bar="foo") + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 7, "spam": "bar", "bar": "foo"})] + ) + G[1][2][0]["data"] = 10 # OK to set data like this + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 10, "spam": "bar", "bar": "foo"})] + ) + + G.adj[1][2][0]["data"] = 20 + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 20, "spam": "bar", "bar": "foo"})] + ) + G.edges[1, 2, 0]["data"] = 21 # another spelling, "edge" + assert edges_equal( + G.edges(data=True), [(1, 2, {"data": 21, "spam": "bar", "bar": "foo"})] + ) + G.adj[1][2][0]["listdata"] = [20, 200] + G.adj[1][2][0]["weight"] = 20 + assert edges_equal( + G.edges(data=True), + [ + ( + 1, + 2, + { + "data": 21, + "spam": "bar", + "bar": "foo", + "listdata": [20, 200], + 
"weight": 20, + }, + ) + ], + ) + + +class TestMultiGraph(BaseMultiGraphTester, _TestGraph): + def setup_method(self): + self.Graph = nx.MultiGraph + # build K3 + ed1, ed2, ed3 = ({0: {}}, {0: {}}, {0: {}}) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + def test_data_input(self): + G = self.Graph({1: [2], 2: [1]}, name="test") + assert G.name == "test" + expected = [(1, {2: {0: {}}}), (2, {1: {0: {}}})] + assert sorted(G.adj.items()) == expected + + def test_data_multigraph_input(self): + # standard case with edge keys and edge data + edata0 = {"w": 200, "s": "foo"} + edata1 = {"w": 201, "s": "bar"} + keydict = {0: edata0, 1: edata1} + dododod = {"a": {"b": keydict}} + + multiple_edge = [("a", "b", 0, edata0), ("a", "b", 1, edata1)] + single_edge = [("a", "b", 0, keydict)] + + G = self.Graph(dododod, multigraph_input=True) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=None) + assert list(G.edges(keys=True, data=True)) == multiple_edge + G = self.Graph(dododod, multigraph_input=False) + assert list(G.edges(keys=True, data=True)) == single_edge + + # test round-trip to_dict_of_dict and MultiGraph constructor + G = self.Graph(dododod, multigraph_input=True) + H = self.Graph(nx.to_dict_of_dicts(G)) + assert nx.is_isomorphic(G, H) is True # test that default is True + for mgi in [True, False]: + H = self.Graph(nx.to_dict_of_dicts(G), multigraph_input=mgi) + assert nx.is_isomorphic(G, H) == mgi + + # Set up cases for when incoming_graph_data is not multigraph_input + etraits = {"w": 200, "s": "foo"} + egraphics = {"color": "blue", "shape": "box"} + edata = {"traits": etraits, "graphics": egraphics} + dodod1 = {"a": {"b": edata}} + dodod2 = {"a": {"b": etraits}} + dodod3 = {"a": {"b": {"traits": etraits, "s": "foo"}}} + dol = {"a": ["b"]} + + multiple_edge = [("a", "b", "traits", etraits), ("a", "b", "graphics", egraphics)] + single_edge = [("a", "b", 0, {})] # type: ignore[var-annotated] + single_edge1 = [("a", "b", 0, edata)] + single_edge2 = [("a", "b", 0, etraits)] + single_edge3 = [("a", "b", 0, {"traits": etraits, "s": "foo"})] + + cases = [ # (dod, mgi, edges) + (dodod1, True, multiple_edge), + (dodod1, False, single_edge1), + (dodod2, False, single_edge2), + (dodod3, False, single_edge3), + (dol, False, single_edge), + ] + + @pytest.mark.parametrize("dod, mgi, edges", cases) + def test_non_multigraph_input(self, dod, mgi, edges): + G = self.Graph(dod, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + G = nx.to_networkx_graph(dod, create_using=self.Graph, multigraph_input=mgi) + assert list(G.edges(keys=True, data=True)) == edges + + mgi_none_cases = [ + (dodod1, multiple_edge), + (dodod2, single_edge2), + (dodod3, single_edge3), + ] + + @pytest.mark.parametrize("dod, edges", mgi_none_cases) + def test_non_multigraph_input_mgi_none(self, dod, edges): + # test constructor without to_networkx_graph for mgi=None + G = self.Graph(dod) + assert list(G.edges(keys=True, data=True)) == edges + + raise_cases = [dodod2, dodod3, dol] + + @pytest.mark.parametrize("dod", raise_cases) + def test_non_multigraph_input_raise(self, dod): + # cases where NetworkXError is raised + pytest.raises(nx.NetworkXError, self.Graph, dod, multigraph_input=True) + pytest.raises( + 
nx.NetworkXError, + nx.to_networkx_graph, + dod, + create_using=self.Graph, + multigraph_input=True, + ) + + def test_getitem(self): + G = self.K3 + assert G[0] == {1: {0: {}}, 2: {0: {}}} + with pytest.raises(KeyError): + G.__getitem__("j") + with pytest.raises(TypeError): + G.__getitem__(["A"]) + + def test_remove_node(self): + G = self.K3 + G.remove_node(0) + assert G.adj == {1: {2: {0: {}}}, 2: {1: {0: {}}}} + with pytest.raises(nx.NetworkXError): + G.remove_node(-1) + + def test_add_edge(self): + G = self.Graph() + G.add_edge(0, 1) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + G = self.Graph() + G.add_edge(*(0, 1)) + assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} + G = self.Graph() + with pytest.raises(ValueError): + G.add_edge(None, "anything") + + def test_add_edge_conflicting_key(self): + G = self.Graph() + G.add_edge(0, 1, key=1) + G.add_edge(0, 1) + assert G.number_of_edges() == 2 + G = self.Graph() + G.add_edges_from([(0, 1, 1, {})]) + G.add_edges_from([(0, 1)]) + assert G.number_of_edges() == 2 + + def test_add_edges_from(self): + G = self.Graph() + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})]) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}}}, + } + G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2) + assert G.adj == { + 0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + 1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}}, + } + G = self.Graph() + edges = [ + (0, 1, {"weight": 3}), + (0, 1, (("weight", 2),)), + (0, 1, 5), + (0, 1, "s"), + ] + G.add_edges_from(edges) + keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}} + assert G._adj == {0: {1: keydict}, 1: {0: keydict}} + + # too few in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0,)]) + # too many in tuple + with pytest.raises(nx.NetworkXError): + G.add_edges_from([(0, 1, 2, 3, 4)]) + # not a tuple + with pytest.raises(TypeError): + G.add_edges_from([0]) + + def test_multigraph_add_edges_from_four_tuple_misordered(self): + """add_edges_from expects 4-tuples of the format (u, v, key, data_dict). + + Ensure 4-tuples of form (u, v, data_dict, key) raise exception. 
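+ + For example (illustrative only, with a hypothetical key "k"): + G.add_edges_from([(0, 1, "k", {"color": "red"})]) is well-formed, while the + misordered (0, 1, {"color": "red"}, "k") puts an unhashable dict in the key + slot, which is why TypeError is raised.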
+ """ + G = nx.MultiGraph() + with pytest.raises(TypeError): + # key/data values flipped in 4-tuple + G.add_edges_from([(0, 1, {"color": "red"}, 0)]) + + def test_remove_edge(self): + G = self.K3 + G.remove_edge(0, 1) + assert G.adj == {0: {2: {0: {}}}, 1: {2: {0: {}}}, 2: {0: {0: {}}, 1: {0: {}}}} + + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + with pytest.raises(nx.NetworkXError): + G.remove_edge(0, 2, key=1) + + def test_remove_edges_from(self): + G = self.K3.copy() + G.remove_edges_from([(0, 1)]) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + G.remove_edges_from([(0, 0)]) # silent fail + self.K3.add_edge(0, 1) + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=True, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=True))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from(list(G.edges(data=False, keys=False))) + assert G.adj == {0: {}, 1: {}, 2: {}} + G = self.K3.copy() + G.remove_edges_from([(0, 1, 0), (0, 2, 0, {}), (1, 2)]) + assert G.adj == {0: {1: {1: {}}}, 1: {0: {1: {}}}, 2: {}} + + def test_remove_multiedge(self): + G = self.K3 + G.add_edge(0, 1, key="parallel edge") + G.remove_edge(0, 1, key="parallel edge") + assert G.adj == { + 0: {1: {0: {}}, 2: {0: {}}}, + 1: {0: {0: {}}, 2: {0: {}}}, + 2: {0: {0: {}}, 1: {0: {}}}, + } + G.remove_edge(0, 1) + kd = {0: {}} + assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}} + with pytest.raises(nx.NetworkXError): + G.remove_edge(-1, 0) + + +class TestEdgeSubgraph: + """Unit tests for the :meth:`MultiGraph.edge_subgraph` method.""" + + def setup_method(self): + # Create a doubly-linked path graph on five nodes. + G = nx.MultiGraph() + nx.add_path(G, range(5)) + nx.add_path(G, range(5)) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.adj[0][1][0]["name"] = "edge010" + G.adj[0][1][1]["name"] = "edge011" + G.adj[3][4][0]["name"] = "edge340" + G.adj[3][4][1]["name"] = "edge341" + G.graph["name"] = "graph" + # Get the subgraph induced by one of the first edges and one of + # the last edges. + self.G = G + self.H = G.edge_subgraph([(0, 1, 0), (3, 4, 1)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert [(0, 1, 0, "edge010"), (3, 4, 1, "edge341")] == sorted( + self.H.edges(keys=True, data="name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes()) + + def test_remove_node(self): + """Tests that removing a node in the original graph does + affect the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes()) + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. 
+ + """ + for u, v, k in self.H.edges(keys=True): + assert self.G._adj[u][v][k] == self.H._adj[u][v][k] + # Making a change to G should make a change in H and vice versa. + self.G._adj[0][1][0]["name"] = "foo" + assert self.G._adj[0][1][0]["name"] == self.H._adj[0][1][0]["name"] + self.H._adj[3][4][1]["name"] = "bar" + assert self.G._adj[3][4][1]["name"] == self.H._adj[3][4][1]["name"] + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. + + """ + assert self.G.graph is self.H.graph + + +class CustomDictClass(UserDict): + pass + + +class MultiGraphSubClass(nx.MultiGraph): + node_dict_factory = CustomDictClass # type: ignore[assignment] + node_attr_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_outer_dict_factory = CustomDictClass # type: ignore[assignment] + adjlist_inner_dict_factory = CustomDictClass # type: ignore[assignment] + edge_key_dict_factory = CustomDictClass # type: ignore[assignment] + edge_attr_dict_factory = CustomDictClass # type: ignore[assignment] + graph_attr_dict_factory = CustomDictClass # type: ignore[assignment] + + +class TestMultiGraphSubclass(TestMultiGraph): + def setup_method(self): + self.Graph = MultiGraphSubClass + # build K3 + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.K3.adjlist_outer_dict_factory( + { + 0: self.K3.adjlist_inner_dict_factory(), + 1: self.K3.adjlist_inner_dict_factory(), + 2: self.K3.adjlist_inner_dict_factory(), + } + ) + self.K3._pred = {0: {}, 1: {}, 2: {}} + for u in self.k3nodes: + for v in self.k3nodes: + if u != v: + d = {0: {}} + self.K3._adj[u][v] = d + self.K3._adj[v][u] = d + self.K3._node = self.K3.node_dict_factory() + self.K3._node[0] = self.K3.node_attr_dict_factory() + self.K3._node[1] = self.K3.node_attr_dict_factory() + self.K3._node[2] = self.K3.node_attr_dict_factory() diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_reportviews.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_reportviews.py new file mode 100644 index 0000000000000000000000000000000000000000..a68d6eb822982cf7d11704583476a09fe42f1ab4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_reportviews.py @@ -0,0 +1,1423 @@ +import pickle +from copy import deepcopy + +import pytest + +import networkx as nx +from networkx.classes import reportviews as rv +from networkx.classes.reportviews import NodeDataView + + +# Nodes +class TestNodeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = cls.G.nodes # NodeView(G) + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 6, 7, 8]" + + def test_repr(self): + assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + + def test_contains(self): + G = self.G.copy() + nv = G.nodes + assert 7 in nv + assert 9 not in nv + G.remove_node(7) + G.add_node(9) + assert 7 not in nv + assert 9 in nv + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[7] == {} + assert nv[3] == {"foo": "bar"} + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes[0:5] + + def test_iter(self): + nv = self.nv + for i, n in enumerate(nv): + assert i == n + inv = iter(nv) + assert next(inv) == 0 + assert iter(nv) != nv + assert iter(inv) == inv + inv2 = iter(nv) + next(inv2) + assert 
list(inv) == list(inv2) + # odd case where NodeView calls NodeDataView with data=False + nnv = nv(data=False) + for i, n in enumerate(nnv): + assert i == n + + def test_call(self): + nodes = self.nv + assert nodes is nodes() + assert nodes is not nodes(data=True) + assert nodes is not nodes(data="weight") + + +class TestNodeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.nv = NodeDataView(cls.G) + cls.ndv = cls.G.nodes.data(True) + cls.nwv = cls.G.nodes.data("foo") + + def test_viewtype(self): + nv = self.G.nodes + ndvfalse = nv.data(False) + assert nv is ndvfalse + assert nv is not self.ndv + + def test_pickle(self): + import pickle + + nv = self.nv + pnv = pickle.loads(pickle.dumps(nv, -1)) + assert nv == pnv + assert nv.__slots__ == pnv.__slots__ + + def test_str(self): + msg = str([(n, {}) for n in range(9)]) + assert str(self.ndv) == msg + + def test_repr(self): + expected = "NodeDataView((0, 1, 2, 3, 4, 5, 6, 7, 8))" + assert repr(self.nv) == expected + expected = ( + "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, " + + "4: {}, 5: {}, 6: {}, 7: {}, 8: {}})" + ) + assert repr(self.ndv) == expected + expected = ( + "NodeDataView({0: None, 1: None, 2: None, 3: None, 4: None, " + + "5: None, 6: None, 7: None, 8: None}, data='foo')" + ) + assert repr(self.nwv) == expected + + def test_contains(self): + G = self.G.copy() + nv = G.nodes.data() + nwv = G.nodes.data("foo") + G.nodes[3]["foo"] = "bar" + assert (7, {}) in nv + assert (3, {"foo": "bar"}) in nv + assert (3, "bar") in nwv + assert (7, None) in nwv + # default + nwv_def = G.nodes(data="foo", default="biz") + assert (7, "biz") in nwv_def + assert (3, "bar") in nwv_def + + def test_getitem(self): + G = self.G.copy() + nv = G.nodes + G.nodes[3]["foo"] = "bar" + assert nv[3] == {"foo": "bar"} + # default + nwv_def = G.nodes(data="foo", default="biz") + assert nwv_def[7] == "biz" + assert nwv_def[3] == "bar" + # slicing + with pytest.raises(nx.NetworkXError): + G.nodes.data()[0:5] + + def test_iter(self): + G = self.G.copy() + nv = G.nodes.data() + ndv = G.nodes.data(True) + nwv = G.nodes.data("foo") + for i, (n, d) in enumerate(nv): + assert i == n + assert d == {} + inv = iter(nv) + assert next(inv) == (0, {}) + G.nodes[3]["foo"] = "bar" + # default + for n, d in nv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data=True + for n, d in ndv: + if n == 3: + assert d == {"foo": "bar"} + else: + assert d == {} + # data='foo' + for n, d in nwv: + if n == 3: + assert d == "bar" + else: + assert d is None + # data='foo', default=1 + for n, d in G.nodes.data("foo", default=1): + if n == 3: + assert d == "bar" + else: + assert d == 1 + + +def test_nodedataview_unhashable(): + G = nx.path_graph(9) + G.nodes[3]["foo"] = "bar" + nvs = [G.nodes.data()] + nvs.append(G.nodes.data(True)) + H = G.copy() + H.nodes[4]["foo"] = {1, 2, 3} + nvs.append(H.nodes.data(True)) + # raise unhashable + for nv in nvs: + pytest.raises(TypeError, set, nv) + pytest.raises(TypeError, eval, "nv | nv", locals()) + # no raise... 
hashable + Gn = G.nodes.data(False) + set(Gn) + Gn | Gn + Gn = G.nodes.data("foo") + set(Gn) + Gn | Gn + + +class TestNodeViewSetOps: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes + + def n_its(self, nodes): + return set(nodes) + + def test_len(self): + G = self.G.copy() + nv = G.nodes + assert len(nv) == 9 + G.remove_node(7) + assert len(nv) == 8 + G.add_node(9) + assert len(nv) == 9 + + def test_and(self): + # print("G & H nodes:", gnv & hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv & some_nodes == self.n_its(range(5, 9)) + assert some_nodes & nv == self.n_its(range(5, 9)) + + def test_or(self): + # print("G | H nodes:", gnv | hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv | some_nodes == self.n_its(range(12)) + assert some_nodes | nv == self.n_its(range(12)) + + def test_xor(self): + # print("G ^ H nodes:", gnv ^ hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + nodes = {0, 1, 2, 3, 4, 9, 10, 11} + assert nv ^ some_nodes == self.n_its(nodes) + assert some_nodes ^ nv == self.n_its(nodes) + + def test_sub(self): + # print("G - H nodes:", gnv - hnv) + nv = self.nv + some_nodes = self.n_its(range(5, 12)) + assert nv - some_nodes == self.n_its(range(5)) + assert some_nodes - nv == self.n_its(range(9, 12)) + + +class TestNodeDataViewSetOps(TestNodeViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo") + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else None) for node in nodes} + + +class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.G.nodes[3]["foo"] = "bar" + cls.nv = cls.G.nodes.data("foo", default=1) + + def n_its(self, nodes): + return {(node, "bar" if node == 3 else 1) for node in nodes} + + +# Edges Data View +class TestEdgeDataView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G)(data=True) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert list(ev) == list(pev) + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G)(data=True) + rep = str([(n, n + 1, {}) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "EdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_iterdata(self): + G = self.G.copy() + evr = self.eview(G) + ev = evr(data=True) + ev_def = evr(data="foo", default=1) + + for u, v, d in ev: + pass + assert d == {} + + for u, v, wt in ev_def: + pass + assert wt == 1 + + self.modify_edge(G, (2, 3), foo="bar") + for e in ev: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == {"foo": "bar"} + checked = True + else: + assert e[2] == {} + assert checked + + for e in ev_def: + assert len(e) == 3 + if set(e[:2]) == {2, 3}: + assert e[2] == "bar" + checked_wt = True + else: + assert e[2] == 1 + assert checked_wt + + def test_iter(self): + evr = self.eview(self.G) + ev = evr() + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + evr = 
self.eview(self.G) + ev = evr() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + else: + assert (1, 2) in ev and (2, 1) in ev + assert (1, 4) not in ev + assert (1, 90) not in ev + assert (90, 1) not in ev + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + if self.G.is_directed(): + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + else: + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 2 + assert len(evr([1, 2, 3])) == 4 + + assert len(self.G.edges(1)) == 2 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 3 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + +class TestOutEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.OutEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_len(self): + evr = self.eview(self.G) + ev = evr(data="foo") + assert len(ev) == 8 + assert len(evr(1)) == 1 + assert len(evr([1, 2, 3])) == 3 + + assert len(self.G.edges(1)) == 1 + assert len(self.G.edges()) == 8 + assert len(self.G.edges) == 8 + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 2 + assert len(H.edges()) == 9 + assert len(H.edges) == 9 + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestInEdgeDataView(TestOutEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.DiGraph()) + cls.eview = nx.reportviews.InEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) not in ev + assert (1, 2) in ev + assert (2, 3) not in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestMultiEdgeDataView(TestEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiGraph()) + cls.eview = nx.reportviews.MultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "MultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not 
in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestOutMultiEdgeDataView(TestOutEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]][0].update(kwds) + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "OutMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) in ev + assert (1, 2) not in ev + assert (2, 3) in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +class TestInMultiEdgeDataView(TestOutMultiEdgeDataView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph()) + cls.eview = nx.reportviews.InMultiEdgeView + + def test_repr(self): + ev = self.eview(self.G)(data=True) + rep = ( + "InMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + + "(2, 3, {}), (3, 4, {}), " + + "(4, 5, {}), (5, 6, {}), " + + "(6, 7, {}), (7, 8, {})])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + evr = self.eview(self.G) + ev = evr(nbunch=[0, 2]) + assert (0, 1) not in ev + assert (1, 2) in ev + assert (2, 3) not in ev + assert (3, 4) not in ev + assert (4, 5) not in ev + assert (5, 6) not in ev + assert (7, 8) not in ev + assert (8, 9) not in ev + + +# Edge Views +class TestEdgeView: + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9) + cls.eview = nx.reportviews.EdgeView + + def test_pickle(self): + import pickle + + ev = self.eview(self.G) + pev = pickle.loads(pickle.dumps(ev, -1)) + assert ev == pev + assert ev.__slots__ == pev.__slots__ + + def modify_edge(self, G, e, **kwds): + G._adj[e[0]][e[1]].update(kwds) + + def test_str(self): + ev = self.eview(self.G) + rep = str([(n, n + 1) for n in range(8)]) + assert str(ev) == rep + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_getitem(self): + G = self.G.copy() + ev = G.edges + G.edges[0, 1]["foo"] = "bar" + assert ev[0, 1] == {"foo": "bar"} + + # slicing + with pytest.raises(nx.NetworkXError): + G.edges[0:5] + + def test_call(self): + ev = self.eview(self.G) + assert id(ev) == id(ev()) + assert id(ev) == id(ev(data=False)) + assert id(ev) != id(ev(data=True)) + assert id(ev) != id(ev(nbunch=1)) + + def test_data(self): + ev = self.eview(self.G) + assert id(ev) != id(ev.data()) + assert id(ev) == id(ev.data(data=False)) + assert id(ev) != id(ev.data(data=True)) + assert id(ev) != id(ev.data(nbunch=1)) + + def test_iter(self): + ev = self.eview(self.G) + for u, v in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_contains(self): + ev = self.eview(self.G) + edv = ev() + if self.G.is_directed(): + assert (1, 2) in ev and (2, 1) not in ev + assert (1, 2) in edv and (2, 1) not in edv + else: + assert (1, 2) in ev and (2, 1) in ev + assert (1, 2) in edv and (2, 1) in edv + assert (1, 4) not in ev + assert (1, 4) not in edv + # edge not in graph + assert (1, 90) not in ev + assert (90, 1) not in ev + assert (1, 90) not in edv + assert (90, 1) not 
in edv + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + def test_len(self): + ev = self.eview(self.G) + num_ed = 9 if self.G.is_multigraph() else 8 + assert len(ev) == num_ed + + H = self.G.copy() + H.add_edge(1, 1) + assert len(H.edges(1)) == 3 + H.is_multigraph() - H.is_directed() + assert len(H.edges()) == num_ed + 1 + assert len(H.edges) == num_ed + 1 + + def test_and(self): + # print("G & H edges:", gnv & hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + if self.G.is_directed(): + assert some_edges & ev == {(0, 1)} + assert ev & some_edges == {(0, 1)} + else: + assert ev & some_edges == {(0, 1), (1, 0)} + assert some_edges & ev == {(0, 1), (1, 0)} + return + + def test_or(self): + # print("G | H edges:", gnv | hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + result1 = {(n, n + 1) for n in range(8)} + result1.update(some_edges) + result2 = {(n + 1, n) for n in range(8)} + result2.update(some_edges) + assert (ev | some_edges) in (result1, result2) + assert (some_edges | ev) in (result1, result2) + + def test_xor(self): + # print("G ^ H edges:", gnv ^ hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + if self.G.is_directed(): + result = {(n, n + 1) for n in range(1, 8)} + result.update({(1, 0), (0, 2)}) + assert ev ^ some_edges == result + else: + result = {(n, n + 1) for n in range(1, 8)} + result.update({(0, 2)}) + assert ev ^ some_edges == result + return + + def test_sub(self): + # print("G - H edges:", gnv - hnv) + ev = self.eview(self.G) + some_edges = {(0, 1), (1, 0), (0, 2)} + result = {(n, n + 1) for n in range(8)} + result.remove((0, 1)) + assert ev - some_edges == result + + +class TestOutEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.DiGraph()) + cls.eview = nx.reportviews.OutEdgeView + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) not in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestInEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.DiGraph()) + cls.eview = nx.reportviews.InEdgeView + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + + "(4, 5), (5, 6), (6, 7), (7, 8)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) not in evn + assert (1, 2) in evn + assert (2, 3) not in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestMultiEdgeView(TestEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.MultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_str(self): + ev = self.eview(self.G) + replist = [(n, n + 1, 0) for n in range(8)] + replist.insert(2, (1, 2, 3)) + rep = str(replist) + assert str(ev) == rep + + def test_getitem(self): + G = self.G.copy() + ev = G.edges + G.edges[0, 1, 0]["foo"] = "bar" + assert ev[0, 1, 0] == {"foo": "bar"} + + # slicing + with pytest.raises(nx.NetworkXError): + G.edges[0:5] + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_call(self): + ev = self.eview(self.G) + assert id(ev) == id(ev(keys=True)) + assert id(ev) == id(ev(data=False, keys=True)) + assert id(ev) != id(ev(keys=False)) + assert id(ev) != id(ev(data=True)) + assert id(ev) != id(ev(nbunch=1)) + + def test_data(self): + ev = self.eview(self.G) + assert id(ev) != id(ev.data()) + assert id(ev) == id(ev.data(data=False, keys=True)) + assert id(ev) != id(ev.data(keys=False)) + assert id(ev) != id(ev.data(data=True)) + assert id(ev) != id(ev.data(nbunch=1)) + + def test_iter(self): + ev = self.eview(self.G) + for u, v, k in ev: + pass + iev = iter(ev) + assert next(iev) == (0, 1, 0) + assert iter(ev) != ev + assert iter(iev) == iev + + def test_iterkeys(self): + G = self.G + evr = self.eview(G) + ev = evr(keys=True) + for u, v, k in ev: + pass + assert k == 0 + ev = evr(keys=True, data="foo", default=1) + for u, v, k, wt in ev: + pass + assert wt == 1 + + self.modify_edge(G, (2, 3, 0), foo="bar") + ev = evr(keys=True, data=True) + for e in ev: + assert len(e) == 4 + if set(e[:2]) == {2, 3}: + assert e[2] == 0 + assert e[3] == {"foo": "bar"} + checked = True + elif set(e[:3]) == {1, 2, 3}: + assert e[2] == 3 + assert e[3] == {"foo": "bar"} + checked_multi = True + else: + assert e[2] == 0 + assert e[3] == {} + assert checked + assert checked_multi + ev = evr(keys=True, data="foo", default=1) + for e in ev: + if set(e[:2]) == {1, 2} and e[2] == 3: + assert e[3] == "bar" + if set(e[:2]) == {1, 2} and e[2] == 0: + assert e[3] == 1 + if set(e[:2]) == {2, 3}: + assert e[2] == 0 + assert e[3] == "bar" + assert len(e) == 4 + checked_wt = True + assert checked_wt + ev = evr(keys=True) + for e in ev: + assert len(e) == 3 + elist = sorted([(i, i + 1, 0) for i in range(8)] + [(1, 2, 3)]) + assert sorted(ev) == elist + # test that the keyword arguments are passed correctly + ev = evr((1, 2), "foo", keys=True, default=1) + with pytest.raises(TypeError): + evr((1, 2), "foo", True, 1) + with pytest.raises(TypeError): + evr((1, 2), "foo", True, default=1) + for e in ev: + if set(e[:2]) == {1, 2}: + assert e[2] in {0, 3} + if e[2] == 3: + assert e[3] == "bar" + else: # e[2] == 0 + assert e[3] == 1 + if G.is_directed(): + assert len(list(ev)) == 3 + else: + assert len(list(ev)) == 4 + + def test_or(self): + # print("G | H edges:", gnv | hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + result = {(n, n + 1, 0) for n in range(8)} + result.update(some_edges) + result.update({(1, 2, 3)}) + assert ev | some_edges == result + assert some_edges | ev == result + + def test_sub(self): + # print("G - H edges:", gnv - hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + result = {(n, n + 1, 0) for n in range(8)} + result.remove((0, 1, 0)) + result.update({(1, 2, 3)}) + assert ev - some_edges == result + assert some_edges - ev  # non-empty; exact contents depend on directedness + + def test_xor(self): + # print("G ^ H edges:", gnv ^ 
hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + if self.G.is_directed(): + result = {(n, n + 1, 0) for n in range(1, 8)} + result.update({(1, 0, 0), (0, 2, 0), (1, 2, 3)}) + assert ev ^ some_edges == result + assert some_edges ^ ev == result + else: + result = {(n, n + 1, 0) for n in range(1, 8)} + result.update({(0, 2, 0), (1, 2, 3)}) + assert ev ^ some_edges == result + assert some_edges ^ ev == result + + def test_and(self): + # print("G & H edges:", gnv & hnv) + ev = self.eview(self.G) + some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)} + if self.G.is_directed(): + assert ev & some_edges == {(0, 1, 0)} + assert some_edges & ev == {(0, 1, 0)} + else: + assert ev & some_edges == {(0, 1, 0), (1, 0, 0)} + assert some_edges & ev == {(0, 1, 0), (1, 0, 0)} + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestOutMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.OutMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "OutMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0)," + + " (3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) in evn + assert (1, 2) not in evn + assert (2, 3) in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +class TestInMultiEdgeView(TestMultiEdgeView): + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, nx.MultiDiGraph()) + cls.G.add_edge(1, 2, key=3, foo="bar") + cls.eview = nx.reportviews.InMultiEdgeView + + def modify_edge(self, G, e, **kwds): + if len(e) == 2: + e = e + (0,) + G._adj[e[0]][e[1]][e[2]].update(kwds) + + def test_repr(self): + ev = self.eview(self.G) + rep = ( + "InMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + + "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])" + ) + assert repr(ev) == rep + + def test_contains_with_nbunch(self): + ev = self.eview(self.G) + evn = ev(nbunch=[0, 2]) + assert (0, 1) not in evn + assert (1, 2) in evn + assert (2, 3) not in evn + assert (3, 4) not in evn + assert (4, 5) not in evn + assert (5, 6) not in evn + assert (7, 8) not in evn + assert (8, 9) not in evn + + +# Degrees +class TestDegreeView: + GRAPH = nx.Graph + dview = nx.reportviews.DegreeView + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(6, cls.GRAPH()) + cls.G.add_edge(1, 3, foo=2) + cls.G.add_edge(1, 3, foo=3) + + def test_pickle(self): + import pickle + + deg = self.G.degree + pdeg = pickle.loads(pickle.dumps(deg, -1)) + assert dict(deg) == dict(pdeg) + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.dview(self.G) + rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})" + assert repr(dv) == rep + + def test_iter(self): + dv = 
self.dview(self.G) + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert next(idv) == (1, dv[1]) + # weighted + dv = self.dview(self.G, weight="foo") + for n, d in dv: + pass + idv = iter(dv) + assert iter(dv) != dv + assert iter(idv) == idv + assert next(idv) == (0, dv[0]) + assert next(idv) == (1, dv[1]) + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 2 + assert dv[3] == 3 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 5 + assert dv[2] == 2 + assert dv[3] == 5 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 5 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 5)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 5 + assert dvd[2] == 2 + assert dvd[3] == 5 + + def test_len(self): + dv = self.dview(self.G) + assert len(dv) == 6 + + +class TestDiDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.DiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.OutDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 2 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 4 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 4 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInDegreeView(TestDegreeView): + GRAPH = nx.DiGraph + dview = nx.reportviews.InDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 2)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 2 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 4 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + 
assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 4)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 4 + + +class TestMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiGraph + dview = nx.reportviews.MultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 4), (2, 2), (3, 4), (4, 2), (5, 1)]) + assert str(dv) == rep + dv = self.G.degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.degree() + rep = "MultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 2), (3, 4)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 4 + assert dv[2] == 2 + assert dv[3] == 4 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 7 + assert dv[2] == 2 + assert dv[3] == 7 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 7 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 2), (3, 7)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 7 + assert dvd[2] == 2 + assert dvd[3] == 7 + + +class TestDiMultiDegreeView(TestMultiDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.DiMultiDegreeView + + def test_repr(self): + dv = self.G.degree() + rep = "DiMultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})" + assert repr(dv) == rep + + +class TestOutMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.OutMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 1), (1, 3), (2, 1), (3, 1), (4, 1), (5, 0)]) + assert str(dv) == rep + dv = self.G.out_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.out_degree() + rep = "OutMultiDegreeView({0: 1, 1: 3, 2: 1, 3: 1, 4: 1, 5: 0})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 1 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 1)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 1 + assert dv[1] == 3 + assert dv[2] == 1 + assert dv[3] == 1 + dv = self.dview(self.G, weight="foo") + assert dv[0] == 1 + assert dv[1] == 6 + assert dv[2] == 1 + assert dv[3] == 1 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 1 + dvw = dv(1, weight="foo") + assert dvw == 6 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 1)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 1 + assert dvd[1] == 6 + assert dvd[2] == 1 + assert dvd[3] == 1 + + +class TestInMultiDegreeView(TestDegreeView): + GRAPH = nx.MultiDiGraph + dview = nx.reportviews.InMultiDegreeView + + def test_str(self): + dv = self.dview(self.G) + rep = str([(0, 0), (1, 1), (2, 1), (3, 3), (4, 1), (5, 1)]) + assert str(dv) == rep + dv = self.G.in_degree() + assert str(dv) == rep + + def test_repr(self): + dv = self.G.in_degree() + rep = "InMultiDegreeView({0: 0, 1: 1, 2: 1, 3: 3, 4: 1, 5: 1})" + assert repr(dv) == rep + + def test_nbunch(self): + dv = self.dview(self.G) + dvn = dv(0) + assert dvn == 0 + dvn = dv([2, 3]) + assert sorted(dvn) == [(2, 1), (3, 3)] + + def test_getitem(self): + dv = self.dview(self.G) + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 3 + dv = 
self.dview(self.G, weight="foo") + assert dv[0] == 0 + assert dv[1] == 1 + assert dv[2] == 1 + assert dv[3] == 6 + + def test_weight(self): + dv = self.dview(self.G) + dvw = dv(0, weight="foo") + assert dvw == 0 + dvw = dv(1, weight="foo") + assert dvw == 1 + dvw = dv([2, 3], weight="foo") + assert sorted(dvw) == [(2, 1), (3, 6)] + dvd = dict(dv(weight="foo")) + assert dvd[0] == 0 + assert dvd[1] == 1 + assert dvd[2] == 1 + assert dvd[3] == 6 + + +@pytest.mark.parametrize( + ("reportview", "err_msg_terms"), + ( + (rv.NodeView, "list(G.nodes"), + (rv.NodeDataView, "list(G.nodes.data"), + (rv.EdgeView, "list(G.edges"), + # Directed EdgeViews + (rv.InEdgeView, "list(G.in_edges"), + (rv.OutEdgeView, "list(G.edges"), + # Multi EdgeViews + (rv.MultiEdgeView, "list(G.edges"), + (rv.InMultiEdgeView, "list(G.in_edges"), + (rv.OutMultiEdgeView, "list(G.edges"), + ), +) +def test_slicing_reportviews(reportview, err_msg_terms): + G = nx.complete_graph(3) + view = reportview(G) + with pytest.raises(nx.NetworkXError) as exc: + view[0:2] + errmsg = str(exc.value) + assert type(view).__name__ in errmsg + assert err_msg_terms in errmsg + + +@pytest.mark.parametrize( + "graph", [nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph] +) +def test_cache_dict_get_set_state(graph): + G = nx.path_graph(5, graph()) + G.nodes, G.edges, G.adj, G.degree + if G.is_directed(): + G.pred, G.succ, G.in_edges, G.out_edges, G.in_degree, G.out_degree + cached_dict = G.__dict__ + assert "nodes" in cached_dict + assert "edges" in cached_dict + assert "adj" in cached_dict + assert "degree" in cached_dict + if G.is_directed(): + assert "pred" in cached_dict + assert "succ" in cached_dict + assert "in_edges" in cached_dict + assert "out_edges" in cached_dict + assert "in_degree" in cached_dict + assert "out_degree" in cached_dict + + # Raises error if the cached properties and views do not work + pickle.loads(pickle.dumps(G, -1)) + deepcopy(G) diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_special.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_special.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa79605b484f57ed6cbd17762b21a23406ee006 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_special.py @@ -0,0 +1,131 @@ +import networkx as nx + +from .test_digraph import BaseDiGraphTester +from .test_digraph import TestDiGraph as _TestDiGraph +from .test_graph import BaseGraphTester +from .test_graph import TestGraph as _TestGraph +from .test_multidigraph import TestMultiDiGraph as _TestMultiDiGraph +from .test_multigraph import TestMultiGraph as _TestMultiGraph + + +def test_factories(): + class mydict1(dict): + pass + + class mydict2(dict): + pass + + class mydict3(dict): + pass + + class mydict4(dict): + pass + + class mydict5(dict): + pass + + for Graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph): + # print("testing class: ", Graph.__name__) + class MyGraph(Graph): + node_dict_factory = mydict1 + adjlist_outer_dict_factory = mydict2 + adjlist_inner_dict_factory = mydict3 + edge_key_dict_factory = mydict4 + edge_attr_dict_factory = mydict5 + + G = MyGraph() + assert isinstance(G._node, mydict1) + assert isinstance(G._adj, mydict2) + G.add_node(1) + assert isinstance(G._adj[1], mydict3) + if G.is_directed(): + assert isinstance(G._pred, mydict2) + assert isinstance(G._succ, mydict2) + assert isinstance(G._pred[1], mydict3) + G.add_edge(1, 2) + if G.is_multigraph(): + assert isinstance(G._adj[1][2], mydict4) + assert isinstance(G._adj[1][2][0], 
mydict5) + else: + assert isinstance(G._adj[1][2], mydict5) + + +class TestSpecialGraph(_TestGraph): + def setup_method(self): + _TestGraph.setup_method(self) + self.Graph = nx.Graph + + +class TestThinGraph(BaseGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.Graph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._adj = self.k3adj + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + +class TestSpecialDiGraph(_TestDiGraph): + def setup_method(self): + _TestDiGraph.setup_method(self) + self.Graph = nx.DiGraph + + +class TestThinDiGraph(BaseDiGraphTester): + def setup_method(self): + all_edge_dict = {"weight": 1} + + class MyGraph(nx.DiGraph): + def edge_attr_dict_factory(self): + return all_edge_dict + + self.Graph = MyGraph + # build dict-of-dict-of-dict K3 + ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict) + ed4, ed5, ed6 = (all_edge_dict, all_edge_dict, all_edge_dict) + self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}} + self.k3edges = [(0, 1), (0, 2), (1, 2)] + self.k3nodes = [0, 1, 2] + self.K3 = self.Graph() + self.K3._succ = self.k3adj + # K3._adj is synced with K3._succ + self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}} + self.K3._node = {} + self.K3._node[0] = {} + self.K3._node[1] = {} + self.K3._node[2] = {} + + ed1, ed2 = (all_edge_dict, all_edge_dict) + self.P3 = self.Graph() + self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}} + # P3._adj is synced with P3._succ + self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}} + self.P3._node = {} + self.P3._node[0] = {} + self.P3._node[1] = {} + self.P3._node[2] = {} + + +class TestSpecialMultiGraph(_TestMultiGraph): + def setup_method(self): + _TestMultiGraph.setup_method(self) + self.Graph = nx.MultiGraph + + +class TestSpecialMultiDiGraph(_TestMultiDiGraph): + def setup_method(self): + _TestMultiDiGraph.setup_method(self) + self.Graph = nx.MultiDiGraph diff --git a/MLPY/Lib/site-packages/networkx/classes/tests/test_subgraphviews.py b/MLPY/Lib/site-packages/networkx/classes/tests/test_subgraphviews.py new file mode 100644 index 0000000000000000000000000000000000000000..73e0fdd2d52bcb7623dbd4e4f502e8bed0a4e3d3 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/classes/tests/test_subgraphviews.py @@ -0,0 +1,362 @@ +import pytest + +import networkx as nx +from networkx.utils import edges_equal + + +class TestSubGraphView: + gview = staticmethod(nx.subgraph_view) + graph = nx.Graph + hide_edges_filter = staticmethod(nx.filters.hide_edges) + show_edges_filter = staticmethod(nx.filters.show_edges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + cls.hide_edges_w_hide_nodes = {(3, 4), (4, 5), (5, 6)} + + def test_hidden_nodes(self): + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + gview = self.gview + G = gview(self.G, filter_node=nodes_gone) + assert self.G.nodes - G.nodes == {4, 5} + assert self.G.edges - G.edges == self.hide_edges_w_hide_nodes + if G.is_directed(): + assert list(G[3]) == [] + assert list(G[2]) == [3] + else: + assert list(G[3]) == [2] + assert set(G[2]) == {1, 3} + 
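For orientation, here is a minimal standalone sketch (hypothetical snippet, assuming only `networkx`) of the node-filtered view that `test_hidden_nodes` exercises: hiding nodes also hides their incident edges, and nodes absent from the graph (here 111) are silently ignored by the filter.

```python
import networkx as nx

G = nx.path_graph(9)
H = nx.subgraph_view(G, filter_node=nx.filters.hide_nodes([4, 5, 111]))
assert set(G.nodes) - set(H.nodes) == {4, 5}
assert set(G.edges) - set(H.edges) == {(3, 4), (4, 5), (5, 6)}
assert H.size() == 5  # the five surviving path edges
```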
pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (7 if G.is_multigraph() else 5) + + def test_hidden_edges(self): + hide_edges = [(2, 3), (8, 7), (222, 223)] + edges_gone = self.hide_edges_filter(hide_edges) + gview = self.gview + G = gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3)} + assert list(G[2]) == [] + assert list(G.pred[3]) == [] + assert list(G.pred[2]) == [1] + assert G.size() == 7 + else: + assert self.G.edges - G.edges == {(2, 3), (7, 8)} + assert list(G[2]) == [1] + assert G.size() == 6 + assert list(G[3]) == [4] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + def test_shown_node(self): + induced_subgraph = nx.filters.show_nodes([2, 3, 111]) + gview = self.gview + G = gview(self.G, filter_node=induced_subgraph) + assert set(G.nodes) == {2, 3} + if G.is_directed(): + assert list(G[3]) == [] + else: + assert list(G[3]) == [2] + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 4) + pytest.raises(KeyError, G.__getitem__, 112) + pytest.raises(KeyError, G.__getitem__, 111) + assert G.degree(3) == (3 if G.is_multigraph() else 1) + assert G.size() == (3 if G.is_multigraph() else 1) + + def test_shown_edges(self): + show_edges = [(2, 3), (8, 7), (222, 223)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3)} + assert list(G[3]) == [] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3), (7, 8)} + assert list(G[3]) == [2] + assert list(G[2]) == [3] + assert G.size() == 2 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + assert G.degree(3) == 1 + + +class TestSubDiGraphView(TestSubGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.DiGraph + hide_edges_filter = staticmethod(nx.filters.hide_diedges) + show_edges_filter = staticmethod(nx.filters.show_diedges) + hide_edges = [(2, 3), (8, 7), (222, 223)] + excluded = {(2, 3), (3, 4), (4, 5), (5, 6)} + + def test_inoutedges(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert self.G.in_edges - G.in_edges == self.excluded + assert self.G.out_edges - G.out_edges == self.excluded + + def test_pred(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert list(G.pred[2]) == [1] + assert list(G.pred[6]) == [] + + def test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert G.degree(2) == 1 + assert G.out_degree(2) == 0 + assert G.in_degree(2) == 1 + assert G.size() == 4 + + +# multigraph +class TestMultiGraphView(TestSubGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.MultiGraph + hide_edges_filter = 
staticmethod(nx.filters.hide_multiedges) + show_edges_filter = staticmethod(nx.filters.show_multiedges) + + @classmethod + def setup_class(cls): + cls.G = nx.path_graph(9, create_using=cls.graph()) + multiedges = {(2, 3, 4), (2, 3, 5)} + cls.G.add_edges_from(multiedges) + cls.hide_edges_w_hide_nodes = {(3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def test_hidden_edges(self): + hide_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edges_gone = self.hide_edges_filter(hide_edges) + G = self.gview(self.G, filter_edge=edges_gone) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert self.G.edges - G.edges == {(2, 3, 4)} + assert list(G[3]) == [4] + assert list(G[2]) == [3] + assert list(G.pred[3]) == [2] # only one 2 but two edges + assert list(G.pred[2]) == [1] + assert G.size() == 9 + else: + assert self.G.edges - G.edges == {(2, 3, 4), (7, 8, 0)} + assert list(G[3]) == [2, 4] + assert list(G[2]) == [1, 3] + assert G.size() == 8 + assert G.degree(3) == 3 + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + def test_shown_edges(self): + show_edges = [(2, 3, 4), (2, 3, 3), (8, 7, 0), (222, 223, 0)] + edge_subgraph = self.show_edges_filter(show_edges) + G = self.gview(self.G, filter_edge=edge_subgraph) + assert self.G.nodes == G.nodes + if G.is_directed(): + assert G.edges == {(2, 3, 4)} + assert list(G[3]) == [] + assert list(G.pred[3]) == [2] + assert list(G.pred[2]) == [] + assert G.size() == 1 + else: + assert G.edges == {(2, 3, 4), (7, 8, 0)} + assert G.size() == 2 + assert list(G[3]) == [2] + assert G.degree(3) == 1 + assert list(G[2]) == [3] + pytest.raises(KeyError, G.__getitem__, 221) + pytest.raises(KeyError, G.__getitem__, 222) + + +# multidigraph +class TestMultiDiGraphView(TestMultiGraphView, TestSubDiGraphView): + gview = staticmethod(nx.subgraph_view) + graph = nx.MultiDiGraph + hide_edges_filter = staticmethod(nx.filters.hide_multidiedges) + show_edges_filter = staticmethod(nx.filters.show_multidiedges) + hide_edges = [(2, 3, 0), (8, 7, 0), (222, 223, 0)] + excluded = {(2, 3, 0), (3, 4, 0), (4, 5, 0), (5, 6, 0)} + + def test_inout_degree(self): + edges_gone = self.hide_edges_filter(self.hide_edges) + hide_nodes = [4, 5, 111] + nodes_gone = nx.filters.hide_nodes(hide_nodes) + G = self.gview(self.G, filter_node=nodes_gone, filter_edge=edges_gone) + + assert G.degree(2) == 3 + assert G.out_degree(2) == 2 + assert G.in_degree(2) == 1 + assert G.size() == 6 + + +# induced_subgraph +class TestInducedSubGraph: + @classmethod + def setup_class(cls): + cls.K3 = G = nx.complete_graph(3) + G.graph["foo"] = [] + G.nodes[0]["foo"] = [] + G.remove_edge(1, 2) + ll = [] + G.add_edge(1, 2, foo=ll) + G.add_edge(2, 1, foo=ll) + + def test_full_graph(self): + G = self.K3 + H = nx.induced_subgraph(G, [0, 1, 2, 5]) + assert H.name == G.name + self.graphs_equal(H, G) + self.same_attrdict(H, G) + + def test_partial_subgraph(self): + G = self.K3 + H = nx.induced_subgraph(G, 0) + assert dict(H.adj) == {0: {}} + assert dict(G.adj) != {0: {}} + + H = nx.induced_subgraph(G, [0, 1]) + assert dict(H.adj) == {0: {1: {}}, 1: {0: {}}} + + def same_attrdict(self, H, G): + old_foo = H[1][2]["foo"] + H.edges[1, 2]["foo"] = "baz" + assert G.edges == H.edges + H.edges[1, 2]["foo"] = old_foo + assert G.edges == H.edges + old_foo = H.nodes[0]["foo"] + H.nodes[0]["foo"] = "baz" + assert G.nodes == H.nodes + H.nodes[0]["foo"] = old_foo + assert G.nodes == H.nodes + + def graphs_equal(self, H, G): + assert G._adj == H._adj + assert G._node == H._node + assert G.graph 
== H.graph + assert G.name == H.name + if not G.is_directed() and not H.is_directed(): + assert H._adj[1][2] is H._adj[2][1] + assert G._adj[1][2] is G._adj[2][1] + else: # at least one is directed + if not G.is_directed(): + G._pred = G._adj + G._succ = G._adj + if not H.is_directed(): + H._pred = H._adj + H._succ = H._adj + assert G._pred == H._pred + assert G._succ == H._succ + assert H._succ[1][2] is H._pred[2][1] + assert G._succ[1][2] is G._pred[2][1] + + +# edge_subgraph +class TestEdgeSubGraph: + @classmethod + def setup_class(cls): + # Create a path graph on five nodes. + cls.G = G = nx.path_graph(5) + # Add some node, edge, and graph attributes. + for i in range(5): + G.nodes[i]["name"] = f"node{i}" + G.edges[0, 1]["name"] = "edge01" + G.edges[3, 4]["name"] = "edge34" + G.graph["name"] = "graph" + # Get the subgraph induced by the first and last edges. + cls.H = nx.edge_subgraph(G, [(0, 1), (3, 4)]) + + def test_correct_nodes(self): + """Tests that the subgraph has the correct nodes.""" + assert [(0, "node0"), (1, "node1"), (3, "node3"), (4, "node4")] == sorted( + self.H.nodes.data("name") + ) + + def test_correct_edges(self): + """Tests that the subgraph has the correct edges.""" + assert edges_equal( + [(0, 1, "edge01"), (3, 4, "edge34")], self.H.edges.data("name") + ) + + def test_add_node(self): + """Tests that adding a node to the original graph does not + affect the nodes of the subgraph. + + """ + self.G.add_node(5) + assert [0, 1, 3, 4] == sorted(self.H.nodes) + self.G.remove_node(5) + + def test_remove_node(self): + """Tests that removing a node in the original graph + removes the nodes of the subgraph. + + """ + self.G.remove_node(0) + assert [1, 3, 4] == sorted(self.H.nodes) + self.G.add_node(0, name="node0") + self.G.add_edge(0, 1, name="edge01") + + def test_node_attr_dict(self): + """Tests that the node attribute dictionary of the two graphs is + the same object. + + """ + for v in self.H: + assert self.G.nodes[v] == self.H.nodes[v] + # Making a change to G should make a change in H and vice versa. + self.G.nodes[0]["name"] = "foo" + assert self.G.nodes[0] == self.H.nodes[0] + self.H.nodes[1]["name"] = "bar" + assert self.G.nodes[1] == self.H.nodes[1] + # Revert the change, so tests pass with pytest-randomly + self.G.nodes[0]["name"] = "node0" + self.H.nodes[1]["name"] = "node1" + + def test_edge_attr_dict(self): + """Tests that the edge attribute dictionary of the two graphs is + the same object. + + """ + for u, v in self.H.edges(): + assert self.G.edges[u, v] == self.H.edges[u, v] + # Making a change to G should make a change in H and vice versa. + self.G.edges[0, 1]["name"] = "foo" + assert self.G.edges[0, 1]["name"] == self.H.edges[0, 1]["name"] + self.H.edges[3, 4]["name"] = "bar" + assert self.G.edges[3, 4]["name"] == self.H.edges[3, 4]["name"] + # Revert the change, so tests pass with pytest-randomly + self.G.edges[0, 1]["name"] = "edge01" + self.H.edges[3, 4]["name"] = "edge34" + + def test_graph_attr_dict(self): + """Tests that the graph attribute dictionary of the two graphs + is the same object. 
+ + """ + assert self.G.graph is self.H.graph + + def test_readonly(self): + """Tests that the subgraph cannot change the graph structure""" + pytest.raises(nx.NetworkXError, self.H.add_node, 5) + pytest.raises(nx.NetworkXError, self.H.remove_node, 0) + pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6) + pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1) diff --git a/MLPY/Lib/site-packages/networkx/conftest.py b/MLPY/Lib/site-packages/networkx/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..0218891ff33e8168050c689ccfcb443b57c07c46 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/conftest.py @@ -0,0 +1,265 @@ +""" +Testing +======= + +General guidelines for writing good tests: + +- doctests always assume ``import networkx as nx`` so don't add that +- prefer pytest fixtures over classes with setup methods. +- use the ``@pytest.mark.parametrize`` decorator +- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy. + and add the module to the relevant entries below. + +""" +import os +import sys +import warnings +from importlib.metadata import entry_points + +import pytest + +import networkx + + +def pytest_addoption(parser): + parser.addoption( + "--runslow", action="store_true", default=False, help="run slow tests" + ) + parser.addoption( + "--backend", + action="store", + default=None, + help="Run tests with a backend by auto-converting nx graphs to backend graphs", + ) + parser.addoption( + "--fallback-to-nx", + action="store_true", + default=False, + help="Run nx function if a backend doesn't implement a dispatchable function" + " (use with --backend)", + ) + + +def pytest_configure(config): + config.addinivalue_line("markers", "slow: mark test as slow to run") + backend = config.getoption("--backend") + if backend is None: + backend = os.environ.get("NETWORKX_TEST_BACKEND") + if backend: + networkx.utils.backends._dispatch._automatic_backends = [backend] + fallback_to_nx = config.getoption("--fallback-to-nx") + if not fallback_to_nx: + fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX") + networkx.utils.backends._dispatch._fallback_to_nx = bool(fallback_to_nx) + # nx-loopback backend is only available when testing + if sys.version_info < (3, 10): + backends = ( + ep for ep in entry_points()["networkx.backends"] if ep.name == "nx-loopback" + ) + else: + backends = entry_points(name="nx-loopback", group="networkx.backends") + if backends: + networkx.utils.backends.backends["nx-loopback"] = next(iter(backends)) + else: + warnings.warn( + "\n\n WARNING: Mixed NetworkX configuration! \n\n" + " This environment has mixed configuration for networkx.\n" + " The test object nx-loopback is not configured correctly.\n" + " You should not be seeing this message.\n" + " Try `pip install -e .`, or change your PYTHONPATH\n" + " Make sure python finds the networkx repo you are testing\n\n" + ) + + +def pytest_collection_modifyitems(config, items): + # Setting this to True here allows tests to be set up before dispatching + # any function call to a backend. 
+ networkx.utils.backends._dispatch._is_testing = True + if automatic_backends := networkx.utils.backends._dispatch._automatic_backends: + # Allow pluggable backends to add markers to tests (such as skip or xfail) + # when running in auto-conversion test mode + backend = networkx.utils.backends.backends[automatic_backends[0]].load() + if hasattr(backend, "on_start_tests"): + getattr(backend, "on_start_tests")(items) + + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) + + +# TODO: The warnings below need to be dealt with, but for now we silence them. +@pytest.fixture(autouse=True) +def set_warnings(): + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="nx.nx_pydot" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="single_target_shortest_path_length will", + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="shortest_path for all_pairs", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\nforest_str is deprecated" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nrandom_tree" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="Edmonds has been deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="MultiDiGraph_EdgeKey has been deprecated", + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="\n\nThe `normalized`" + ) + warnings.filterwarnings( + "ignore", category=DeprecationWarning, message="function `join` is deprecated" + ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message="\n\nstrongly_connected_components_recursive", + ) + + +@pytest.fixture(autouse=True) +def add_nx(doctest_namespace): + doctest_namespace["nx"] = networkx + # TODO: remove the try-except block when we require numpy >= 2 + try: + import numpy as np + + np.set_printoptions(legacy="1.21") + except ImportError: + pass + + +# What dependencies are installed? 
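As a usage note for the marker wired up above: a hypothetical test module (name and body are illustrative, not from the repo) marked `slow` is collected but skipped unless pytest runs with `--runslow`.

```python
# Hypothetical test module showing the "slow" marker registered in
# pytest_configure: pytest_collection_modifyitems adds a skip marker to it
# unless --runslow is given on the command line.
import pytest

@pytest.mark.slow
def test_expensive_invariant():
    # stands in for a genuinely long-running property check
    assert sum(range(1_000_000)) == 499999500000
```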
+ +try: + import numpy + + has_numpy = True +except ImportError: + has_numpy = False + +try: + import scipy + + has_scipy = True +except ImportError: + has_scipy = False + +try: + import matplotlib + + has_matplotlib = True +except ImportError: + has_matplotlib = False + +try: + import pandas + + has_pandas = True +except ImportError: + has_pandas = False + +try: + import pygraphviz + + has_pygraphviz = True +except ImportError: + has_pygraphviz = False + +try: + import pydot + + has_pydot = True +except ImportError: + has_pydot = False + +try: + import sympy + + has_sympy = True +except ImportError: + has_sympy = False + + +# List of files that pytest should ignore + +collect_ignore = [] + +needs_numpy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/centrality/current_flow_closeness.py", + "algorithms/node_classification.py", + "algorithms/non_randomness.py", + "algorithms/shortest_paths/dense.py", + "linalg/bethehessianmatrix.py", + "linalg/laplacianmatrix.py", + "utils/misc.py", + "algorithms/centrality/laplacian.py", +] +needs_scipy = [ + "algorithms/approximation/traveling_salesman.py", + "algorithms/assortativity/correlation.py", + "algorithms/assortativity/mixing.py", + "algorithms/assortativity/pairs.py", + "algorithms/bipartite/matrix.py", + "algorithms/bipartite/spectral.py", + "algorithms/centrality/current_flow_betweenness.py", + "algorithms/centrality/current_flow_betweenness_subset.py", + "algorithms/centrality/eigenvector.py", + "algorithms/centrality/katz.py", + "algorithms/centrality/second_order.py", + "algorithms/centrality/subgraph_alg.py", + "algorithms/communicability_alg.py", + "algorithms/link_analysis/hits_alg.py", + "algorithms/link_analysis/pagerank_alg.py", + "algorithms/node_classification.py", + "algorithms/similarity.py", + "convert_matrix.py", + "drawing/layout.py", + "generators/spectral_graph_forge.py", + "linalg/algebraicconnectivity.py", + "linalg/attrmatrix.py", + "linalg/bethehessianmatrix.py", + "linalg/graphmatrix.py", + "linalg/modularitymatrix.py", + "linalg/spectrum.py", + "utils/rcm.py", + "algorithms/centrality/laplacian.py", +] +needs_matplotlib = ["drawing/nx_pylab.py"] +needs_pandas = ["convert_matrix.py"] +needs_pygraphviz = ["drawing/nx_agraph.py"] +needs_pydot = ["drawing/nx_pydot.py"] +needs_sympy = ["algorithms/polynomials.py"] + +if not has_numpy: + collect_ignore += needs_numpy +if not has_scipy: + collect_ignore += needs_scipy +if not has_matplotlib: + collect_ignore += needs_matplotlib +if not has_pandas: + collect_ignore += needs_pandas +if not has_pygraphviz: + collect_ignore += needs_pygraphviz +if not has_pydot: + collect_ignore += needs_pydot +if not has_sympy: + collect_ignore += needs_sympy diff --git a/MLPY/Lib/site-packages/networkx/convert.py b/MLPY/Lib/site-packages/networkx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..2fd20710934ac01912eddb8dc4722e30a8d0277f --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/convert.py @@ -0,0 +1,496 @@ +"""Functions to convert NetworkX graphs to and from other formats. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the to_networkx_graph() function +which attempts to guess the input type and convert it automatically. 
+ +Examples +-------- +Create a graph with a single edge from a dictionary of dictionaries + +>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1) +>>> G = nx.Graph(d) + +See Also +-------- +nx_agraph, nx_pydot +""" +import warnings +from collections.abc import Collection, Generator, Iterator + +import networkx as nx + +__all__ = [ + "to_networkx_graph", + "from_dict_of_dicts", + "to_dict_of_dicts", + "from_dict_of_lists", + "to_dict_of_lists", + "from_edgelist", + "to_edgelist", +] + + +def to_networkx_graph(data, create_using=None, multigraph_input=False): + """Make a NetworkX graph from a known data structure. + + The preferred way to call this is automatically + from the class constructor + + >>> d = {0: {1: {"weight": 1}}} # dict-of-dicts single edge (0,1) + >>> G = nx.Graph(d) + + instead of the equivalent + + >>> G = nx.from_dict_of_dicts(d) + + Parameters + ---------- + data : object to be converted + + Current known types are: + any NetworkX graph + dict-of-dicts + dict-of-lists + container (e.g. set, list, tuple) of edges + iterator (e.g. itertools.chain) that produces edges + generator of edges + Pandas DataFrame (row per edge) + 2D numpy array + scipy sparse array + pygraphviz agraph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + If True and data is a dict_of_dicts, + try to create a multigraph assuming dict_of_dict_of_lists. + If data and create_using are both multigraphs then create + a multigraph from a multigraph. + + """ + # NX graph + if hasattr(data, "adj"): + try: + result = from_dict_of_dicts( + data.adj, + create_using=create_using, + multigraph_input=data.is_multigraph(), + ) + # data.graph should be dict-like + result.graph.update(data.graph) + # data.nodes should be dict-like + # result.add_node_from(data.nodes.items()) possible but + # for custom node_attr_dict_factory which may be hashable + # will be unexpected behavior + for n, dd in data.nodes.items(): + result._node[n].update(dd) + return result + except Exception as err: + raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err + + # pygraphviz agraph + if hasattr(data, "is_strict"): + try: + return nx.nx_agraph.from_agraph(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err + + # dict of dicts/lists + if isinstance(data, dict): + try: + return from_dict_of_dicts( + data, create_using=create_using, multigraph_input=multigraph_input + ) + except Exception as err1: + if multigraph_input is True: + raise nx.NetworkXError( + f"converting multigraph_input raised:\n{type(err1)}: {err1}" + ) + try: + return from_dict_of_lists(data, create_using=create_using) + except Exception as err2: + raise TypeError("Input is not known type.") from err2 + + # Pandas DataFrame + try: + import pandas as pd + + if isinstance(data, pd.DataFrame): + if data.shape[0] == data.shape[1]: + try: + return nx.from_pandas_adjacency(data, create_using=create_using) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame adjacency matrix." + raise nx.NetworkXError(msg) from err + else: + try: + return nx.from_pandas_edgelist( + data, edge_attr=True, create_using=create_using + ) + except Exception as err: + msg = "Input is not a correct Pandas DataFrame edge-list." 
+ raise nx.NetworkXError(msg) from err + except ImportError: + warnings.warn("pandas not found, skipping conversion test.", ImportWarning) + + # numpy array + try: + import numpy as np + + if isinstance(data, np.ndarray): + try: + return nx.from_numpy_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + f"Failed to interpret array as an adjacency matrix." + ) from err + except ImportError: + warnings.warn("numpy not found, skipping conversion test.", ImportWarning) + + # scipy sparse array - any format + try: + import scipy + + if hasattr(data, "format"): + try: + return nx.from_scipy_sparse_array(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError( + "Input is not a correct scipy sparse array type." + ) from err + except ImportError: + warnings.warn("scipy not found, skipping conversion test.", ImportWarning) + + # Note: most general check - should remain last in order of execution + # Includes containers (e.g. list, set, dict, etc.), generators, and + # iterators (e.g. itertools.chain) of edges + + if isinstance(data, (Collection, Generator, Iterator)): + try: + return from_edgelist(data, create_using=create_using) + except Exception as err: + raise nx.NetworkXError("Input is not a valid edge list") from err + + raise nx.NetworkXError("Input is not a known data type for conversion.") + + +@nx._dispatch +def to_dict_of_lists(G, nodelist=None): + """Returns adjacency representation of graph as a dictionary of lists. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + Notes + ----- + Completely ignores edge data for MultiGraph and MultiDiGraph. + + """ + if nodelist is None: + nodelist = G + + d = {} + for n in nodelist: + d[n] = [nbr for nbr in G.neighbors(n) if nbr in nodelist] + return d + + +@nx._dispatch(graphs=None) +def from_dict_of_lists(d, create_using=None): + """Returns a graph from a dictionary of lists. + + Parameters + ---------- + d : dictionary of lists + A dictionary of lists adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> dol = {0: [1]} # single edge (0,1) + >>> G = nx.from_dict_of_lists(dol) + + or + + >>> G = nx.Graph(dol) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + if G.is_multigraph() and not G.is_directed(): + # a dict_of_lists can't show multiedges. BUT for undirected graphs, + # each edge shows up twice in the dict_of_lists. + # So we need to treat this case separately. + seen = {} + for node, nbrlist in d.items(): + for nbr in nbrlist: + if nbr not in seen: + G.add_edge(node, nbr) + seen[node] = 1 # don't allow reverse edge to show up + else: + G.add_edges_from( + ((node, nbr) for node, nbrlist in d.items() for nbr in nbrlist) + ) + return G + + +def to_dict_of_dicts(G, nodelist=None, edge_data=None): + """Returns adjacency representation of graph as a dictionary of dictionaries. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + edge_data : scalar, optional + If provided, the value of the dictionary will be set to `edge_data` for + all edges. Usual values could be `1` or `True`. If `edge_data` is + `None` (the default), the edgedata in `G` is used, resulting in a + dict-of-dict-of-dicts. 
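A small sketch of the conversion entry points covered so far; the dicts here are illustrative, not taken from the file, and only show that the `Graph` constructor and the explicit `from_*` helpers agree.

```python
import networkx as nx

d = {0: {1: {"weight": 1}}}
G1 = nx.Graph(d)                      # constructor guesses dict-of-dicts
G2 = nx.from_dict_of_dicts(d)         # explicit equivalent
G3 = nx.from_dict_of_lists({0: [1]})  # same topology, no edge data
assert list(G1.edges(data=True)) == list(G2.edges(data=True))
assert list(G3.edges) == [(0, 1)]
```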
If `G` is a MultiGraph, the result will be a + dict-of-dict-of-dict-of-dicts. See Notes for an approach to customize + handling edge data. `edge_data` should *not* be a container. + + Returns + ------- + dod : dict + A nested dictionary representation of `G`. Note that the level of + nesting depends on the type of `G` and the value of `edge_data` + (see Examples). + + See Also + -------- + from_dict_of_dicts, to_dict_of_lists + + Notes + ----- + For a more custom approach to handling edge data, try:: + + dod = { + n: { + nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items() + } + for n, nbrdict in G.adj.items() + } + + where `custom` returns the desired edge data for each edge between `n` and + `nbr`, given existing edge data `dd`. + + Examples + -------- + >>> G = nx.path_graph(3) + >>> nx.to_dict_of_dicts(G) + {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}} + + Edge data is preserved by default (``edge_data=None``), resulting + in dict-of-dict-of-dicts where the innermost dictionary contains the + edge data: + + >>> G = nx.Graph() + >>> G.add_edges_from( + ... [ + ... (0, 1, {'weight': 1.0}), + ... (1, 2, {'weight': 2.0}), + ... (2, 0, {'weight': 1.0}), + ... ] + ... ) + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'weight': 1.0}, 2: {'weight': 1.0}}, + 1: {0: {'weight': 1.0}, 2: {'weight': 2.0}}, + 2: {1: {'weight': 2.0}, 0: {'weight': 1.0}}} + >>> d[1][2]['weight'] + 2.0 + + If `edge_data` is not `None`, edge data in the original graph (if any) is + replaced: + + >>> d = nx.to_dict_of_dicts(G, edge_data=1) + >>> d + {0: {1: 1, 2: 1}, 1: {0: 1, 2: 1}, 2: {1: 1, 0: 1}} + >>> d[1][2] + 1 + + This also applies to MultiGraphs: edge data is preserved by default: + + >>> G = nx.MultiGraph() + >>> G.add_edge(0, 1, key='a', weight=1.0) + 'a' + >>> G.add_edge(0, 1, key='b', weight=5.0) + 'b' + >>> d = nx.to_dict_of_dicts(G) + >>> d # doctest: +SKIP + {0: {1: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}, + 1: {0: {'a': {'weight': 1.0}, 'b': {'weight': 5.0}}}} + >>> d[0][1]['b']['weight'] + 5.0 + + But multi edge data is lost if `edge_data` is not `None`: + + >>> d = nx.to_dict_of_dicts(G, edge_data=10) + >>> d + {0: {1: 10}, 1: {0: 10}} + """ + dod = {} + if nodelist is None: + if edge_data is None: + for u, nbrdict in G.adjacency(): + dod[u] = nbrdict.copy() + else: # edge_data is not None + for u, nbrdict in G.adjacency(): + dod[u] = dod.fromkeys(nbrdict, edge_data) + else: # nodelist is not None + if edge_data is None: + for u in nodelist: + dod[u] = {} + for v, data in ((v, data) for v, data in G[u].items() if v in nodelist): + dod[u][v] = data + else: # nodelist and edge_data are not None + for u in nodelist: + dod[u] = {} + for v in (v for v in G[u] if v in nodelist): + dod[u][v] = edge_data + return dod + + +@nx._dispatch(graphs=None) +def from_dict_of_dicts(d, create_using=None, multigraph_input=False): + """Returns a graph from a dictionary of dictionaries. + + Parameters + ---------- + d : dictionary of dictionaries + A dictionary of dictionaries adjacency representation. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + multigraph_input : bool (default False) + When True, the dict `d` is assumed + to be a dict-of-dict-of-dict-of-dict structure keyed by + node to neighbor to edge keys to edge data for multi-edges. + Otherwise this routine assumes dict-of-dict-of-dict keyed by + node to neighbor to edge data. 
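A hedged sketch of the `multigraph_input` flag just described: with the flag set, the nested dict is read as node -> neighbor -> edge key -> data, so the single neighbor entry below yields two parallel edges.

```python
import networkx as nx

dodod = {0: {1: {"a": {"weight": 1}, "b": {"weight": 5}}}}
M = nx.from_dict_of_dicts(dodod, create_using=nx.MultiGraph, multigraph_input=True)
assert M.number_of_edges(0, 1) == 2
assert M[0][1]["b"]["weight"] == 5
```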
+ + Examples + -------- + >>> dod = {0: {1: {"weight": 1}}} # single edge (0,1) + >>> G = nx.from_dict_of_dicts(dod) + + or + + >>> G = nx.Graph(dod) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_nodes_from(d) + # does dict d represent a MultiGraph or MultiDiGraph? + if multigraph_input: + if G.is_directed(): + if G.is_multigraph(): + G.add_edges_from( + (u, v, key, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: + G.add_edges_from( + (u, v, data) + for u, nbrs in d.items() + for v, datadict in nbrs.items() + for key, data in datadict.items() + ) + else: # Undirected + if G.is_multigraph(): + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, key, data) for key, data in datadict.items() + ) + seen.add((v, u)) + else: + seen = set() # don't add both directions of undirected graph + for u, nbrs in d.items(): + for v, datadict in nbrs.items(): + if (u, v) not in seen: + G.add_edges_from( + (u, v, data) for key, data in datadict.items() + ) + seen.add((v, u)) + + else: # not a multigraph to multigraph transfer + if G.is_multigraph() and not G.is_directed(): + # d can have both representations u-v, v-u in dict. Only add one. + # We don't need this check for digraphs since we add both directions, + # or for Graph() since it is done implicitly (parallel edges not allowed) + seen = set() + for u, nbrs in d.items(): + for v, data in nbrs.items(): + if (u, v) not in seen: + G.add_edge(u, v, key=0) + G[u][v][0].update(data) + seen.add((v, u)) + else: + G.add_edges_from( + ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items()) + ) + return G + + +@nx._dispatch(preserve_edge_attrs=True) +def to_edgelist(G, nodelist=None): + """Returns a list of edges in the graph. + + Parameters + ---------- + G : graph + A NetworkX graph + + nodelist : list + Use only nodes specified in nodelist + + """ + if nodelist is None: + return G.edges(data=True) + return G.edges(nodelist, data=True) + + +@nx._dispatch(graphs=None) +def from_edgelist(edgelist, create_using=None): + """Returns a graph from a list of edges. + + Parameters + ---------- + edgelist : list or iterator + Edge tuples + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Examples + -------- + >>> edgelist = [(0, 1)] # single edge (0,1) + >>> G = nx.from_edgelist(edgelist) + + or + + >>> G = nx.Graph(edgelist) # use Graph constructor + + """ + G = nx.empty_graph(0, create_using) + G.add_edges_from(edgelist) + return G diff --git a/MLPY/Lib/site-packages/networkx/convert_matrix.py b/MLPY/Lib/site-packages/networkx/convert_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..a8496e7cb4f91c944ce5af94ff36e1637a7affd9 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/convert_matrix.py @@ -0,0 +1,1200 @@ +"""Functions to convert NetworkX graphs to and from common data containers +like numpy arrays, scipy sparse arrays, and pandas DataFrames. + +The preferred way of converting data to a NetworkX graph is through the +graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph` +function which attempts to guess the input type and convert it automatically. 
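A round-trip sketch for the `to_edgelist`/`from_edgelist` pair defined above; it uses `networkx.utils.edges_equal`, which this test suite also imports, to compare edges with their data.

```python
import networkx as nx
from networkx.utils import edges_equal

G = nx.path_graph(3)
G.edges[0, 1]["weight"] = 2.5
# to_edgelist() yields (u, v, data) triples that from_edgelist() accepts back
H = nx.from_edgelist(nx.to_edgelist(G))
assert edges_equal(G.edges(data=True), H.edges(data=True))
```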
+ +Examples +-------- +Create a 10 node random graph from a numpy array + +>>> import numpy as np +>>> rng = np.random.default_rng() +>>> a = rng.integers(low=0, high=2, size=(10, 10)) +>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph) + +or equivalently: + +>>> DG = nx.DiGraph(a) + +which calls `from_numpy_array` internally based on the type of ``a``. + +See Also +-------- +nx_agraph, nx_pydot +""" + +import itertools +from collections import defaultdict + +import networkx as nx +from networkx.utils import not_implemented_for + +__all__ = [ + "from_pandas_adjacency", + "to_pandas_adjacency", + "from_pandas_edgelist", + "to_pandas_edgelist", + "from_scipy_sparse_array", + "to_scipy_sparse_array", + "from_numpy_array", + "to_numpy_array", +] + + +@nx._dispatch(edge_attrs="weight") +def to_pandas_adjacency( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + multigraph_weight : {sum, min, max}, optional + An operator that determines how weights in multigraphs are handled. + The default is to sum the weights of the multiple edges. + + weight : string or None, optional + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. + + nonedge : float, optional + The matrix values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are matrix values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as nan. + + Returns + ------- + df : Pandas DataFrame + Graph adjacency matrix + + Notes + ----- + For directed graphs, entry i,j corresponds to an edge from i to j. + + The DataFrame entries are assigned to the weight edge attribute. When + an edge does not have a weight attribute, the value of the entry is set to + the number 1. For multiple (parallel) edges, the values of the entries + are determined by the 'multigraph_weight' parameter. The default is to + sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the matrix is built + from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). 
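A sketch of the pandas adjacency round trip (assumes pandas is installed; the labels and weights are illustrative): node labels become the DataFrame index/columns, and `from_pandas_adjacency` restores them on the way back.

```python
import networkx as nx

G = nx.Graph([("a", "b", {"weight": 2.0}), ("b", "c", {"weight": 3.0})])
df = nx.to_pandas_adjacency(G)
H = nx.from_pandas_adjacency(df)
assert set(H.edges) == set(G.edges)
assert H["a"]["b"]["weight"] == 2.0
```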
If the + alternate convention of doubling the edge weight is desired the + resulting Pandas DataFrame can be modified as follows: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> df = nx.to_pandas_adjacency(G, dtype=int) + >>> df + 1 + 1 1 + >>> df.values[np.diag_indices_from(df)] *= 2 + >>> df + 1 + 1 2 + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int) + 0 1 2 + 0 0 2 0 + 1 1 0 0 + 2 0 0 4 + + """ + import pandas as pd + + M = to_numpy_array( + G, + nodelist=nodelist, + dtype=dtype, + order=order, + multigraph_weight=multigraph_weight, + weight=weight, + nonedge=nonedge, + ) + if nodelist is None: + nodelist = list(G) + return pd.DataFrame(data=M, index=nodelist, columns=nodelist) + + +@nx._dispatch(graphs=None) +def from_pandas_adjacency(df, create_using=None): + r"""Returns a graph from Pandas DataFrame. + + The Pandas DataFrame is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + df : Pandas DataFrame + An adjacency matrix representation of a graph + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of df corresponds to an edge from i to j. + + If `df` has a single data type for each entry it will be converted to an + appropriate Python data type. + + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code: + + ``` + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + ``` + + If `df` has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_pandas_adjacency + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> df = pd.DataFrame([[1, 1], [2, 1]]) + >>> df + 0 1 + 0 1 1 + 1 2 1 + >>> G = nx.from_pandas_adjacency(df) + >>> G.name = "Graph from pandas adjacency matrix" + >>> print(G) + Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges + """ + + try: + df = df[df.index] + except Exception as err: + missing = list(set(df.index).difference(set(df.columns))) + msg = f"{missing} not in columns" + raise nx.NetworkXError("Columns must match Indices.", msg) from err + + A = df.values + G = from_numpy_array(A, create_using=create_using) + + nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False) + return G + + +@nx._dispatch(preserve_edge_attrs=True) +def to_pandas_edgelist( + G, + source="source", + target="target", + nodelist=None, + dtype=None, + edge_key=None, +): + """Returns the graph edge list as a Pandas DataFrame. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the Pandas DataFrame. + + source : str or int, optional + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int, optional + A valid column name (string or integer) for the target nodes (for the + directed case). 
+ + nodelist : list, optional + Use only nodes specified in nodelist + + dtype : dtype, default None + Use to create the DataFrame. Data type to force. + Only a single dtype is allowed. If None, infer. + + edge_key : str or int or None, optional (default=None) + A valid column name (string or integer) for the edge keys (for the + multigraph case). If None, edge keys are not stored in the DataFrame. + + Returns + ------- + df : Pandas DataFrame + Graph edge list + + Examples + -------- + >>> G = nx.Graph( + ... [ + ... ("A", "B", {"cost": 1, "weight": 7}), + ... ("C", "E", {"cost": 9, "weight": 10}), + ... ] + ... ) + >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"]) + >>> df[["source", "target", "cost", "weight"]] + source target cost weight + 0 A B 1 7 + 1 C E 9 10 + + >>> G = nx.MultiGraph([('A', 'B', {'cost': 1}), ('A', 'B', {'cost': 9})]) + >>> df = nx.to_pandas_edgelist(G, nodelist=['A', 'C'], edge_key='ekey') + >>> df[['source', 'target', 'cost', 'ekey']] + source target cost ekey + 0 A B 1 0 + 1 A B 9 1 + + """ + import pandas as pd + + if nodelist is None: + edgelist = G.edges(data=True) + else: + edgelist = G.edges(nodelist, data=True) + source_nodes = [s for s, _, _ in edgelist] + target_nodes = [t for _, t, _ in edgelist] + + all_attrs = set().union(*(d.keys() for _, _, d in edgelist)) + if source in all_attrs: + raise nx.NetworkXError(f"Source name {source!r} is an edge attr name") + if target in all_attrs: + raise nx.NetworkXError(f"Target name {target!r} is an edge attr name") + + nan = float("nan") + edge_attr = {k: [d.get(k, nan) for _, _, d in edgelist] for k in all_attrs} + + if G.is_multigraph() and edge_key is not None: + if edge_key in all_attrs: + raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name") + edge_keys = [k for _, _, k in G.edges(keys=True)] + edgelistdict = {source: source_nodes, target: target_nodes, edge_key: edge_keys} + else: + edgelistdict = {source: source_nodes, target: target_nodes} + + edgelistdict.update(edge_attr) + return pd.DataFrame(edgelistdict, dtype=dtype) + + +@nx._dispatch(graphs=None) +def from_pandas_edgelist( + df, + source="source", + target="target", + edge_attr=None, + create_using=None, + edge_key=None, +): + """Returns a graph from Pandas DataFrame containing an edge list. + + The Pandas DataFrame should contain at least two columns of node names and + zero or more columns of edge attributes. Each row will be processed as one + edge instance. + + Note: This function iterates over DataFrame.values, which is not + guaranteed to retain the data type across columns in the row. This is only + a problem if your row is entirely numeric and a mix of ints and floats. In + that case, all values will be returned as floats. See the + DataFrame.iterrows documentation for an example. + + Parameters + ---------- + df : Pandas DataFrame + An edge list representation of a graph + + source : str or int + A valid column name (string or integer) for the source nodes (for the + directed case). + + target : str or int + A valid column name (string or integer) for the target nodes (for the + directed case). + + edge_attr : str or int, iterable, True, or None + A valid column name (str or int) or iterable of column names that are + used to retrieve items and add them to the graph as edge attributes. + If `True`, all of the remaining columns will be added. + If `None`, no edge attributes are added to the graph. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. 
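A compact sketch restating the MultiGraph doctest above in runnable form (assumes pandas): keeping edge keys in an explicit column keeps parallel edges distinguishable in the exported frame.

```python
import networkx as nx

G = nx.MultiGraph([("A", "B", {"cost": 1}), ("A", "B", {"cost": 9})])
df = nx.to_pandas_edgelist(G, edge_key="ekey")
assert list(df["ekey"]) == [0, 1]   # default integer keys of the two edges
assert list(df["cost"]) == [1, 9]
```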
If graph instance, then cleared before populated. + + edge_key : str or None, optional (default=None) + A valid column name for the edge keys (for a MultiGraph). The values in + this column are used for the edge keys when adding edges if create_using + is a multigraph. + + If you have node attributes stored in a separate dataframe `df_nodes`, + you can load those attributes to the graph `G` using the following code: + + ``` + df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]}) + G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows()) + ``` + + See Also + -------- + to_pandas_edgelist + + Examples + -------- + Simple integer weights on edges: + + >>> import pandas as pd + >>> pd.options.display.max_columns = 20 + >>> import numpy as np + >>> rng = np.random.RandomState(seed=5) + >>> ints = rng.randint(1, 11, size=(3, 2)) + >>> a = ["A", "B", "C"] + >>> b = ["D", "A", "E"] + >>> df = pd.DataFrame(ints, columns=["weight", "cost"]) + >>> df[0] = a + >>> df["b"] = b + >>> df[["weight", "cost", 0, "b"]] + weight cost 0 b + 0 4 7 A D + 1 7 1 B A + 2 10 9 C E + >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"]) + >>> G["E"]["C"]["weight"] + 10 + >>> G["E"]["C"]["cost"] + 9 + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2], + ... "target": [2, 2, 3], + ... "weight": [3, 4, 5], + ... "color": ["red", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist(edges, edge_attr=True) + >>> G[0][2]["color"] + 'red' + + Build multigraph with custom keys: + + >>> edges = pd.DataFrame( + ... { + ... "source": [0, 1, 2, 0], + ... "target": [2, 2, 3, 2], + ... "my_edge_key": ["A", "B", "C", "D"], + ... "weight": [3, 4, 5, 6], + ... "color": ["red", "blue", "blue", "blue"], + ... } + ... ) + >>> G = nx.from_pandas_edgelist( + ... edges, + ... edge_key="my_edge_key", + ... edge_attr=["weight", "color"], + ... create_using=nx.MultiGraph(), + ... 
) + >>> G[0][2] + AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}}) + + + """ + g = nx.empty_graph(0, create_using) + + if edge_attr is None: + g.add_edges_from(zip(df[source], df[target])) + return g + + reserved_columns = [source, target] + + # Additional columns requested + attr_col_headings = [] + attribute_data = [] + if edge_attr is True: + attr_col_headings = [c for c in df.columns if c not in reserved_columns] + elif isinstance(edge_attr, (list, tuple)): + attr_col_headings = edge_attr + else: + attr_col_headings = [edge_attr] + if len(attr_col_headings) == 0: + raise nx.NetworkXError( + f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}" + ) + + try: + attribute_data = zip(*[df[col] for col in attr_col_headings]) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_attr argument: {edge_attr}" + raise nx.NetworkXError(msg) from err + + if g.is_multigraph(): + # => append the edge keys from the df to the bundled data + if edge_key is not None: + try: + multigraph_edge_keys = df[edge_key] + attribute_data = zip(attribute_data, multigraph_edge_keys) + except (KeyError, TypeError) as err: + msg = f"Invalid edge_key argument: {edge_key}" + raise nx.NetworkXError(msg) from err + + for s, t, attrs in zip(df[source], df[target], attribute_data): + if edge_key is not None: + attrs, multigraph_edge_key = attrs + key = g.add_edge(s, t, key=multigraph_edge_key) + else: + key = g.add_edge(s, t) + + g[s][t][key].update(zip(attr_col_headings, attrs)) + else: + for s, t, attrs in zip(df[source], df[target], attribute_data): + g.add_edge(s, t) + g[s][t].update(zip(attr_col_headings, attrs)) + + return g + + +@nx._dispatch(edge_attrs="weight") +def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"): + """Returns the graph adjacency matrix as a SciPy sparse array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the sparse matrix. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is None, then the ordering is produced by G.nodes(). + + dtype : NumPy data-type, optional + A valid NumPy dtype used to initialize the array. If None, then the + NumPy default is used. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None then all edge weights are 1. + + format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'} + The type of the matrix to be returned (default 'csr'). For + some algorithms different implementations of sparse matrices + can perform better. See [1]_ for details. + + Returns + ------- + A : SciPy sparse array + Graph adjacency matrix. + + Notes + ----- + For directed graphs, matrix entry i,j corresponds to an edge from i to j. + + The matrix entries are populated using the edge attribute held in + parameter weight. When an edge does not have that attribute, the + value of the entry is 1. + + For multiple edges the matrix values are the sums of the edge weights. + + When `nodelist` does not contain every node in `G`, the adjacency matrix + is built from the subgraph of `G` that is induced by the nodes in + `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal matrix entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). 
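A sketch of the parallel-edge summing described in the Notes above (assumes scipy): two multigraph edges between the same pair collapse into one summed matrix entry.

```python
import networkx as nx

G = nx.MultiGraph()
G.add_edge(0, 1, weight=2)
G.add_edge(0, 1, weight=3)
A = nx.to_scipy_sparse_array(G)
assert A.toarray()[0, 1] == 5  # 2 + 3, mirrored at [1, 0] for undirected G
```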
If the + alternate convention of doubling the edge weight is desired the + resulting SciPy sparse array can be modified as follows: + + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_scipy_sparse_array(G) + >>> print(A.todense()) + [[1]] + >>> A.setdiag(A.diagonal() * 2) + >>> print(A.toarray()) + [[2]] + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> S = nx.to_scipy_sparse_array(G, nodelist=[0, 1, 2]) + >>> print(S.toarray()) + [[0 2 0] + [1 0 0] + [0 0 4]] + + References + ---------- + .. [1] Scipy Dev. References, "Sparse Matrices", + https://docs.scipy.org/doc/scipy/reference/sparse.html + """ + import scipy as sp + + if len(G) == 0: + raise nx.NetworkXError("Graph has no nodes or edges") + + if nodelist is None: + nodelist = list(G) + nlen = len(G) + else: + nlen = len(nodelist) + if nlen == 0: + raise nx.NetworkXError("nodelist has no nodes") + nodeset = set(G.nbunch_iter(nodelist)) + if nlen != len(nodeset): + for n in nodelist: + if n not in G: + raise nx.NetworkXError(f"Node {n} in nodelist is not in G") + raise nx.NetworkXError("nodelist contains duplicates.") + if nlen < len(G): + G = G.subgraph(nodelist) + + index = dict(zip(nodelist, range(nlen))) + coefficients = zip( + *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1)) + ) + try: + row, col, data = coefficients + except ValueError: + # there is no edge in the subgraph + row, col, data = [], [], [] + + if G.is_directed(): + A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype) + else: + # symmetrize matrix + d = data + data + r = row + col + c = col + row + # selfloop entries get double counted when symmetrizing + # so we subtract the data on the diagonal + selfloops = list(nx.selfloop_edges(G, data=weight, default=1)) + if selfloops: + diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops)) + d += diag_data + r += diag_index + c += diag_index + A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype) + try: + return A.asformat(format) + except ValueError as err: + raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err + + +def _csr_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Row** format to + an iterable of weighted edge triples. + + """ + nrows = A.shape[0] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(nrows): + for j in range(indptr[i], indptr[i + 1]): + yield i, int(indices[j]), data[j] + + +def _csc_gen_triples(A): + """Converts a SciPy sparse array in **Compressed Sparse Column** format to + an iterable of weighted edge triples. + + """ + ncols = A.shape[1] + data, indices, indptr = A.data, A.indices, A.indptr + for i in range(ncols): + for j in range(indptr[i], indptr[i + 1]): + yield int(indices[j]), i, data[j] + + +def _coo_gen_triples(A): + """Converts a SciPy sparse array in **Coordinate** format to an iterable + of weighted edge triples. + + """ + return ((int(i), int(j), d) for i, j, d in zip(A.row, A.col, A.data)) + + +def _dok_gen_triples(A): + """Converts a SciPy sparse array in **Dictionary of Keys** format to an + iterable of weighted edge triples. + + """ + for (r, c), v in A.items(): + yield r, c, v + + +def _generate_weighted_edges(A): + """Returns an iterable over (u, v, w) triples, where u and v are adjacent + vertices and w is the weight of the edge joining u and v. + + `A` is a SciPy sparse array (in any format). 
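+
+    A tiny illustrative sketch (not an exhaustive spec; the weight scalars
+    keep the array's dtype)::
+
+        import scipy as sp
+        A = sp.sparse.csr_array([[0, 2], [0, 0]])
+        list(_generate_weighted_edges(A))
+        # -> [(0, 1, 2)]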
+ + """ + if A.format == "csr": + return _csr_gen_triples(A) + if A.format == "csc": + return _csc_gen_triples(A) + if A.format == "dok": + return _dok_gen_triples(A) + # If A is in any other format (including COO), convert it to COO format. + return _coo_gen_triples(A.tocoo()) + + +@nx._dispatch(graphs=None) +def from_scipy_sparse_array( + A, parallel_edges=False, create_using=None, edge_attribute="weight" +): + """Creates a new graph from an adjacency matrix given as a SciPy sparse + array. + + Parameters + ---------- + A: scipy.sparse array + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer matrix, then entry *(i, j)* in the matrix is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the matrix are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attribute: string + Name of edge attribute to store matrix numeric value. The data will + have the same type as the matrix entry (int, float, (real,imag)). + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. + + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (constructed from `create_using`) with parallel edges. + In this case, `edge_attribute` will be ignored. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the matrix `A` will be added to the + graph. + + Examples + -------- + >>> import scipy as sp + >>> A = sp.sparse.eye(2, 2, 1) + >>> G = nx.from_scipy_sparse_array(A) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the matrix has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = sp.sparse.csr_array([[1, 1], [1, 2]]) + >>> G = nx.from_scipy_sparse_array( + ... A, parallel_edges=True, create_using=nx.MultiGraph + ... ) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + """ + G = nx.empty_graph(0, create_using) + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Create an iterable over (u, v, w) triples and for each triple, add an + # edge from u to v with weight w. + triples = _generate_weighted_edges(A) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. 
Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. + if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_weighted_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_weighted_edges_from(triples, weight=edge_attribute) + return G + + +@nx._dispatch(edge_attrs="weight") # edge attrs may also be obtained from `dtype` +def to_numpy_array( + G, + nodelist=None, + dtype=None, + order=None, + multigraph_weight=sum, + weight="weight", + nonedge=0.0, +): + """Returns the graph adjacency matrix as a NumPy array. + + Parameters + ---------- + G : graph + The NetworkX graph used to construct the NumPy array. + + nodelist : list, optional + The rows and columns are ordered according to the nodes in `nodelist`. + If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``. + + dtype : NumPy data type, optional + A NumPy data type used to initialize the array. If None, then the NumPy + default is used. The dtype can be structured if `weight=None`, in which + case the dtype field names are used to look up edge attributes. The + result is a structured array where each named field in the dtype + corresponds to the adjacency for that edge attribute. See examples for + details. + + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. If None, then the NumPy default + is used. + + multigraph_weight : callable, optional + An function that determines how weights in multigraphs are handled. + The function should accept a sequence of weights and return a single + value. The default is to sum the weights of the multiple edges. + + weight : string or None optional (default = 'weight') + The edge attribute that holds the numerical value used for + the edge weight. If an edge does not have that attribute, then the + value 1 is used instead. `weight` must be ``None`` if a structured + dtype is used. + + nonedge : array_like (default = 0.0) + The value used to represent non-edges in the adjacency matrix. + The array values corresponding to nonedges are typically set to zero. + However, this could be undesirable if there are array values + corresponding to actual edges that also have the value zero. If so, + one might prefer nonedges to have some other value, such as ``nan``. + + Returns + ------- + A : NumPy ndarray + Graph adjacency matrix + + Raises + ------ + NetworkXError + If `dtype` is a structured dtype and `G` is a multigraph + ValueError + If `dtype` is a structured dtype and `weight` is not `None` + + See Also + -------- + from_numpy_array + + Notes + ----- + For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``. 
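+
+    For instance (a short illustrative sketch), a lone directed edge
+    ``0 -> 1`` produces a nonzero entry only at row 0, column 1::
+
+        G = nx.DiGraph([(0, 1)])
+        nx.to_numpy_array(G)
+        # array([[0., 1.],
+        #        [0., 0.]])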
+ + Entries in the adjacency matrix are given by the `weight` edge attribute. + When an edge does not have a weight attribute, the value of the entry is + set to the number 1. For multiple (parallel) edges, the values of the + entries are determined by the `multigraph_weight` parameter. The default is + to sum the weight attributes for each of the parallel edges. + + When `nodelist` does not contain every node in `G`, the adjacency matrix is + built from the subgraph of `G` that is induced by the nodes in `nodelist`. + + The convention used for self-loop edges in graphs is to assign the + diagonal array entry value to the weight attribute of the edge + (or the number 1 if the edge has no weight attribute). If the + alternate convention of doubling the edge weight is desired the + resulting NumPy array can be modified as follows: + + >>> import numpy as np + >>> G = nx.Graph([(1, 1)]) + >>> A = nx.to_numpy_array(G) + >>> A + array([[1.]]) + >>> A[np.diag_indices_from(A)] *= 2 + >>> A + array([[2.]]) + + Examples + -------- + >>> G = nx.MultiDiGraph() + >>> G.add_edge(0, 1, weight=2) + 0 + >>> G.add_edge(1, 0) + 0 + >>> G.add_edge(2, 2, weight=3) + 0 + >>> G.add_edge(2, 2) + 1 + >>> nx.to_numpy_array(G, nodelist=[0, 1, 2]) + array([[0., 2., 0.], + [1., 0., 0.], + [0., 0., 4.]]) + + When `nodelist` argument is used, nodes of `G` which do not appear in the `nodelist` + and their edges are not included in the adjacency matrix. Here is an example: + + >>> G = nx.Graph() + >>> G.add_edge(3, 1) + >>> G.add_edge(2, 0) + >>> G.add_edge(2, 1) + >>> G.add_edge(3, 0) + >>> nx.to_numpy_array(G, nodelist=[1, 2, 3]) + array([[0., 1., 1.], + [1., 0., 0.], + [1., 0., 0.]]) + + This function can also be used to create adjacency matrices for multiple + edge attributes with structured dtypes: + + >>> G = nx.Graph() + >>> G.add_edge(0, 1, weight=10) + >>> G.add_edge(1, 2, cost=5) + >>> G.add_edge(2, 3, weight=3, cost=-4.0) + >>> dtype = np.dtype([("weight", int), ("cost", float)]) + >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None) + >>> A["weight"] + array([[ 0, 10, 0, 0], + [10, 0, 1, 0], + [ 0, 1, 0, 3], + [ 0, 0, 3, 0]]) + >>> A["cost"] + array([[ 0., 1., 0., 0.], + [ 1., 0., 5., 0.], + [ 0., 5., 0., -4.], + [ 0., 0., -4., 0.]]) + + As stated above, the argument "nonedge" is useful especially when there are + actually edges with weight 0 in the graph. Setting a nonedge value different than 0, + makes it much clearer to differentiate such 0-weighted edges and actual nonedge values. + + >>> G = nx.Graph() + >>> G.add_edge(3, 1, weight=2) + >>> G.add_edge(2, 0, weight=0) + >>> G.add_edge(2, 1, weight=0) + >>> G.add_edge(3, 0, weight=1) + >>> nx.to_numpy_array(G, nonedge=-1.) 
+ array([[-1., 2., -1., 1.], + [ 2., -1., 0., -1.], + [-1., 0., -1., 0.], + [ 1., -1., 0., -1.]]) + """ + import numpy as np + + if nodelist is None: + nodelist = list(G) + nlen = len(nodelist) + + # Input validation + nodeset = set(nodelist) + if nodeset - set(G): + raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G") + if len(nodeset) < nlen: + raise nx.NetworkXError("nodelist contains duplicates.") + + A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order) + + # Corner cases: empty nodelist or graph without any edges + if nlen == 0 or G.number_of_edges() == 0: + return A + + # If dtype is structured and weight is None, use dtype field names as + # edge attributes + edge_attrs = None # Only single edge attribute by default + if A.dtype.names: + if weight is None: + edge_attrs = dtype.names + else: + raise ValueError( + "Specifying `weight` not supported for structured dtypes\n." + "To create adjacency matrices from structured dtypes, use `weight=None`." + ) + + # Map nodes to row/col in matrix + idx = dict(zip(nodelist, range(nlen))) + if len(nodelist) < len(G): + G = G.subgraph(nodelist).copy() + + # Collect all edge weights and reduce with `multigraph_weights` + if G.is_multigraph(): + if edge_attrs: + raise nx.NetworkXError( + "Structured arrays are not supported for MultiGraphs" + ) + d = defaultdict(list) + for u, v, wt in G.edges(data=weight, default=1.0): + d[(idx[u], idx[v])].append(wt) + i, j = np.array(list(d.keys())).T # indices + wts = [multigraph_weight(ws) for ws in d.values()] # reduced weights + else: + i, j, wts = [], [], [] + + # Special branch: multi-attr adjacency from structured dtypes + if edge_attrs: + # Extract edges with all data + for u, v, data in G.edges(data=True): + i.append(idx[u]) + j.append(idx[v]) + wts.append(data) + # Map each attribute to the appropriate named field in the + # structured dtype + for attr in edge_attrs: + attr_data = [wt.get(attr, 1.0) for wt in wts] + A[attr][i, j] = attr_data + if not G.is_directed(): + A[attr][j, i] = attr_data + return A + + for u, v, wt in G.edges(data=weight, default=1.0): + i.append(idx[u]) + j.append(idx[v]) + wts.append(wt) + + # Set array values with advanced indexing + A[i, j] = wts + if not G.is_directed(): + A[j, i] = wts + + return A + + +@nx._dispatch(graphs=None) +def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"): + """Returns a graph from a 2D NumPy array. + + The 2D NumPy array is interpreted as an adjacency matrix for the graph. + + Parameters + ---------- + A : a 2D numpy.ndarray + An adjacency matrix representation of a graph + + parallel_edges : Boolean + If this is True, `create_using` is a multigraph, and `A` is an + integer array, then entry *(i, j)* in the array is interpreted as the + number of parallel edges joining vertices *i* and *j* in the graph. + If it is False, then the entries in the array are interpreted as + the weight of a single edge joining the vertices. + + create_using : NetworkX graph constructor, optional (default=nx.Graph) + Graph type to create. If graph instance, then cleared before populated. + + edge_attr : String, optional (default="weight") + The attribute to which the array values are assigned on each edge. If + it is None, edge attributes will not be assigned. + + Notes + ----- + For directed graphs, explicitly mention create_using=nx.DiGraph, + and entry i,j of A corresponds to an edge from i to j. 
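+
+    For instance (an illustrative sketch)::
+
+        A = np.array([[0, 1], [0, 0]])
+        G = nx.from_numpy_array(A, create_using=nx.DiGraph)
+        list(G.edges(data=True))
+        # [(0, 1, {'weight': 1})]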
+ + If `create_using` is :class:`networkx.MultiGraph` or + :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the + entries of `A` are of type :class:`int`, then this function returns a + multigraph (of the same type as `create_using`) with parallel edges. + + If `create_using` indicates an undirected multigraph, then only the edges + indicated by the upper triangle of the array `A` will be added to the + graph. + + If `edge_attr` is Falsy (False or None), edge attributes will not be + assigned, and the array data will be treated like a binary mask of + edge presence or absence. Otherwise, the attributes will be assigned + as follows: + + If the NumPy array has a single data type for each array entry it + will be converted to an appropriate Python data type. + + If the NumPy array has a user-specified compound data type the names + of the data fields will be used as attribute keys in the resulting + NetworkX graph. + + See Also + -------- + to_numpy_array + + Examples + -------- + Simple integer weights on edges: + + >>> import numpy as np + >>> A = np.array([[1, 1], [2, 1]]) + >>> G = nx.from_numpy_array(A) + >>> G.edges(data=True) + EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})]) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is False, then the entries will be treated + as weights for edges joining the nodes (without creating parallel edges): + + >>> A = np.array([[1, 1], [1, 2]]) + >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph) + >>> G[1][1] + AtlasView({0: {'weight': 2}}) + + If `create_using` indicates a multigraph and the array has only integer + entries and `parallel_edges` is True, then the entries will be treated + as the number of parallel edges joining those two vertices: + + >>> A = np.array([[1, 1], [1, 2]]) + >>> temp = nx.MultiGraph() + >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp) + >>> G[1][1] + AtlasView({0: {'weight': 1}, 1: {'weight': 1}}) + + User defined compound data type on edges: + + >>> dt = [("weight", float), ("cost", int)] + >>> A = np.array([[(1.0, 2)]], dtype=dt) + >>> G = nx.from_numpy_array(A) + >>> G.edges() + EdgeView([(0, 0)]) + >>> G[0][0]["cost"] + 2 + >>> G[0][0]["weight"] + 1.0 + + """ + kind_to_python_type = { + "f": float, + "i": int, + "u": int, + "b": bool, + "c": complex, + "S": str, + "U": str, + "V": "void", + } + G = nx.empty_graph(0, create_using) + if A.ndim != 2: + raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}") + n, m = A.shape + if n != m: + raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}") + dt = A.dtype + try: + python_type = kind_to_python_type[dt.kind] + except Exception as err: + raise TypeError(f"Unknown numpy data type: {dt}") from err + + # Make sure we get even the isolated nodes of the graph. + G.add_nodes_from(range(n)) + # Get a list of all the entries in the array with nonzero entries. These + # coordinates become edges in the graph. (convert to int from np.int64) + edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero())) + # handle numpy constructed data type + if python_type == "void": + # Sort the fields by their offset, then by dtype, then by name. 
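+        # (Offset order matches the memory layout of each structured entry,
+        #  so zipping the sorted fields with the values in A[u, v] below
+        #  pairs every field name with its own value.)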
+ fields = sorted( + (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items() + ) + triples = ( + ( + u, + v, + {} + if edge_attr in [False, None] + else { + name: kind_to_python_type[dtype.kind](val) + for (_, dtype, name), val in zip(fields, A[u, v]) + }, + ) + for u, v in edges + ) + # If the entries in the adjacency matrix are integers, the graph is a + # multigraph, and parallel_edges is True, then create parallel edges, each + # with weight 1, for each entry in the adjacency matrix. Otherwise, create + # one edge for each positive entry in the adjacency matrix and set the + # weight of that edge to be the entry in the matrix. + elif python_type is int and G.is_multigraph() and parallel_edges: + chain = itertools.chain.from_iterable + # The following line is equivalent to: + # + # for (u, v) in edges: + # for d in range(A[u, v]): + # G.add_edge(u, v, weight=1) + # + if edge_attr in [False, None]: + triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges) + else: + triples = chain( + ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges + ) + else: # basic data type + if edge_attr in [False, None]: + triples = ((u, v, {}) for u, v in edges) + else: + triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges) + # If we are creating an undirected multigraph, only add the edges from the + # upper triangle of the matrix. Otherwise, add all the edges. This relies + # on the fact that the vertices created in the + # `_generated_weighted_edges()` function are actually the row/column + # indices for the matrix `A`. + # + # Without this check, we run into a problem where each edge is added twice + # when `G.add_edges_from()` is invoked below. + if G.is_multigraph() and not G.is_directed(): + triples = ((u, v, d) for u, v, d in triples if u <= v) + G.add_edges_from(triples) + return G diff --git a/MLPY/Lib/site-packages/networkx/drawing/__init__.py b/MLPY/Lib/site-packages/networkx/drawing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f53309d4da23a445bcce8cb7570a6de364452b5 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/__init__.py @@ -0,0 +1,7 @@ +# graph drawing and interface to graphviz + +from .layout import * +from .nx_latex import * +from .nx_pylab import * +from . import nx_agraph +from . 
import nx_pydot diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/__init__.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbfae3b06e050f4b40bc60e56636ff02d02614b7 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/__init__.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/layout.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/layout.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d00ecba50da2e42633b819970309ee971cec5ab1 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/layout.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c257676707b1e9995fd7ce422181f6dadd42c98b Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_agraph.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..390235716b67427f3423814a5df5663e37e53c15 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_latex.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..145c783ad0e37907fae800779a7ccfa097d76fe4 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pydot.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-39.pyc b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cb3bc00bee8752a0fdbe4a3325ad62ba96710a8 Binary files /dev/null and b/MLPY/Lib/site-packages/networkx/drawing/__pycache__/nx_pylab.cpython-39.pyc differ diff --git a/MLPY/Lib/site-packages/networkx/drawing/layout.py b/MLPY/Lib/site-packages/networkx/drawing/layout.py new file mode 100644 index 0000000000000000000000000000000000000000..fa120d670748a67cb536f48dbba20081669b96a4 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/layout.py @@ -0,0 +1,1297 @@ +""" +****** +Layout +****** + +Node positioning algorithms for graph drawing. + +For `random_layout()` the possible resulting shape +is a square of side [0, scale] (default: [0, 1]) +Changing `center` shifts the layout by that amount. + +For the other layout routines, the extent is +[center - scale, center + scale] (default: [-1, 1]). + +Warning: Most layout routines have only been tested in 2-dimensions. + +""" +import networkx as nx +from networkx.utils import np_random_state + +__all__ = [ + "bipartite_layout", + "circular_layout", + "kamada_kawai_layout", + "random_layout", + "rescale_layout", + "rescale_layout_dict", + "shell_layout", + "spring_layout", + "spectral_layout", + "planar_layout", + "fruchterman_reingold_layout", + "spiral_layout", + "multipartite_layout", + "arf_layout", +] + + +def _process_params(G, center, dim): + # Some boilerplate code. 
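+    # (Shared by every layout function: coerce a bare node container into a
+    #  Graph, default `center` to the origin, and validate its length
+    #  against `dim`.)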
+ import numpy as np + + if not isinstance(G, nx.Graph): + empty_graph = nx.Graph() + empty_graph.add_nodes_from(G) + G = empty_graph + + if center is None: + center = np.zeros(dim) + else: + center = np.asarray(center) + + if len(center) != dim: + msg = "length of center coordinates must match dimension of layout" + raise ValueError(msg) + + return G, center + + +@np_random_state(3) +def random_layout(G, center=None, dim=2, seed=None): + """Position nodes uniformly at random in the unit square. + + For every node, a position is generated by choosing each of dim + coordinates uniformly at random on the interval [0.0, 1.0). + + NumPy (http://scipy.org) is required for this function. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. + If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.lollipop_graph(4, 3) + >>> pos = nx.random_layout(G) + + """ + import numpy as np + + G, center = _process_params(G, center, dim) + pos = seed.rand(len(G), dim) + center + pos = pos.astype(np.float32) + pos = dict(zip(G, pos)) + + return pos + + +def circular_layout(G, scale=1, center=None, dim=2): + # dim=2 only + """Position nodes on a circle. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + If dim>2, the remaining dimensions are set to zero + in the returned positions. + If dim<2, a ValueError is raised. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim < 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.circular_layout(G) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + import numpy as np + + if dim < 2: + raise ValueError("cannot handle dimensions < 2") + + G, center = _process_params(G, center, dim) + + paddims = max(0, (dim - 2)) + + if len(G) == 0: + pos = {} + elif len(G) == 1: + pos = {nx.utils.arbitrary_element(G): center} + else: + # Discard the extra angle since it matches 0 radians. + theta = np.linspace(0, 1, len(G) + 1)[:-1] * 2 * np.pi + theta = theta.astype(np.float32) + pos = np.column_stack( + [np.cos(theta), np.sin(theta), np.zeros((len(G), paddims))] + ) + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + + return pos + + +def shell_layout(G, nlist=None, rotate=None, scale=1, center=None, dim=2): + """Position nodes in concentric circles. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nlist : list of lists + List of node lists for each shell. 
+ + rotate : angle in radians (default=pi/len(nlist)) + Angle by which to rotate the starting position of each shell + relative to the starting position of the previous shell. + To recreate behavior before v2.5 use rotate=0. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> shells = [[0], [1, 2, 3]] + >>> pos = nx.shell_layout(G, shells) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G): center} + + if nlist is None: + # draw the whole graph in one shell + nlist = [list(G)] + + radius_bump = scale / len(nlist) + + if len(nlist[0]) == 1: + # single node at center + radius = 0.0 + else: + # else start at r=1 + radius = radius_bump + + if rotate is None: + rotate = np.pi / len(nlist) + first_theta = rotate + npos = {} + for nodes in nlist: + # Discard the last angle (endpoint=False) since 2*pi matches 0 radians + theta = ( + np.linspace(0, 2 * np.pi, len(nodes), endpoint=False, dtype=np.float32) + + first_theta + ) + pos = radius * np.column_stack([np.cos(theta), np.sin(theta)]) + center + npos.update(zip(nodes, pos)) + radius += radius_bump + first_theta += rotate + + return npos + + +def bipartite_layout( + G, nodes, align="vertical", scale=1, center=None, aspect_ratio=4 / 3 +): + """Position nodes in two straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + nodes : list or container + Nodes in one node set of the bipartite graph. + This set will be placed on left or top. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + aspect_ratio : number (default=4/3): + The ratio of the width to the height of the layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.bipartite.gnmk_random_graph(3, 5, 10, seed=123) + >>> top = nx.bipartite.sets(G)[0] + >>> pos = nx.bipartite_layout(G, top) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + """ + + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." 
+ raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + height = 1 + width = aspect_ratio * height + offset = (width / 2, height / 2) + + top = dict.fromkeys(nodes) + bottom = [v for v in G if v not in top] + nodes = list(top) + bottom + + left_xs = np.repeat(0, len(top)) + right_xs = np.repeat(width, len(bottom)) + left_ys = np.linspace(0, height, len(top)) + right_ys = np.linspace(0, height, len(bottom)) + + top_pos = np.column_stack([left_xs, left_ys]) - offset + bottom_pos = np.column_stack([right_xs, right_ys]) - offset + + pos = np.concatenate([top_pos, bottom_pos]) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + return pos + + +@np_random_state(10) +def spring_layout( + G, + k=None, + pos=None, + fixed=None, + iterations=50, + threshold=1e-4, + weight="weight", + scale=1, + center=None, + dim=2, + seed=None, +): + """Position nodes using Fruchterman-Reingold force-directed algorithm. + + The algorithm simulates a force-directed representation of the network + treating edges as springs holding nodes close, while treating nodes + as repelling objects, sometimes called an anti-gravity force. + Simulation continues until the positions are close to an equilibrium. + + There are some hard-coded values: minimal distance between + nodes (0.01) and "temperature" of 0.1 to ensure nodes don't fly away. + During the simulation, `k` helps determine the distance between nodes, + though `scale` and `center` determine the size and place after + rescaling occurs at the end of the simulation. + + Fixing some nodes doesn't allow them to move in the simulation. + It also turns off the rescaling feature at the simulation's end. + In addition, setting `scale` to `None` turns off rescaling. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + k : float (default=None) + Optimal distance between nodes. If None the distance is set to + 1/sqrt(n) where n is the number of nodes. Increase this value + to move nodes farther apart. + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + random initial positions. + + fixed : list or None optional (default=None) + Nodes to keep fixed at initial position. + Nodes not in ``G.nodes`` are ignored. + ValueError raised if `fixed` specified and `pos` not. + + iterations : int optional (default=50) + Maximum number of iterations taken + + threshold: float optional (default = 1e-4) + Threshold for relative error in node position changes. + The iteration stops if the error is below this threshold. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. Larger means a stronger attractive force. + If None, then all edge weights are 1. + + scale : number or None (default: 1) + Scale factor for positions. Not used unless `fixed is None`. + If scale is None, no rescaling is performed. + + center : array-like or None + Coordinate pair around which to center the layout. + Not used unless `fixed is None`. + + dim : int + Dimension of layout. + + seed : int, RandomState instance or None optional (default=None) + Set the random state for deterministic node layouts. 
+ If int, `seed` is the seed used by the random number generator, + if numpy.random.RandomState instance, `seed` is the random + number generator, + if None, the random number generator is the RandomState instance used + by numpy.random. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.spring_layout(G) + + # The same using longer but equivalent function name + >>> pos = nx.fruchterman_reingold_layout(G) + """ + import numpy as np + + G, center = _process_params(G, center, dim) + + if fixed is not None: + if pos is None: + raise ValueError("nodes are fixed without positions given") + for node in fixed: + if node not in pos: + raise ValueError("nodes are fixed without positions given") + nfixed = {node: i for i, node in enumerate(G)} + fixed = np.asarray([nfixed[node] for node in fixed if node in nfixed]) + + if pos is not None: + # Determine size of existing domain to adjust initial positions + dom_size = max(coord for pos_tup in pos.values() for coord in pos_tup) + if dom_size == 0: + dom_size = 1 + pos_arr = seed.rand(len(G), dim) * dom_size + center + + for i, n in enumerate(G): + if n in pos: + pos_arr[i] = np.asarray(pos[n]) + else: + pos_arr = None + dom_size = 1 + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G.nodes()): center} + + try: + # Sparse matrix + if len(G) < 500: # sparse solver for large graphs + raise ValueError + A = nx.to_scipy_sparse_array(G, weight=weight, dtype="f") + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _sparse_fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + except ValueError: + A = nx.to_numpy_array(G, weight=weight) + if k is None and fixed is not None: + # We must adjust k by domain size for layouts not near 1x1 + nnodes, _ = A.shape + k = dom_size / np.sqrt(nnodes) + pos = _fruchterman_reingold( + A, k, pos_arr, fixed, iterations, threshold, dim, seed + ) + if fixed is None and scale is not None: + pos = rescale_layout(pos, scale=scale) + center + pos = dict(zip(G, pos)) + return pos + + +fruchterman_reingold_layout = spring_layout + + +@np_random_state(7) +def _fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + import numpy as np + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + # We need to calculate this in case our fixed positions force our domain + # to be much bigger than 1x1 + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. 
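+    # (After i iterations the temperature is t - i*dt, so the step size
+    #  shrinks linearly and the final iteration still moves nodes by about
+    #  dt instead of freezing them outright.)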
+ dt = t / (iterations + 1) + delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype) + # the inscrutable (but fast) version + # this is still O(V^2) + # could use multilevel methods to speed this up significantly + for iteration in range(iterations): + # matrix of difference between points + delta = pos[:, np.newaxis, :] - pos[np.newaxis, :, :] + # distance between points + distance = np.linalg.norm(delta, axis=-1) + # enforce minimum distance of 0.01 + np.clip(distance, 0.01, None, out=distance) + # displacement "force" + displacement = np.einsum( + "ijk,ij->ik", delta, (k * k / distance**2 - A * distance / k) + ) + # update positions + length = np.linalg.norm(displacement, axis=-1) + length = np.where(length < 0.01, 0.1, length) + delta_pos = np.einsum("ij,i->ij", displacement, t / length) + if fixed is not None: + # don't change positions of fixed nodes + delta_pos[fixed] = 0.0 + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +@np_random_state(7) +def _sparse_fruchterman_reingold( + A, k=None, pos=None, fixed=None, iterations=50, threshold=1e-4, dim=2, seed=None +): + # Position nodes in adjacency matrix A using Fruchterman-Reingold + # Entry point for NetworkX graph is fruchterman_reingold_layout() + # Sparse version + import numpy as np + import scipy as sp + + try: + nnodes, _ = A.shape + except AttributeError as err: + msg = "fruchterman_reingold() takes an adjacency matrix as input" + raise nx.NetworkXError(msg) from err + # make sure we have a LIst of Lists representation + try: + A = A.tolil() + except AttributeError: + A = (sp.sparse.coo_array(A)).tolil() + + if pos is None: + # random initial positions + pos = np.asarray(seed.rand(nnodes, dim), dtype=A.dtype) + else: + # make sure positions are of same type as matrix + pos = pos.astype(A.dtype) + + # no fixed nodes + if fixed is None: + fixed = [] + + # optimal distance between nodes + if k is None: + k = np.sqrt(1.0 / nnodes) + # the initial "temperature" is about .1 of domain area (=1x1) + # this is the largest step allowed in the dynamics. + t = max(max(pos.T[0]) - min(pos.T[0]), max(pos.T[1]) - min(pos.T[1])) * 0.1 + # simple cooling scheme. + # linearly step down by dt on each iteration so last iteration is size dt. + dt = t / (iterations + 1) + + displacement = np.zeros((dim, nnodes)) + for iteration in range(iterations): + displacement *= 0 + # loop over rows + for i in range(A.shape[0]): + if i in fixed: + continue + # difference between this row's node position and all others + delta = (pos[i] - pos).T + # distance between points + distance = np.sqrt((delta**2).sum(axis=0)) + # enforce minimum distance of 0.01 + distance = np.where(distance < 0.01, 0.01, distance) + # the adjacency matrix row + Ai = A.getrowview(i).toarray() # TODO: revisit w/ sparse 1D container + # displacement "force" + displacement[:, i] += ( + delta * (k * k / distance**2 - Ai * distance / k) + ).sum(axis=1) + # update positions + length = np.sqrt((displacement**2).sum(axis=0)) + length = np.where(length < 0.01, 0.1, length) + delta_pos = (displacement * t / length).T + pos += delta_pos + # cool temperature + t -= dt + if (np.linalg.norm(delta_pos) / nnodes) < threshold: + break + return pos + + +def kamada_kawai_layout( + G, dist=None, pos=None, weight="weight", scale=1, center=None, dim=2 +): + """Position nodes using Kamada-Kawai path-length cost-function. 
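+
+    The layout minimizes a quadratic cost that penalizes deviations of the
+    pairwise Euclidean distances from the corresponding shortest-path
+    distances, so nodes that are far apart in the graph end up far apart
+    in the drawing.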
+ + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + dist : dict (default=None) + A two-level dictionary of optimal distances between nodes, + indexed by source and destination node. + If None, the distance is computed using shortest_path_length(). + + pos : dict or None optional (default=None) + Initial positions for nodes as a dictionary with node as keys + and values as a coordinate list or tuple. If None, then use + circular_layout() for dim >= 2 and a linear layout for dim == 1. + + weight : string or None optional (default='weight') + The edge attribute that holds the numerical value used for + the edge weight. If None, then all edge weights are 1. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.kamada_kawai_layout(G) + """ + import numpy as np + + G, center = _process_params(G, center, dim) + nNodes = len(G) + if nNodes == 0: + return {} + + if dist is None: + dist = dict(nx.shortest_path_length(G, weight=weight)) + dist_mtx = 1e6 * np.ones((nNodes, nNodes)) + for row, nr in enumerate(G): + if nr not in dist: + continue + rdist = dist[nr] + for col, nc in enumerate(G): + if nc not in rdist: + continue + dist_mtx[row][col] = rdist[nc] + + if pos is None: + if dim >= 3: + pos = random_layout(G, dim=dim) + elif dim == 2: + pos = circular_layout(G, dim=dim) + else: + pos = dict(zip(G, np.linspace(0, 1, len(G)))) + pos_arr = np.array([pos[n] for n in G]) + + pos = _kamada_kawai_solve(dist_mtx, pos_arr, dim) + + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(G, pos)) + + +def _kamada_kawai_solve(dist_mtx, pos_arr, dim): + # Anneal node locations based on the Kamada-Kawai cost-function, + # using the supplied matrix of preferred inter-node distances, + # and starting locations. + + import numpy as np + import scipy as sp + + meanwt = 1e-3 + costargs = (np, 1 / (dist_mtx + np.eye(dist_mtx.shape[0]) * 1e-3), meanwt, dim) + + optresult = sp.optimize.minimize( + _kamada_kawai_costfn, + pos_arr.ravel(), + method="L-BFGS-B", + args=costargs, + jac=True, + ) + + return optresult.x.reshape((-1, dim)) + + +def _kamada_kawai_costfn(pos_vec, np, invdist, meanweight, dim): + # Cost-function and gradient for Kamada-Kawai layout algorithm + nNodes = invdist.shape[0] + pos_arr = pos_vec.reshape((nNodes, dim)) + + delta = pos_arr[:, np.newaxis, :] - pos_arr[np.newaxis, :, :] + nodesep = np.linalg.norm(delta, axis=-1) + direction = np.einsum("ijk,ij->ijk", delta, 1 / (nodesep + np.eye(nNodes) * 1e-3)) + + offset = nodesep * invdist - 1.0 + offset[np.diag_indices(nNodes)] = 0 + + cost = 0.5 * np.sum(offset**2) + grad = np.einsum("ij,ij,ijk->ik", invdist, offset, direction) - np.einsum( + "ij,ij,ijk->jk", invdist, offset, direction + ) + + # Additional parabolic term to encourage mean position to be near origin: + sumpos = np.sum(pos_arr, axis=0) + cost += 0.5 * meanweight * np.sum(sumpos**2) + grad += meanweight * sumpos + + return (cost, grad.ravel()) + + +def spectral_layout(G, weight="weight", scale=1, center=None, dim=2): + """Position nodes using the eigenvectors of the graph Laplacian. + + Using the unnormalized Laplacian, the layout shows possible clusters of + nodes which are an approximation of the ratio cut. 
If dim is the number of dimensions
+    then the positions are the entries of the dim eigenvectors corresponding
+    to the ascending eigenvalues starting from the second one.
+
+    Parameters
+    ----------
+    G : NetworkX graph or list of nodes
+        A position will be assigned to every node in G.
+
+    weight : string or None optional (default='weight')
+        The edge attribute that holds the numerical value used for
+        the edge weight. If None, then all edge weights are 1.
+
+    scale : number (default: 1)
+        Scale factor for positions.
+
+    center : array-like or None
+        Coordinate pair around which to center the layout.
+
+    dim : int
+        Dimension of layout.
+
+    Returns
+    -------
+    pos : dict
+        A dictionary of positions keyed by node
+
+    Examples
+    --------
+    >>> G = nx.path_graph(4)
+    >>> pos = nx.spectral_layout(G)
+
+    Notes
+    -----
+    Directed graphs will be considered as undirected graphs when
+    positioning the nodes.
+
+    For larger graphs (>500 nodes) this will use the SciPy sparse
+    eigenvalue solver (ARPACK).
+    """
+    # handle some special cases that break the eigensolvers
+    import numpy as np
+
+    G, center = _process_params(G, center, dim)
+
+    if len(G) <= 2:
+        if len(G) == 0:
+            pos = np.array([])
+        elif len(G) == 1:
+            pos = np.array([center])
+        else:
+            pos = np.array([np.zeros(dim), np.array(center) * 2.0])
+        return dict(zip(G, pos))
+    try:
+        # Sparse matrix
+        if len(G) < 500:  # dense solver is faster for small graphs
+            raise ValueError
+        A = nx.to_scipy_sparse_array(G, weight=weight, dtype="d")
+        # Symmetrize directed graphs
+        if G.is_directed():
+            A = A + np.transpose(A)
+        pos = _sparse_spectral(A, dim)
+    except (ImportError, ValueError):
+        # Dense matrix
+        A = nx.to_numpy_array(G, weight=weight)
+        # Symmetrize directed graphs
+        if G.is_directed():
+            A += A.T
+        pos = _spectral(A, dim)
+
+    pos = rescale_layout(pos, scale=scale) + center
+    pos = dict(zip(G, pos))
+    return pos
+
+
+def _spectral(A, dim=2):
+    # Input adjacency matrix A
+    # Uses dense eigenvalue solver from numpy
+    import numpy as np
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "spectral() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+
+    # form Laplacian matrix where D is diagonal of degrees
+    D = np.identity(nnodes, dtype=A.dtype) * np.sum(A, axis=1)
+    L = D - A
+
+    eigenvalues, eigenvectors = np.linalg.eig(L)
+    # sort and keep smallest nonzero
+    index = np.argsort(eigenvalues)[1 : dim + 1]  # 0 index is zero eigenvalue
+    return np.real(eigenvectors[:, index])
+
+
+def _sparse_spectral(A, dim=2):
+    # Input adjacency matrix A
+    # Uses sparse eigenvalue solver from scipy
+    # Could use multilevel methods here, see Koren "On spectral graph drawing"
+    import numpy as np
+    import scipy as sp
+
+    try:
+        nnodes, _ = A.shape
+    except AttributeError as err:
+        msg = "sparse_spectral() takes an adjacency matrix as input"
+        raise nx.NetworkXError(msg) from err
+
+    # form Laplacian matrix
+    # TODO: Rm csr_array wrapper in favor of spdiags array constructor when available
+    D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, nnodes, nnodes))
+    L = D - A
+
+    k = dim + 1
+    # number of Lanczos vectors for ARPACK solver. What is the right scaling?
+ ncv = max(2 * k + 1, int(np.sqrt(nnodes))) + # return smallest k eigenvalues and eigenvectors + eigenvalues, eigenvectors = sp.sparse.linalg.eigsh(L, k, which="SM", ncv=ncv) + index = np.argsort(eigenvalues)[1:k] # 0 index is zero eigenvalue + return np.real(eigenvectors[:, index]) + + +def planar_layout(G, scale=1, center=None, dim=2): + """Position nodes without edge intersections. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. If G is of type + nx.PlanarEmbedding, the positions are selected accordingly. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + dim : int + Dimension of layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + NetworkXException + If G is not planar + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.planar_layout(G) + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + + if isinstance(G, nx.PlanarEmbedding): + embedding = G + else: + is_planar, embedding = nx.check_planarity(G) + if not is_planar: + raise nx.NetworkXException("G is not planar.") + pos = nx.combinatorial_embedding_to_pos(embedding) + node_list = list(embedding) + pos = np.row_stack([pos[x] for x in node_list]) + pos = pos.astype(np.float64) + pos = rescale_layout(pos, scale=scale) + center + return dict(zip(node_list, pos)) + + +def spiral_layout(G, scale=1, center=None, dim=2, resolution=0.35, equidistant=False): + """Position nodes in a spiral layout. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + scale : number (default: 1) + Scale factor for positions. + center : array-like or None + Coordinate pair around which to center the layout. + dim : int, default=2 + Dimension of layout, currently only dim=2 is supported. + Other dimension values result in a ValueError. + resolution : float, default=0.35 + The compactness of the spiral layout returned. + Lower values result in more compressed spiral layouts. + equidistant : bool, default=False + If True, nodes will be positioned equidistant from each other + by decreasing angle further from center. + If False, nodes will be positioned at equal angles + from each other by increasing separation further from center. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node + + Raises + ------ + ValueError + If dim != 2 + + Examples + -------- + >>> G = nx.path_graph(4) + >>> pos = nx.spiral_layout(G) + >>> nx.draw(G, pos=pos) + + Notes + ----- + This algorithm currently only works in two dimensions. 
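+
+    A quick sketch of the two modes (paraphrasing the code below): with
+    ``equidistant=False``, node ``i`` is placed at radius ``i`` and angle
+    ``resolution * i``; with ``equidistant=True``, the angle increment is
+    chosen so consecutive nodes stay roughly one unit of arc length apart.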
+ + """ + import numpy as np + + if dim != 2: + raise ValueError("can only handle 2 dimensions") + + G, center = _process_params(G, center, dim) + + if len(G) == 0: + return {} + if len(G) == 1: + return {nx.utils.arbitrary_element(G): center} + + pos = [] + if equidistant: + chord = 1 + step = 0.5 + theta = resolution + theta += chord / (step * theta) + for _ in range(len(G)): + r = step * theta + theta += chord / r + pos.append([np.cos(theta) * r, np.sin(theta) * r]) + + else: + dist = np.arange(len(G), dtype=float) + angle = resolution * dist + pos = np.transpose(dist * np.array([np.cos(angle), np.sin(angle)])) + + pos = rescale_layout(np.array(pos), scale=scale) + center + + pos = dict(zip(G, pos)) + + return pos + + +def multipartite_layout(G, subset_key="subset", align="vertical", scale=1, center=None): + """Position nodes in layers of straight lines. + + Parameters + ---------- + G : NetworkX graph or list of nodes + A position will be assigned to every node in G. + + subset_key : string (default='subset') + Key of node data to be used as layer subset. + + align : string (default='vertical') + The alignment of nodes. Vertical or horizontal. + + scale : number (default: 1) + Scale factor for positions. + + center : array-like or None + Coordinate pair around which to center the layout. + + Returns + ------- + pos : dict + A dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_multipartite_graph(28, 16, 10) + >>> pos = nx.multipartite_layout(G) + + Notes + ----- + This algorithm currently only works in two dimensions and does not + try to minimize edge crossings. + + Network does not need to be a complete multipartite graph. As long as nodes + have subset_key data, they will be placed in the corresponding layers. + + """ + import numpy as np + + if align not in ("vertical", "horizontal"): + msg = "align must be either vertical or horizontal." + raise ValueError(msg) + + G, center = _process_params(G, center=center, dim=2) + if len(G) == 0: + return {} + + layers = {} + for v, data in G.nodes(data=True): + try: + layer = data[subset_key] + except KeyError: + msg = "all nodes must have subset_key (default='subset') as data" + raise ValueError(msg) + layers[layer] = [v] + layers.get(layer, []) + + # Sort by layer, if possible + try: + layers = sorted(layers.items()) + except TypeError: + layers = list(layers.items()) + + pos = None + nodes = [] + width = len(layers) + for i, (_, layer) in enumerate(layers): + height = len(layer) + xs = np.repeat(i, height) + ys = np.arange(0, height, dtype=float) + offset = ((width - 1) / 2, (height - 1) / 2) + layer_pos = np.column_stack([xs, ys]) - offset + if pos is None: + pos = layer_pos + else: + pos = np.concatenate([pos, layer_pos]) + nodes.extend(layer) + pos = rescale_layout(pos, scale=scale) + center + if align == "horizontal": + pos = pos[:, ::-1] # swap x and y coords + pos = dict(zip(nodes, pos)) + return pos + + +def arf_layout( + G, + pos=None, + scaling=1, + a=1.1, + etol=1e-6, + dt=1e-3, + max_iter=1000, +): + """Arf layout for networkx + + The attractive and repulsive forces (arf) layout [1] + improves the spring layout in three ways. First, it + prevents congestion of highly connected nodes due to + strong forcing between nodes. Second, it utilizes the + layout space more effectively by preventing large gaps + that spring layout tends to create. Lastly, the arf + layout represents symmetries in the layout better than + the default spring layout. 
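+
+    Concretely (as the implementation below shows), every node pair feels a
+    spring force, scaled up by the factor `a` along edges, plus a repulsion
+    that confines the layout to a circle of radius about
+    ``scaling * sqrt(N)``; positions are advanced by simple Euler steps of
+    size `dt` until the summed force magnitude drops below `etol` or
+    `max_iter` is reached.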
+
+    Parameters
+    ----------
+    G : nx.Graph or nx.DiGraph
+        Networkx graph.
+    pos : dict
+        Initial position of the nodes. If set to None a
+        random layout will be used.
+    scaling : float
+        Scales the radius of the circular layout space.
+    a : float
+        Strength of springs between connected nodes. Should be larger
+        than 1. The greater a, the clearer the separation of unconnected
+        sub clusters.
+    etol : float
+        Gradient sum of spring forces must drop below `etol` for the
+        algorithm to terminate successfully.
+    dt : float
+        Time step for force differential equation simulations.
+    max_iter : int
+        Max iterations before termination of the algorithm.
+
+    References
+    ----------
+    .. [1] "Self-Organization Applied to Dynamic Network Layout", M. Geipel,
+        International Journal of Modern Physics C, 2007, Vol 18, No 10,
+        pp. 1537-1549.
+        https://doi.org/10.1142/S0129183107011558 https://arxiv.org/abs/0704.1748
+
+    Returns
+    -------
+    pos : dict
+        A dictionary of positions keyed by node.
+
+    Examples
+    --------
+    >>> G = nx.grid_graph((5, 5))
+    >>> pos = nx.arf_layout(G)
+
+    """
+    import warnings
+
+    import numpy as np
+
+    if a <= 1:
+        msg = "The parameter a should be larger than 1"
+        raise ValueError(msg)
+
+    pos_tmp = nx.random_layout(G)
+    if pos is None:
+        pos = pos_tmp
+    else:
+        for node in G.nodes():
+            if node not in pos:
+                pos[node] = pos_tmp[node].copy()
+
+    # Initialize spring constant matrix
+    N = len(G)
+    # No nodes, no computation
+    if N == 0:
+        return pos
+
+    # init force of springs
+    K = np.ones((N, N)) - np.eye(N)
+    node_order = {node: i for i, node in enumerate(G)}
+    for x, y in G.edges():
+        if x != y:
+            idx, jdx = (node_order[i] for i in (x, y))
+            K[idx, jdx] = a
+
+    # vectorize values
+    p = np.asarray(list(pos.values()))
+
+    # equation 10 in [1]
+    rho = scaling * np.sqrt(N)
+
+    # looping variables
+    error = etol + 1
+    n_iter = 0
+    while error > etol:
+        diff = p[:, np.newaxis] - p[np.newaxis]
+        A = np.linalg.norm(diff, axis=-1)[..., np.newaxis]
+        # attraction force - repulsion force
+        # suppress nans due to division; caused by diagonal set to zero.
+        # Does not affect the computation due to nansum
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            change = K[..., np.newaxis] * diff - rho / A * diff
+        change = np.nansum(change, axis=0)
+        p += change * dt
+
+        error = np.linalg.norm(change, axis=-1).sum()
+        if n_iter > max_iter:
+            break
+        n_iter += 1
+    return dict(zip(G.nodes(), p))
+
+
+def rescale_layout(pos, scale=1):
+    """Returns scaled position array to (-scale, scale) in all axes.
+
+    The function acts on NumPy arrays which hold position information.
+    Each position is one row of the array. The dimension of the space
+    equals the number of columns. Each coordinate occupies one column.
+
+    To rescale, the mean (center) is subtracted from each axis separately.
+    Then all values are scaled so that the largest magnitude value
+    from all axes equals `scale` (thus, the aspect ratio is preserved).
+    The resulting NumPy Array is returned (order of rows unchanged).
+
+    Parameters
+    ----------
+    pos : numpy array
+        positions to be scaled. Each row is a position.
+
+    scale : number (default: 1)
+        The size of the resulting extent in all directions.
+
+    Returns
+    -------
+    pos : numpy array
+        scaled positions. Each row is a position.
+ + See Also + -------- + rescale_layout_dict + """ + import numpy as np + + # Find max length over all dimensions + pos -= pos.mean(axis=0) + lim = np.abs(pos).max() # max coordinate for all axes + # rescale to (-scale, scale) in all directions, preserves aspect + if lim > 0: + pos *= scale / lim + return pos + + +def rescale_layout_dict(pos, scale=1): + """Return a dictionary of scaled positions keyed by node + + Parameters + ---------- + pos : A dictionary of positions keyed by node + + scale : number (default: 1) + The size of the resulting extent in all directions. + + Returns + ------- + pos : A dictionary of positions keyed by node + + Examples + -------- + >>> import numpy as np + >>> pos = {0: np.array((0, 0)), 1: np.array((1, 1)), 2: np.array((0.5, 0.5))} + >>> nx.rescale_layout_dict(pos) + {0: array([-1., -1.]), 1: array([1., 1.]), 2: array([0., 0.])} + + >>> pos = {0: np.array((0, 0)), 1: np.array((-1, 1)), 2: np.array((-0.5, 0.5))} + >>> nx.rescale_layout_dict(pos, scale=2) + {0: array([ 2., -2.]), 1: array([-2., 2.]), 2: array([0., 0.])} + + See Also + -------- + rescale_layout + """ + import numpy as np + + if not pos: # empty_graph + return {} + pos_v = np.array(list(pos.values())) + pos_v = rescale_layout(pos_v, scale=scale) + return dict(zip(pos, pos_v)) diff --git a/MLPY/Lib/site-packages/networkx/drawing/nx_agraph.py b/MLPY/Lib/site-packages/networkx/drawing/nx_agraph.py new file mode 100644 index 0000000000000000000000000000000000000000..8db3400f20041ed8d6ce23b1b4cb937ac192c0ee --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/nx_agraph.py @@ -0,0 +1,469 @@ +""" +*************** +Graphviz AGraph +*************** + +Interface to pygraphviz AGraph class. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> A = nx.nx_agraph.to_agraph(G) +>>> H = nx.nx_agraph.from_agraph(A) + +See Also +-------- + - Pygraphviz: http://pygraphviz.github.io/ + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" +import os +import tempfile + +import networkx as nx + +__all__ = [ + "from_agraph", + "to_agraph", + "write_dot", + "read_dot", + "graphviz_layout", + "pygraphviz_layout", + "view_pygraphviz", +] + + +@nx._dispatch(graphs=None) +def from_agraph(A, create_using=None): + """Returns a NetworkX Graph or DiGraph from a PyGraphviz graph. + + Parameters + ---------- + A : PyGraphviz AGraph + A graph created with PyGraphviz + + create_using : NetworkX graph constructor, optional (default=None) + Graph type to create. If graph instance, then cleared before populated. + If `None`, then the appropriate Graph type is inferred from `A`. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + >>> G = nx.nx_agraph.from_agraph(A) + + Notes + ----- + The Graph G will have a dictionary G.graph_attr containing + the default graphviz attributes for graphs, nodes and edges. + + Default node attributes will be in the dictionary G.node_attr + which is keyed by node. + + Edge attributes will be returned as edge data in G. With + edge_attr=False the edge data will be the Graphviz edge weight + attribute or the value 1 if no edge weight attribute is found. 
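+
+    A minimal attribute round trip as a sketch (requires pygraphviz;
+    node names come back as strings):
+
+    >>> import pygraphviz
+    >>> A = pygraphviz.AGraph()  # strict, undirected
+    >>> A.add_edge(1, 2, color="red")
+    >>> G = nx.nx_agraph.from_agraph(A)
+    >>> G.edges["1", "2"]["color"]
+    'red'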
+ + """ + if create_using is None: + if A.is_directed(): + if A.is_strict(): + create_using = nx.DiGraph + else: + create_using = nx.MultiDiGraph + else: + if A.is_strict(): + create_using = nx.Graph + else: + create_using = nx.MultiGraph + + # assign defaults + N = nx.empty_graph(0, create_using) + if A.name is not None: + N.name = A.name + + # add graph attributes + N.graph.update(A.graph_attr) + + # add nodes, attributes to N.node_attr + for n in A.nodes(): + str_attr = {str(k): v for k, v in n.attr.items()} + N.add_node(str(n), **str_attr) + + # add edges, assign edge data as dictionary of attributes + for e in A.edges(): + u, v = str(e[0]), str(e[1]) + attr = dict(e.attr) + str_attr = {str(k): v for k, v in attr.items()} + if not N.is_multigraph(): + if e.name is not None: + str_attr["key"] = e.name + N.add_edge(u, v, **str_attr) + else: + N.add_edge(u, v, key=e.name, **str_attr) + + # add default attributes for graph, nodes, and edges + # hang them on N.graph_attr + N.graph["graph"] = dict(A.graph_attr) + N.graph["node"] = dict(A.node_attr) + N.graph["edge"] = dict(A.edge_attr) + return N + + +def to_agraph(N): + """Returns a pygraphviz graph from a NetworkX graph N. + + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_agraph.to_agraph(K5) + + Notes + ----- + If N has an dict N.graph_attr an attempt will be made first + to copy properties attached to the graph (see from_agraph) + and then updated with the calling arguments if any. + + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + directed = N.is_directed() + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + for node in N: + if "pos" in N.nodes[node]: + N.nodes[node]["pos"] = "{},{}!".format( + N.nodes[node]["pos"][0], N.nodes[node]["pos"][1] + ) + + A = pygraphviz.AGraph(name=N.name, strict=strict, directed=directed) + + # default graph attributes + A.graph_attr.update(N.graph.get("graph", {})) + A.node_attr.update(N.graph.get("node", {})) + A.edge_attr.update(N.graph.get("edge", {})) + + A.graph_attr.update( + (k, v) for k, v in N.graph.items() if k not in ("graph", "node", "edge") + ) + + # add nodes + for n, nodedata in N.nodes(data=True): + A.add_node(n) + # Add node data + a = A.get_node(n) + a.attr.update({k: str(v) for k, v in nodedata.items()}) + + # loop over edges + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {k: str(v) for k, v in edgedata.items() if k != "key"} + A.add_edge(u, v, key=str(key)) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {k: str(v) for k, v in edgedata.items()} + A.add_edge(u, v) + # Add edge data + a = A.get_edge(u, v) + a.attr.update(str_edgedata) + + return A + + +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Parameters + ---------- + G : graph + A networkx graph + path : filename + Filename or file handle to write + + Notes + ----- + To use a specific graph layout, call ``A.layout`` prior to `write_dot`. + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. 
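+
+    Examples
+    --------
+    A minimal write as a sketch (requires pygraphviz; the file name is
+    arbitrary):
+
+    >>> G = nx.complete_graph(3)
+    >>> nx.nx_agraph.write_dot(G, "k3.dot")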
+ """ + A = to_agraph(G) + A.write(path) + A.clear() + return + + +@nx._dispatch(name="agraph_read_dot", graphs=None) +def read_dot(path): + """Returns a NetworkX graph from a dot file on path. + + Parameters + ---------- + path : file or string + File name or file handle to read. + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "read_dot() requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + A = pygraphviz.AGraph(file=path) + gr = from_agraph(A) + A.clear() + return gr + + +def graphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. + + Parameters + ---------- + G : NetworkX graph + A graph created with NetworkX + prog : string + Name of Graphviz layout program + root : string, optional + Root node for twopi layout + args : string, optional + Extra arguments to Graphviz layout program + + Returns + ------- + Dictionary of x, y, positions keyed by node. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> pos = nx.nx_agraph.graphviz_layout(G) + >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pygraphviz_layout. + + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. + """ + return pygraphviz_layout(G, prog=prog, root=root, args=args) + + +def pygraphviz_layout(G, prog="neato", root=None, args=""): + """Create node positions for G using Graphviz. + + Parameters + ---------- + G : NetworkX graph + A graph created with NetworkX + prog : string + Name of Graphviz layout program + root : string, optional + Root node for twopi layout + args : string, optional + Extra arguments to Graphviz layout program + + Returns + ------- + node_pos : dict + Dictionary of x, y, positions keyed by node. + + Examples + -------- + >>> G = nx.petersen_graph() + >>> pos = nx.nx_agraph.graphviz_layout(G) + >>> pos = nx.nx_agraph.graphviz_layout(G, prog="dot") + + Notes + ----- + If you use complex node objects, they may have the same string + representation and GraphViz could treat them as the same node. + The layout may assign both nodes a single location. See Issue #1568 + If this occurs in your case, consider relabeling the nodes just + for the layout computation using something similar to:: + + >>> H = nx.convert_node_labels_to_integers(G, label_attribute="node_label") + >>> H_layout = nx.nx_agraph.pygraphviz_layout(G, prog="dot") + >>> G_layout = {H.nodes[n]["node_label"]: p for n, p in H_layout.items()} + + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. + """ + try: + import pygraphviz + except ImportError as err: + raise ImportError( + "requires pygraphviz " "http://pygraphviz.github.io/" + ) from err + if root is not None: + args += f"-Groot={root}" + A = to_agraph(G) + A.layout(prog=prog, args=args) + node_pos = {} + for n in G: + node = pygraphviz.Node(A, n) + try: + xs = node.attr["pos"].split(",") + node_pos[n] = tuple(float(x) for x in xs) + except: + print("no position for node", n) + node_pos[n] = (0.0, 0.0) + return node_pos + + +@nx.utils.open_file(5, "w+b") +def view_pygraphviz( + G, edgelabel=None, prog="dot", args="", suffix="", path=None, show=True +): + """Views the graph G using the specified layout algorithm. + + Parameters + ---------- + G : NetworkX graph + The machine to draw. 
+ edgelabel : str, callable, None + If a string, then it specifies the edge attribute to be displayed + on the edge labels. If a callable, then it is called for each + edge and it should return the string to be displayed on the edges. + The function signature of `edgelabel` should be edgelabel(data), + where `data` is the edge attribute dictionary. + prog : string + Name of Graphviz layout program. + args : str + Additional arguments to pass to the Graphviz layout program. + suffix : str + If `filename` is None, we save to a temporary file. The value of + `suffix` will appear at the tail end of the temporary filename. + path : str, None + The filename used to save the image. If None, save to a temporary + file. File formats are the same as those from pygraphviz.agraph.draw. + show : bool, default = True + Whether to display the graph with :mod:`PIL.Image.show`, + default is `True`. If `False`, the rendered graph is still available + at `path`. + + Returns + ------- + path : str + The filename of the generated image. + A : PyGraphviz graph + The PyGraphviz graph instance used to generate the image. + + Notes + ----- + If this function is called in succession too quickly, sometimes the + image is not displayed. So you might consider time.sleep(.5) between + calls if you experience problems. + + Note that some graphviz layouts are not guaranteed to be deterministic, + see https://gitlab.com/graphviz/graphviz/-/issues/1767 for more info. + + """ + if not len(G): + raise nx.NetworkXException("An empty graph cannot be drawn.") + + # If we are providing default values for graphviz, these must be set + # before any nodes or edges are added to the PyGraphviz graph object. + # The reason for this is that default values only affect incoming objects. + # If you change the default values after the objects have been added, + # then they inherit no value and are set only if explicitly set. + + # to_agraph() uses these values. + attrs = ["edge", "node", "graph"] + for attr in attrs: + if attr not in G.graph: + G.graph[attr] = {} + + # These are the default values. + edge_attrs = {"fontsize": "10"} + node_attrs = { + "style": "filled", + "fillcolor": "#0000FF40", + "height": "0.75", + "width": "0.75", + "shape": "circle", + } + graph_attrs = {} + + def update_attrs(which, attrs): + # Update graph attributes. Return list of those which were added. + added = [] + for k, v in attrs.items(): + if k not in G.graph[which]: + G.graph[which][k] = v + added.append(k) + + def clean_attrs(which, added): + # Remove added attributes + for attr in added: + del G.graph[which][attr] + if not G.graph[which]: + del G.graph[which] + + # Update all default values + update_attrs("edge", edge_attrs) + update_attrs("node", node_attrs) + update_attrs("graph", graph_attrs) + + # Convert to agraph, so we inherit default values + A = to_agraph(G) + + # Remove the default values we added to the original graph. + clean_attrs("edge", edge_attrs) + clean_attrs("node", node_attrs) + clean_attrs("graph", graph_attrs) + + # If the user passed in an edgelabel, we update the labels for all edges. + if edgelabel is not None: + if not callable(edgelabel): + + def func(data): + return "".join([" ", str(data[edgelabel]), " "]) + + else: + func = edgelabel + + # update all the edge labels + if G.is_multigraph(): + for u, v, key, data in G.edges(keys=True, data=True): + # PyGraphviz doesn't convert the key to a string. 
See #339 + edge = A.get_edge(u, v, str(key)) + edge.attr["label"] = str(func(data)) + else: + for u, v, data in G.edges(data=True): + edge = A.get_edge(u, v) + edge.attr["label"] = str(func(data)) + + if path is None: + ext = "png" + if suffix: + suffix = f"_{suffix}.{ext}" + else: + suffix = f".{ext}" + path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + else: + # Assume the decorator worked and it is a file-object. + pass + + # Write graph to file + A.draw(path=path, format=None, prog=prog, args=args) + path.close() + + # Show graph in a new window (depends on platform configuration) + if show: + from PIL import Image + + Image.open(path.name).show() + + return path.name, A diff --git a/MLPY/Lib/site-packages/networkx/drawing/nx_latex.py b/MLPY/Lib/site-packages/networkx/drawing/nx_latex.py new file mode 100644 index 0000000000000000000000000000000000000000..6312f71505e79bf5a2e2d9979274ba2a08b2e32b --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/nx_latex.py @@ -0,0 +1,571 @@ +r""" +***** +LaTeX +***** + +Export NetworkX graphs in LaTeX format using the TikZ library within TeX/LaTeX. +Usually, you will want the drawing to appear in a figure environment so +you use ``to_latex(G, caption="A caption")``. If you want the raw +drawing commands without a figure environment use :func:`to_latex_raw`. +And if you want to write to a file instead of just returning the latex +code as a string, use ``write_latex(G, "filename.tex", caption="A caption")``. + +To construct a figure with subfigures for each graph to be shown, provide +``to_latex`` or ``write_latex`` a list of graphs, a list of subcaptions, +and a number of rows of subfigures inside the figure. + +To be able to refer to the figures or subfigures in latex using ``\\ref``, +the keyword ``latex_label`` is available for figures and `sub_labels` for +a list of labels, one for each subfigure. + +We intend to eventually provide an interface to the TikZ Graph +features which include e.g. layout algorithms. + +Let us know via github what you'd like to see available, or better yet +give us some code to do it, or even better make a github pull request +to add the feature. + +The TikZ approach +================= +Drawing options can be stored on the graph as node/edge attributes, or +can be provided as dicts keyed by node/edge to a string of the options +for that node/edge. Similarly a label can be shown for each node/edge +by specifying the labels as graph node/edge attributes or by providing +a dict keyed by node/edge to the text to be written for that node/edge. + +Options for the tikzpicture environment (e.g. "[scale=2]") can be provided +via a keyword argument. Similarly default node and edge options can be +provided through keywords arguments. The default node options are applied +to the single TikZ "path" that draws all nodes (and no edges). The default edge +options are applied to a TikZ "scope" which contains a path for each edge. + +Examples +======== +>>> G = nx.path_graph(3) +>>> nx.write_latex(G, "just_my_figure.tex", as_document=True) +>>> nx.write_latex(G, "my_figure.tex", caption="A path graph", latex_label="fig1") +>>> latex_code = nx.to_latex(G) # a string rather than a file + +You can change many features of the nodes and edges. 
+ +>>> G = nx.path_graph(4, create_using=nx.DiGraph) +>>> pos = {n: (n, n) for n in G} # nodes set on a line + +>>> G.nodes[0]["style"] = "blue" +>>> G.nodes[2]["style"] = "line width=3,draw" +>>> G.nodes[3]["label"] = "Stop" +>>> G.edges[(0, 1)]["label"] = "1st Step" +>>> G.edges[(0, 1)]["label_opts"] = "near start" +>>> G.edges[(1, 2)]["style"] = "line width=3" +>>> G.edges[(1, 2)]["label"] = "2nd Step" +>>> G.edges[(2, 3)]["style"] = "green" +>>> G.edges[(2, 3)]["label"] = "3rd Step" +>>> G.edges[(2, 3)]["label_opts"] = "near end" + +>>> nx.write_latex(G, "latex_graph.tex", pos=pos, as_document=True) + +Then compile the LaTeX using something like ``pdflatex latex_graph.tex`` +and view the pdf file created: ``latex_graph.pdf``. + +If you want **subfigures** each containing one graph, you can input a list of graphs. + +>>> H1 = nx.path_graph(4) +>>> H2 = nx.complete_graph(4) +>>> H3 = nx.path_graph(8) +>>> H4 = nx.complete_graph(8) +>>> graphs = [H1, H2, H3, H4] +>>> caps = ["Path 4", "Complete graph 4", "Path 8", "Complete graph 8"] +>>> lbls = ["fig2a", "fig2b", "fig2c", "fig2d"] +>>> nx.write_latex(graphs, "subfigs.tex", n_rows=2, sub_captions=caps, sub_labels=lbls) +>>> latex_code = nx.to_latex(graphs, n_rows=2, sub_captions=caps, sub_labels=lbls) + +>>> node_color = {0: "red", 1: "orange", 2: "blue", 3: "gray!90"} +>>> edge_width = {e: "line width=1.5" for e in H3.edges} +>>> pos = nx.circular_layout(H3) +>>> latex_code = nx.to_latex(H3, pos, node_options=node_color, edge_options=edge_width) +>>> print(latex_code) +\documentclass{report} +\usepackage{tikz} +\usepackage{subcaption} + +\begin{document} +\begin{figure} + \begin{tikzpicture} + \draw + (1.0, 0.0) node[red] (0){0} + (0.707, 0.707) node[orange] (1){1} + (-0.0, 1.0) node[blue] (2){2} + (-0.707, 0.707) node[gray!90] (3){3} + (-1.0, -0.0) node (4){4} + (-0.707, -0.707) node (5){5} + (0.0, -1.0) node (6){6} + (0.707, -0.707) node (7){7}; + \begin{scope}[-] + \draw[line width=1.5] (0) to (1); + \draw[line width=1.5] (1) to (2); + \draw[line width=1.5] (2) to (3); + \draw[line width=1.5] (3) to (4); + \draw[line width=1.5] (4) to (5); + \draw[line width=1.5] (5) to (6); + \draw[line width=1.5] (6) to (7); + \end{scope} + \end{tikzpicture} +\end{figure} +\end{document} + +Notes +----- +If you want to change the preamble/postamble of the figure/document/subfigure +environment, use the keyword arguments: `figure_wrapper`, `document_wrapper`, +`subfigure_wrapper`. The default values are stored in private variables +e.g. ``nx.nx_layout._DOCUMENT_WRAPPER`` + +References +---------- +TikZ: https://tikz.dev/ + +TikZ options details: https://tikz.dev/tikz-actions +""" +import numbers +import os + +import networkx as nx + +__all__ = [ + "to_latex_raw", + "to_latex", + "write_latex", +] + + +@nx.utils.not_implemented_for("multigraph") +def to_latex_raw( + G, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="label", + default_edge_options="", + edge_options="edge_options", + edge_label="label", + edge_label_options="edge_label_options", +): + """Return a string of the LaTeX/TikZ code to draw `G` + + This function produces just the code for the tikzpicture + without any enclosing environment. + + Parameters + ========== + G : NetworkX graph + The NetworkX graph to be drawn + pos : string or dict (default "pos") + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. 
+ They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. + + Returns + ======= + latex_code : string + The text string which draws the desired graph(s) when compiled by LaTeX. 
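+
+    Examples
+    ========
+    A minimal sketch; positions are given directly as a dict, and the
+    result is only the tikzpicture code, with no enclosing environment:
+
+    >>> G = nx.path_graph(2)
+    >>> code = nx.to_latex_raw(G, pos={0: (0, 0), 1: (1, 0)})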
+ + See Also + ======== + to_latex + write_latex + """ + i4 = "\n " + i8 = "\n " + + # set up position dict + # TODO allow pos to be None and use a nice TikZ default + if not isinstance(pos, dict): + pos = nx.get_node_attributes(G, pos) + if not pos: + # circular layout with radius 2 + pos = {n: f"({round(360.0 * i / len(G), 3)}:2)" for i, n in enumerate(G)} + for node in G: + if node not in pos: + raise nx.NetworkXError(f"node {node} has no specified pos {pos}") + posnode = pos[node] + if not isinstance(posnode, str): + try: + posx, posy = posnode + pos[node] = f"({round(posx, 3)}, {round(posy, 3)})" + except (TypeError, ValueError): + msg = f"position pos[{node}] is not 2-tuple or a string: {posnode}" + raise nx.NetworkXError(msg) + + # set up all the dicts + if not isinstance(node_options, dict): + node_options = nx.get_node_attributes(G, node_options) + if not isinstance(node_label, dict): + node_label = nx.get_node_attributes(G, node_label) + if not isinstance(edge_options, dict): + edge_options = nx.get_edge_attributes(G, edge_options) + if not isinstance(edge_label, dict): + edge_label = nx.get_edge_attributes(G, edge_label) + if not isinstance(edge_label_options, dict): + edge_label_options = nx.get_edge_attributes(G, edge_label_options) + + # process default options (add brackets or not) + topts = "" if tikz_options == "" else f"[{tikz_options.strip('[]')}]" + defn = "" if default_node_options == "" else f"[{default_node_options.strip('[]')}]" + linestyle = f"{'->' if G.is_directed() else '-'}" + if default_edge_options == "": + defe = "[" + linestyle + "]" + elif "-" in default_edge_options: + defe = default_edge_options + else: + defe = f"[{linestyle},{default_edge_options.strip('[]')}]" + + # Construct the string line by line + result = " \\begin{tikzpicture}" + topts + result += i4 + " \\draw" + defn + # load the nodes + for n in G: + # node options goes inside square brackets + nopts = f"[{node_options[n].strip('[]')}]" if n in node_options else "" + # node text goes inside curly brackets {} + ntext = f"{{{node_label[n]}}}" if n in node_label else f"{{{n}}}" + + result += i8 + f"{pos[n]} node{nopts} ({n}){ntext}" + result += ";\n" + + # load the edges + result += " \\begin{scope}" + defe + for edge in G.edges: + u, v = edge[:2] + e_opts = f"{edge_options[edge]}".strip("[]") if edge in edge_options else "" + # add loop options for selfloops if not present + if u == v and "loop" not in e_opts: + e_opts = "loop," + e_opts + e_opts = f"[{e_opts}]" if e_opts != "" else "" + # TODO -- handle bending of multiedges + + els = edge_label_options[edge] if edge in edge_label_options else "" + # edge label options goes inside square brackets [] + els = f"[{els.strip('[]')}]" + # edge text is drawn using the TikZ node command inside curly brackets {} + e_label = f" node{els} {{{edge_label[edge]}}}" if edge in edge_label else "" + + result += i8 + f"\\draw{e_opts} ({u}) to{e_label} ({v});" + + result += "\n \\end{scope}\n \\end{tikzpicture}\n" + return result + + +_DOC_WRAPPER_TIKZ = r"""\documentclass{{report}} +\usepackage{{tikz}} +\usepackage{{subcaption}} + +\begin{{document}} +{content} +\end{{document}}""" + + +_FIG_WRAPPER = r"""\begin{{figure}} +{content}{caption}{label} +\end{{figure}}""" + + +_SUBFIG_WRAPPER = r""" \begin{{subfigure}}{{{size}\textwidth}} +{content}{caption}{label} + \end{{subfigure}}""" + + +def to_latex( + Gbunch, + pos="pos", + tikz_options="", + default_node_options="", + node_options="node_options", + node_label="node_label", + default_edge_options="", + 
edge_options="edge_options", + edge_label="edge_label", + edge_label_options="edge_label_options", + caption="", + latex_label="", + sub_captions=None, + sub_labels=None, + n_rows=1, + as_document=True, + document_wrapper=_DOC_WRAPPER_TIKZ, + figure_wrapper=_FIG_WRAPPER, + subfigure_wrapper=_SUBFIG_WRAPPER, +): + """Return latex code to draw the graph(s) in `Gbunch` + + The TikZ drawing utility in LaTeX is used to draw the graph(s). + If `Gbunch` is a graph, it is drawn in a figure environment. + If `Gbunch` is an iterable of graphs, each is drawn in a subfigure environment + within a single figure environment. + + If `as_document` is True, the figure is wrapped inside a document environment + so that the resulting string is ready to be compiled by LaTeX. Otherwise, + the string is ready for inclusion in a larger tex document using ``\\include`` + or ``\\input`` statements. + + Parameters + ========== + Gbunch : NetworkX graph or iterable of NetworkX graphs + The NetworkX graph to be drawn or an iterable of graphs + to be drawn inside subfigures of a single figure. + pos : string or list of strings + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + If you are drawing many graphs in subfigures, use a list of position dicts. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. 
+
+    caption : string
+        The caption string for the figure environment
+    latex_label : string
+        The latex label used for the figure for easy referral from the main text
+    sub_captions : list of strings
+        The sub_caption string for each subfigure in the figure
+    sub_labels : list of strings
+        The latex label for each subfigure in the figure
+    n_rows : int
+        The number of rows of subfigures to arrange for multiple graphs
+    as_document : bool
+        Whether to wrap the latex code in a document environment for compiling
+    document_wrapper : formatted text string with variable ``content``.
+        This string is formatted with the figure code substituted for
+        ``content``; it wraps the figure in a document environment with a
+        preamble setting up TikZ.
+    figure_wrapper : formatted text string
+        This text is evaluated with variables ``content``, ``caption`` and ``label``.
+        It wraps the content and if a caption is provided, adds the latex code for
+        that caption, and if a label is provided, adds the latex code for a label.
+    subfigure_wrapper : formatted text string
+        This text is evaluated with variables ``size``, ``content``, ``caption``
+        and ``label``.
+        It wraps the content and if a caption is provided, adds the latex code for
+        that caption, and if a label is provided, adds the latex code for a label.
+        The size is the vertical size of each row of subfigures as a fraction.
+
+    Returns
+    =======
+    latex_code : string
+        The text string which draws the desired graph(s) when compiled by LaTeX.
+
+    See Also
+    ========
+    write_latex
+    to_latex_raw
+    """
+    if hasattr(Gbunch, "adj"):
+        raw = to_latex_raw(
+            Gbunch,
+            pos,
+            tikz_options,
+            default_node_options,
+            node_options,
+            node_label,
+            default_edge_options,
+            edge_options,
+            edge_label,
+            edge_label_options,
+        )
+    else:  # iterator of graphs
+        sbf = subfigure_wrapper
+        size = 1 / n_rows
+
+        N = len(Gbunch)
+        if isinstance(pos, (str, dict)):
+            pos = [pos] * N
+        if sub_captions is None:
+            sub_captions = [""] * N
+        if sub_labels is None:
+            sub_labels = [""] * N
+        if not (len(Gbunch) == len(pos) == len(sub_captions) == len(sub_labels)):
+            raise nx.NetworkXError(
+                "length of Gbunch, pos, sub_captions and sub_labels must agree"
+            )
+
+        raw = ""
+        for G, pos, subcap, sublbl in zip(Gbunch, pos, sub_captions, sub_labels):
+            subraw = to_latex_raw(
+                G,
+                pos,
+                tikz_options,
+                default_node_options,
+                node_options,
+                node_label,
+                default_edge_options,
+                edge_options,
+                edge_label,
+                edge_label_options,
+            )
+            cap = f" \\caption{{{subcap}}}" if subcap else ""
+            lbl = f"\\label{{{sublbl}}}" if sublbl else ""
+            raw += sbf.format(size=size, content=subraw, caption=cap, label=lbl)
+            raw += "\n"
+
+    # put raw latex code into a figure environment and optionally into a document
+    raw = raw[:-1]
+    cap = f"\n \\caption{{{caption}}}" if caption else ""
+    lbl = f"\\label{{{latex_label}}}" if latex_label else ""
+    fig = figure_wrapper.format(content=raw, caption=cap, label=lbl)
+    if as_document:
+        return document_wrapper.format(content=fig)
+    return fig
+
+
+@nx.utils.open_file(1, mode="w")
+def write_latex(Gbunch, path, **options):
+    """Write the latex code to draw the graph(s) onto `path`.
+
+    This convenience function creates the latex drawing code as a string
+    and writes that to a file ready to be compiled when `as_document` is True
+    or ready to be included into your main LaTeX document with ``\\input``
+    or ``\\include`` statements.
+
+    The `path` argument can be a string filename or a file handle to write to.
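+
+    For example, ``nx.write_latex(G, "fig.tex", caption="A graph", as_document=False)``
+    writes a figure environment ready for ``\\input{fig.tex}`` in a larger
+    document (a sketch; any filename works).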
+ + Parameters + ---------- + Gbunch : NetworkX graph or iterable of NetworkX graphs + If Gbunch is a graph, it is drawn in a figure environment. + If Gbunch is an iterable of graphs, each is drawn in a subfigure + environment within a single figure environment. + path : filename + Filename or file handle to write to + options : dict + By default, TikZ is used with options: (others are ignored):: + + pos : string or dict or list + The name of the node attribute on `G` that holds the position of each node. + Positions can be sequences of length 2 with numbers for (x,y) coordinates. + They can also be strings to denote positions in TikZ style, such as (x, y) + or (angle:radius). + If a dict, it should be keyed by node to a position. + If an empty dict, a circular layout is computed by TikZ. + If you are drawing many graphs in subfigures, use a list of position dicts. + tikz_options : string + The tikzpicture options description defining the options for the picture. + Often large scale options like `[scale=2]`. + default_node_options : string + The draw options for a path of nodes. Individual node options override these. + node_options : string or dict + The name of the node attribute on `G` that holds the options for each node. + Or a dict keyed by node to a string holding the options for that node. + node_label : string or dict + The name of the node attribute on `G` that holds the node label (text) + displayed for each node. If the attribute is "" or not present, the node + itself is drawn as a string. LaTeX processing such as ``"$A_1$"`` is allowed. + Or a dict keyed by node to a string holding the label for that node. + default_edge_options : string + The options for the scope drawing all edges. The default is "[-]" for + undirected graphs and "[->]" for directed graphs. + edge_options : string or dict + The name of the edge attribute on `G` that holds the options for each edge. + If the edge is a self-loop and ``"loop" not in edge_options`` the option + "loop," is added to the options for the self-loop edge. Hence you can + use "[loop above]" explicitly, but the default is "[loop]". + Or a dict keyed by edge to a string holding the options for that edge. + edge_label : string or dict + The name of the edge attribute on `G` that holds the edge label (text) + displayed for each edge. If the attribute is "" or not present, no edge + label is drawn. + Or a dict keyed by edge to a string holding the label for that edge. + edge_label_options : string or dict + The name of the edge attribute on `G` that holds the label options for + each edge. For example, "[sloped,above,blue]". The default is no options. + Or a dict keyed by edge to a string holding the label options for that edge. + caption : string + The caption string for the figure environment + latex_label : string + The latex label used for the figure for easy referral from the main text + sub_captions : list of strings + The sub_caption string for each subfigure in the figure + sub_latex_labels : list of strings + The latex label for each subfigure in the figure + n_rows : int + The number of rows of subfigures to arrange for multiple graphs + as_document : bool + Whether to wrap the latex code in a document environment for compiling + document_wrapper : formatted text string with variable ``content``. + This text is called to evaluate the content embedded in a document + environment with a preamble setting up the TikZ syntax. 
+ figure_wrapper : formatted text string + This text is evaluated with variables ``content``, ``caption`` and ``label``. + It wraps the content and if a caption is provided, adds the latex code for + that caption, and if a label is provided, adds the latex code for a label. + subfigure_wrapper : formatted text string + This text evaluate variables ``size``, ``content``, ``caption`` and ``label``. + It wraps the content and if a caption is provided, adds the latex code for + that caption, and if a label is provided, adds the latex code for a label. + The size is the vertical size of each row of subfigures as a fraction. + + See Also + ======== + to_latex + """ + path.write(to_latex(Gbunch, **options)) diff --git a/MLPY/Lib/site-packages/networkx/drawing/nx_pydot.py b/MLPY/Lib/site-packages/networkx/drawing/nx_pydot.py new file mode 100644 index 0000000000000000000000000000000000000000..1cd17818373fff6fb4120c5a3f9d39046d3df441 --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/nx_pydot.py @@ -0,0 +1,454 @@ +""" +***** +Pydot +***** + +Import and export NetworkX graphs in Graphviz dot format using pydot. + +Either this module or nx_agraph can be used to interface with graphviz. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> PG = nx.nx_pydot.to_pydot(G) +>>> H = nx.nx_pydot.from_pydot(PG) + +See Also +-------- + - pydot: https://github.com/erocarrera/pydot + - Graphviz: https://www.graphviz.org + - DOT Language: http://www.graphviz.org/doc/info/lang.html +""" +import warnings +from locale import getpreferredencoding + +import networkx as nx +from networkx.utils import open_file + +__all__ = [ + "write_dot", + "read_dot", + "graphviz_layout", + "pydot_layout", + "to_pydot", + "from_pydot", +] + + +@open_file(1, mode="w") +def write_dot(G, path): + """Write NetworkX graph G to Graphviz dot format on path. + + Path can be a string or a file handle. + """ + msg = ( + "nx.nx_pydot.write_dot depends on the pydot package, which has " + "known issues and is not actively maintained. Consider using " + "nx.nx_agraph.write_dot instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + P = to_pydot(G) + path.write(P.to_string()) + return + + +@open_file(0, mode="r") +@nx._dispatch(name="pydot_read_dot", graphs=None) +def read_dot(path): + """Returns a NetworkX :class:`MultiGraph` or :class:`MultiDiGraph` from the + dot file with the passed path. + + If this file contains multiple graphs, only the first such graph is + returned. All graphs _except_ the first are silently ignored. + + Parameters + ---------- + path : str or file + Filename or file handle. + + Returns + ------- + G : MultiGraph or MultiDiGraph + A :class:`MultiGraph` or :class:`MultiDiGraph`. + + Notes + ----- + Use `G = nx.Graph(nx.nx_pydot.read_dot(path))` to return a :class:`Graph` instead of a + :class:`MultiGraph`. + """ + import pydot + + msg = ( + "nx.nx_pydot.read_dot depends on the pydot package, which has " + "known issues and is not actively maintained. Consider using " + "nx.nx_agraph.read_dot instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + data = path.read() + + # List of one or more "pydot.Dot" instances deserialized from this file. + P_list = pydot.graph_from_dot_data(data) + + # Convert only the first such instance into a NetworkX graph. 
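+    # graph_from_dot_data returns graphs in file order, so index 0 is the
+    # first graph defined in the file; any further graphs are silently dropped.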
+ return from_pydot(P_list[0]) + + +@nx._dispatch(graphs=None) +def from_pydot(P): + """Returns a NetworkX graph from a Pydot graph. + + Parameters + ---------- + P : Pydot graph + A graph created with Pydot + + Returns + ------- + G : NetworkX multigraph + A MultiGraph or MultiDiGraph. + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> A = nx.nx_pydot.to_pydot(K5) + >>> G = nx.nx_pydot.from_pydot(A) # return MultiGraph + + # make a Graph instead of MultiGraph + >>> G = nx.Graph(nx.nx_pydot.from_pydot(A)) + + """ + msg = ( + "nx.nx_pydot.from_pydot depends on the pydot package, which has " + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + if P.get_strict(None): # pydot bug: get_strict() shouldn't take argument + multiedges = False + else: + multiedges = True + + if P.get_type() == "graph": # undirected + if multiedges: + N = nx.MultiGraph() + else: + N = nx.Graph() + else: + if multiedges: + N = nx.MultiDiGraph() + else: + N = nx.DiGraph() + + # assign defaults + name = P.get_name().strip('"') + if name != "": + N.name = name + + # add nodes, attributes to N.node_attr + for p in P.get_node_list(): + n = p.get_name().strip('"') + if n in ("node", "graph", "edge"): + continue + N.add_node(n, **p.get_attributes()) + + # add edges + for e in P.get_edge_list(): + u = e.get_source() + v = e.get_destination() + attr = e.get_attributes() + s = [] + d = [] + + if isinstance(u, str): + s.append(u.strip('"')) + else: + for unodes in u["nodes"]: + s.append(unodes.strip('"')) + + if isinstance(v, str): + d.append(v.strip('"')) + else: + for vnodes in v["nodes"]: + d.append(vnodes.strip('"')) + + for source_node in s: + for destination_node in d: + N.add_edge(source_node, destination_node, **attr) + + # add default attributes for graph, nodes, edges + pattr = P.get_attributes() + if pattr: + N.graph["graph"] = pattr + try: + N.graph["node"] = P.get_node_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['node']={} + try: + N.graph["edge"] = P.get_edge_defaults()[0] + except (IndexError, TypeError): + pass # N.graph['edge']={} + return N + + +def _check_colon_quotes(s): + # A quick helper function to check if a string has a colon in it + # and if it is quoted properly with double quotes. + # refer https://github.com/pydot/pydot/issues/258 + return ":" in s and (s[0] != '"' or s[-1] != '"') + + +def to_pydot(N): + """Returns a pydot graph from a NetworkX graph N. 
+ + Parameters + ---------- + N : NetworkX graph + A graph created with NetworkX + + Examples + -------- + >>> K5 = nx.complete_graph(5) + >>> P = nx.nx_pydot.to_pydot(K5) + + Notes + ----- + + """ + import pydot + + msg = ( + "nx.nx_pydot.to_pydot depends on the pydot package, which has " + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + # set Graphviz graph type + if N.is_directed(): + graph_type = "digraph" + else: + graph_type = "graph" + strict = nx.number_of_selfloops(N) == 0 and not N.is_multigraph() + + name = N.name + graph_defaults = N.graph.get("graph", {}) + if name == "": + P = pydot.Dot("", graph_type=graph_type, strict=strict, **graph_defaults) + else: + P = pydot.Dot( + f'"{name}"', graph_type=graph_type, strict=strict, **graph_defaults + ) + try: + P.set_node_defaults(**N.graph["node"]) + except KeyError: + pass + try: + P.set_edge_defaults(**N.graph["edge"]) + except KeyError: + pass + + for n, nodedata in N.nodes(data=True): + str_nodedata = {str(k): str(v) for k, v in nodedata.items()} + # Explicitly catch nodes with ":" in node names or nodedata. + n = str(n) + raise_error = _check_colon_quotes(n) or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(v)) + for k, v in str_nodedata.items() + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + p = pydot.Node(n, **str_nodedata) + P.add_node(p) + + if N.is_multigraph(): + for u, v, key, edgedata in N.edges(data=True, keys=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items() if k != "key"} + u, v = str(u), str(v) + raise_error = ( + _check_colon_quotes(u) + or _check_colon_quotes(v) + or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(val)) + for k, val in str_edgedata.items() + ) + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + edge = pydot.Edge(u, v, key=str(key), **str_edgedata) + P.add_edge(edge) + + else: + for u, v, edgedata in N.edges(data=True): + str_edgedata = {str(k): str(v) for k, v in edgedata.items()} + u, v = str(u), str(v) + raise_error = ( + _check_colon_quotes(u) + or _check_colon_quotes(v) + or ( + any( + (_check_colon_quotes(k) or _check_colon_quotes(val)) + for k, val in str_edgedata.items() + ) + ) + ) + if raise_error: + raise ValueError( + f'Node names and attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + edge = pydot.Edge(u, v, **str_edgedata) + P.add_edge(edge) + return P + + +def graphviz_layout(G, prog="neato", root=None): + """Create node positions using Pydot and Graphviz. + + Returns a dictionary of positions keyed by node. + + Parameters + ---------- + G : NetworkX Graph + The graph for which the layout is computed. + prog : string (default: 'neato') + The name of the GraphViz program to use for layout. 
+ Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + Dictionary of (x, y) positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.graphviz_layout(G) + >>> pos = nx.nx_pydot.graphviz_layout(G, prog="dot") + + Notes + ----- + This is a wrapper for pydot_layout. + """ + msg = ( + "nx.nx_pydot.graphviz_layout depends on the pydot package, which has " + "known issues and is not actively maintained. Consider using " + "nx.nx_agraph.graphviz_layout instead.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + return pydot_layout(G=G, prog=prog, root=root) + + +def pydot_layout(G, prog="neato", root=None): + """Create node positions using :mod:`pydot` and Graphviz. + + Parameters + ---------- + G : Graph + NetworkX graph to be laid out. + prog : string (default: 'neato') + Name of the GraphViz command to use for layout. + Options depend on GraphViz version but may include: + 'dot', 'twopi', 'fdp', 'sfdp', 'circo' + root : Node from G or None (default: None) + The node of G from which to start some layout algorithms. + + Returns + ------- + dict + Dictionary of positions keyed by node. + + Examples + -------- + >>> G = nx.complete_graph(4) + >>> pos = nx.nx_pydot.pydot_layout(G) + >>> pos = nx.nx_pydot.pydot_layout(G, prog="dot") + + Notes + ----- + If you use complex node objects, they may have the same string + representation and GraphViz could treat them as the same node. + The layout may assign both nodes a single location. See Issue #1568 + If this occurs in your case, consider relabeling the nodes just + for the layout computation using something similar to:: + + H = nx.convert_node_labels_to_integers(G, label_attribute='node_label') + H_layout = nx.nx_pydot.pydot_layout(G, prog='dot') + G_layout = {H.nodes[n]['node_label']: p for n, p in H_layout.items()} + + """ + import pydot + + msg = ( + "nx.nx_pydot.pydot_layout depends on the pydot package, which has " + "known issues and is not actively maintained.\n\n" + "See https://github.com/networkx/networkx/issues/5723" + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + P = to_pydot(G) + if root is not None: + P.set("root", str(root)) + + # List of low-level bytes comprising a string in the dot language converted + # from the passed graph with the passed external GraphViz command. + D_bytes = P.create_dot(prog=prog) + + # Unique string decoded from these bytes with the preferred locale encoding + D = str(D_bytes, encoding=getpreferredencoding()) + + if D == "": # no data returned + print(f"Graphviz layout with {prog} failed") + print() + print("To debug what happened try:") + print("P = nx.nx_pydot.to_pydot(G)") + print('P.write_dot("file.dot")') + print(f"And then run {prog} on file.dot") + return + + # List of one or more "pydot.Dot" instances deserialized from this string. + Q_list = pydot.graph_from_dot_data(D) + assert len(Q_list) == 1 + + # The first and only such instance, as guaranteed by the above assertion. + Q = Q_list[0] + + node_pos = {} + for n in G.nodes(): + str_n = str(n) + # Explicitly catch nodes with ":" in node names or nodedata. 
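+        # In the DOT language an unquoted colon introduces a port
+        # (e.g. node:port), so such names must be double-quoted.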
+ if _check_colon_quotes(str_n): + raise ValueError( + f'Node names and node attributes should not contain ":" unless they are quoted with "".\ + For example the string \'attribute:data1\' should be written as \'"attribute:data1"\'.\ + Please refer https://github.com/pydot/pydot/issues/258' + ) + pydot_node = pydot.Node(str_n).get_name() + node = Q.get_node(pydot_node) + + if isinstance(node, list): + node = node[0] + pos = node.get_pos()[1:-1] # strip leading and trailing double quotes + if pos is not None: + xx, yy = pos.split(",") + node_pos[n] = (float(xx), float(yy)) + return node_pos diff --git a/MLPY/Lib/site-packages/networkx/drawing/nx_pylab.py b/MLPY/Lib/site-packages/networkx/drawing/nx_pylab.py new file mode 100644 index 0000000000000000000000000000000000000000..096e7b01d052adcd9bbfb64b23ad9abbcf43deef --- /dev/null +++ b/MLPY/Lib/site-packages/networkx/drawing/nx_pylab.py @@ -0,0 +1,1594 @@ +""" +********** +Matplotlib +********** + +Draw networks with matplotlib. + +Examples +-------- +>>> G = nx.complete_graph(5) +>>> nx.draw(G) + +See Also +-------- + - :doc:`matplotlib ` + - :func:`matplotlib.pyplot.scatter` + - :obj:`matplotlib.patches.FancyArrowPatch` +""" +from numbers import Number + +import networkx as nx +from networkx.drawing.layout import ( + circular_layout, + kamada_kawai_layout, + planar_layout, + random_layout, + shell_layout, + spectral_layout, + spring_layout, +) + +__all__ = [ + "draw", + "draw_networkx", + "draw_networkx_nodes", + "draw_networkx_edges", + "draw_networkx_labels", + "draw_networkx_edge_labels", + "draw_circular", + "draw_kamada_kawai", + "draw_random", + "draw_spectral", + "draw_spring", + "draw_planar", + "draw_shell", +] + + +def draw(G, pos=None, ax=None, **kwds): + """Draw the graph G with Matplotlib. + + Draw the graph as a simple representation with no node + labels or edge labels and using the full Matplotlib figure area + and no axis labels by default. See draw_networkx() for more + full-featured drawing that allows title, axis labels etc. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + ax : Matplotlib Axes object, optional + Draw the graph in specified Matplotlib axes. + + kwds : optional keywords + See networkx.draw_networkx() for a description of optional keywords. + + Examples + -------- + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) + >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout + + See Also + -------- + draw_networkx + draw_networkx_nodes + draw_networkx_edges + draw_networkx_labels + draw_networkx_edge_labels + + Notes + ----- + This function has the same name as pylab.draw and pyplot.draw + so beware when using `from networkx import *` + + since you might overwrite the pylab.draw function. 
+ + With pyplot use + + >>> import matplotlib.pyplot as plt + >>> G = nx.dodecahedral_graph() + >>> nx.draw(G) # networkx draw() + >>> plt.draw() # pyplot draw() + + Also see the NetworkX drawing examples at + https://networkx.org/documentation/latest/auto_examples/index.html + """ + import matplotlib.pyplot as plt + + if ax is None: + cf = plt.gcf() + else: + cf = ax.get_figure() + cf.set_facecolor("w") + if ax is None: + if cf.axes: + ax = cf.gca() + else: + ax = cf.add_axes((0, 0, 1, 1)) + + if "with_labels" not in kwds: + kwds["with_labels"] = "labels" in kwds + + draw_networkx(G, pos=pos, ax=ax, **kwds) + ax.set_axis_off() + plt.draw_if_interactive() + return + + +def draw_networkx(G, pos=None, arrows=None, with_labels=True, **kwds): + r"""Draw the graph G using Matplotlib. + + Draw the graph with Matplotlib with options for node positions, + labeling, titles, and many other drawing features. + See draw() for simple drawing without labels or axes. + + Parameters + ---------- + G : graph + A networkx graph + + pos : dictionary, optional + A dictionary with nodes as keys and positions as values. + If not specified a spring layout positioning will be computed. + See :py:mod:`networkx.drawing.layout` for functions that + compute node positions. + + arrows : bool or None, optional (default=None) + If `None`, directed graphs draw arrowheads with + `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges + via `~matplotlib.collections.LineCollection` for speed. + If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish). + If `False`, draw edges using LineCollection (linear and fast). + For directed graphs, if True draw arrowheads. + Note: Arrows will be the same color as edges. + + arrowstyle : str (default='-\|>' for directed graphs) + For directed graphs, choose the style of the arrowsheads. + For undirected graphs default to '-' + + See `matplotlib.patches.ArrowStyle` for more options. + + arrowsize : int or list (default=10) + For directed graphs, choose the size of the arrow head's length and + width. A list of values can be passed in to assign a different size for arrow head's length and width. + See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` + for more info. + + with_labels : bool (default=True) + Set to True to draw labels on the nodes. + + ax : Matplotlib Axes object, optional + Draw the graph in the specified Matplotlib axes. + + nodelist : list (default=list(G)) + Draw only specified nodes + + edgelist : list (default=list(G.edges())) + Draw only specified edges + + node_size : scalar or array (default=300) + Size of nodes. If an array is specified it must be the + same length as nodelist. + + node_color : color or array of colors (default='#1f78b4') + Node color. Can be a single color or a sequence of colors with the same + length as nodelist. Color can be string or rgb (or rgba) tuple of + floats from 0-1. If numeric values are specified they will be + mapped to colors using the cmap and vmin,vmax parameters. See + matplotlib.scatter for more details. + + node_shape : string (default='o') + The shape of the node. 
+        Specification is as matplotlib.scatter
+        marker, one of 'so^>v<dph8'.
+
+    Examples
+    --------
+    >>> G = nx.dodecahedral_graph()
+    >>> nx.draw(G)
+    >>> nx.draw(G, pos=nx.spring_layout(G))  # use spring layout
+
+    >>> import matplotlib.pyplot as plt
+    >>> limits = plt.axis("off")  # turn off axis
+
+    Also see the NetworkX drawing examples at
+    https://networkx.org/documentation/latest/auto_examples/index.html
+
+    See Also
+    --------
+    draw
+    draw_networkx_nodes
+    draw_networkx_edges
+    draw_networkx_labels
+    draw_networkx_edge_labels
+    """
+    from inspect import signature
+
+    import matplotlib.pyplot as plt
+
+    # Get all valid keywords by inspecting the signatures of draw_networkx_nodes,
+    # draw_networkx_edges, draw_networkx_labels
+
+    valid_node_kwds = signature(draw_networkx_nodes).parameters.keys()
+    valid_edge_kwds = signature(draw_networkx_edges).parameters.keys()
+    valid_label_kwds = signature(draw_networkx_labels).parameters.keys()
+
+    # Create a set with all valid keywords across the three functions and
+    # remove the arguments of this function (draw_networkx)
+    valid_kwds = (valid_node_kwds | valid_edge_kwds | valid_label_kwds) - {
+        "G",
+        "pos",
+        "arrows",
+        "with_labels",
+    }
+
+    if any(k not in valid_kwds for k in kwds):
+        invalid_args = ", ".join([k for k in kwds if k not in valid_kwds])
+        raise ValueError(f"Received invalid argument(s): {invalid_args}")
+
+    node_kwds = {k: v for k, v in kwds.items() if k in valid_node_kwds}
+    edge_kwds = {k: v for k, v in kwds.items() if k in valid_edge_kwds}
+    label_kwds = {k: v for k, v in kwds.items() if k in valid_label_kwds}
+
+    if pos is None:
+        pos = nx.drawing.spring_layout(G)  # default to spring layout
+
+    draw_networkx_nodes(G, pos, **node_kwds)
+    draw_networkx_edges(G, pos, arrows=arrows, **edge_kwds)
+    if with_labels:
+        draw_networkx_labels(G, pos, **label_kwds)
+    plt.draw_if_interactive()
+
+
+def draw_networkx_nodes(
+    G,
+    pos,
+    nodelist=None,
+    node_size=300,
+    node_color="#1f78b4",
+    node_shape="o",
+    alpha=None,
+    cmap=None,
+    vmin=None,
+    vmax=None,
+    ax=None,
+    linewidths=None,
+    edgecolors=None,
+    label=None,
+    margins=None,
+):
+    """Draw the nodes of the graph G.
+
+    This draws only the nodes of the graph G.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    pos : dictionary
+        A dictionary with nodes as keys and positions as values.
+        Positions should be sequences of length 2.
+
+    ax : Matplotlib Axes object, optional
+        Draw the graph in the specified Matplotlib axes.
+
+    nodelist : list (default list(G))
+        Draw only specified nodes
+
+    node_size : scalar or array (default=300)
+        Size of nodes. If an array it must be the same length as nodelist.
+
+    node_color : color or array of colors (default='#1f78b4')
+        Node color. Can be a single color or a sequence of colors with the same
+        length as nodelist. Color can be string or rgb (or rgba) tuple of
+        floats from 0-1. If numeric values are specified they will be
+        mapped to colors using the cmap and vmin,vmax parameters. See
+        matplotlib.scatter for more details.
+
+    node_shape : string (default='o')
+        The shape of the node.
+def draw_networkx_nodes(
+    G,
+    pos,
+    nodelist=None,
+    node_size=300,
+    node_color="#1f78b4",
+    node_shape="o",
+    alpha=None,
+    cmap=None,
+    vmin=None,
+    vmax=None,
+    ax=None,
+    linewidths=None,
+    edgecolors=None,
+    label=None,
+    margins=None,
+):
+    """Draw the nodes of the graph G.
+
+    This draws only the nodes of the graph G.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    pos : dictionary
+        A dictionary with nodes as keys and positions as values.
+        Positions should be sequences of length 2.
+
+    ax : Matplotlib Axes object, optional
+        Draw the graph in the specified Matplotlib axes.
+
+    nodelist : list (default=list(G))
+        Draw only specified nodes
+
+    node_size : scalar or array (default=300)
+        Size of nodes. If an array it must be the same length as nodelist.
+
+    node_color : color or array of colors (default='#1f78b4')
+        Node color. Can be a single color or a sequence of colors with the same
+        length as nodelist. Color can be string or rgb (or rgba) tuple of
+        floats from 0-1. If numeric values are specified they will be
+        mapped to colors using the cmap and vmin,vmax parameters. See
+        matplotlib.scatter for more details.
+
+    node_shape : string (default='o')
+        The shape of the node. Specification is as matplotlib.scatter
+        marker, one of 'so^>v<dph8'.
+
+    alpha : float or array of floats (default=None)
+        The node transparency. This can be a single alpha value, in which
+        case it will be applied to all the nodes. Otherwise, if it is an
+        array, the elements of alpha will be applied to the colors in order
+        (cycling through alpha multiple times if necessary).
+
+    cmap : Matplotlib colormap (default=None)
+        Colormap for mapping intensities of nodes
+
+    vmin,vmax : floats or None (default=None)
+        Minimum and maximum for node colormap scaling
+
+    linewidths : [None | scalar | sequence] (default=1.0)
+        Line width of symbol border
+
+    edgecolors : [None | scalar | sequence] (default = node_color)
+        Colors of node borders
+
+    label : [None | string]
+        Label for legend
+
+    margins : float or 2-tuple, optional
+        Sets the padding for axis autoscaling. Increase margin to prevent
+        clipping for nodes that are near the edges of an image. Values should
+        be in the range ``[0, 1]``. See :meth:`matplotlib.axes.Axes.margins`
+        for details. The default is `None`, which uses the Matplotlib default.
+
+    Returns
+    -------
+    matplotlib.collections.PathCollection
+        `PathCollection` of the nodes.
+
+    Examples
+    --------
+    >>> G = nx.dodecahedral_graph()
+    >>> nodes = nx.draw_networkx_nodes(G, pos=nx.spring_layout(G))
+
+    Also see the NetworkX drawing examples at
+    https://networkx.org/documentation/latest/auto_examples/index.html
+
+    See Also
+    --------
+    draw
+    draw_networkx
+    draw_networkx_edges
+    draw_networkx_labels
+    draw_networkx_edge_labels
+    """
+    from collections.abc import Iterable
+
+    import matplotlib as mpl
+    import matplotlib.collections  # call as mpl.collections
+    import matplotlib.pyplot as plt
+    import numpy as np
+
+    if ax is None:
+        ax = plt.gca()
+
+    if nodelist is None:
+        nodelist = list(G)
+
+    if len(nodelist) == 0:  # empty nodelist, no drawing
+        return mpl.collections.PathCollection(None)
+
+    try:
+        xy = np.asarray([pos[v] for v in nodelist])
+    except KeyError as err:
+        raise nx.NetworkXError(f"Node {err} has no position.") from err
+
+    if isinstance(alpha, Iterable):
+        node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
+        alpha = None
+
+    node_collection = ax.scatter(
+        xy[:, 0],
+        xy[:, 1],
+        s=node_size,
+        c=node_color,
+        marker=node_shape,
+        cmap=cmap,
+        vmin=vmin,
+        vmax=vmax,
+        alpha=alpha,
+        linewidths=linewidths,
+        edgecolors=edgecolors,
+        label=label,
+    )
+    ax.tick_params(
+        axis="both",
+        which="both",
+        bottom=False,
+        left=False,
+        labelbottom=False,
+        labelleft=False,
+    )
+
+    if margins is not None:
+        if isinstance(margins, Iterable):
+            ax.margins(*margins)
+        else:
+            ax.margins(margins)
+
+    node_collection.set_zorder(2)
+    return node_collection
+
+
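+# Illustrative sketch (hypothetical helper, not part of the networkx API):
+# the PathCollection returned by `draw_networkx_nodes` is a regular Matplotlib
+# artist and a ScalarMappable, so numeric node colors can drive a colorbar.
+def _example_nodes_with_colorbar():
+    import matplotlib.pyplot as plt
+
+    G = nx.cycle_graph(6)
+    pos = nx.circular_layout(G)
+    degrees = [G.degree(n) for n in G]  # numeric values mapped through cmap
+    nodes = draw_networkx_nodes(G, pos, node_color=degrees, cmap=plt.cm.viridis)
+    plt.colorbar(nodes)  # works because scatter returns a mappable
+    plt.show()
+
+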
+def draw_networkx_edges(
+    G,
+    pos,
+    edgelist=None,
+    width=1.0,
+    edge_color="k",
+    style="solid",
+    alpha=None,
+    arrowstyle=None,
+    arrowsize=10,
+    edge_cmap=None,
+    edge_vmin=None,
+    edge_vmax=None,
+    ax=None,
+    arrows=None,
+    label=None,
+    node_size=300,
+    nodelist=None,
+    node_shape="o",
+    connectionstyle="arc3",
+    min_source_margin=0,
+    min_target_margin=0,
+):
+    r"""Draw the edges of the graph G.
+
+    This draws only the edges of the graph G.
+
+    Parameters
+    ----------
+    G : graph
+        A networkx graph
+
+    pos : dictionary
+        A dictionary with nodes as keys and positions as values.
+        Positions should be sequences of length 2.
+
+    edgelist : collection of edge tuples (default=G.edges())
+        Draw only specified edges
+
+    width : float or array of floats (default=1.0)
+        Line width of edges
+
+    edge_color : color or array of colors (default='k')
+        Edge color. Can be a single color or a sequence of colors with the same
+        length as edgelist. Color can be string or rgb (or rgba) tuple of
+        floats from 0-1. If numeric values are specified they will be
+        mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters.
+
+    style : string or array of strings (default='solid')
+        Edge line style e.g.: '-', '--', '-.', ':'
+        or words like 'solid' or 'dashed'.
+        Can be a single style or a sequence of styles with the same
+        length as the edge list.
+        If fewer styles than edges are given, the styles will cycle.
+        If more styles than edges are given, the styles will be used
+        sequentially and will not be exhausted.
+        Also, `(offset, onoffseq)` tuples can be used as style instead of a string.
+        (See `matplotlib.patches.FancyArrowPatch`: `linestyle`)
+
+    alpha : float or array of floats (default=None)
+        The edge transparency. This can be a single alpha value,
+        in which case it will be applied to all specified edges. Otherwise,
+        if it is an array, the elements of alpha will be applied to the colors
+        in order (cycling through alpha multiple times if necessary).
+
+    edge_cmap : Matplotlib colormap, optional
+        Colormap for mapping intensities of edges
+
+    edge_vmin,edge_vmax : floats, optional
+        Minimum and maximum for edge colormap scaling
+
+    ax : Matplotlib Axes object, optional
+        Draw the graph in the specified Matplotlib axes.
+
+    arrows : bool or None, optional (default=None)
+        If `None`, directed graphs draw arrowheads with
+        `~matplotlib.patches.FancyArrowPatch`, while undirected graphs draw edges
+        via `~matplotlib.collections.LineCollection` for speed.
+        If `True`, draw arrowheads with FancyArrowPatches (bendable and stylish).
+        If `False`, draw edges using LineCollection (linear and fast).
+
+        Note: Arrowheads will be the same color as edges.
+
+    arrowstyle : str (default='-\|>' for directed graphs)
+        For directed graphs and `arrows==True` defaults to '-\|>';
+        for undirected graphs the default is '-'.
+
+        See `matplotlib.patches.ArrowStyle` for more options.
+
+    arrowsize : int (default=10)
+        For directed graphs, choose the size of the arrow head's length and
+        width. See `matplotlib.patches.FancyArrowPatch` for attribute
+        `mutation_scale` for more info.
+
+    connectionstyle : string (default="arc3")
+        Pass the connectionstyle parameter to create a curved arc with rounding
+        radius rad. For example, connectionstyle='arc3,rad=0.2'.
+        See `matplotlib.patches.ConnectionStyle` and
+        `matplotlib.patches.FancyArrowPatch` for more info.
+
+    node_size : scalar or array (default=300)
+        Size of nodes. Though the nodes are not drawn with this function, the
+        node size is used in determining edge positioning.
+
+    nodelist : list, optional (default=G.nodes())
+        This provides the node order for the `node_size` array (if it is an array).
+
+    node_shape : string (default='o')
+        The marker used for nodes, used in determining edge positioning.
+        Specification is as a `matplotlib.markers` marker, e.g. one of 'so^>v<dph8'.
+
+    label : None or string
+        Label for legend
+
+    min_source_margin : int (default=0)
+        The minimum margin (gap) at the beginning of the edge at the source.
+
+    min_target_margin : int (default=0)
+        The minimum margin (gap) at the end of the edge at the target.
+
+    Returns
+    -------
+    matplotlib.collections.LineCollection or a list of matplotlib.patches.FancyArrowPatch
+        If ``arrows=True``, a list of FancyArrowPatches is returned.
+        If ``arrows=False``, a LineCollection is returned.
+        If ``arrows=None`` (the default), then a LineCollection is returned if
+        `G` is undirected, otherwise a list of FancyArrowPatches is returned.
+
+    Notes
+    -----
+    For directed graphs, arrows are drawn at the head end. Arrows can be
+    turned off with keyword arrows=False or by passing an arrowstyle without
+    an arrowhead.
+
+    Be sure to include `node_size` as a keyword argument; arrows are
+    drawn considering the size of nodes.
+
+    Self-loops are always drawn with `~matplotlib.patches.FancyArrowPatch`
+    regardless of the value of `arrows` or whether `G` is directed.
+
+    Examples
+    --------
+    >>> G = nx.dodecahedral_graph()
+    >>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
+
+    >>> G = nx.DiGraph()
+    >>> G.add_edges_from([(1, 2), (1, 3), (2, 3)])
+    >>> arcs = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
+    >>> alphas = [0.3, 0.4, 0.5]
+    >>> for i, arc in enumerate(arcs):  # change alpha values of arcs
+    ...     arc.set_alpha(alphas[i])
+
+    The FancyArrowPatches corresponding to self-loops are not always
+    returned, but can always be accessed via the ``patches`` attribute of the
+    `matplotlib.Axes` object.
+
+    >>> import matplotlib.pyplot as plt
+    >>> fig, ax = plt.subplots()
+    >>> G = nx.Graph([(0, 1), (0, 0)])  # Self-loop at node 0
+    >>> edge_collection = nx.draw_networkx_edges(G, pos=nx.circular_layout(G), ax=ax)
+    >>> self_loop_fap = ax.patches[0]
+
+    Also see the NetworkX drawing examples at
+    https://networkx.org/documentation/latest/auto_examples/index.html
+
+    See Also
+    --------
+    draw
+    draw_networkx
+    draw_networkx_nodes
+    draw_networkx_labels
+    draw_networkx_edge_labels
+
+    """
+    import matplotlib as mpl
+    import matplotlib.collections  # call as mpl.collections
+    import matplotlib.colors  # call as mpl.colors
+    import matplotlib.patches  # call as mpl.patches
+    import matplotlib.path  # call as mpl.path
+    import matplotlib.pyplot as plt
+    import numpy as np
+
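+    # Illustrative note (hypothetical usage, not upstream networkx code):
+    # forcing FancyArrowPatches on an undirected graph enables per-edge
+    # styling such as curved edges, e.g.
+    #
+    #     nx.draw_networkx_edges(G, pos, arrows=True, connectionstyle="arc3,rad=0.2")
+    #
+    # at the cost of speed on large graphs, since one patch is created per edge.
+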
+    # The default behavior is to use LineCollection to draw edges for
+    # undirected graphs (for performance reasons) and use FancyArrowPatches
+    # for directed graphs.
+    # The `arrows` keyword can be used to override the default behavior.
+    use_linecollection = not G.is_directed()
+    if arrows in (True, False):
+        use_linecollection = not arrows
+
+    # Some kwargs only apply to FancyArrowPatches. Warn users when they use
+    # non-default values for these kwargs when LineCollection is being used
+    # instead of silently ignoring the specified option.
+    if use_linecollection and any(
+        [
+            arrowstyle is not None,
+            arrowsize != 10,
+            connectionstyle != "arc3",
+            min_source_margin != 0,
+            min_target_margin != 0,
+        ]
+    ):
+        import warnings
+
+        msg = (
+            "\n\nThe {0} keyword argument is not applicable when drawing edges\n"
+            "with LineCollection.\n\n"
+            "To make this warning go away, either specify `arrows=True` to\n"
+            "force FancyArrowPatches or use the default value for {0}.\n"
+            "Note that using FancyArrowPatches may be slow for large graphs.\n"
+        )
+        if arrowstyle is not None:
+            msg = msg.format("arrowstyle")
+        if arrowsize != 10:
+            msg = msg.format("arrowsize")
+        if connectionstyle != "arc3":
+            msg = msg.format("connectionstyle")
+        if min_source_margin != 0:
+            msg = msg.format("min_source_margin")
+        if min_target_margin != 0:
+            msg = msg.format("min_target_margin")
+        warnings.warn(msg, category=UserWarning, stacklevel=2)
+
+    if arrowstyle is None:
+        if G.is_directed():
+            arrowstyle = "-|>"
+        else:
+            arrowstyle = "-"
+
+    if ax is None:
+        ax = plt.gca()
+
+    if edgelist is None:
+        edgelist = list(G.edges())
+
+    if len(edgelist) == 0:  # no edges!
+        return []
+
+    if nodelist is None:
+        nodelist = list(G.nodes())
+
+    # FancyArrowPatch handles color=None different from LineCollection
+    if edge_color is None:
+        edge_color = "k"
+    edgelist_tuple = list(map(tuple, edgelist))
+
+    # set edge positions
+    edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
+
+    # Check if edge_color is an array of floats and map to edge_cmap.
+    # This is the only case handled differently from matplotlib
+    if (
+        np.iterable(edge_color)
+        and (len(edge_color) == len(edge_pos))
+        and np.all([isinstance(c, Number) for c in edge_color])
+    ):
+        if edge_cmap is not None:
+            assert isinstance(edge_cmap, mpl.colors.Colormap)
+        else:
+            edge_cmap = plt.get_cmap()
+        if edge_vmin is None:
+            edge_vmin = min(edge_color)
+        if edge_vmax is None:
+            edge_vmax = max(edge_color)
+        color_normal = mpl.colors.Normalize(vmin=edge_vmin, vmax=edge_vmax)
+        edge_color = [edge_cmap(color_normal(e)) for e in edge_color]
+
+    def _draw_networkx_edges_line_collection():
+        edge_collection = mpl.collections.LineCollection(
+            edge_pos,
+            colors=edge_color,
+            linewidths=width,
+            antialiaseds=(1,),
+            linestyle=style,
+            alpha=alpha,
+        )
+        edge_collection.set_cmap(edge_cmap)
+        edge_collection.set_clim(edge_vmin, edge_vmax)
+        edge_collection.set_zorder(1)  # edges go behind nodes
+        edge_collection.set_label(label)
+        ax.add_collection(edge_collection)
+
+        return edge_collection
+
+    def _draw_networkx_edges_fancy_arrow_patch():
+        # Note: Waiting for someone to implement arrow to intersection with
+        # marker. Meanwhile, this works well for polygons with more than 4
+        # sides and circle.
+
+        def to_marker_edge(marker_size, marker):
+            if marker in "s^>v<d":  # `marker` is not a circle
+                return np.sqrt(2 * marker_size) / 2
+            else:
+                return np.sqrt(marker_size) / 2