repo_name: string (lengths 6–130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
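To make the schema above concrete, here is a minimal sketch of how one record could be read and unpacked. It assumes the dataset is stored as JSON Lines under the hypothetical name data.jsonl, with one JSON object per line and the per-file fields (hexsha, file_path, code, apis, possible_versions) aligned in parallel, as in the sample rows below; the field names come from the schema, everything else is an assumption.

import json

# Minimal sketch: iterate over records that follow the schema above.
# "data.jsonl" is a hypothetical file name; one JSON object per line is assumed.
with open("data.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        record = json.loads(line)
        repo = record["repo_name"]      # e.g. "panodata/python_dwd"
        shas = record["hexsha"]         # commit hashes, aligned with file_path
        for path, source, apis, versions in zip(
            record["file_path"],        # one entry per source file
            record["code"],             # full file contents as a string
            record["apis"],             # API calls detected in that file
            record["possible_versions"],  # per-file dict: library -> candidate version strings
        ):
            print(repo, path, len(apis), "APIs", list(versions.get("pandas", [])))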
panodata/python_dwd
[ "a9ee1bdf21b8fc12f6b6b33628ca804e656f310d", "a9ee1bdf21b8fc12f6b6b33628ca804e656f310d" ]
[ "tests/dwd/observations/test_api_sites_geo.py", "example/radar/radar_scan_precip.py" ]
[ "from pathlib import Path\n\nimport pytest\nimport numpy as np\nfrom datetime import datetime\nfrom unittest.mock import patch, MagicMock\nimport pandas as pd\n\nfrom wetterdienst.dwd.metadata.column_map import METADATA_DTYPE_MAPPING\nfrom wetterdienst.util.geo import derive_nearest_neighbours\nfrom wetterdienst.util.geo import Coordinates\nfrom wetterdienst.dwd.observations import (\n DWDObservationSites,\n DWDObservationParameterSet,\n DWDObservationPeriod,\n DWDObservationResolution,\n)\nfrom wetterdienst.exceptions import InvalidParameterCombination\n\n\nHERE = Path(__file__).parent\nMETADATA_FILE = HERE / \"FIXED_METADATA.JSON\"\nMETADATA_DF = pd.read_json(METADATA_FILE)\nMETADATA_DF = METADATA_DF.astype(METADATA_DTYPE_MAPPING)\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_success():\n\n # Test for one nearest station\n sites = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n )\n\n nearby_station = sites.nearby_number(\n 50.0,\n 8.9,\n 1,\n )\n nearby_station = nearby_station.drop(\"TO_DATE\", axis=\"columns\")\n nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)\n\n pd.testing.assert_frame_equal(\n nearby_station,\n pd.DataFrame(\n [\n [\n np.int64(4411),\n np.datetime64(\"2002-01-24\"),\n 155.0,\n 49.9195,\n 8.9671,\n \"Schaafheim-Schlierbach\",\n \"Hessen\",\n 11.65302672,\n ]\n ],\n columns=[\n \"STATION_ID\",\n \"FROM_DATE\",\n \"STATION_HEIGHT\",\n \"LAT\",\n \"LON\",\n \"STATION_NAME\",\n \"STATE\",\n \"DISTANCE_TO_LOCATION\",\n ],\n ),\n )\n\n nearby_station = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_radius(\n 50.0,\n 8.9,\n 20,\n )\n nearby_station = nearby_station.drop(\"TO_DATE\", axis=\"columns\")\n nearby_station.STATION_ID = nearby_station.STATION_ID.astype(np.int64)\n\n pd.testing.assert_frame_equal(\n nearby_station,\n pd.DataFrame(\n [\n [\n np.int64(4411),\n np.datetime64(\"2002-01-24 00:00:00\"),\n 155.0,\n 49.9195,\n 8.9671,\n \"Schaafheim-Schlierbach\",\n \"Hessen\",\n 11.653026716750542,\n ],\n [\n np.int64(2480),\n np.datetime64(\"2004-09-01 00:00:00\"),\n 108.0,\n 50.0643,\n 8.993,\n \"Kahl/Main\",\n \"Bayern\",\n 12.572153957087247,\n ],\n [\n np.int64(7341),\n np.datetime64(\"2005-07-16 00:00:00\"),\n 119.0,\n 50.09,\n 8.7862,\n \"Offenbach-Wetterpark\",\n \"Hessen\",\n 16.13301589362613,\n ],\n ],\n columns=[\n \"STATION_ID\",\n \"FROM_DATE\",\n \"STATION_HEIGHT\",\n \"LAT\",\n \"LON\",\n \"STATION_NAME\",\n \"STATE\",\n \"DISTANCE_TO_LOCATION\",\n ],\n ),\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_fail_1():\n\n with pytest.raises(ValueError):\n DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_number(\n 51.4,\n 9.3,\n 0,\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_number_fail_2():\n\n with 
pytest.raises(InvalidParameterCombination):\n DWDObservationSites(\n DWDObservationParameterSet.SOIL,\n DWDObservationResolution.MINUTE_10,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_number(\n 51.4,\n 9.3,\n 1,\n )\n\n\n@patch(\n \"wetterdienst.dwd.observations.stations.metadata_for_climate_observations\",\n MagicMock(return_value=METADATA_DF),\n)\ndef test_dwd_observation_sites_nearby_distance():\n nearby_station = DWDObservationSites(\n DWDObservationParameterSet.TEMPERATURE_AIR,\n DWDObservationResolution.HOURLY,\n DWDObservationPeriod.RECENT,\n datetime(2020, 1, 1),\n datetime(2020, 1, 20),\n ).nearby_radius(\n 50.0,\n 8.9,\n 10,\n )\n assert nearby_station.empty is True\n\n\ndef test_derive_nearest_neighbours():\n coords = Coordinates(np.array([50.0, 51.4]), np.array([8.9, 9.3]))\n\n metadata = pd.read_json(METADATA_FILE)\n\n distances, indices_nearest_neighbours = derive_nearest_neighbours(\n metadata.LAT.values, metadata.LON.values, coords\n )\n\n np.testing.assert_array_almost_equal(distances, np.array([0.00182907, 0.00227919]))\n\n np.testing.assert_array_almost_equal(\n indices_nearest_neighbours, np.array([432, 655])\n )\n", "\"\"\"\n=====\nAbout\n=====\nExample for DWD radar sites data in OPERA HDF5 (ODIM_H5) format using wetterdienst and wradlib. # noqa\nDerived from https://gist.github.com/kmuehlbauer/ac990569e6ad38a49412fc74a2035c37.\n\nSee also:\n- https://docs.wradlib.org/en/stable/notebooks/fileio/wradlib_radar_formats.html#OPERA-HDF5-(ODIM_H5) # noqa\n\nThis program will request the most recent complete SWEEP_PCP data\nfor Boostedt and plot the outcome with matplotlib.\n\n\n=====\nSetup\n=====\n::\n\n brew install gdal\n pip install wradlib\n\n\"\"\"\nimport logging\nimport os\nfrom itertools import chain\n\nimport wradlib as wrl\nimport matplotlib.pyplot as pl\n\nfrom wetterdienst.dwd.radar import (\n DWDRadarData,\n DWDRadarParameter,\n DWDRadarDate,\n DWDRadarDataFormat,\n DWDRadarDataSubset,\n DWDRadarSite,\n)\n\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger()\n\n\ndef plot(data: wrl.io.XRadVolume):\n \"\"\"\n Convenience function for plotting radar data.\n \"\"\"\n\n # Get first sweep in volume.\n swp0 = data[0].data\n\n # Georeference Data.\n swp0 = swp0.pipe(wrl.georef.georeference_dataset)\n\n # Plot and display data using cartopy.\n fig = pl.figure(figsize=(20, 8))\n ax1 = fig.add_subplot(121, aspect=\"equal\")\n swp0.DBZH[0].plot(x=\"x\", y=\"y\", ax=ax1)\n ax2 = fig.add_subplot(122, aspect=\"equal\")\n swp0.VRADH[0].plot(x=\"x\", y=\"y\", ax=ax2)\n\n\ndef radar_info(data: dict):\n \"\"\"\n Display data from radar request.\n \"\"\"\n print(data)\n\n return\n print(\"Keys:\", data.keys())\n\n log.info(\"Data\")\n for key, value in data.items():\n print(f\"- {key}: {value}\")\n\n\ndef radar_scan_precip():\n\n request_velocity = DWDRadarData(\n parameter=DWDRadarParameter.SWEEP_PCP_VELOCITY_H,\n start_date=DWDRadarDate.MOST_RECENT,\n site=DWDRadarSite.BOO,\n fmt=DWDRadarDataFormat.HDF5,\n subset=DWDRadarDataSubset.POLARIMETRIC,\n )\n request_reflectivity = DWDRadarData(\n parameter=DWDRadarParameter.SWEEP_PCP_REFLECTIVITY_H,\n start_date=DWDRadarDate.MOST_RECENT,\n site=DWDRadarSite.BOO,\n fmt=DWDRadarDataFormat.HDF5,\n subset=DWDRadarDataSubset.POLARIMETRIC,\n )\n\n log.info(\n f\"Acquiring radar SWEEP_PCP data for {DWDRadarSite.BOO} at \"\n f\"{request_velocity.start_date}\"\n )\n\n # Submit requests.\n results = chain(\n request_velocity.collect_data(), request_reflectivity.collect_data()\n 
)\n\n # Collect list of buffers.\n files = list(map(lambda item: item.data, results))\n\n # Decode data using wradlib.\n data = wrl.io.open_odim(files)\n\n # Output debug information.\n radar_info(data)\n\n # Plot and display data.\n plot(data)\n pl.show()\n\n # Remove temporary files.\n for tmpfile in files:\n os.unlink(tmpfile)\n\n\ndef main():\n radar_scan_precip()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.int64", "numpy.array", "pandas.read_json", "numpy.datetime64" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UKPLab/coling2018-fake-news-challenge-
[ "6446c4459b520b7f7713bc66117917e341d899dc", "6446c4459b520b7f7713bc66117917e341d899dc" ]
[ "fnc/pipeline.py", "fnc/refs/feature_engineering.py" ]
[ "import sys\nimport datetime\nimport argparse\nimport os\nimport csv\nimport numpy as np\nimport os.path as path\nfrom builtins import isinstance\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nimport fnc.refs.fnc1.scorer as scorer\nimport fnc.utils.score_calculation as score_calculation\nimport fnc.utils.estimator_definitions as esitmator_definitions\nfrom fnc.refs.utils.score import LABELS, score_submission\nfrom fnc.settings import myConstants\nfrom fnc.utils import printout_manager\nfrom fnc.models.MultiThreadingFeedForwardMLP import MultiThreadingFeedForwardMLP\nfrom fnc.src.models import Model\nfrom fnc.refs.utils.generate_test_splits import kfold_split, get_stances_for_folds\n#FNC challenge features from Athene\nfrom fnc.refs.feature_engineering import NMF_fit_all_incl_holdout_and_test, \\\n latent_dirichlet_allocation_incl_holdout_and_test, latent_semantic_indexing_gensim_holdout_and_test,\\\n NMF_fit_all_concat_300_and_test, word_ngrams_concat_tf5000_l2_w_holdout_and_test, NMF_fit_all, \\\n latent_dirichlet_allocation, latent_semantic_indexing_gensim_test, NMF_fit_all_concat_300, word_ngrams_concat_tf5000_l2_w_holdout\n#FNC challenge features from baseline implementation and from Benjamin Schiller\nfrom fnc.refs.feature_engineering import refuting_features, polarity_features, hand_features, word_overlap_features, \\\n gen_non_bleeding_feats, gen_or_load_feats, \\\n word_unigrams_5000_concat_tf_l2_holdout_unlbled_test, NMF_cos_300_holdout_unlbled_test, \\\n NMF_concat_300_holdout_unlbled_test, latent_dirichlet_allocation_25_holdout_unlbled_test, \\\n latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test, \\\n NMF_cos_50, latent_dirichlet_allocation_25, \\\n latent_semantic_indexing_gensim_300_concat_holdout, NMF_concat_300_holdout, word_unigrams_5000_concat_tf_l2_holdout, \\\n sen2sen_similarity_max, word_mover_distance_similarity_sentence_min, \\\n word_mover_distance_wholebody, stanford_ppdb_score, stanford_ppdb_score_1sent, stanford_ppdb_score_2sent, stanford_ppdb_score_3sent, \\\n stanford_sentiment, stanford_sentiment_1sent, stanford_sentiment_2sent, stanford_sentiment_3sent, \\\n stanford_negation_features, stanford_negation_features_1sent, stanford_negation_features_2sent, stanford_negation_features_3sent, \\\n stanford_based_verb_noun_sim, stanford_based_verb_noun_sim_1sent, stanford_based_verb_noun_sim_2sent, stanford_based_verb_noun_sim_3sent, \\\n sdm_sim, stanford_avg_words_per_sent, stanford_avg_words_per_sent_1sent, stanford_avg_words_per_sent_2sent, stanford_avg_words_per_sent_3sent, \\\n hedging_features, ppdb, discuss_features, single_flat_LSTM_50d_100, latent_dirichlet_allocation_300, NMF_cos_300, \\\n char_3grams_5000_concat_all_data, \\\n lexical_features,max_diff_twitter_uni_bigrams,mpqa_unigrams, negated_context_word_12grams_concat_tf5000_l2_all_data, \\\n nrc_emo_lex,nrc_hashtag_sentiment_unigram, nrc_hashtag_sentiment_unigram_POS, POS_features, readability_features , \\\n sentiment140_unigrams, structural_features\n\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\ndef get_args():\n ''' This function parses and return arguments passed in'''\n parser = argparse.ArgumentParser(description='Scorer pipeline')\n parser.add_argument('-p', '--pipeline_type', type=str, nargs='+', help='Pipeline Type (crossv,holdout,ftrain,ftest), e.g. 
-p ftrain', required=True)\n parser.add_argument('-s', '--scorer_type', type=str, help='Scorer Type (baselines, CNN, tf_idf, avg_embed, sdm, doc2vec, word_mover_sentence, word_mover_wholeText)', required=False)\n parser.add_argument('-t', '--threshold', type=float, help='Threshold', required=False)\n\n args = parser.parse_args()\n pipeline_type = args.pipeline_type\n scorer_type = args.scorer_type\n threshold = args.threshold\n return pipeline_type, scorer_type, threshold\n\n\ndef generate_features(stances, dataset, name, feature_list, features_dir):\n \"\"\"\n Creates feature vectors out of the provided dataset\n \"\"\"\n h, b, y, bodyId, headId = [], [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 
'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n y.append(LABELS.index(stance['Stance']))\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n feat_list = []\n last_index = 0\n for feature in feature_list:\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\".\"+name+'.npy', bodyId, feature, headId, fold=name)\n feat_list.append((last_index, last_index+len(feat[0]), str(feature)))\n last_index += len(feat[0])\n X_feat.append(feat)\n X = np.concatenate(X_feat, axis=1)\n\n return X, y, feat_list\n\ndef generate_features_test(stances, dataset, name, feature_list, features_dir):\n \"\"\"\n Equal to generate_features(), but creates features for the unlabeled test data\n \"\"\"\n h, b, bodyId, headId = [], [], [], []\n\n feature_dict = {'overlap': word_overlap_features,\n 'refuting': refuting_features,\n 'polarity': polarity_features,\n 'hand': hand_features,\n 'word_unigrams_5000_concat_tf_l2_holdout_unlbled_test': word_unigrams_5000_concat_tf_l2_holdout_unlbled_test,\n 'NMF_cos_300_holdout_unlbled_test': NMF_cos_300_holdout_unlbled_test,\n 'NMF_concat_300_holdout_unlbled_test': NMF_concat_300_holdout_unlbled_test,\n 'latent_dirichlet_allocation_25_holdout_unlbled_test': latent_dirichlet_allocation_25_holdout_unlbled_test,\n 'latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test': latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test,\n 'NMF_fit_all_incl_holdout_and_test': NMF_fit_all_incl_holdout_and_test,\n 'latent_dirichlet_allocation_incl_holdout_and_test': latent_dirichlet_allocation_incl_holdout_and_test,\n 'latent_semantic_indexing_gensim_holdout_and_test': latent_semantic_indexing_gensim_holdout_and_test,\n 'NMF_fit_all_concat_300_and_test': NMF_fit_all_concat_300_and_test,\n 'word_ngrams_concat_tf5000_l2_w_holdout_and_test': word_ngrams_concat_tf5000_l2_w_holdout_and_test,\n 'NMF_fit_all': NMF_fit_all,\n 'word_ngrams_concat_tf5000_l2_w_holdout': word_ngrams_concat_tf5000_l2_w_holdout,\n 'latent_dirichlet_allocation': latent_dirichlet_allocation,\n 'latent_semantic_indexing_gensim_test': latent_semantic_indexing_gensim_test,\n 'NMF_fit_all_concat_300': NMF_fit_all_concat_300,\n 'NMF_cos_50': NMF_cos_50,\n 'latent_dirichlet_allocation_25': latent_dirichlet_allocation_25,\n 
'latent_semantic_indexing_gensim_300_concat_holdout': latent_semantic_indexing_gensim_300_concat_holdout,\n 'NMF_concat_300_holdout': NMF_concat_300_holdout,\n 'word_unigrams_5000_concat_tf_l2_holdout': word_unigrams_5000_concat_tf_l2_holdout,\n 'ppdb': ppdb,\n 'stanford_ppdb': stanford_ppdb_score,\n 'stanford_ppdb_1sent': stanford_ppdb_score_1sent,\n 'stanford_ppdb_2sent': stanford_ppdb_score_2sent,\n 'stanford_ppdb_3sent': stanford_ppdb_score_3sent,\n 'stanford_sentiment': stanford_sentiment,\n 'stanford_sentiment_1sent': stanford_sentiment_1sent,\n 'stanford_sentiment_2sent': stanford_sentiment_2sent,\n 'stanford_sentiment_3sent': stanford_sentiment_3sent,\n 'stanford_wordsim': stanford_based_verb_noun_sim,\n 'stanford_wordsim_1sent': stanford_based_verb_noun_sim_1sent,\n 'stanford_wordsim_2sent': stanford_based_verb_noun_sim_2sent,\n 'stanford_wordsim_3sent': stanford_based_verb_noun_sim_3sent,\n 'stanford_negation': stanford_negation_features,\n 'stanford_negation_1sent': stanford_negation_features_1sent,\n 'stanford_negation_2sent': stanford_negation_features_2sent,\n 'stanford_negation_3sent': stanford_negation_features_3sent,\n 'stanford_avg_words_per_sent': stanford_avg_words_per_sent,\n 'stanford_avg_words_per_sent_1sent': stanford_avg_words_per_sent_1sent,\n 'stanford_avg_words_per_sent_2sent': stanford_avg_words_per_sent_2sent,\n 'stanford_avg_words_per_sent_3sent': stanford_avg_words_per_sent_3sent,\n 'hedging': hedging_features,\n 'sen2sen': sen2sen_similarity_max,\n 'wmdsenSen': word_mover_distance_similarity_sentence_min,\n 'wmdsenDoc': word_mover_distance_wholebody,\n 'sdm_sim': sdm_sim,\n 'discuss': discuss_features,\n 'single_flat_LSTM_50d_100': single_flat_LSTM_50d_100,\n 'char_3grams_5000_concat_all_data': char_3grams_5000_concat_all_data,\n 'lexical_features': lexical_features,\n 'max_diff_twitter_uni_bigrams': max_diff_twitter_uni_bigrams,\n 'mpqa_unigrams': mpqa_unigrams,\n 'negated_context_word_12grams_concat_tf5000_l2_all_data': negated_context_word_12grams_concat_tf5000_l2_all_data,\n 'nrc_emo_lex': nrc_emo_lex,\n 'nrc_hashtag_sentiment_unigram': nrc_hashtag_sentiment_unigram,\n 'nrc_hashtag_sentiment_unigram_POS': nrc_hashtag_sentiment_unigram_POS,\n #'POS_features': POS_features,\n 'readability_features': readability_features,\n 'sentiment140_unigrams': sentiment140_unigrams,\n 'structural_features': structural_features,\n 'latent_dirichlet_allocation_300': latent_dirichlet_allocation_300,\n 'NMF_cos_300': NMF_cos_300\n }\n\n stanceCounter = 0\n for stance in stances:\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n bodyId.append(stance['Body ID'])\n headId.append(name+str(stanceCounter))\n stanceCounter += 1\n\n X_feat = []\n for feature in feature_list:\n print(\"calculate feature: \" + str(feature))\n feat = gen_or_load_feats(feature_dict[feature], h, b, features_dir+\"/\"+feature+\"_test.\"+name+'.npy', bodyId, feature, headId, fold=name)\n X_feat.append(feat)\n print(len(feat))\n X = np.concatenate(X_feat, axis=1)\n return X\n\ndef generate_non_bleeding_features(fold_stances, hold_out_stances, no_folds, BOW_feature_list, features_dir, d):\n \"\"\"\n Does the same as generate_features(), just for non-bleeding features. It prevents bleeding by training certain features\n (e.g. 
word unigrams) strictly on the training data, instead of training+test data.\n Feature extraction methods in feature_engineering.py have to provide an extended parameter list in order to use this\n (method_name(headlines, bodies, headlines_test, bodies_test)). The saved feature files have the following structure:\n - e.g. feature_name_0.py will hold the features of the folds from 1 to 9 and feature_name_0.test.py\n will hold the features of fold 0, derived of the folds 1 to 9\n\n This method (and feature methods based on this one) is just to get more reliable (non-bleeding) score results and cannot be used for\n the training of the final classifier.\n \"\"\"\n\n # holds all bag of words features and their feature extraction methods\n non_bleeding_feature_dict = {}\n\n def generate_holdout_BOW_features():\n for fold in range(no_folds):\n ids = list(range(no_folds))\n merged = []\n for i in ids:\n merged.extend(fold_stances[i])\n\n h = []\n b = []\n for stance in merged:\n h.append(stance['Headline'])\n b.append(d.articles[stance['Body ID']])\n\n h_test = []\n b_test = []\n for stance in hold_out_stances:\n h_test.append(stance['Headline'])\n b_test.append(d.articles[stance['Body ID']])\n\n gen_non_bleeding_feats(non_bleeding_feature_dict[feature], h, b, h_test, b_test,\n features_dir, feature, 'holdout')\n\n def generate_fold_BOW_features():\n for fold in range(no_folds):\n ids = list(range(no_folds))\n del ids[fold]\n\n merged = []\n for i in ids:\n merged.extend(fold_stances[i])\n\n # 9 of 10 folds merged for training BOW features\n h = []\n b = []\n for stance in merged:\n h.append(stance['Headline'])\n b.append(d.articles[stance['Body ID']])\n\n # 1 fold (test) to extract features out of the generated BOW\n h_test = []\n b_test = []\n for stance in fold_stances[fold]:\n h_test.append(stance['Headline'])\n b_test.append(d.articles[stance['Body ID']])\n\n gen_non_bleeding_feats(non_bleeding_feature_dict[feature],\n h, b, h_test, b_test, features_dir, feature,\n fold)\n\n for feature in BOW_feature_list:\n generate_fold_BOW_features()\n generate_holdout_BOW_features()\n\n\ndef concat_non_bleeding_features(X_train, X_test, BOW_feature_list, features_dir, fold):\n \"\"\"\n Concatenates the given train and test feature vectors with all the non bleeding features\n specified in the non_bleeding_feature_list.\n \"\"\"\n feat_list = []\n for feature in BOW_feature_list:\n X_train_part = np.load(\"%s/%s.%s.npy\" % (features_dir, feature, fold))\n last_index = len(X_train[0])\n X_train = np.concatenate([X_train, X_train_part], axis=1)\n feat_list.append((last_index, last_index+len(X_train_part[0]), str(feature)))\n X_test_part = np.load(\"%s/%s.%s.test.npy\" % (features_dir, feature, fold))\n X_test = np.concatenate([X_test, X_test_part], axis=1)\n return X_train, X_test, feat_list\n\ndef print_score_from_restored_model(clf, X_test, y_test):\n \"\"\"\n Takes a fitted classifier, predicts on base on the given X,\n compares to the actual y and prints the score.\n \"\"\"\n y_predicted = clf.predict(X_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n print(\"FNC-1 score from restored model: \" + str(score) +\"\\n\")\n\n return score\n\ndef save_model(clf, save_folder, filename):\n \"\"\"\n Dumps a given classifier to the specific folder with the given name\n \"\"\"\n import pickle\n 
path = save_folder + filename\n with open(path, 'wb') as handle:\n pickle.dump(clf, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef load_model(save_folder, filename):\n \"\"\"\n Loads and returns a classifier at the given folder with the given name\n \"\"\"\n print(\"Warning: Make sure older models with this name have been trained on the same features! Otherwise,\"\n \"if the lengths of the features the model has been trained on, differ, an error will occur!\")\n import pickle\n path = save_folder + filename\n with open(path, 'rb') as handle:\n return pickle.load(handle)\n\ndef get_save_folder(parent_folder, scorer_type):\n \"\"\"\n Returns an unused save location for a classifier based on its name\n \"\"\"\n if not os.path.exists(parent_folder):\n os.makedirs(parent_folder)\n\n # in order to prevent overwriting existing learned models, always create a new folder\n save_folder = parent_folder + scorer_type\n id = 0\n while os.path.exists(save_folder+\"_\"+str(id)):\n id += 1\n save_folder += \"_\" + str(id) +\"/\"\n os.makedirs(save_folder)\n return save_folder\n\ndef cross_validation(fold_stances, folds, Xs, ys, non_bleeding_features, features_dir,\n scorer_type, all_accuracies_related, all_accuracies_stance,\n all_f1_related, all_f1_stance, all_scores, result_string, learning_rate_string):\n best_score = 0\n\n for fold in fold_stances:\n ids = list(range(len(folds)))\n del ids[fold]\n\n X_train = np.vstack(tuple([Xs[i] for i in ids]))\n y_train = np.hstack(tuple([ys[i] for i in ids]))\n\n X_test = Xs[fold]\n y_test = ys[fold]\n\n # Add BOW features to current feature vectors\n # The features are specified in BOW_feature_list\n X_train, X_test, _ = concat_non_bleeding_features(\n X_train, X_test,\n non_bleeding_features, features_dir, fold)\n\n # get the estimator for this loop\n clf = esitmator_definitions.get_estimator(scorer_type)\n\n print(\"Begin fitting at: \" + str(datetime.datetime.now()).split('.')[0] + \"\\n\")\n\n # start fitting the estimator\n clf.fit(X_train, y_train)\n\n # predict the labes for fitted classifier with the test data\n predicted_int = clf.predict(X_test)\n\n #Baseline \"hack\" - uncomment to calculate the baseline\n #predicted_int = np.empty(len(y_test))\n #predicted_int.fill(3)\n\n predicted = [LABELS[int(a)] for a in predicted_int]\n actual = [LABELS[int(a)] for a in y_test]\n\n # calculate the FNC-1 score based on the predicted and the actual labels\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calculates accuracy and f1-macro scores\n accuracy_stance = score_calculation.get_accuracy(predicted_int, y_test, stance=True)\n accuracy_related = score_calculation.get_accuracy(predicted_int, y_test, stance=False)\n f1_stance = score_calculation.get_f1score(predicted_int, y_test, stance=True)\n f1_related = score_calculation.get_f1score(predicted_int, y_test, stance=False)\n\n # add the scores to the list holding the stores of all folds\n all_accuracies_related.append(accuracy_related)\n all_accuracies_stance.append(accuracy_stance)\n all_f1_related.append(f1_related)\n all_f1_stance.append(f1_stance)\n\n # get best score of all folds\n all_scores.append(score)\n if score > best_score:\n best_score = score\n\n # Prepare printout for fold result\n printout = printout_manager.get_foldwise_printout(fold, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(printout) # print results for this fold\n result_string += printout # add results to 
final result file\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += clf.get_learning_rates(fold) + \"\\n\"\n\n # Prepare printout for final result\n printout = printout_manager.get_cross_validation_printout(\n all_accuracies_related, all_accuracies_stance, all_f1_related, all_f1_stance, all_scores, best_score)\n print(printout) # print cross validation results\n result_string += printout # add cross validation results to result file\n\n return result_string, learning_rate_string\n\n#Taken from Benjamins LSTM\ndef append_to_loss_monitor_file(text, filepath):\n with open(filepath, 'a+') as the_file:\n the_file.write(text+\"\\n\")\n\ndef validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features, features_dir,\n scorer_type, feat_indices, result_string, learning_rate_string, features):\n \"\"\"\n Trains the classifier on all of the train+test data and tests it on the holdout set\n :param Xs: All the training data's feature vectors, split in their folds\n :param ys: All the training data's labels, split in their folds\n :param X_holdout: The holdout feature vectors\n :param y_holdout: The holdout labels\n :param non_bleeding_features: The list of non-bleeding features that has to be concatenated to the existing feature vectors\n :param features_dir: the directory where the features are stored\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param feat_indices: indices returned by generate_features() method. They indicate at what index of the feature vector a specific\n feature starts and where it ends. This is used for printing out the feature importances by the RandomForest classifier\n :param result_string: The current result string in order to add the holdout results\n :param learning_rate_string: The current learning rate string in order to add information about the learning rate\n :return: the updated result_string and learning_rate_string\n \"\"\"\n # define folder to save the classifier and create it if not existing\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder\n save_folder = get_save_folder(parent_folder, scorer_type+\"_new\")\n\n # only pass a save folder if the classifier should be saved\n best_clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # concat non-bleeding features\n X_train, X_holdout, feat_indices_holdout = concat_non_bleeding_features(\n X_train, X_holdout,\n non_bleeding_features, features_dir, 'holdout')\n\n # test for oversampling: fits the current classifier, oversampled with a given\n # method and checks the score on the holdout set\n use_over_sampling = False\n if use_over_sampling == True:\n from imblearn.over_sampling import SMOTE\n kind = ['regular', 'borderline1', 'borderline2', 'svm']\n for m in kind:\n sm = SMOTE(kind=m)\n X_res, y_res = sm.fit_sample(X_train, y_train)\n best_clf.fit(X_res, y_res)\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n print(\"Score \" + m + \":\" 
+ str(score))\n\n\n #Taken from Benjamins LSTM\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n best_clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n best_clf.fit(X_train, y_train)\n\n # predict labels\n y_predicted = best_clf.predict(X_holdout)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_holdout]\n\n # calc FNC score\n fold_score, cm = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy for related/unrelated and stances\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_holdout, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_holdout, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_holdout, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_holdout, stance=False)\n\n # prepare printout for final results of holdout set\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related, f1_stance, score)\n printout += printout_manager.calculate_confusion_matrix(cm)\n print(printout) # print holdout results\n result_string += printout + \"\\n\"# add results to string that is going to be saved into a file\n\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n printout_manager.save_file(result_string, result_file_folder + \"/fnc_results_holdout.txt\", \"a+\")\n\n #aligned printout for ablation:\n summary = printout_manager.get_holdout_ablation_printout(features, score,f1_stance,save_folder)\n printout_manager.save_file(summary, result_file_folder + \"/fnc_results_holdout_summary.txt\", \"a+\")\n\n # test saving and restoring model\n #filename = scorer_type + \".sav\"\n #save_model(best_clf, save_folder,filename)\n #load_clf = load_model(parent_folder + scorer_type + \"_new_0/\", filename) # the 0th folder should always exist\n #print_score_from_restored_model(load_clf, X_holdout, y_holdout)\n\n # add to special file that shows learning rate and loss of optimizer\n if isinstance(best_clf, MultiThreadingFeedForwardMLP):\n learning_rate_string += best_clf.get_learning_rates('holdout') + \"\\n\"\n\n # print feature importances\n if scorer_type == 'randomforest':\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n importances = best_clf.feature_importances_\n std = np.std([tree.feature_importances_ for tree in best_clf.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n feat_indices.append(feat_indices_holdout)\n\n feat_importance_string = str(feat_indices) + \"\\n\"\n for i in indices:\n feat_importance_string += str(i) + \";\" + str(importances[i]) + \";\" + str(std[i]) + \"\\n\"\n\n # save feature importances as file\n printout_manager.save_file(feat_importance_string, result_file_folder + \"/feat_importance_rf.txt\", \"a+\")\n\n return result_string, learning_rate_string\n\n\ndef final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, 
sanity_check=False, oversampling=False):\n \"\"\"\n Train final classifier on all of the data to prepare it for the prediction of the FNC-1's unlabeled data\n :param Xs: All the training data's feature vectors, split in their folds\n :param ys: All the training data's labels, split in their folds\n :param X_holdout: The holdout feature vectors\n :param y_holdout: The holdout labels\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param sanity_check: If true, the trained classifier predicts the labels of the data it was trained on and prints out the score\n :return: the final classifier\n \"\"\"\n\n # stack all the feature vectors of all the folds\n X_train = np.vstack(tuple([Xs[i] for i in range(10)]))\n y_train = np.hstack(tuple([ys[i] for i in range(10)]))\n\n # stack the holdout feature vectors on the feature vectors of all folds\n X_all = np.concatenate([X_train, X_holdout], axis=0)\n y_all = np.concatenate([y_train, y_holdout], axis=0)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # create the new save folder for the specific classifer\n scorer_folder_name = scorer_type+\"_final\"\n save_folder = get_save_folder(parent_folder, scorer_folder_name+\"_new\")\n\n # get classifier and only pass a save folder if the classifier should be saved\n clf = esitmator_definitions.get_estimator(scorer_type, save_folder=save_folder)\n\n #perform oversampling if selected\n if oversampling == True:\n if 'f_ext' in scorer_type:\n print(\"Oversampling not defined for LSTM\")\n exit()\n\n import datetime\n start = datetime.datetime.now().time()\n print(\"Started oversampling/undersampling at: \" + str(start))\n # uncomment following lines for the different sampling methods #####\n # Oversampling\n from imblearn.over_sampling import SMOTE, ADASYN, RandomOverSampler\n print(\"Oversampling data\")\n #kind = ['regular', 'borderline1', 'borderline2', 'svm']\n #sm = SMOTE(kind='regular',)\n #X_res, y_res = sm.fit_sample(X_all, y_all)\n\n #ros = RandomOverSampler()\n #X_res, y_res = ros.fit_sample(X_all, y_all)\n\n #ada = ADASYN()\n #X_res, y_res = ada.fit_sample(X_all, y_all)\n\n ######################################################\n # Undersampling\n from imblearn.under_sampling import TomekLinks, EditedNearestNeighbours, CondensedNearestNeighbour, \\\n NeighbourhoodCleaningRule, InstanceHardnessThreshold\n # remove Tomek links\n tl = TomekLinks(return_indices=True)\n X_res, y_res, idx_resampled = tl.fit_sample(X_all, y_all)\n\n #enn = EditedNearestNeighbours(random_state=0)\n #X_res, y_res = enn.fit_sample(X_all, y_all)\n\n #cnn = CondensedNearestNeighbour(random_state=0)\n #X_res, y_res = cnn.fit_sample(X_all, y_all)\n\n #ncr = NeighbourhoodCleaningRule(random_state=0)\n #X_res, y_res = ncr.fit_sample(X_all, y_all)\n\n #iht = InstanceHardnessThreshold(random_state=0, estimator=clf)\n #X_res, y_res = iht.fit_sample(X_all, y_all)\n\n\n ##################\n # Combination of Undersampling and oversampling\n\n from imblearn.combine import SMOTEENN, SMOTETomek\n #smote_enn = SMOTEENN(random_state=0)\n #X_res, y_res = smote_enn.fit_sample(X_all, y_all)\n\n #smote_tomek = SMOTETomek(random_state=0)\n #X_res, y_res = smote_tomek.fit_sample(X_all, y_all)\n\n end = datetime.datetime.now().time()\n print(\"Ended oversampling/undersampling at: \" + str(end))\n\n clf.fit(X_res, y_res)\n else: # if oversampling is false\n import 
datetime\n # fit the final classifier\n loss_monitor_file_dir = \"%s/data/fnc-1/model_results/loss_results/\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n loss_filename = loss_monitor_file_dir + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M\")) + \".txt\"\n # fit the classifier\n if 'f_ext' in scorer_type:\n append_to_loss_monitor_file(\"\\n\\nFOLD holdout and classifier: \" + scorer_type + \"\\n\", loss_filename)\n append_to_loss_monitor_file(str(datetime.datetime.now()).split('.')[0], loss_filename)\n clf.fit(X_train, y_train, X_holdout, np.array(y_holdout), 'holdout', loss_filename)\n else:\n clf.fit(X_all, y_all)\n\n # save the model\n filename = scorer_folder_name + \".sav\"\n save_model(clf, save_folder, filename) # save model with filename to specific folder\n\n # predict on the data the classifier was trained on => should give near perfect score\n if sanity_check == True:\n # get predicted and actual labels\n y_predicted = clf.predict(X_all)\n predicted = [LABELS[int(a)] for a in y_predicted]\n actual = [LABELS[int(a)] for a in y_all]\n\n # calc FNC score\n fold_score, _ = score_submission(actual, predicted)\n max_fold_score, _ = score_submission(actual, actual)\n score = fold_score / max_fold_score\n\n # calc accuracy, f1 macro\n accuracy_stance = score_calculation.get_accuracy(y_predicted, y_all, stance=True)\n accuracy_related = score_calculation.get_accuracy(y_predicted, y_all, stance=False)\n f1_stance = score_calculation.get_f1score(y_predicted, y_all, stance=True)\n f1_related = score_calculation.get_f1score(y_predicted, y_all, stance=False)\n\n # printout results\n printout = printout_manager.get_holdout_printout(save_folder, accuracy_related, accuracy_stance, f1_related,\n f1_stance, score)\n print(\"SANITY CHECK (predict on train data):\")\n print(printout)\n return clf\n\ndef final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, final_clf):\n \"\"\"\n Run the prediction on the final model. In order to do that, the features vectors of the unlabeled FNC-1 data are\n generated first.\n :param data_path: data_path to the unlabeled stances and the corresponding bodies\n :param features: The feature list\n :param features_dir: The directory where the features are stored\n :param scorer_type: the scorer type, e.g. MLB_base (see estimator_definitions.py in utils folder)\n :param run_final_train: Sanity check: if the final classifier has been trained in this run, check if the prediction of it\n compared to the classifier that is being loaded in this method, are the same. 
If yes, they represent the same model.\n :param final_clf: The classifier that was trained in this run (IF a classifier was trained)\n :return:\n \"\"\"\n\n d = myConstants.testdataset\n\n # generate features for the unlabeled testing set\n X_final_test = generate_features_test(d.stances, d, str(\"final_test\"), features, features_dir)\n\n # define and create parent folder to save all trained classifiers into\n parent_folder = \"%s/data/fnc-1/mlp_models/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n fnc_result_folder = \"%s/data/fnc-1/fnc_results/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # load model [scorer_type]_final_2 classifier\n filename = scorer_type + \"_final.sav\"\n load_clf = load_model(parent_folder + scorer_type + myConstants.model_name, filename)\n # The model is set in settings.py in class \"myConstants\"\n\n print(\"Load model for final prediction of test set: \" + parent_folder + scorer_type + myConstants.model_name + filename)\n\n # predict classes and turn into labels\n y_predicted = load_clf.predict(X_final_test)\n predicted = [LABELS[int(a)] for a in y_predicted]\n\n # create folder to save the file\n if not os.path.exists(parent_folder):\n os.makedirs(parent_folder)\n if not os.path.exists(fnc_result_folder):\n os.makedirs(fnc_result_folder)\n\n # save the submission file, including the prediction for the labels\n with open(fnc_result_folder + scorer_type + \"_submission.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Stance\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Stance': predicted[i]})\n i += 1\n\n\n # save the probabilities file, including the prediction for the labels\n if (\"voting_\" not in scorer_type) and (\"f_ext\" not in scorer_type) and (\"MLP_base_2\" not in scorer_type) and (\"featMLP\" not in scorer_type) and (\"stackLSTM\" not in scorer_type):\n print(\"Generating submission_probabilities.csv\")\n predicted_proba = load_clf.predict_proba(X_final_test)\n with open(fnc_result_folder + scorer_type + \"_probabilities.csv\", 'w') as csvfile:\n fieldnames = [\"Headline\", \"Body ID\", \"Agree\", \"Disagree\", \"Discuss\", \"Unrelated\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n i = 0\n for stance in d.stances:\n writer.writerow(\n {'Headline': stance['Headline'], 'Body ID': stance['Body ID'], 'Agree': predicted_proba[i][0],\n 'Disagree': predicted_proba[i][1], 'Discuss': predicted_proba[i][2],\n 'Unrelated': predicted_proba[i][3]})\n i += 1\n else:\n print(\"Not generating submission_probabilities.csv - because classifier contains \\\"voting\\\", \\\"f_ext\\\" or \\\"MLP_base_2\\\"\")\n\n # check whether loaded clf from disk and just trained clf return the same results\n if (run_final_train == True) and not (final_clf is None):\n print(\"Check whether loaded final model and just trained final model retrieve the same results.\"\n \"The results are only equal (=None) if they are the same model. This is a check to see whether \"\n \"saving and loading the model works correctly:\")\n print(np.testing.assert_array_equal(y_predicted, final_clf.predict(X_final_test)))\n\ndef delete_ffmlp_data():\n \"\"\"\n In order to copy the structure of Sklearn's BaseEstimator (fit(), predict(), ...) the MultiThreadingFeedForwardMLP\n has to save its graph after fitting. 
If its argument \"save_folder\" doesn't get a specific folder, it's seen as a\n temporary model (lifetime of the model is just for the runtime). The model will be saved in a special temporary folder.\n This method is called after the pipeline run has finished and deletes all the temporarily saved models of\n MultiThreadingFeedForwardMLP.\n \"\"\"\n import shutil\n ffmlp_dir = \"%s/data/fnc-1/mlp_models/temp_models\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n if (os.path.exists(ffmlp_dir)):\n for the_file in os.listdir(ffmlp_dir):\n file_path = os.path.join(ffmlp_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n\ndef pipeline():\n # define data paths\n result_file_folder = \"%s\" % (path.dirname(path.dirname(path.abspath(__file__))))\n data_path = \"%s/data/fnc-1\" % (path.dirname(path.dirname(path.abspath(__file__))))\n embeddPath = \"%s/data/embeddings/google_news/GoogleNews-vectors-negative300.bin.gz\" % (path.dirname(path.dirname(path.abspath(__file__))))\n\n # get arguments for pipeline call\n pipeline_type, scorer_type, threshold = get_args()\n splits_dir = myConstants.splits_dir\n features_dir = myConstants.features_dir\n\n # configure pipeline runs by using given terminal arguments\n run_CV = False\n if \"crossv\" in pipeline_type:\n run_CV = True # run cross validation\n\n run_validation = False\n if \"holdout\" in pipeline_type:\n run_validation = True # run validation on holdout set\n\n run_final_train = False\n if \"ftrain\" in pipeline_type:\n run_final_train = True # train classifier on all the data available\n\n run_final_prediction = False\n if \"ftest\" in pipeline_type:\n run_final_prediction = True # run prediction on test data provided by FNC-1 challenge\n\n if \"analysis\" in pipeline_type:\n # parse in train bodies and stances for analysis\n bodyDict = myConstants.testdataset.articles\n train_data = myConstants.testdataset.stances\n\n if scorer_type == None:\n raise AttributeError(\"Please specify scorer_type\")\n if threshold == None:\n raise AttributeError(\"Please specify Threshold\")\n model = Model(scorer_type, embeddPath=embeddPath)\n model.analyze_data(train_data, bodyDict, threshold=threshold)\n\n perform_oversampling = myConstants.perform_oversampling\n\n # train the model / predict on basis of the model\n if True in [run_CV, run_validation, run_final_train, run_final_prediction]:\n\n if sys.version_info.major < 3:\n sys.stderr.write('Please use Python version 3 and above\\n')\n sys.exit(1)\n\n d = myConstants.d\n\n folds, hold_out = kfold_split(d, n_folds=10, base_dir=splits_dir)\n fold_stances, hold_out_stances = get_stances_for_folds(d,folds,hold_out)\n\n Xs = dict()\n ys = dict()\n\n feature_list = myConstants.feature_list\n\n for scorer_type, features, non_bleeding_features in feature_list:\n\n # print classifier and features for this loop\n print(scorer_type)\n print(features)\n print(non_bleeding_features)\n\n # generate bow features for later use\n generate_non_bleeding_features(fold_stances, hold_out_stances, 10, non_bleeding_features, features_dir, d)\n\n # Load/Precompute all features now\n X_holdout, y_holdout, feat_indices = generate_features(hold_out_stances, d, \"holdout\", features, features_dir)\n for fold in fold_stances:\n Xs[fold], ys[fold], _ = generate_features(fold_stances[fold], d, str(fold), features, features_dir)\n\n # initialize lists needed to save results for each fold\n 
all_accuracies_related = []\n all_accuracies_stance = []\n all_f1_related = []\n all_f1_stance = []\n all_scores = []\n\n # get head for result file\n file_head = printout_manager.get_printout_file_head(scorer_type, features, non_bleeding_features)\n result_string = file_head # use head for result file\n learning_rate_string = file_head # use head for learning rate file\n\n # run cross validation on the specified folds\n if run_CV == True:\n result_string, learning_rate_string = cross_validation(fold_stances, folds, Xs, ys, non_bleeding_features, features_dir,\n scorer_type, all_accuracies_related, all_accuracies_stance,\n all_f1_related, all_f1_stance, all_scores, result_string, learning_rate_string)\n\n # Test on holdout set\n if run_validation == True:\n result_string, learning_rate_string = validate_holdout(Xs, ys, X_holdout, y_holdout, non_bleeding_features,\n features_dir, scorer_type, feat_indices, result_string, learning_rate_string, features)\n\n # Train the final classifer\n if run_final_train == True:\n final_clf = final_clf_training(Xs, ys, X_holdout, y_holdout, scorer_type, sanity_check=True, oversampling=perform_oversampling)\n\n # Run the final classifier on the test data\n if run_final_prediction == True:\n if run_final_train == True:\n final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, final_clf)\n else:\n final_clf_prediction(data_path, features, features_dir, scorer_type, run_final_train, None)\n\n # calculate FNC Score\n fnc_result_folder = \"%s/data/fnc-1/fnc_results/%s_submission.csv\" % (path.dirname(path.dirname(path.abspath(__file__))), scorer_type)\n fnc_gold_labels_file = myConstants.test_stances_gold\n predicted_set = scorer.load_dataset(fnc_result_folder)\n fnc_gold_labels = scorer.load_dataset(fnc_gold_labels_file)\n test_score, cm, f1_score = scorer.score_submission(fnc_gold_labels, predicted_set)\n null_score, max_score = scorer.score_defaults(fnc_gold_labels)\n\n fnc_results = \"################################################ \\n\"\n fnc_results += \"Corpora: \" + myConstants.datasetName + \"\\n\"\n fnc_results += \"Model:\" + scorer_type + myConstants.model_name + \"\\n\"\n if perform_oversampling == True:\n fnc_results += \"Using oversampling \\n\"\n fnc_results += result_string + \"\\n\" + printout_manager.calculate_confusion_matrix(cm)\n fnc_results += scorer.SCORE_REPORT.format(max_score, null_score, test_score) + \"\\n\"\n fnc_results += \"\\nRelative FNC Score: {:.3f}\".format(100/max_score*test_score) + \"% \\n\"\n fnc_results += \"\\n\" + f1_score + \"\\n\"\n\n print(fnc_results)\n printout_manager.save_file(fnc_results, result_file_folder + \"/fnc_results.txt\", \"a+\")\n\n # save file with results to disk\n printout_manager.save_file(result_string, result_file_folder + \"/result_file_temp.txt\", \"a+\")\n\n # save file with learning rates to disk\n learning_rate_string += \"===================================\\n\"\n printout_manager.save_file(learning_rate_string, result_file_folder + \"/learning_rate_file_temp.txt\", \"a+\")\n\n # delete temporary saved MultiThreadingFeedForwardMLP models if existing\n delete_ffmlp_data()\n\nif __name__ == '__main__':\n pipeline()\n", "import math\nimport os\nimport os.path as path\nfrom datetime import datetime\nimport nltk\nimport numpy as np\nimport regex as re\nimport pickle\nimport retinasdk\nimport string\nimport pandas as pd\nimport csv\nimport collections\nfrom time import time\nfrom nltk.corpus import reuters, stopwords\nfrom collections import defaultdict, 
Counter\nfrom sklearn import feature_extraction\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom sklearn.decomposition import LatentDirichletAllocation, NMF\n\nfrom tqdm import tqdm\nfrom itertools import zip_longest\nfrom fnc.refs.feature_engineering_helper import word_ngrams\nfrom fnc.refs.feature_engineering_helper import topic_models\nfrom fnc.utils.doc2vec import avg_embedding_similarity\nfrom fnc.utils.loadEmbeddings import LoadEmbeddings\nfrom fnc.utils.stanford_parser import StanfordMethods\nfrom fnc.utils.tf_idf_helpers import tf_idf_helpers\nfrom fnc.utils.hungarian_alignment import hungarian_alignment_calculator\nfrom fnc.utils.data_helpers import sent2stokens_wostop, text2sent, get_tokenized_lemmas\nfrom fnc.utils.word_mover_distance import computeAverageWMD\nfrom fnc.settings import myConstants\nfrom fnc.refs.utils.generate_test_splits import kfold_split\nimport fnc.refs.feature_engineering_helper.readability_indices as fe_util\n\n_wnl = nltk.WordNetLemmatizer()\n\n\"\"\"\nThis file is based on the fnc-1-baseline implementation https://github.com/FakeNewsChallenge/fnc-1-baseline\n\"\"\"\n\ndef normalize_word(w):\n return _wnl.lemmatize(w).lower()\n\n\ndef clean(s):\n # Cleans a string: Lowercasing, trimming, removing non-alphanumeric\n\n return \" \".join(re.findall(r'\\w+', s, flags=re.UNICODE)).lower()\n\n\ndef remove_stopwords(l):\n # Removes stopwords from a list of tokens\n return [w for w in l if w not in feature_extraction.text.ENGLISH_STOP_WORDS]\n\n\ndef clear_unwanted_chars(mystring):\n return str(mystring.encode('latin', errors='ignore').decode('latin'))\n\n\ndef gen_or_load_feats(feat_fn, headlines, bodies, feature_file, bodyId, feature, headId=\"\", fold=\"\"):\n if not os.path.isfile(feature_file):\n if 'stanford' in feature:\n feats = feat_fn(headlines, bodies, bodyId, headId)\n elif 'single_flat_LSTM_50d_100' in feature:\n feats = feat_fn(headlines, bodies, fold)\n\n else:\n feats = feat_fn(headlines, bodies)\n np.save(feature_file, feats)\n\n return np.load(feature_file)\n\ndef gen_non_bleeding_feats(feat_fn, headlines, bodies, headlines_test, bodies_test, features_dir, feature,\n fold):\n \"\"\"\n Similar to gen_or_load_feats() it generates the non bleeding features and save them on the disk\n \"\"\"\n feature_file = \"%s/%s.%s.npy\" % (features_dir, feature, fold)\n if not os.path.isfile(feature_file):\n print (str(datetime.now()) + \": Generating features for: \" + feature + \", fold/holdout: \" + str(fold))\n\n X_train, X_test = feat_fn(headlines, bodies, headlines_test, bodies_test)\n\n if (str(fold) != 'holdout'):\n np.save(\"%s/%s.%s.npy\" % (features_dir, feature, fold), X_train)\n np.save(\"%s/%s.%s.test.npy\" % (features_dir, feature, fold), X_test)\n else:\n np.save(\"%s/%s.%s.npy\" % (features_dir, feature, 'holdout'), X_train)\n np.save(\"%s/%s.%s.test.npy\" % (features_dir, feature, 'holdout'), X_test)\n\n\ndef word_overlap_features(headlines, bodies):\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n clean_headline = clean(headline)\n clean_body = clean(body)\n clean_headline = get_tokenized_lemmas(clean_headline)\n clean_body = get_tokenized_lemmas(clean_body)\n features = [\n len(set(clean_headline).intersection(clean_body)) / float(len(set(clean_headline).union(clean_body)))]\n X.append(features)\n return X\n\ndef refuting_features(headlines, bodies):\n _refuting_words = [\n 'fake',\n 'fraud',\n 'hoax',\n 'false',\n 'deny', 
'denies',\n # 'refute',\n 'not',\n 'despite',\n 'nope',\n 'doubt', 'doubts',\n 'bogus',\n 'debunk',\n 'pranks',\n 'retract'\n ]\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n clean_headline = clean(headline)\n clean_headline = get_tokenized_lemmas(clean_headline)\n features = [1 if word in clean_headline else 0 for word in _refuting_words]\n X.append(features)\n return X\n\n\ndef polarity_features(headlines, bodies):\n _refuting_words = [\n 'fake',\n 'fraud',\n 'hoax',\n 'false',\n 'deny', 'denies',\n 'not',\n 'despite',\n 'nope',\n 'doubt', 'doubts',\n 'bogus',\n 'debunk',\n 'pranks',\n 'retract'\n ]\n\n def calculate_polarity(text):\n tokens = get_tokenized_lemmas(text)\n return sum([t in _refuting_words for t in tokens]) % 2\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n clean_headline = clean(headline)\n clean_body = clean(body)\n features = []\n features.append(calculate_polarity(clean_headline))\n features.append(calculate_polarity(clean_body))\n X.append(features)\n return np.array(X)\n\n\ndef ngrams(input, n):\n input = input.split(' ')\n output = []\n for i in range(len(input) - n + 1):\n output.append(input[i:i + n])\n return output\n\n\ndef chargrams(input, n):\n output = []\n for i in range(len(input) - n + 1):\n output.append(input[i:i + n])\n return output\n\n\ndef append_chargrams(features, text_headline, text_body, size):\n grams = [' '.join(x) for x in chargrams(\" \".join(remove_stopwords(text_headline.split())), size)]\n grams_hits = 0\n grams_early_hits = 0\n grams_first_hits = 0\n for gram in grams:\n if gram in text_body:\n grams_hits += 1\n if gram in text_body[:255]:\n grams_early_hits += 1\n if gram in text_body[:100]:\n grams_first_hits += 1\n features.append(grams_hits)\n features.append(grams_early_hits)\n features.append(grams_first_hits)\n return features\n\n\ndef append_ngrams(features, text_headline, text_body, size):\n grams = [' '.join(x) for x in ngrams(text_headline, size)]\n grams_hits = 0\n grams_early_hits = 0\n for gram in grams:\n if gram in text_body:\n grams_hits += 1\n if gram in text_body[:255]:\n grams_early_hits += 1\n features.append(grams_hits)\n features.append(grams_early_hits)\n return features\n\n\ndef hand_features(headlines, bodies):\n\n def binary_co_occurence(headline, body):\n # Count how many times a token in the title\n # appears in the body text.\n bin_count = 0\n bin_count_early = 0\n for headline_token in clean(headline).split(\" \"):\n if headline_token in clean(body):\n bin_count += 1\n if headline_token in clean(body)[:255]:\n bin_count_early += 1\n return [bin_count, bin_count_early]\n\n def binary_co_occurence_stops(headline, body):\n # Count how many times a token in the title\n # appears in the body text. 
Stopwords in the title\n # are ignored.\n bin_count = 0\n bin_count_early = 0\n for headline_token in remove_stopwords(clean(headline).split(\" \")):\n if headline_token in clean(body):\n bin_count += 1\n bin_count_early += 1\n return [bin_count, bin_count_early]\n\n def count_grams(headline, body):\n # Count how many times an n-gram of the title\n # appears in the entire body, and intro paragraph\n\n clean_body = clean(body)\n clean_headline = clean(headline)\n features = []\n features = append_chargrams(features, clean_headline, clean_body, 2)\n features = append_chargrams(features, clean_headline, clean_body, 8)\n features = append_chargrams(features, clean_headline, clean_body, 4)\n features = append_chargrams(features, clean_headline, clean_body, 16)\n features = append_ngrams(features, clean_headline, clean_body, 2)\n features = append_ngrams(features, clean_headline, clean_body, 3)\n features = append_ngrams(features, clean_headline, clean_body, 4)\n features = append_ngrams(features, clean_headline, clean_body, 5)\n features = append_ngrams(features, clean_headline, clean_body, 6)\n return features\n\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n X.append(binary_co_occurence(headline, body)\n + binary_co_occurence_stops(headline, body)\n + count_grams(headline, body))\n return X\n\n###########################\n# NEW FEATURES START HERE #\n###########################\ndef NMF_cos_50(headlines, bodies):\n \"\"\"\n Implements non negative matrix factorization. Calculates the cos distance between the resulting head and body vector.\n \"\"\"\n return topic_models.NMF_topics(headlines, bodies, n_topics=50, include_holdout=False, include_unlbled_test=False)\n\ndef NMF_cos_300(headlines, bodies):\n \"\"\"\n Implements non negative matrix factorization. Calculates the cos distance between the resulting head and body vector.\n \"\"\"\n return topic_models.NMF_topics(headlines, bodies, n_topics=300, include_holdout=False, include_unlbled_test=False)\n\ndef NMF_cos_300_holdout_unlbled_test(headlines, bodies):\n \"\"\"\n Implements non negative matrix factorization. 
Calculates the cos distance between the resulting head and body vector.\n \"\"\"\n return topic_models.NMF_topics(headlines, bodies, n_topics=300, include_holdout=True, include_unlbled_test=True)\n\ndef latent_dirichlet_allocation_25(headlines, bodies):\n \"\"\"\n Sklearn LDA implementation based on the 5000 most important words (based on train+test+holdout+ unlabeled test data's term freq => bleeding).\n Returns feature vector of cosinus distances between the topic models of headline and bodies.\n\n Links:\n https://pypi.python.org/pypi/lda, bottom see suggestions like MALLET, hca\n https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n \"\"\"\n return topic_models.latent_dirichlet_allocation_cos(headlines, bodies, n_topics=25, include_holdout=False,\n use_idf=False, term_freq=True, incl_unlbled_test=False)\n\ndef latent_dirichlet_allocation_300(headlines, bodies):\n \"\"\"\n Sklearn LDA implementation based on the 5000 most important words (based on train+test+holdout+ unlabeled test data's term freq => bleeding).\n Returns feature vector of cosinus distances between the topic models of headline and bodies.\n\n Links:\n https://pypi.python.org/pypi/lda, bottom see suggestions like MALLET, hca\n https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n \"\"\"\n return topic_models.latent_dirichlet_allocation_cos(headlines, bodies, n_topics=300, include_holdout=False,\n use_idf=False, term_freq=True, incl_unlbled_test=False)\n\ndef latent_dirichlet_allocation_25_holdout_unlbled_test(headlines, bodies):\n \"\"\"\n Sklearn LDA implementation based on the 5000 most important words (based on train+test+holdout+ unlabeled test data's term freq => bleeding).\n Returns feature vector of cosinus distances between the topic models of headline and bodies.\n\n Links:\n https://pypi.python.org/pypi/lda, bottom see suggestions like MALLET, hca\n https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n \"\"\"\n return topic_models.latent_dirichlet_allocation_cos(headlines, bodies, n_topics=25, include_holdout=True,\n use_idf=False, term_freq=True, incl_unlbled_test=True)\n\ndef latent_semantic_indexing_gensim_300_concat_holdout(headlines, bodies):\n \"\"\"\n Takes all the data (holdout+test+train) and interpretes the headlines and bodies as different\n documents. Instead of combining them, they are appended. Then it tokenizes these ~50k headline-docs and ~50k body-docs,\n builds a Tfidf-Matrix out of them and creates a LSI-Model out of it. In the next step the headlines and\n bodies for the feature generation are also treated as different documents and merely appended. Also, they are tokenized and\n a Tfifd-Matrix is built. This matix is passed to the learned LSI-Model and a Matrix is being returned.\n In this matrix, each document is represented as a vector with length(topics) of (topic-id, distance of this doc to the topic).\n The probabilities are then taken as a feature vector for the document. The first half of the matrix represent the headline docs,\n the latter half represent the body docs. 
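# Illustrative sketch only -- not the project's topic_models helper -- of the
# LDA feature idea described above: fit LDA on term counts of all headlines and
# bodies, then take the cosine distance between each headline's topic
# distribution and that of its body. headlines/bodies are assumed to be lists
# of raw strings; newer scikit-learn names the parameter n_components, not n_topics.
def _lda_cosine_sketch(headlines, bodies, n_topics=25):
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.decomposition import LatentDirichletAllocation
    from sklearn.metrics.pairwise import cosine_distances

    counts = CountVectorizer(stop_words='english', max_features=5000).fit_transform(headlines + bodies)
    topics = LatentDirichletAllocation(n_components=n_topics, random_state=0).fit_transform(counts)
    head_topics, body_topics = topics[:len(headlines)], topics[len(headlines):]
    return [cosine_distances(h.reshape(1, -1), b.reshape(1, -1))[0, 0]
            for h, b in zip(head_topics, body_topics)]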
In the end, the feature vectors of the headlines get concatenated with its body feature vector.\n\n The differences to the latent_semantic_indexing_gensim_300_concat_OLD are:\n - holdout data is also used\n - a Tfidf matrix is built and used to create the LSI model and also to retrieve the features instead of just a corpus to build the LSI model and\n passing each headline and body separately into the LSI model to retrieve its features (does it make a difference, since dictionary already takes\n tfidf into account?)\n - the vectors are taken fully and not just the cosinus distance between them\n \"\"\"\n return topic_models.latent_semantic_indexing_gensim_concat(headlines, bodies, n_topics=300, include_holdout=True,\n include_unlbled_test=False)\n\ndef latent_semantic_indexing_gensim_300_concat_holdout_unlbled_test(headlines, bodies):\n \"\"\"\n Takes all the data (holdout+test+train) and interpretes the headlines and bodies as different\n documents. Instead of combining them, they are appended. Then it tokenizes these ~50k headline-docs and ~50k body-docs,\n builds a Tfidf-Matrix out of them and creates a LSI-Model out of it. In the next step the headlines and\n bodies for the feature generation are also treated as different documents and merely appended. Also, they are tokenized and\n a Tfifd-Matrix is built. This matix is passed to the learned LSI-Model and a Matrix is being returned.\n In this matrix, each document is represented as a vector with length(topics) of (topic-id, distance of this doc to the topic).\n The probabilities are then taken as a feature vector for the document. The first half of the matrix represent the headline docs,\n the latter half represent the body docs. In the end, the feature vectors of the headlines get concatenated with its body feature vector.\n\n The differences to the latent_semantic_indexing_gensim_300_concat_OLD are:\n - holdout data is also used\n - a Tfidf matrix is built and used to create the LSI model and also to retrieve the features instead of just a corpus to build the LSI model and\n passing each headline and body separately into the LSI model to retrieve its features (does it make a difference, since dictionary already takes\n tfidf into account?)\n - the vectors are taken fully and not just the cosinus distance between them\n \"\"\"\n return topic_models.latent_semantic_indexing_gensim_concat(headlines, bodies, n_topics=300, include_holdout=True,\n include_unlbled_test=True)\n\ndef NMF_concat_300_holdout(headlines, bodies):\n \"\"\"\n Implements non negative matrix factorization. Concatenates the resulting head and body vector.\n \"\"\"\n return topic_models.NMF_topics(headlines, bodies, n_topics=300, include_holdout=True, include_unlbled_test=False,\n cosinus_dist=False)\n\ndef NMF_concat_300_holdout_unlbled_test(headlines, bodies):\n \"\"\"\n Implements non negative matrix factorization. 
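# Rough sketch of the LSI-concat approach described in the docstring above
# (assumes gensim is installed; this is not the latent_semantic_indexing_gensim_concat
# helper itself). Headlines and bodies are modelled as separate documents, an LSI
# model is trained on their tf-idf corpus, and each pair is represented as the
# concatenation of the two dense topic vectors.
def _lsi_concat_sketch(headlines, bodies, n_topics=300):
    import numpy as np
    from gensim import corpora, models, matutils

    docs = [doc.lower().split() for doc in headlines + bodies]
    dictionary = corpora.Dictionary(docs)
    bow = [dictionary.doc2bow(doc) for doc in docs]
    tfidf = models.TfidfModel(bow)
    lsi = models.LsiModel(tfidf[bow], id2word=dictionary, num_topics=n_topics)
    dense = np.array([matutils.sparse2full(lsi[tfidf[vec]], n_topics) for vec in bow])
    return np.concatenate([dense[:len(headlines)], dense[len(headlines):]], axis=1)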
Concatenates the resulting head and body vector.\n \"\"\"\n return topic_models.NMF_topics(headlines, bodies, n_topics=300, include_holdout=True, include_unlbled_test=True,\n cosinus_dist=False)\n\ndef word_unigrams_5000_concat_tf_l2_holdout(headlines, bodies):\n \"\"\"\n Simple bag of words feature extraction with term freq of words as feature vectors, length 5000 head + 5000 body,\n concatenation of head and body, l2 norm and bleeding (BoW = train+test+holdout+unlabeled test set).\n \"\"\"\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n return head_and_body\n\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_body = vectorizer_body.fit_transform(bodies)\n\n X = np.concatenate([X_head.toarray(), X_body.toarray()], axis=1)\n\n return X\n\n # get headlines and bodies of train, test and holdout set\n h, b = word_ngrams.get_head_body_tuples(include_holdout=True)\n\n # create the vocab out of the BoW\n tfidf = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', max_features=5000, use_idf=False,\n norm='l2')\n tfidf.fit_transform(combine_head_and_body(h, b))\n vocab = tfidf.vocabulary_\n\n X = get_features(vocab)\n\n return X\n\ndef word_unigrams_5000_concat_tf_l2_holdout_unlbled_test(headlines, bodies):\n \"\"\"\n Simple bag of words feature extraction with term freq of words as feature vectors, length 5000 head + 5000 body,\n concatenation of head and body, l2 norm and bleeding (BoW = train+test+holdout+unlabeled test set).\n \"\"\"\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n return head_and_body\n\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_body = vectorizer_body.fit_transform(bodies)\n\n X = np.concatenate([X_head.toarray(), X_body.toarray()], axis=1)\n\n return X\n\n # get headlines and bodies of train, test and holdout set\n h, b = word_ngrams.get_head_body_tuples(include_holdout=True)\n\n\n # Comment out for clean ablation tests\n # add the unlabeled test data words to the BoW of test+train+holdout data\n h_unlbled_test, b_unlbled_test = word_ngrams.get_head_body_tuples_unlbled_test()\n h.extend(h_unlbled_test)\n b.extend(b_unlbled_test)\n\n # create the vocab out of the BoW\n tfidf = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', max_features=5000, use_idf=True,\n norm='l2')\n tfidf.fit_transform(combine_head_and_body(h, b))\n vocab = tfidf.vocabulary_\n\n X = get_features(vocab)\n\n return X\n\n\n\n#'PPDB' paraphrase database\n#hungarian alignment score\n#computing score of each word of headline with each word of body - very resource-hungry\ndef ppdb(headlines, bodies):\n myHungarian_calculator = hungarian_alignment_calculator()\n x = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n x.append(myHungarian_calculator.calc_hungarian_alignment_score(headline, body))\n return x\n\n\ndef hedging_features(headlines, bodies):\n 
_hedging_seed_words = \\\n [\n 'alleged', 'allegedly',\n 'apparently',\n 'appear', 'appears',\n 'claim', 'claims',\n 'could',\n 'evidently',\n 'largely',\n 'likely',\n 'mainly',\n 'may', 'maybe', 'might',\n 'mostly',\n 'perhaps',\n 'presumably',\n 'probably',\n 'purported', 'purportedly',\n 'reported', 'reportedly',\n 'rumor', 'rumour', 'rumors', 'rumours', 'rumored', 'rumoured',\n 'says',\n 'seem',\n 'somewhat',\n # 'supposedly',\n 'unconfirmed']\n # Taken from https://github.com/wooorm/hedges/blob/master/index.json\n _hedging_seed_words = \\\n [\n \"a bit\",\n \"about\",\n \"actually\",\n \"allege\",\n \"alleged\",\n \"almost\",\n \"almost never\",\n \"always\",\n \"and all that\",\n \"and so forth\",\n \"apparent\",\n \"apparently\",\n \"appear\",\n \"appear to be\",\n \"appeared\",\n \"appears\",\n \"approximately\",\n \"around\",\n \"assume\",\n \"assumed\",\n \"assumes\",\n \"assumption\",\n \"at least\",\n \"basically\",\n \"be sure\",\n \"believe\",\n \"believed\",\n \"believes\",\n \"bunch\",\n \"can\",\n \"certain\",\n \"certainly\",\n \"clear\",\n \"clearly\",\n \"conceivably\",\n \"consider\",\n \"considered\",\n \"considers\",\n \"consistent with\",\n \"could\",\n \"couple\",\n \"definite\",\n \"definitely\",\n \"diagnostic\",\n \"don't know\",\n \"doubt\",\n \"doubtful\",\n \"effectively\",\n \"estimate\",\n \"estimated\",\n \"estimates\",\n \"et cetera\",\n \"evidently\",\n \"fairly\",\n \"few\",\n \"find\",\n \"finds\",\n \"found\",\n \"frequently\",\n \"generally\",\n \"guess\",\n \"guessed\",\n \"guesses\",\n \"hopefully\",\n \"if i'm understanding you correctly\",\n \"improbable\",\n \"in general\",\n \"in my mind\",\n \"in my opinion\",\n \"in my understanding\",\n \"in my view\",\n \"inconclusive\",\n \"indicate\",\n \"kind of\",\n \"largely\",\n \"like\",\n \"likely\",\n \"little\",\n \"look like\",\n \"looks like\",\n \"mainly\",\n \"many\",\n \"may\",\n \"maybe\",\n \"might\",\n \"more or less\",\n \"most\",\n \"mostly\",\n \"much\",\n \"must\",\n \"my impression\",\n \"my thinking is\",\n \"my understanding is\",\n \"necessarily\",\n \"occasionally\",\n \"often\",\n \"overall\",\n \"partially\",\n \"perhaps\",\n \"possibility\",\n \"possible\",\n \"possibly\",\n \"practically\",\n \"presumable\",\n \"presumably\",\n \"pretty\",\n \"probability\",\n \"probable\",\n \"probably\",\n \"quite\",\n \"quite clearly\",\n \"rare\",\n \"rarely\",\n \"rather\",\n \"read\",\n \"really\",\n \"roughly\",\n \"say\",\n \"says\",\n \"seem\",\n \"seemed\",\n \"seems\",\n \"seldom\",\n \"several\",\n \"should\",\n \"so far\",\n \"some\",\n \"somebody\",\n \"somehow\",\n \"someone\",\n \"something\",\n \"something or other\",\n \"sometimes\",\n \"somewhat\",\n \"somewhere\",\n \"sort of\",\n \"speculate\",\n \"speculated\",\n \"speculates\",\n \"suggest\",\n \"suggested\",\n \"suggestive\",\n \"suggests\",\n \"suppose\",\n \"supposed\",\n \"supposedly\",\n \"supposes\",\n \"surely\",\n \"tend\",\n \"their impression\",\n \"think\",\n \"thinks\",\n \"thought\",\n \"understand\",\n \"understands\",\n \"understood\",\n \"unlikely\",\n \"unsure\",\n \"usually\",\n \"virtually\",\n \"will\",\n \"would\"\n ]\n\n def calculate_hedging_polarity(text):\n tokens = get_tokenized_lemmas(text)\n return sum([t in _hedging_seed_words for t in tokens]) % 2\n def contains_hedging_seeed(text):\n tokens = get_tokenized_lemmas(text)\n return (min(1,sum([t in _hedging_seed_words for t in tokens])))\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n clean_headline = 
clean(headline)\n clean_body = clean(body)\n features = []\n #features.append(calculate_hedging_polarity(clean_headline))\n #features.append(calculate_hedging_polarity(clean_body))\n features.append(contains_hedging_seeed(clean_headline))\n features.append(contains_hedging_seeed(clean_body))\n X.append(features)\n return np.array(X)\n\n# End Features taken from: https://github.com/willferreira/mscproject\n##############################################################\n\ndef load_embeddings(headlines, bodies):\n # embedding parameters:\n embedding_size = 300\n vocab_size = 3000000\n embeddPath = \"%s/data/embeddings/google_news/GoogleNews-vectors-negative300.bin.gz\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n embeddData = path.normpath(\"%s/data/\" % (path.dirname(path.abspath(embeddPath))))\n binary_val = True\n embeddings = LoadEmbeddings(filepath=embeddPath, data_path=embeddData, vocab_size=vocab_size, embedding_size=embedding_size, binary_val=binary_val)\n # print('Loaded embeddings: Vocab-Size: ' + str(vocab_size) + ' \\n Embedding size: ' + str(embedding_size))\n return embedding_size, embeddings\n\n\n\n# calculate average sentence vector and compare headline with whole body text\n# same like avg_embed in model.py\ndef sen2Doc_headline_wholebody(headlines, bodies):\n\n def headline_wholebody(embeddings, headline, body):\n headline_w = sent2stokens_wostop(headline)\n body_w = sent2stokens_wostop(body)\n sim = avg_embedding_similarity(embeddings, embedding_size, ' '.join(headline_w), ' '.join(body_w))\n features = []\n features.append(sim)\n return features\n\n x = []\n embedding_size, embeddings = load_embeddings(headlines, bodies)\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n x.append(headline_wholebody(embeddings, headline, body))\n return x\n\n# calculate average sentence vector and compare headline with each sentence, use highest similarity\ndef sen2sen_similarity_max(headlines, bodies):\n\n def similarity(embeddings, headline, body):\n sentence_list = []\n score = 0\n sentence_list = text2sent(body)\n for sentence in sentence_list:\n # compare both sentences - vectors not necessary, since this procedure works with text\n # note: avg_embeddings_similarity tokenizes and lemmatizes the sentences prior to calculation, so no pre-assessment is necessary (Sentence to tokens without stopwords)\n temp_score = avg_embedding_similarity(embeddings, embedding_size, headline, sentence)\n # store the highest similarity score\n score=max(score, temp_score)\n\n features = []\n features.append(score)\n return features\n\n x = []\n embedding_size, embeddings = load_embeddings(headlines, bodies)\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n x.append(similarity(embeddings, headline, body))\n return x\n\n# calculate word_mover_distance from headline to each sentence, use lowest distance\ndef word_mover_distance_similarity_sentence_min(headlines, bodies):\n\n def similarity(embeddings, headline, body):\n distance = 99999\n sentence_list = []\n sentence_list = text2sent(body)\n embedding_size, embeddings = load_embeddings(headline, body)\n for sentence in sentence_list:\n temp_distance = abs(computeAverageWMD(embeddings, headline, sentence))\n # store the lowest distance\n # Note: Distance is not normallized!!\n distance=min(distance, temp_distance)\n\n features = []\n features.append(distance)\n return features\n x = []\n embedding_size, embeddings = load_embeddings(headlines, bodies)\n for i, (headline, body) in 
tqdm(enumerate(zip(headlines, bodies))):\n x.append(similarity(embeddings, headline, body))\n return x\n\n# calculate word_mover_distance from headline to whole body text\ndef word_mover_distance_wholebody(headlines, bodies):\n def similarity(embeddings, headline, body):\n embedding_size, embeddings = load_embeddings(headline, body)\n distance = abs(computeAverageWMD(embeddings, headline, body))\n features = []\n features.append(distance)\n return features\n x = []\n embedding_size, embeddings = load_embeddings(headlines, bodies)\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n x.append(similarity(embeddings, headline, body))\n return x\n\n# compare sdm of the headline with the sdm of the whole body\ndef sdm_sim(headlines, bodies):\n def similarity(headline, body):\n clean_headline = clean(headline)\n clean_body = clean(body)\n fullClient = retinasdk.FullClient(\"e8bf8de0-fe52-11e6-b22d-93a4ae922ff1\", apiServer=\"http://api.cortical.io/rest\", retinaName=\"en_associative\")\n\n RE = re.compile(u'[⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]', re.UNICODE)\n clean_body = RE.sub(u'', clean_body)\n # clean_body = clean_body.encode('ascii', 'ignore')\n clean_body = clean_body.encode('utf8', 'ignore')\n clean_body = clean_body.decode('utf8', 'ignore')\n # print(clean_body)\n clean_body.replace(\"0x6e\", \" \")\n # newdata = clean_body[:start] + clean_body[end:]\n # clean_body = clean_body.translate(None, '0x6e')\n comp_with_stop_words = fullClient.compare('[{\"text\": \"'+clean_headline+'\"}, {\"text\": \"'+clean_body +'\"}]')\n sim = comp_with_stop_words.cosineSimilarity\n\n features = []\n features.append(sim)\n return features\n x = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n x.append(similarity(headline, body))\n return x\n\n\ndef stanford_based_verb_noun_sim(headlines, bodies, bodyIds, headIds, order_sentences=False, num_sents=99):\n myStanfordmethods = StanfordMethods()\n mytf_tf_idf_helpers = tf_idf_helpers()\n\n def calculate_word_sim(embeddings, headline, body, body_id, head_id):\n clean_headline = clear_unwanted_chars(headline)\n clean_body = clear_unwanted_chars(body)\n\n ranked_sentences, body_id = stanford_helper_order_sents(order_sentences, num_sents, body_id,clean_headline, clean_body, myStanfordmethods, mytf_tf_idf_helpers)\n headline_nouns, headline_verbs, head_neg, head_sentiment, head_words_per_sentence = myStanfordmethods.getStanfordInfo('headline', str(body_id), str(head_id), clean_headline, max_number_of_sentences=num_sents)\n body_nouns, body_verbs, body_neg, body_sentiment, body_words_per_sentence = myStanfordmethods.getStanfordInfo('body', str(body_id), str(head_id), ranked_sentences, max_number_of_sentences=num_sents)\n\n try:\n noun_sim = avg_embedding_similarity(embeddings, embedding_size, ' '.join(headline_nouns), ' '.join(body_nouns))\n except Exception as e:\n #print(e)\n #print('Problem with nouns for dataset with headline ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline))\n #print(body_nouns)\n noun_sim = -1\n if math.isnan(noun_sim):\n #print('NAN for nouns for dataset with headline ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline) + '\\n \\n Body-text: ' + str(ranked_sentences) + ' \\n \\n Body-verbs: ' + str(body_verbs) + '\\n \\n Headline-verbs:' + str(headline_verbs))\n #print(body_nouns)\n noun_sim = -1\n\n try:\n verb_sim = avg_embedding_similarity(embeddings, embedding_size, ' '.join(headline_verbs), ' '.join(body_verbs))\n except Exception as e:\n #print(e)\n #print('Problem 
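# Hedged sketch of the word mover's distance comparison behind the
# word_mover_distance_* features above; assumes pre-trained gensim KeyedVectors
# (wmdistance additionally needs the pyemd/POT dependency). The project wraps
# this in computeAverageWMD, which is not reproduced here.
def _wmd_sketch(keyed_vectors, headline, sentence):
    head_tokens = [w.lower() for w in headline.split() if w.isalpha()]
    sent_tokens = [w.lower() for w in sentence.split() if w.isalpha()]
    return keyed_vectors.wmdistance(head_tokens, sent_tokens)  # lower = more similar, not normalized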
with verbs for dataset with headline ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline))\n #print(body_verbs)\n verb_sim = -1\n\n if math.isnan(verb_sim):\n #print('NAN for verbs for dataset with headline for body ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline) + '\\n \\n Body-text: ' + str(ranked_sentences) + ' \\n \\n Body-verbs: ' + str(body_verbs) + '\\n \\n Headline-verbs:' + str(headline_verbs))\n #print(body_verbs)\n verb_sim = -1\n\n features = []\n features.append(noun_sim)\n features.append(verb_sim)\n\n return features\n\n x = []\n embedding_size, embeddings = load_embeddings(headlines, bodies)\n for i, (headline, body, bodyIds, headIds) in tqdm(enumerate(zip(headlines, bodies, bodyIds, headIds))):\n x.append(calculate_word_sim(embeddings, headline, body, bodyIds, headIds))\n # save all information in file\n myStanfordmethods.store_pickle_file()\n return x\n\n\ndef stanford_based_verb_noun_sim_1sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=1):\n return stanford_based_verb_noun_sim(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_based_verb_noun_sim_2sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=2):\n return stanford_based_verb_noun_sim(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_based_verb_noun_sim_3sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=3):\n return stanford_based_verb_noun_sim(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\n\ndef stanford_ppdb_score(headlines, bodies, bodyIds, headIds, order_sentences=False, num_sents=99):\n myStanfordmethods = StanfordMethods()\n myHungarian_calculator = hungarian_alignment_calculator()\n mytf_tf_idf_helpers = tf_idf_helpers()\n\n def calculate_ppdb_score(headline, body, body_id, head_id):\n clean_headline = clear_unwanted_chars(headline)\n clean_body = clear_unwanted_chars(body)\n\n ranked_sentences, body_id = stanford_helper_order_sents(order_sentences, num_sents, body_id,clean_headline, clean_body, myStanfordmethods, mytf_tf_idf_helpers)\n\n headline_nouns, headline_verbs, head_neg, head_sentiment, head_words_per_sentence = myStanfordmethods.getStanfordInfo('headline', str(body_id), str(head_id), clean_headline, max_number_of_sentences=num_sents)\n body_nouns, body_verbs, body_neg, body_sentiment, body_words_per_sentence = myStanfordmethods.getStanfordInfo('body', str(body_id), str(head_id), ranked_sentences, max_number_of_sentences=num_sents)\n\n try:\n noun_ppdb_score = myHungarian_calculator.calc_hungarian_alignment_score(' '.join(headline_nouns), ' '.join(body_nouns))\n except Exception as e:\n #print(e)\n #print('Problem with ppdb score in nouns for dataset with headline ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline))\n #print(body_nouns)\n noun_ppdb_score = -99\n\n try:\n verb_ppdb_score = myHungarian_calculator.calc_hungarian_alignment_score( ' '.join(headline_verbs), ' '.join(body_verbs))\n except Exception as e:\n #print(e)\n #print('Problem with ppdb score in verbs for dataset with headline ID: ' + str(body_id) + '\\n Headline-text: ' + str(clean_headline))\n #print(body_verbs)\n verb_ppdb_score = -99\n\n features = []\n features.append(noun_ppdb_score)\n features.append(verb_ppdb_score)\n\n return features\n\n x = []\n for i, (headline, body, bodyIds, headIds) in tqdm(enumerate(zip(headlines, bodies, bodyIds, headIds))):\n x.append(calculate_ppdb_score(headline, body, bodyIds, headIds))\n # save 
all information in file\n myStanfordmethods.store_pickle_file()\n return x\n\n\ndef stanford_ppdb_score_1sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=1):\n return stanford_ppdb_score(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_ppdb_score_2sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=2):\n return stanford_ppdb_score(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_ppdb_score_3sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=3):\n return stanford_ppdb_score(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\n\ndef stanford_sentiment(headlines, bodies, bodyIds, headIds, order_sentences=False, num_sents=99):\n myStanfordmethods = StanfordMethods()\n mytf_tf_idf_helpers = tf_idf_helpers()\n\n def calculate_sentiment(headline, body, body_id, head_id):\n clean_headline = clear_unwanted_chars(headline)\n clean_body = clear_unwanted_chars(body)\n\n ranked_sentences, body_id = stanford_helper_order_sents(order_sentences, num_sents, body_id,clean_headline, clean_body, myStanfordmethods, mytf_tf_idf_helpers)\n\n headline_nouns, headline_verbs, head_neg, head_sentiment, head_words_per_sentence = myStanfordmethods.getStanfordInfo('headline', str(body_id), str(head_id), clean_headline, max_number_of_sentences=num_sents)\n body_nouns, body_verbs, body_neg, body_sentiment, body_words_per_sentence = myStanfordmethods.getStanfordInfo('body', str(body_id), str(head_id), ranked_sentences, max_number_of_sentences=num_sents)\n\n #get average sentiment of sentences\n head_sentiment_avg = float(sum(head_sentiment))/len(head_sentiment)\n body_sentiment_avg = float(sum(body_sentiment))/len(body_sentiment)\n\n features = []\n features.append(head_sentiment_avg)\n features.append(body_sentiment_avg)\n\n return features\n\n x = []\n for i, (headline, body, bodyIds, headIds) in tqdm(enumerate(zip(headlines, bodies, bodyIds, headIds))):\n x.append(calculate_sentiment(headline, body, bodyIds, headIds))\n # save all information in file\n myStanfordmethods.store_pickle_file()\n return x\n\ndef stanford_sentiment_1sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=1):\n return stanford_sentiment(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_sentiment_2sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=2):\n return stanford_sentiment(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_sentiment_3sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=3):\n return stanford_sentiment(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\n\ndef stanford_negation_features(headlines, bodies, bodyIds, headIds, order_sentences=False, num_sents=99):\n myStanfordmethods = StanfordMethods()\n mytf_tf_idf_helpers = tf_idf_helpers()\n\n def calculate_negation(headline, body, body_id, head_id):\n clean_headline = clear_unwanted_chars(headline)\n clean_body = clear_unwanted_chars(body)\n\n ranked_sentences, body_id = stanford_helper_order_sents(order_sentences, num_sents, body_id,clean_headline, clean_body, myStanfordmethods, mytf_tf_idf_helpers)\n\n headline_nouns, headline_verbs, head_neg, head_sentiment, head_words_per_sentence = myStanfordmethods.getStanfordInfo('headline', str(body_id), str(head_id), clean_headline, max_number_of_sentences=num_sents)\n body_nouns, body_verbs, body_neg, body_sentiment, body_words_per_sentence = 
myStanfordmethods.getStanfordInfo('body', str(body_id), str(head_id), ranked_sentences, max_number_of_sentences=num_sents)\n\n features = []\n\n if head_neg[0] >= 0:\n features.append(head_neg[1][0])\n else:\n features.append(-1)\n #The following section has been previously commented out - I do not know anymore why this has been done..\n if body_neg[0] >= 0:\n features.append(body_neg[1][0])\n else:\n features.append(-1)\n\n return features\n\n x = []\n for i, (headline, body, bodyIds, headIds) in tqdm(enumerate(zip(headlines, bodies, bodyIds, headIds))):\n x.append(calculate_negation(headline, body, bodyIds, headIds))\n # save all information in file\n myStanfordmethods.store_pickle_file()\n return x\n\n\ndef stanford_negation_features_1sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=1):\n return stanford_negation_features(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_negation_features_2sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=2):\n return stanford_negation_features(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_negation_features_3sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=3):\n return stanford_negation_features(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_avg_words_per_sent(headlines, bodies, bodyIds, headIds, order_sentences=False, num_sents=99):\n myStanfordmethods = StanfordMethods()\n mytf_tf_idf_helpers = tf_idf_helpers()\n\n def calculate_words_per_sent(headline, body, body_id, head_id):\n clean_headline = clear_unwanted_chars(headline)\n clean_body = clear_unwanted_chars(body)\n\n ranked_sentences, body_id = stanford_helper_order_sents(order_sentences, num_sents, body_id,clean_headline, clean_body, myStanfordmethods, mytf_tf_idf_helpers)\n\n headline_nouns, headline_verbs, head_neg, head_sentiment, head_words_per_sentence = myStanfordmethods.getStanfordInfo('headline', str(body_id), str(head_id), clean_headline, max_number_of_sentences=num_sents)\n body_nouns, body_verbs, body_neg, body_sentiment, body_words_per_sentence = myStanfordmethods.getStanfordInfo('body', str(body_id), str(head_id), ranked_sentences, max_number_of_sentences=num_sents)\n\n features = []\n features.append(head_words_per_sentence)\n features.append(body_words_per_sentence)\n\n return features\n\n x = []\n for i, (headline, body, bodyIds, headIds) in tqdm(enumerate(zip(headlines, bodies, bodyIds, headIds))):\n x.append(calculate_words_per_sent(headline, body, bodyIds, headIds))\n # save all information in file\n myStanfordmethods.store_pickle_file()\n return x\n\n\ndef stanford_avg_words_per_sent_1sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=1):\n return stanford_avg_words_per_sent(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_avg_words_per_sent_2sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=2):\n return stanford_avg_words_per_sent(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\ndef stanford_avg_words_per_sent_3sent(headlines, bodies, bodyIds, headIds, order_sentences=True, num_sents=3):\n return stanford_avg_words_per_sent(headlines, bodies, bodyIds, headIds, order_sentences, num_sents)\n\n'This is not a feature, but used by any stanford feature calculation to order the sentences based on their tf idf score'\ndef stanford_helper_order_sents(order_sentences, num_of_sents, body_id, clean_headline, clean_body, 
myStanfordmethods, mytf_tf_idf_helpers):\n #Order sentences by tf-idf-score:\n if order_sentences:\n body_id = \"ranked_\"+str(num_of_sents)+\"_\"+str(body_id)\n 'Only rank sentences, if there is no entry in the StanfordPickle'\n if not myStanfordmethods.check_if_already_parsed(body_id):\n #print(body_id + \" is not in stanford_pickle\")\n ranked_sentences = mytf_tf_idf_helpers.order_by_tf_id_rank(clean_headline, clean_body, num_of_sents)\n else:\n 'In this case the content of ranked sentences does not matter, since the Stanford stored information is used'\n #print(body_id + \" is already in stanford_pickle _ skipping tf_idf_ranking\")\n ranked_sentences = clean_body\n else:\n ranked_sentences = clean_body\n body_id = \"unranked_\"+str(body_id)\n\n return ranked_sentences, body_id\n\n\ndef discuss_features(headlines, bodies):\n _discuss_words = [\n 'allegedly',\n 'report',\n 'reported',\n 'reportedly',\n 'said',\n 'say',\n 'source',\n 'sources',\n 'told',\n 'according to',\n 'claim',\n 'claims'\n ]\n\n def calculate_discuss_feature(text):\n tokens = get_tokenized_lemmas(text)\n #result = [1 if word in tokens else 0 for word in _discuss_words]\n result = min(1,sum([t in _discuss_words for t in tokens]))\n return result\n\n X = []\n for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):\n clean_headline = clean(headline)\n clean_body = clean(body)\n features = []\n features.append(calculate_discuss_feature(clean_headline))\n features.append(calculate_discuss_feature(clean_body))\n X.append(features)\n print(str(len(X)))\n return np.array(X)\n\n\n## Benjamins LSTM features:\ndef single_flat_LSTM_50d_100(headlines, bodies, fold):\n # Following the guide at https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html\n # see also documentation https://keras.io/layers/embeddings/\n\n \"\"\"\n Improve on former LSTM features by dividing the tokens much better on the documents and evidences for a claim, in order to remove sparsitiy\n and add more useful information into the vectors.\n :param claims:\n :param evidences:\n :param orig_docs:\n :param fold:\n :return:\n \"\"\"\n from fnc.refs.feature_engineering_helper.misc import create_embedding_lookup_pandas, \\\n text_to_sequences_fixed_size, load_embedding_pandas\n\n #########################\n # PARAMETER DEFINITIONS #\n #########################\n method_name = \"single_flat_LSTM_50d_100\"\n # location path for features\n FEATURES_DIR = \"%s/../data/fnc-1/features/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n PARAM_DICT_FILENAME = method_name+\"_param_dict.pkl\"\n\n param_dict = {\n \"MAX_NB_WORDS\": 50000, # size of the vocabulary\n\n # sequence lengths\n \"MAX_SEQ_LENGTH\": 100, #1000\n\n # embedding specific values\n \"EMBEDDING_DIM\": 50, # dimension of the GloVe embeddings\n \"GLOVE_ZIP_FILE\": 'glove.twitter.27B.zip',\n \"GLOVE_FILE\": 'glove.twitter.27B.50d.txt',\n\n # embedding file names\n \"EMBEDDING_FILE\": method_name+\"_embedding.npy\",\n\n # vocab file names\n \"VOCAB_FILE\": method_name+\"_vocab.pkl\",\n }\n\n\n ###############################################\n # GET VOCABULARY AND PREPARE EMBEDDING MATRIX #\n ###############################################\n\n # load GloVe embeddings\n GloVe_vectors = load_embedding_pandas(param_dict[\"GLOVE_ZIP_FILE\"], param_dict[\"GLOVE_FILE\"])\n\n # load all claims, orig_docs and evidences\n all_heads, all_bodies = word_ngrams.get_head_body_tuples(include_holdout=True)\n all = all_heads\n all.extend(all_bodies)\n\n\n # Comment out for clean 
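# Toy sketch (not the helper's text_to_sequences_fixed_size) of what the
# sequence step further below produces: every token is replaced by its
# vocabulary id and the result is truncated/right-padded to MAX_SEQ_LENGTH,
# which is the fixed-size input expected by the embedding + LSTM layers.
def _to_fixed_sequence_sketch(text, vocab, max_len=100, unknown_id=0):
    ids = [vocab.get(tok.lower(), unknown_id) for tok in text.split()]
    ids = ids[:max_len]                       # truncate long inputs
    return ids + [0] * (max_len - len(ids))   # right-pad short inputs with zeros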
ablation checks\n # add the unlabeled test data words to the BoW of test+train+holdout data\n h_unlbled_test, b_unlbled_test = word_ngrams.get_head_body_tuples_unlbled_test()\n all.extend(h_unlbled_test)\n all.extend(b_unlbled_test)\n\n # create and save the embedding matrices for claims, orig_docs and evidences\n vocab = create_embedding_lookup_pandas(all, param_dict[\"MAX_NB_WORDS\"], param_dict[\"EMBEDDING_DIM\"],\n GloVe_vectors, param_dict[\"EMBEDDING_FILE\"], param_dict[\"VOCAB_FILE\"], init_zeros=False,\n add_unknown=True, rdm_emb_init=True, tokenizer=nltk.word_tokenize)\n\n # unload GloVe_vectors in order to make debugging possible\n del GloVe_vectors\n\n\n #################################################\n # Create sequences and embedding for the claims #\n #################################################\n print(\"Create sequences and embedding for the heads\")\n\n concatenated = []\n for i in range(len(headlines)):\n concatenated.append(headlines[i] + \". \" + bodies[i])\n\n # replace tokens of claims by vocabulary ids - the ids refer to the index of the embedding matrix which holds the word embedding for this vocab word\n sequences = text_to_sequences_fixed_size(concatenated, vocab, param_dict[\"MAX_SEQ_LENGTH\"], save_full_text=False,\n take_full_claim=True)\n\n\n\n #################################################\n # SAVE PARAM_DICT AND CONCATENATE TRAINING DATA #\n #################################################\n\n # save param_dict\n with open(FEATURES_DIR+PARAM_DICT_FILENAME, 'wb') as f:\n pickle.dump(param_dict, f, pickle.HIGHEST_PROTOCOL)\n print(\"Save PARAM_DICT as \" + FEATURES_DIR+PARAM_DICT_FILENAME)\n\n return sequences\n\n#### Features from Benjamin\n\n## Helper functions\ndef get_head_body_tuples_test():\n d = myConstants.testdataset\n\n h = []\n b = []\n for stance in d.stances:\n h.append(stance['Headline'])\n b.append(d.articles[int(stance['Body ID'])])\n\n return h, b\n\ndef get_head_body_tuples(include_holdout=False):\n # file paths\n '''\n data_path = \"%s/data/fnc-1\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n splits_dir = \"%s/data/fnc-1/splits\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n dataset = DataSet(data_path)\n '''\n data_path = myConstants.data_path\n splits_dir = myConstants.splits_dir\n dataset = myConstants.d\n\n def get_stances(dataset, folds, holdout):\n # Creates the list with a dict {'headline': ..., 'body': ..., 'stance': ...} for each\n # stance in the data set (except for holdout)\n stances = []\n for stance in dataset.stances:\n if stance['Body ID'] in holdout and include_holdout == True:\n stances.append(stance)\n for fold in folds:\n if stance['Body ID'] in fold:\n stances.append(stance)\n\n return stances\n\n # create new vocabulary\n folds, holdout = kfold_split(dataset, n_folds=10, base_dir=splits_dir) # [[133,1334,65645,], [32323,...]] => body ids for each fold\n stances = get_stances(dataset, folds, holdout)\n\n print(\"Stances length: \" + str(len(stances)))\n\n h = []\n b = []\n # create the final lists with all the headlines and bodies of the set except for holdout\n for stance in stances:\n h.append(stance['Headline'])\n b.append(dataset.articles[stance['Body ID']])\n\n return h, b\n\ndef get_unigram_features_of_lexicon(headlines, bodies, lexicon_path, no_hash=False, given_lexicon=False):\n def polarity(x):\n score = wordDict[x]\n if score > 0:\n return 'positive'\n if score < 0:\n return 'negative'\n else:\n return 'none'\n\n def 
count_tokens_with_polarity(tokenized):\n\n scorelist = []\n for token in tokenized:\n token = token.lower()\n score = polarity(token)\n scorelist.append(score)\n\n pol_dict = dict(Counter(scorelist))\n\n if 'none' not in pol_dict:\n pol_dict['none'] = 0\n\n if 'positive' not in pol_dict:\n pol_dict['positive'] = 0\n\n if 'negative' not in pol_dict:\n pol_dict['negative'] = 0\n\n return pol_dict\n\n def polarity_sum(tokenized):\n\n negList = []\n posList = []\n for token in tokenized:\n token = token.lower()\n if polarity(token) == 'positive':\n posList.append(wordDict[token])\n elif polarity(token) == 'negative':\n negList.append(abs(wordDict[token]))\n\n return {'pos_sum': sum(posList), 'neg_sum': sum(negList)}\n\n def max_token(tokenized):\n\n negList = []\n posList = []\n\n for token in tokenized:\n token = token.lower()\n if polarity(token) == 'positive':\n posList.append(wordDict[token])\n elif polarity(token) == 'negative':\n negList.append(wordDict[token])\n\n try:\n pos_max = max(posList)\n except ValueError:\n pos_max = 0\n try:\n neg_max = min(negList)\n except ValueError:\n neg_max = 0\n\n return {'pos_max': pos_max, 'neg_max': neg_max}\n\n def last_token(tokenized):\n\n pol_dict = {'last_polarity': 0}\n\n for token in reversed(tokenized):\n token = token.lower()\n if polarity(token) == 'positive' or polarity(token) == 'negative':\n pol_dict['last_polarity'] = wordDict[token]\n else:\n continue\n return pol_dict\n\n def all_feats_dict(string, tokenizer):\n\n tokenized = tokenizer.word_tokenize(string)\n\n ct = count_tokens_with_polarity(tokenized)\n pol = polarity_sum(tokenized)\n max_tkn = max_token(tokenized)\n last = last_token(tokenized)\n\n complete = dict()\n for dictionary in [ct, pol, max_tkn, last]:\n complete.update(dictionary)\n return complete\n\n wordDict = defaultdict(float)\n if given_lexicon == False:\n # load lexicon and create dictionary out of it\n with open(lexicon_path, 'r') as f:\n for row in f.readlines():\n row = row.split()\n if (no_hash and row[0].startswith('#')):\n row[0] = row[0][1:]\n wordDict[row[0]] = float(row[1])\n else:\n wordDict = given_lexicon\n\n # create features and build feature matrix\n emo_counts_head = [all_feats_dict(headline, nltk) for headline in tqdm(headlines)]\n emo_counts_body = [all_feats_dict(body, nltk) for body in tqdm(bodies)]\n\n emo_counts_head_df = pd.DataFrame(emo_counts_head)\n emo_counts_head_df = emo_counts_head_df.fillna(0)\n\n emo_counts_body_df = pd.DataFrame(emo_counts_body)\n emo_counts_body_df = emo_counts_body_df.fillna(0)\n\n emo_counts = np.concatenate([emo_counts_head_df.as_matrix(), emo_counts_body_df.as_matrix()], axis=1)\n\n return emo_counts\n\ndef char_3grams_5000_concat_all_data(headlines, bodies):\n\n def combine_head_and_body(headlines, bodies):\n return [headline + \" \" + body for i, (headline, body) in\n tqdm(enumerate(zip(headlines, bodies)))]\n\n # Load train data into CountVectorizer, get the resulting X-values and also the vocabulary\n # for the test data feature creation\n def get_features(headlines, bodies, headlines_all, bodies_all):\n # create vocab on basis of training data\n head_and_body = combine_head_and_body(headlines_all, bodies_all)\n head_and_body_tfidf = TfidfVectorizer(analyzer='char', ngram_range=(3, 3), lowercase=True,\n max_features=5000, use_idf=False, norm='l2')\n head_and_body_tfidf.fit(head_and_body)\n vocab = head_and_body_tfidf.vocabulary_\n\n # create training feature vectors\n X_train_head_tfidf = TfidfVectorizer(analyzer='char', ngram_range=(3, 3), 
lowercase=True,\n stop_words='english', vocabulary=vocab, use_idf=False, norm='l2')\n X_train_head = X_train_head_tfidf.fit_transform(headlines)\n\n X_train_body_tfidf = TfidfVectorizer(analyzer='char', ngram_range=(3, 3), lowercase=True,\n stop_words='english', vocabulary=vocab, use_idf=False, norm='l2')\n X_train_body = X_train_body_tfidf.fit_transform(bodies)\n\n X_train = np.concatenate([X_train_head.toarray(), X_train_body.toarray()], axis=1)\n\n return X_train\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n\n # Comment out for clean ablation tests\n h.extend(h_test)\n b.extend(b_test)\n\n X_train = get_features(headlines, bodies, h, b)\n\n return X_train\n\ndef lexical_features(headlines, bodies):\n # calculates lexical diversities for head and body\n\n def get_info(text):\n sent_list = nltk.sent_tokenize(text)\n tokenized_sents = [nltk.word_tokenize(sent) for sent in sent_list]\n\n word_count = 0\n punctuation_count = 0\n types = set()\n token_list = []\n for sent in sent_list:\n for word in sent:\n token_list.extend(word)\n # get number of punctuations\n if word in string.punctuation:\n punctuation_count += 1\n else:\n # get types\n types.add(word.lower())\n\n # get number of tokens\n word_count += 1\n return types, word_count, token_list\n\n def get_head_features(head):\n types, word_count, _ = get_info(head)\n\n # get type-toke-ratio TTR (STTR might be better http://www.lexically.net/downloads/version5/HTML/index.html?type_token_ratio_proc.htm)\n if word_count != 0:\n ttr = float(len(types) / word_count)\n else:\n ttr = 0\n\n return [ttr]\n\n def get_body_features(body):\n types, word_count, token_list = get_info(body)\n\n\n # get type-toke-ratio TTR (STTR might be better http://www.lexically.net/downloads/version5/HTML/index.html?type_token_ratio_proc.htm)\n if word_count != 0:\n ttr = float(len(types) / word_count)\n else:\n ttr = 0\n\n # lexical diversity\n mtld = fe_util.mtld(token_list)\n\n return [ttr, mtld]\n\n body_features = [get_body_features(body) for body in tqdm(bodies)]\n head_features = [get_head_features(head) for head in tqdm(headlines)]\n\n features = np.concatenate([head_features, body_features], axis=1)\n\n return features\n\ndef max_diff_twitter_uni_bigrams(headlines, bodies):\n # Generates a set of features from the MaxDiff Twitter Sentiment Lexicon.\n # Features generated follow the ones generated in\n # [Mohammad et. 
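# Minimal illustration (three-word toy lexicon, invented scores) of the lexicon
# features computed by get_unigram_features_of_lexicon above and the MaxDiff
# Twitter features below: polarity counts, positive/negative score sums,
# extreme scores, and the score of the last polarised token.
def _lexicon_feature_example():
    lexicon = {'great': 1.5, 'hoax': -2.0, 'doubt': -0.8}
    scores = [lexicon.get(tok, 0.0) for tok in "great story or elaborate hoax".split()]
    pos = [s for s in scores if s > 0]
    neg = [s for s in scores if s < 0]
    return {'positive': len(pos), 'negative': len(neg),
            'pos_sum': sum(pos), 'neg_sum': sum(abs(s) for s in neg),
            'pos_max': max(pos, default=0.0), 'neg_max': min(neg, default=0.0),
            'last_polarity': next((s for s in reversed(scores) if s != 0.0), 0.0)}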
al 2013](http://www.aclweb.org/website/old_anthology/S/S13/S13-2.pdf#page=357)\n # - The polarity occurences (neg, none, pos) of all tokens and bigrams of the text\n # - Sum of score within tweet for each `p`\n # - Maximum token score for each `p`\n # - Score of last token in each tweet\n #\n # Source: http://saifmohammad.com/WebPages/lexicons.html#EmoLex4\n\n\n def last_token(tokenized, ngrams_list):\n # retrieve the polarity of the last unigram or bigram and take the highest value\n\n for unigram, bigram in list(zip_longest(reversed(tokenized), reversed(ngrams_list))):\n if unigram is not None:\n unigram = unigram.lower()\n\n if bigram is not None:\n bigram = bigram.lower()\n\n if polarity(unigram) != 'none' or polarity(bigram) != 'none':\n try:\n last_polarity_uni = wordDict[unigram]\n except KeyError:\n last_polarity_uni = 0\n try:\n last_polarity_bi = wordDict[bigram]\n except KeyError:\n last_polarity_bi = 0\n\n if abs(last_polarity_uni) > abs(last_polarity_bi):\n return {'last_polarity': last_polarity_uni}\n elif abs(last_polarity_uni) < abs(last_polarity_bi):\n return {'last_polarity': last_polarity_bi}\n elif abs(last_polarity_uni) == abs(last_polarity_bi):\n return {'last_polarity': last_polarity_uni}\n else:\n return {'last_polarity': 0}\n else:\n continue\n\n else: # called if KeyError occures\n return {'last_polarity': 0}\n\n def max_token(tokenized):\n # get highest and lowest polarity value for the words in the text\n negList = []\n posList = []\n\n for token in tokenized:\n token = token.lower()\n if polarity(token) == 'positive':\n posList.append(wordDict[token])\n elif polarity(token) == 'negative':\n negList.append(wordDict[token])\n\n try:\n pos_max = max(posList)\n except ValueError:\n pos_max = 0\n try:\n neg_max = min(negList)\n except ValueError:\n neg_max = 0\n\n return {'pos_max': pos_max, 'neg_max': neg_max}\n\n def polarity_sum(all_grams):\n # sums up the polarity-values found in the lexicon for the\n # tokens and bigrams in a text (negative and positive separately)\n negList = []\n posList = []\n\n for token in all_grams:\n token = token.lower()\n if polarity(token) == 'positive':\n posList.append(wordDict[token])\n elif polarity(token) == 'negative':\n negList.append(abs(wordDict[token]))\n\n return {'pos_sum': sum(posList), 'neg_sum': sum(negList)}\n\n def polarity(x):\n score = wordDict[x]\n if score > 0:\n return 'positive'\n if score < 0:\n return 'negative'\n else:\n return 'none'\n\n def count_tokens_with_polarity(all_grams):\n # counts the polarity (neg, none, pos) of all tokens and bigrams of the text\n scorelist = []\n for token in all_grams:\n token = token.lower()\n score = polarity(token)\n scorelist.append(score)\n\n pol_dict = dict(Counter(scorelist))\n\n if 'none' not in pol_dict:\n pol_dict['none'] = 0\n\n if 'positive' not in pol_dict:\n pol_dict['positive'] = 0\n\n if 'negative' not in pol_dict:\n pol_dict['negative'] = 0\n\n return pol_dict\n\n def get_function_parameters(string, tokenizer):\n tokenized = tokenizer.word_tokenize(string)\n ngrams_list = [' '.join(i) for i in nltk.ngrams(tokenized, 2)]\n all_grams = tokenized + ngrams_list\n\n return tokenized, ngrams_list, all_grams\n\n def all_feats_dict(string, tokenizer):\n\n tokenized, ngrams_list, all_grams = get_function_parameters(string, tokenizer)\n\n ct = count_tokens_with_polarity(all_grams)\n pol = polarity_sum(all_grams)\n max_tkn = max_token(tokenized)\n last = last_token(tokenized, ngrams_list)\n\n complete = dict()\n\n for dictionary in [ct, pol, max_tkn, last]:\n 
complete.update(dictionary)\n return complete\n\n lexicon_path = \"%s/../data/lexicons/maxDiffTwitter/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n wordDict = defaultdict(float)\n with open(lexicon_path + 'SemEval2015-English-Twitter-Lexicon.txt', 'r') as f:\n for row in f.readlines():\n row = row.split()\n wordDict[' '.join(row[1:])] = float(row[0])\n\n emo_counts_head = [all_feats_dict(headline, nltk) for headline in tqdm(headlines)]\n emo_counts_body = [all_feats_dict(body, nltk) for body in tqdm(bodies)]\n\n emo_counts_head_df = pd.DataFrame(emo_counts_head)\n emo_counts_head_df = emo_counts_head_df.fillna(0)\n\n emo_counts_body_df = pd.DataFrame(emo_counts_body)\n emo_counts_body_df = emo_counts_body_df.fillna(0)\n\n emo_counts = np.concatenate([emo_counts_head_df.as_matrix(), emo_counts_body_df.as_matrix()], axis=1)\n\n return emo_counts\n\ndef mpqa_unigrams(headlines, bodies):\n \"\"\"\n Extracts the same features as in get_unigram_features_of_lexicon, just with the subjectivity clues lexicon\n\n NOTE:\n Simplified form of dictionary initialization here; an entry for a word can be an array. E.g.\n dict[\"abandon\"] = [(-1, strongsubj), (-1, weaksubj)]. This is ignored here. The strongsubj entries\n are getting multiplied by a factor 3 and will replace weaksubj entries.\n\n ADDITIONAL NOTE:\n Weighted features perform worse (See feature_engineering_crapyard.py variants of this method)\n\n \"\"\"\n\n lexicon_path = \"%s/../data/lexicons/MPQA/subjectivity-clues.csv\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n\n wordDict = defaultdict(float)\n with open(lexicon_path, 'r') as f:\n reader = csv.reader(f)\n headerRows = [i for i in range(0, 1)]\n for row in headerRows:\n next(reader)\n for row in reader:\n\n score = row[5]\n if score == 'positive':\n score = 1\n elif score == 'negative':\n score = -1\n else:\n score = 0\n\n subjectivity = row[0]\n if subjectivity == 'strongsubj':\n score = score * 3\n\n if row[2] in wordDict and wordDict[row[2]] < score:\n wordDict[row[2]] = float(score)\n elif row[2] not in wordDict:\n wordDict[row[2]] = float(score)\n\n return get_unigram_features_of_lexicon(headlines, bodies, \"\", no_hash=False, given_lexicon=wordDict)\n\ndef negated_context_word_12grams_concat_tf5000_l2_all_data(headlines, bodies):\n \"\"\"\n Negates string after special negation word by adding a \"NEG_\" in front\n of every negated word, until a punctuation mark appears.\n Source:\n NRC-Canada: Buidling the State-of-the-Art in Sentiment Analysis of Tweets\n http://sentiment.christopherpotts.net/lingstruc.html\n http://stackoverflow.com/questions/23384351/how-to-add-tags-to-negated-words-in-strings-that-follow-not-no-and-never\n\n\n :param headlines:\n :param bodies:\n :return:\n \"\"\"\n\n def get_negated_text(text):\n transformed = re.sub(\n r'\\b'\n r'(?:never|no|nothing|nowhere|noone|none|not|havent|hasnt|hadnt|'\n r'cant|couldnt|shouldnt|wont|wouldnt|dont|doesnt|didnt|isnt|arent|'\n r'aint|[a-z].n\\'t)'\n r'\\b'\n r'[\\w\\s]+[,.:;!?]',\n lambda match: re.sub(r'(\\s+)(\\w+)', r'\\1NEG_\\2', match.group(0)),\n text,\n flags=re.IGNORECASE)\n return transformed\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_vocab(neg_headlines, neg_bodies):\n tf_vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words='english', max_features=5000, use_idf=False,\n norm='l2')\n 
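# Usage illustration of the NEG_ tagging performed by get_negated_text above
# (toy sentence; every word after the negation cue up to the next punctuation
# mark receives a NEG_ prefix):
#   get_negated_text("I do not like green eggs, and ham.")
#   -> "I do not NEG_like NEG_green NEG_eggs, and ham."
# The tagged text is what the uni/bigram vectorizers in this feature operate on.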
tf_vectorizer.fit_transform(combine_head_and_body(neg_headlines, neg_bodies))\n vocab = tf_vectorizer.vocabulary_\n\n return vocab\n\n def get_features(neg_headlines_test, neg_bodies_test, vocab):\n tf_vectorizer_head = TfidfVectorizer(vocabulary=vocab, stop_words='english', use_idf=False, norm='l2')\n X_test_head = tf_vectorizer_head.fit_transform(neg_headlines_test)\n\n tf_vectorizer_body = TfidfVectorizer(vocabulary=vocab, stop_words='english', use_idf=False, norm='l2')\n X_test_body = tf_vectorizer_body.fit_transform(neg_bodies_test)\n\n X_test = np.concatenate([X_test_head.toarray(), X_test_body.toarray()], axis=1)\n return X_test\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n\n # Comment out for clean ablation tests\n h.extend(h_test)\n b.extend(b_test)\n\n neg_headlines_all = [get_negated_text(h) for h in h]\n neg_bodies_all = [get_negated_text(b) for b in b]\n neg_headlines = [get_negated_text(h) for h in headlines]\n neg_bodies = [get_negated_text(b) for b in bodies]\n\n vocab = get_vocab(neg_headlines_all, neg_bodies_all)\n X_train = get_features(neg_headlines, neg_bodies, vocab)\n\n return X_train\n\ndef nrc_emo_lex(headlines, bodies):\n \"\"\"\n Counts Number of words in a text associated with 8 different emotions.\n Uses EmoLex lexicon: http://saifmohammad.com/WebPages/lexicons.html#EmoLex\n\n \"\"\"\n\n lexicon_path = \"%s/../data/lexicons/emoLex/\" % (path.dirname(path.dirname(path.abspath(__file__))))\n word_list = defaultdict(list)\n # emotion_list = defaultdict(list)\n emotion_set = set()\n\n with open(lexicon_path + 'NRC_emotion_lexicon_list.txt', 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n for word, emotion, present in reader:\n if int(present) == 1: # 1 = word/emotion-allocation present\n word_list[word].append(emotion)\n # emotion_list[emotion].append(word)\n emotion_set.add(emotion)\n\n def generate_emotion_count(string):\n emo_count = Counter()\n for token in nltk.word_tokenize(string):\n token = token.lower()\n emo_count += Counter(word_list[token])\n\n # Guarantee same length for each feature vector by adding emotions\n # that do no appear in the text\n for emotion in emotion_set:\n if (emotion not in emo_count):\n emo_count[emotion] = 0\n\n return emo_count\n\n emo_counts_head = [generate_emotion_count(headline) for headline in tqdm(headlines)]\n emo_counts_body = [generate_emotion_count(body) for body in tqdm(bodies)]\n\n emo_counts_head_df = pd.DataFrame(emo_counts_head)\n emo_counts_head_df = emo_counts_head_df.fillna(0)\n\n emo_counts_body_df = pd.DataFrame(emo_counts_body)\n emo_counts_body_df = emo_counts_body_df.fillna(0)\n\n emo_counts = np.concatenate([emo_counts_head_df.as_matrix(), emo_counts_body_df.as_matrix()], axis=1)\n\n return emo_counts\n\n\ndef nrc_hashtag_sentiment_unigram(headlines, bodies):\n lexicon_path = \"%s/../data/lexicons/hashtagSentiment/unigrams-pmilexicon.txt\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n return get_unigram_features_of_lexicon(headlines, bodies, lexicon_path, no_hash=False)\n\n\ndef nrc_hashtag_sentiment_unigram_POS(headlines, bodies):\n \"\"\"\n From Paper: NRC-Canada: Building the State-Of-The-Art in Sentiment Analysis of Tweets\n Mohammad et al.\n # calculates the nrc hashtag sentiments for:\n # all verbs in the head / body\n # all nouns in the head / body\n # all adjectives in head / body\n # and merges the features after that\n\n \"\"\"\n\n def get_features_head(headline):\n tokenized_head = nltk.word_tokenize(headline)\n 
pos_tags_head = nltk.pos_tag(tokenized_head, tagset='universal')\n\n head_text_VERB = \"\"\n head_text_ADJ = \"\"\n head_text_NOUN = \"\"\n head_text_ADV = \"\"\n head_text_PRON = \"\"\n for word, tag in pos_tags_head:\n if tag == 'VERB':\n head_text_VERB += \" \" + word\n if tag == 'ADJ':\n head_text_ADJ += \" \" + word\n if tag == 'NOUN':\n head_text_NOUN += \" \" + word\n if tag == 'ADV':\n head_text_ADV += \" \" + word\n if tag == 'PRON':\n head_text_PRON += \" \" + word\n pos_dict_head['VERB'].append(head_text_VERB)\n pos_dict_head['ADJ'].append(head_text_ADJ)\n pos_dict_head['NOUN'].append(head_text_NOUN)\n pos_dict_head['ADV'].append(head_text_ADV)\n pos_dict_head['PRON'].append(head_text_PRON)\n\n def get_features_body(body):\n sent_list_body = nltk.sent_tokenize(body)\n tokenized_sents_body = [nltk.word_tokenize(sent) for sent in sent_list_body]\n pos_tags = nltk.pos_tag_sents(tokenized_sents_body, tagset='universal')\n\n text_VERB = \"\"\n text_ADJ = \"\"\n text_NOUN = \"\"\n text_ADV = \"\"\n text_PRON = \"\"\n\n for sent in pos_tags:\n for word, tag in sent:\n if tag == 'VERB':\n text_VERB += \" \" + word\n if tag == 'ADJ':\n text_ADJ += \" \" + word\n if tag == 'NOUN':\n text_NOUN += \" \" + word\n if tag == 'ADV':\n text_ADV += \" \" + word\n if tag == 'PRON':\n text_PRON += \" \" + word\n pos_dict_body['VERB'].append(text_VERB)\n pos_dict_body['ADJ'].append(text_ADJ)\n pos_dict_body['NOUN'].append(text_NOUN)\n pos_dict_body['ADV'].append(text_ADV)\n pos_dict_body['PRON'].append(text_PRON)\n\n pos_dict_head = {\n 'VERB': [],\n 'ADJ': [],\n 'NOUN': [],\n 'ADV': [],\n 'PRON': []\n }\n pos_dict_body = {\n 'VERB': [],\n 'ADJ': [],\n 'NOUN': [],\n 'ADV': [],\n 'PRON': []\n }\n\n for h in tqdm(headlines):\n get_features_head(h)\n\n for b in tqdm(bodies):\n get_features_body(b)\n\n verb_features = nrc_hashtag_sentiment_unigram(pos_dict_head['VERB'], pos_dict_body['VERB'])\n adj_features = nrc_hashtag_sentiment_unigram(pos_dict_head['ADJ'], pos_dict_body['ADJ'])\n noun_features = nrc_hashtag_sentiment_unigram(pos_dict_head['NOUN'], pos_dict_body['NOUN'])\n adv_features = nrc_hashtag_sentiment_unigram(pos_dict_head['ADV'], pos_dict_body['ADV'])\n pron_features = nrc_hashtag_sentiment_unigram(pos_dict_head['PRON'], pos_dict_body['PRON'])\n\n feature_matrix = np.concatenate(\n [verb_features,\n adj_features,\n noun_features,\n adv_features,\n pron_features\n ], axis=1)\n\n return feature_matrix\n\ndef sentiment140_unigrams(headlines, bodies):\n lexicon_path = \"%s/../data/lexicons/sentiment140/unigrams-pmilexicon.txt\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n return get_unigram_features_of_lexicon(headlines, bodies, lexicon_path, no_hash=False)\n\n\ndef readability_features(headlines, bodies):\n\n def get_head_features(head):\n tokenized = nltk.word_tokenize(head)\n word_counter = 0\n\n # get average word length\n for word in tokenized:\n if word not in string.punctuation:\n word_counter += 1\n\n # get coleman liau index\n CL_index = fe_util.coleman_liau_index(head, word_counter)\n\n # get automated readability index\n AR_index = fe_util.automated_readability_index(head, word_counter)\n\n # LIX readability index\n LIX_index = fe_util.lix_index(head, word_counter)\n\n # RIX readability index\n RIX_index = fe_util.rix_index(head)\n\n # McAlpine EFLAW index\n EFLAW_index = fe_util.mcalpine_eflaw_index(head)\n\n # Strain index\n strain_index = fe_util.strain_index(head)\n\n # calculate flesch-kincaid grade level\n FK_grade = fe_util.flesch_grade_level(head)\n\n # 
calculate gunning-fog grade level\n GI_grade = fe_util.gunning_fog_index(head, word_counter)\n\n # Flesh Kincaid Reading Ease index\n FK_reading_ease = fe_util.flesch_reading_ease(head)\n\n return [FK_grade, GI_grade, FK_reading_ease, CL_index, AR_index, LIX_index, RIX_index, EFLAW_index, strain_index]\n\n def get_body_features(body):\n # get number of nouns and tokens\n sent_list = nltk.sent_tokenize(body)\n tokenized_sents = [nltk.word_tokenize(sent) for sent in sent_list]\n pos_tags_sents = nltk.pos_tag_sents(tokenized_sents)\n\n word_count = 0\n punctuation_count = 0\n types = set()\n for sent in pos_tags_sents:\n for word, tag in sent:\n # get number of punctuations\n if word in string.punctuation:\n punctuation_count += 1\n else:\n # get types\n types.add(word.lower())\n\n # get number of tokens\n word_count += 1\n\n\n # get coleman liau index\n CL_index = fe_util.coleman_liau_index(body, word_count)\n\n # get automated readability index\n AR_index = fe_util.automated_readability_index(body, word_count)\n\n # LIX readability index\n LIX_index = fe_util.lix_index(body, word_count)\n\n # RIX readability index\n RIX_index = fe_util.rix_index(body)\n\n # McAlpine EFLAW index\n EFLAW_index = fe_util.mcalpine_eflaw_index(body)\n\n # Strain index\n strain_index = fe_util.strain_index(body)\n\n # calculate flesch-kincaid grade level\n FK_grade = fe_util.flesch_grade_level(body)\n\n # calculate gunning-fog grade level\n GI_grade = fe_util.gunning_fog_index(body, word_count)\n\n SMOG_index = fe_util.smog_index(body)\n\n # Flesh Kincaid Reading Ease index\n FK_reading_ease = fe_util.flesch_reading_ease(body)\n\n return [FK_grade, GI_grade, FK_reading_ease, CL_index, AR_index, LIX_index, RIX_index, EFLAW_index, strain_index, SMOG_index]\n\n body_features = [get_body_features(body) for body in tqdm(bodies)]\n head_features = [get_head_features(head) for head in tqdm(headlines)]\n\n features = np.concatenate([head_features, body_features], axis=1)\n\n return features\n\ndef structural_features(headlines, bodies):\n \"\"\"\n Implements the significant features of the paper \"This Just In: Fake News Packs a Lot in Title,\n Uses Simpler, Repetitive Content in Text Body, More Similar to Satire than Real News\" by\n Benjamin D. Horne and Sibel Adali of Rensslar Polytechnic Institute, New York\n\n Open to implement:\n avg_negstr: method implemented, but due to opening a jar lib way too slow to use\n analytic: LIWC lexicon needed (not free)\n \"\"\"\n\n def count_verb_phrases(t, print_tree=False):\n # http: // www.nltk.org / book / ch07.html # developing-and-evaluating-chunkers\n count = 0\n try:\n t.label()\n except AttributeError:\n if print_tree:\n print(t, end=\" \")\n else:\n # Now we know that t.node is defined\n if print_tree:\n print('(', t.label(), end=\" \")\n for child in t:\n if t.label() == 'VP':\n count = 1\n count = count + count_verb_phrases(child, print_tree)\n if print_tree:\n print(')', end=\" \")\n return count\n\n def get_head_features(head):\n tokenized = nltk.word_tokenize(head)\n word_len_sum = 0\n avg_wlen = 0\n word_counter = 0\n\n # get average word length\n for word in tokenized:\n if word not in string.punctuation:\n word_len_sum += len(word)\n word_counter += 1\n if word_counter > 0:\n avg_wlen = float(word_len_sum / word_counter)\n return [avg_wlen]\n\n def get_paragraph_breaks(text):\n \"\"\"Identifies indented text or line breaks as the beginning of\n paragraphs and returns a list with indices of paragraph\n beginnings. 
List always starts with a 0 => from TextTilingTokenizer\"\"\"\n\n MIN_PARAGRAPH = 100\n pattern = re.compile(\"[ \\t\\r\\f\\v]*\\n[ \\t\\r\\f\\v]*\\n[ \\t\\r\\f\\v]*\")\n matches = pattern.finditer(text)\n\n last_break = 0\n pbreaks = [0]\n for pb in matches:\n if pb.start() - last_break < MIN_PARAGRAPH:\n continue\n else:\n pbreaks.append(pb.start())\n last_break = pb.start()\n\n return pbreaks\n\n def get_avg_paragraph_length(text, pbreaks):\n \"\"\"\n Takes a text and the indices of the paragraph breaks and reaturn the average\n paragraph lengths\n \"\"\"\n paragraph_list = []\n counter = 0\n for index in pbreaks:\n if counter > 0:\n paragraph_list.append(text[pbreaks[counter - 1]:index])\n counter += 1\n paragraph_list.append(text[pbreaks[-1]:])\n\n paragraph_lengths = []\n for para in paragraph_list:\n tokenized = nltk.word_tokenize(para)\n para_length = 0\n for token in tokenized:\n if token not in string.punctuation:\n para_length += 1\n paragraph_lengths.append(para_length)\n\n if len(paragraph_lengths) > 0:\n return sum(paragraph_lengths) / len(paragraph_lengths)\n else:\n return 0\n\n def get_body_features(body):\n\n # get number of nouns and tokens\n sent_list = nltk.sent_tokenize(body)\n tokenized_sents = [nltk.word_tokenize(sent) for sent in sent_list]\n\n\n word_count = 0\n punctuation_count = 0\n\n word_len_sum = 0\n for sent in tokenized_sents:\n for word in sent:\n\n # get number of punctuations\n if word in string.punctuation:\n punctuation_count += 1\n else:\n # sum up length of words\n word_len_sum += len(word)\n\n # get number of tokens\n word_count += 1\n\n\n # number of paragraphs and their avg lengths\n pbreaks = get_paragraph_breaks(body)\n paragraph_count = len(pbreaks) - 1\n avg_paragraph_length = get_avg_paragraph_length(body, pbreaks)\n\n # get average word length\n avg_wlen = 0\n if word_count > 0:\n avg_wlen = float(word_len_sum / word_count)\n\n return [avg_wlen, paragraph_count, avg_paragraph_length]\n\n body_features = [get_body_features(body) for body in tqdm(bodies)]\n head_features = [get_head_features(head) for head in tqdm(headlines)]\n\n features = np.concatenate([head_features, body_features], axis=1)\n\n return features\n\n##\\\\Too add explanation\ndef POS_features(headlines, bodies):\n \"\"\"\n Implements the significant features of the paper \"This Just In: Fake News Packs a Lot in Title,\n Uses Simpler, Repetitive Content in Text Body, More Similar to Satire than Real News\" by\n Benjamin D. 
Horne and Sibel Adali of Rensslar Polytechnic Institute, New York\n\n Open to implement:\n avg_negstr: method implemented, but due to opening a jar lib way too slow to use\n analytic: LIWC lexicon needed (not free)\n \"\"\"\n\n def count_verb_phrases(t, print_tree=False):\n # http: // www.nltk.org / book / ch07.html # developing-and-evaluating-chunkers\n count = 0\n try:\n t.label()\n except AttributeError:\n if print_tree:\n print(t, end=\" \")\n else:\n # Now we know that t.node is defined\n if print_tree:\n print('(', t.label(), end=\" \")\n for child in t:\n if t.label() == 'VP':\n count = 1\n count = count + count_verb_phrases(child, print_tree)\n if print_tree:\n print(')', end=\" \")\n return count\n\n def get_head_features(head):\n tokenized = nltk.word_tokenize(head)\n word_len_sum = 0\n avg_wlen = 0\n word_counter = 0\n\n # get average word length\n for word in tokenized:\n if word not in string.punctuation:\n word_len_sum += len(word)\n word_counter += 1\n\n if word_counter > 0:\n avg_wlen = float(word_len_sum / word_counter)\n\n # calculate percentage of stopwords\n stop_words_nltk = set(stopwords.words('english')) # use set for faster \"not in\" check\n stop_words_sklearn = feature_extraction.text.ENGLISH_STOP_WORDS\n all_stop_words = stop_words_sklearn.union(stop_words_nltk)\n stop_words_counter = 0\n per_stop = 0\n word_freq_in_head = defaultdict(int)\n for word in tokenized:\n if word.lower() in all_stop_words:\n stop_words_counter += 1\n word_freq_in_head[word] = word_freq_dict[word]\n if word_counter > 0:\n per_stop = stop_words_counter / word_counter\n\n # calculate frequency of 3 least common words\n w_freq_list = list(reversed(collections.Counter(word_freq_in_head.values()).most_common()))\n flu_reuters_c = 0\n counter = 0\n for i in range(3):\n if len(w_freq_list) > i:\n counter += 1\n flu_reuters_c += w_freq_list[i][0]\n if counter > 0:\n flu_reuters_c = float(flu_reuters_c / counter)\n else:\n flu_reuters_c = 0\n\n # get number of quotes http://stackoverflow.com/questions/28037857/how-to-extract-all-quotes-in-a-document-text-using-regex\n # and then calculate the ratio #quoted words / #words\n match = re.findall('(?:[\\“\\'\\\"](.*?)[\\”\\'\\\"])', head)\n quoted_words_count = 0\n quoted_word_ratio = 0\n for quote in match:\n tokenized_quote = nltk.word_tokenize(quote)\n for token in tokenized_quote:\n if token not in string.punctuation:\n quoted_words_count += 1\n if word_counter > 0:\n quoted_word_ratio = float(quoted_words_count / word_counter)\n\n # calculate number of nouns and proper nouns\n pos_tagged = nltk.pos_tag(tokenized)\n NN_count = 0\n NNP_count = 0\n focuspast = 0\n for word, tag in pos_tagged: # http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html\n if tag == 'NN' or tag == 'NNS':\n NN_count += 1\n if tag == 'NNP' or tag == 'NNPS':\n NNP_count += 1\n if tag == 'VBD': # verb, past tense\n focuspast += 1\n\n # calculate verb phrases\n grammar = \"\"\"\n P: {<IN>} # Preposition\n PP: {<P> <NP>} # PP -> P NP\n V: {<V.*>} # Verb\n NP: {<DT>? <JJ>* <NN>*} # NP\n VP: {<RB.*>? 
<V>+ <NP|PP>*}\n \"\"\"\n cp = nltk.RegexpParser(grammar)\n sent_list = nltk.sent_tokenize(head)\n tokenized_sents = [nltk.word_tokenize(sent) for sent in sent_list]\n pos_tags_sents = nltk.pos_tag_sents(tokenized_sents)\n\n vp_count = 0\n for pos_sent in pos_tags_sents:\n tree = cp.parse(pos_sent)\n vp_count += count_verb_phrases(tree, print_tree=False)\n\n return [per_stop, NN_count, NNP_count, vp_count, flu_reuters_c, focuspast, quoted_word_ratio]\n\n def get_paragraph_breaks(text):\n \"\"\"Identifies indented text or line breaks as the beginning of\n paragraphs and returns a list with indices of paragraph\n beginnings. List always starts with a 0 => from TextTilingTokenizer\"\"\"\n\n MIN_PARAGRAPH = 100\n pattern = re.compile(\"[ \\t\\r\\f\\v]*\\n[ \\t\\r\\f\\v]*\\n[ \\t\\r\\f\\v]*\")\n matches = pattern.finditer(text)\n\n last_break = 0\n pbreaks = [0]\n for pb in matches:\n if pb.start() - last_break < MIN_PARAGRAPH:\n continue\n else:\n pbreaks.append(pb.start())\n last_break = pb.start()\n\n return pbreaks\n\n def get_avg_paragraph_length(text, pbreaks):\n \"\"\"\n Takes a text and the indices of the paragraph breaks and reaturn the average\n paragraph lengths\n \"\"\"\n paragraph_list = []\n counter = 0\n for index in pbreaks:\n if counter > 0:\n paragraph_list.append(text[pbreaks[counter - 1]:index])\n counter += 1\n paragraph_list.append(text[pbreaks[-1]:])\n\n paragraph_lengths = []\n for para in paragraph_list:\n tokenized = nltk.word_tokenize(para)\n para_length = 0\n for token in tokenized:\n if token not in string.punctuation:\n para_length += 1\n paragraph_lengths.append(para_length)\n\n if len(paragraph_lengths) > 0:\n return sum(paragraph_lengths) / len(paragraph_lengths)\n else:\n return 0\n\n def get_body_features(body):\n # get number of quotes http://stackoverflow.com/questions/28037857/how-to-extract-all-quotes-in-a-document-text-using-regex\n match = re.findall('(?:[\\“\\'\\\"](.*?)[\\”\\'\\\"])', body)\n quote_count = len(match)\n\n # get number of nouns and tokens\n sent_list = nltk.sent_tokenize(body)\n tokenized_sents = [nltk.word_tokenize(sent) for sent in sent_list]\n pos_tags_sents = nltk.pos_tag_sents(tokenized_sents)\n\n NN_count = 0\n word_count = 0\n punctuation_count = 0\n PRP_count = 0\n RB_count = 0\n CD_count = 0\n word_len_sum = 0\n types = set()\n for sent in pos_tags_sents:\n for word, tag in sent:\n\n # get number of punctuations\n if word in string.punctuation:\n punctuation_count += 1\n else:\n # sum up length of words\n word_len_sum += len(word)\n\n # get types\n types.add(word.lower())\n\n # get number of tokens\n word_count += 1\n\n # get number of personal pronouns\n if tag == 'PRP':\n PRP_count += 1\n\n # get number of nouns\n if tag == 'NN' or tag == 'NNS':\n NN_count += 1\n\n # get number of nouns\n if tag == 'RB' or tag == 'RBR' or tag == 'RBS':\n RB_count += 1\n\n if tag == 'CD':\n CD_count += 1\n\n # number of paragraphs and their avg lengths\n pbreaks = get_paragraph_breaks(body)\n paragraph_count = len(pbreaks) - 1\n avg_paragraph_length = get_avg_paragraph_length(body, pbreaks)\n\n # get number of quotes http://stackoverflow.com/questions/28037857/how-to-extract-all-quotes-in-a-document-text-using-regex\n # and then calculate the ratio #quoted words / #words\n match = re.findall('(?:[\\“\\'\\\"](.*?)[\\”\\'\\\"])', body)\n quoted_words_count = 0\n quoted_word_ratio = 0\n for quote in match:\n tokenized_quote = nltk.word_tokenize(quote)\n for token in tokenized_quote:\n if token not in string.punctuation:\n quoted_words_count += 1\n 
if word_count > 0:\n quoted_word_ratio = float(quoted_words_count / word_count)\n\n # get average word length\n avg_wlen = 0\n if word_count > 0:\n avg_wlen = float(word_len_sum / word_count)\n\n return [NN_count,\n punctuation_count, PRP_count, RB_count, CD_count, quoted_word_ratio]\n\n word_freq_dict = nltk.FreqDist(reuters.words())\n body_features = [get_body_features(body) for body in tqdm(bodies)]\n head_features = [get_head_features(head) for head in tqdm(headlines)]\n\n features = np.concatenate([head_features, body_features], axis=1)\n\n return features\n\n####Athene features of the FNC-1\ndef NMF_fit_all_incl_holdout_and_test(headlines, bodies):\n #http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html#sphx-glr-auto-examples-applications-topics-extraction-with-nmf-lda-py\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n from sklearn.externals import joblib\n\n print(\"WARNING: IF SIZE OF HEAD AND BODY DO NOT MATCH, \"\n \"RUN THIS FEATURE EXTRACTION METHOD SEPERATELY (WITHOUT ANY OTHER FE METHODS) TO CREATE THE FEATURES ONCE!\")\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_all_data(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_incl_holdout_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1,1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_incl_holdout_and_test: complete vocabulary length=\" + str(len(list(vocab.keys()))))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return X_all, vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n vocab = pickle.load(handle)\n vectorizer_all = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n return X_all, vectorizer_all.vocabulary_\n\n def get_vocab(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_incl_holdout_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_incl_holdout_and_test: complete vocabulary length=\" + str(len(X_all[0])))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n return pickle.load(handle)\n\n\n def get_features(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = 
\"NMF_fit_all_incl_holdout_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".pkl\")):\n X_all, vocab = get_all_data(head_and_body)\n\n # calculates n most important topics of the bodies. Each topic contains all words but ordered by importance. The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n nfm = NMF(n_components=300, random_state=1, alpha=.1)\n\n print(\"NMF_fit_all_incl_holdout_and_test: fit and transform body\")\n t0 = time()\n nfm.fit_transform(X_all)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n with open(features_dir + \"/\" + filename + \".pkl\", 'wb') as handle:\n joblib.dump(nfm, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n vocab = get_vocab(head_and_body)\n with open(features_dir + \"/\" + filename + \".pkl\", 'rb') as handle:\n nfm = joblib.load(handle)\n\n\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n print(\"NMF_fit_all_incl_holdout_and_test: transform head and body\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n nfm_head_matrix = nfm.transform(X_train_head)\n nfm_body_matrix = nfm.transform(X_train_body)\n\n print('NMF_fit_all_incl_holdout_and_test: calculating cosine distance between head and body')\n # calculate cosine distance between the body and head\n X = []\n for i in range(len(nfm_head_matrix)):\n X_head_vector = np.array(nfm_head_matrix[i]).reshape((1, -1)) #1d array is deprecated\n X_body_vector = np.array(nfm_body_matrix[i]).reshape((1, -1))\n cos_dist = cosine_distances(X_head_vector, X_body_vector).flatten()\n X.append(cos_dist.tolist())\n return X\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n h.extend(h_test)\n b.extend(b_test)\n head_and_body = combine_head_and_body(h, b)\n\n X = get_features(head_and_body)\n\n return X\n\ndef create_word_ngram_vocabulary(ngram_range=(1,1), max_features=100, lemmatize=False, term_freq=False, norm='l1', use_idf=False, include_holdout=False):\n \"\"\"\n Creates, returns and saves a vocabulary for (Count-)Vectorizer over all training and test data (holdout excluded) to create BoW\n methods. The method simplifies using the pipeline and later tests with feature creation for a single headline and body.\n This method will cause bleeding, since it also includes the test set.\n\n :param filename: a filename for the vocabulary\n :param ngram_range: the ngram range for the Vectorizer. 
Default is (1, 1) => unigrams\n :param max_features: the length of the vocabulary\n :return: the vocabulary\n \"\"\"\n # file paths\n '''\n data_path = \"%s/data/fnc-1\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n splits_dir = \"%s/data/fnc-1/splits\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n\n dataset = DataSet(data_path)\n '''\n features_dir = myConstants.features_dir\n\n print(\"Calling create_word_ngram_vocabulary with ngram_range=(\"\n + str(ngram_range[0]) + \", \" + str(ngram_range[1]) + \"), max_features=\"\n + str(max_features) + \", lemmatize=\" + str(lemmatize) + \", term_freq=\" + str(term_freq))\n def get_all_stopwords():\n stop_words_nltk = set(stopwords.words('english')) # use set for faster \"not in\" check\n stop_words_sklearn = feature_extraction.text.ENGLISH_STOP_WORDS\n all_stop_words = stop_words_sklearn.union(stop_words_nltk)\n return all_stop_words\n\n def get_tokenized_lemmas_without_stopwords(s):\n all_stop_words = get_all_stopwords()\n return [normalize_word(t) for t in nltk.word_tokenize(s)\n if t not in string.punctuation and t.lower() not in all_stop_words]\n\n\n def train_vocabulary(head_and_body):\n # trains a CountVectorizer on all of the data except for holdout data\n if lemmatize == False:\n vectorizer = CountVectorizer(ngram_range=ngram_range, stop_words='english', max_features=max_features)\n if term_freq == True:\n vectorizer = TfidfVectorizer(ngram_range=ngram_range, stop_words='english', max_features=max_features, use_idf=use_idf, norm=norm)\n else:\n vectorizer = CountVectorizer(ngram_range=ngram_range, max_features=max_features,\n tokenizer=get_tokenized_lemmas_without_stopwords)\n if term_freq == True:\n vectorizer = TfidfVectorizer(ngram_range=ngram_range, max_features=max_features,\n tokenizer=get_tokenized_lemmas_without_stopwords, use_idf=use_idf, norm=norm)\n vectorizer.fit_transform(head_and_body)\n vocab = vectorizer.vocabulary_\n return vocab\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n return head_and_body\n\n\n # create filename for vocab\n vocab_file = \"word_(\" + str(ngram_range[0]) + \"_\" + str(ngram_range[1]) + \")-gram_\" + str(max_features)\n if lemmatize == True:\n vocab_file += \"_lemmatized\"\n if term_freq == True:\n vocab_file += \"_tf\"\n if use_idf == True:\n vocab_file += \"_idf\"\n if include_holdout == True:\n vocab_file += \"_holdout\"\n vocab_file += \"_\" + norm + \".pickle\"\n\n # if vocab already exists, just load and return it\n if (os.path.exists(features_dir + \"/\" + vocab_file)):\n with open(features_dir + \"/\" + vocab_file, 'rb') as handle:\n vocab = pickle.load(handle)\n print(\"Existing vocabulary found and load.\")\n return vocab\n\n h, b = get_head_body_tuples(include_holdout=include_holdout)\n head_and_body = combine_head_and_body(h, b) # combine head and body\n vocab = train_vocabulary(head_and_body) # get vocabulary (features)\n\n # save the vocabulary as file\n with open(features_dir + \"/\" + vocab_file, 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"vocab length: \" + str(len(vocab)))\n return vocab\n\ndef latent_dirichlet_allocation(headlines, bodies):\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # 
https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n\n def print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic #%d:\" % topic_idx)\n print(\", \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print()\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=False, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=False, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n # calculates n most important topics of the bodies. Each topic contains all words but ordered by importance. The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n lda_body = LatentDirichletAllocation(n_topics=25, learning_method='online', random_state=0, n_jobs=3)\n\n print(\"latent_dirichlet_allocation: fit and transform body\")\n t0 = time()\n lda_body_matrix = lda_body.fit_transform(X_train_body)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n print(\"latent_dirichlet_allocation: transform head\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n lda_head_matrix = lda_body.transform(X_train_head)\n\n #print_top_words(lda_body, vectorizer_body.get_feature_names(), 100)\n\n print('latent_dirichlet_allocation: calculating cosine distance between head and body')\n # calculate cosine distance between the body and head\n X = []\n for i in range(len(lda_head_matrix)):\n X_head_vector = np.array(lda_head_matrix[i]).reshape((1, -1)) #1d array is deprecated\n X_body_vector = np.array(lda_body_matrix[i]).reshape((1, -1))\n cos_dist = cosine_distances(X_head_vector, X_body_vector).flatten()\n X.append(cos_dist.tolist())\n return X\n\n\n vocab = create_word_ngram_vocabulary(ngram_range=(1, 1), max_features=5000, lemmatize=False, term_freq=True,\n norm='l2')\n X = get_features(vocab)\n return X\n\ndef latent_dirichlet_allocation_incl_holdout_and_test(headlines, bodies):\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n\n def print_top_words(model, feature_names, n_top_words):\n for topic_idx, topic in enumerate(model.components_):\n print(\"Topic #%d:\" % topic_idx)\n print(\", \".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]))\n print()\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=False, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=False, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n # calculates n most important topics of the bodies. 
Each topic contains all words but ordered by importance. The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n lda_body = LatentDirichletAllocation(n_topics=100, learning_method='online', random_state=0, n_jobs=3)\n\n print(\"latent_dirichlet_allocation_incl_holdout_and_test: fit and transform body\")\n t0 = time()\n lda_body_matrix = lda_body.fit_transform(X_train_body)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n print(\"latent_dirichlet_allocation_incl_holdout_and_test: transform head\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n lda_head_matrix = lda_body.transform(X_train_head)\n\n #print_top_words(lda_body, vectorizer_body.get_feature_names(), 100)\n\n print('latent_dirichlet_allocation_incl_holdout_and_test: calculating cosine distance between head and body')\n # calculate cosine distance between the body and head\n X = []\n for i in range(len(lda_head_matrix)):\n X_head_vector = np.array(lda_head_matrix[i]).reshape((1, -1)) #1d array is deprecated\n X_body_vector = np.array(lda_body_matrix[i]).reshape((1, -1))\n cos_dist = cosine_distances(X_head_vector, X_body_vector).flatten()\n X.append(cos_dist.tolist())\n return X\n\n\n h, b = get_head_body_tuples(include_holdout=True)\n\n h_test, b_test = get_head_body_tuples_test()\n\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of heads: \" + str(len(h)))\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of bodies: \" + str(len(b)))\n h.extend(h_test)\n b.extend(b_test)\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of heads after ext: \" + str(len(h)))\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of bodies after ext: \" + str(len(b)))\n\n tfidf = TfidfVectorizer(ngram_range=(1,1), stop_words='english', max_features=5000, use_idf=False,\n norm='l2')\n tfidf.fit_transform(combine_head_and_body(h,b))\n vocab = tfidf.vocabulary_\n\n X = get_features(vocab)\n return X\n\ndef latent_semantic_indexing_gensim_holdout_and_test(headlines, bodies):\n \"\"\"\n Takes all the data (holdout+test+train) and interpretes the headlines and bodies as different\n documents. Instead of combining them, they are appended. Then it tokenizes these ~50k headline-docs and ~50k body-docs,\n builds a Tfidf-Matrix out of them and creates a LSI-Model out of it. In the next step the headlines and\n bodies for the feature generation are also treated as different documents and merely appended. Also, they are tokenized and\n a Tfifd-Matrix is built. This matix is passed to the learned LSI-Model and a Matrix is being returned.\n In this matrix, each document is represented as a vector with length(topics) of (topic-id, distance of this doc to the topic).\n The probabilities are then taken as a feature vector for the document. The first half of the matrix represent the headline docs,\n the latter half represent the body docs. 
In the end, the feature vectors of the headlines get concatenated with its body feature vector.\n\n The differences to the latent_semantic_indexing_gensim are:\n - holdout data is also used\n - a Tfidf matrix is built and used to create the LSI model and also to retrieve the features instead of just a corpus to build the LSI model and\n passing each headline and body separately into the LSI model to retrieve its features (does it make a difference, since dictionary already takes\n tfidf into account?)\n - the vectors are taken fully and not just the cosinus distance between them\n \"\"\"\n from gensim import corpora, models\n\n def combine_and_tokenize_head_and_body(headlines, bodies, file_path=None):\n all_text = []\n all_text.extend(headlines)\n all_text.extend(bodies)\n if file_path != None and (os.path.exists(file_path)):\n with open(file_path, 'rb') as handle:\n return pickle.load(handle)\n\n print(\"head+body appended size should be around 100k and 19/8k: \" + str(len(bodies)))\n head_and_body_tokens = [nltk.word_tokenize(line) for line in all_text]\n\n if file_path != None:\n with open(file_path, 'wb') as handle:\n pickle.dump(head_and_body_tokens, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return head_and_body_tokens\n\n def get_features(n_topics):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n\n filename = \"lsi_gensim_test_\" + str(n_topics) + \"topics_and_test\"\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n h.extend(h_test)\n b.extend(b_test)\n head_and_body = combine_and_tokenize_head_and_body(h, b,\n file_path=features_dir + \"/\" + \"lsi_gensim_h_b_tokenized_and_test\" + \".pkl\")\n\n if (os.path.exists(features_dir + \"/\" + \"lsi_gensim_holdout_and_test\" + \".dict\")):\n print(\"dict found and load\")\n dictionary = corpora.Dictionary.load(features_dir + \"/\" + \"lsi_gensim_all_and_test\" + \".dict\")\n else:\n print(\"create new dict\")\n dictionary = corpora.Dictionary(head_and_body)\n dictionary.save(features_dir + \"/\" + \"lsi_gensim_all_and_test\" + \".dict\")\n\n if (os.path.exists(features_dir + \"/\" + filename + \".lsi\")):\n print(\"found lsi model\")\n lsi = models.LsiModel.load(features_dir + \"/\" + filename + \".lsi\")\n else:\n print(\"build corpus and tfidf corpus\")\n corpus = [dictionary.doc2bow(text) for text in head_and_body]\n tfidf = models.TfidfModel(corpus) # https://stackoverflow.com/questions/6287411/lsi-using-gensim-in-python\n corpus_tfidf = tfidf[corpus]\n\n print(\"create new lsi model\")\n lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=n_topics)\n lsi.save(features_dir + \"/\" + filename + \".lsi\")\n\n # get tfidf corpus of head and body\n corpus_train = [dictionary.doc2bow(text) for text in combine_and_tokenize_head_and_body(headlines, bodies)]\n tfidf_train = models.TfidfModel(corpus_train)\n corpus_train_tfidf = tfidf_train[corpus_train]\n\n corpus_lsi = lsi[corpus_train_tfidf]\n\n X_head = []\n X_body = []\n i = 0\n for doc in corpus_lsi:\n if i < int(len(corpus_lsi) / 2):\n X_head_vector_filled = np.zeros(n_topics, dtype=np.float64)\n for id, prob in doc:\n X_head_vector_filled[id] = prob\n X_head.append(X_head_vector_filled)\n else:\n X_body_vector_filled = np.zeros(n_topics, dtype=np.float64)\n for id, prob in doc:\n X_body_vector_filled[id] = prob\n X_body.append(X_body_vector_filled)\n i += 1\n\n X = np.concatenate([X_head, X_body], axis=1)\n\n return X\n\n n_topics = 300\n X = 
get_features(n_topics)\n\n return X\n\ndef NMF_fit_all_concat_300_and_test(headlines, bodies):\n #http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html#sphx-glr-auto-examples-applications-topics-extraction-with-nmf-lda-py\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n\n from sklearn.externals import joblib\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_all_data(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1,1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n print(\"X_all_length (w Holdout round 50k): \" + str(len(head_and_body)))\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_concat_300_and_test: complete vocabulary length=\" + str(len(list(vocab.keys()))))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return X_all, vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n vocab = pickle.load(handle)\n vectorizer_all = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n return X_all, vectorizer_all.vocabulary_\n\n def get_vocab(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_concat_300_and_test: complete vocabulary length=\" + str(len(X_all[0])))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n return pickle.load(handle)\n\n\n def get_features(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300_and_test\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".pkl\")):\n X_all, vocab = get_all_data(head_and_body)\n\n # calculates n most important topics of the bodies. Each topic contains all words but ordered by importance. 
The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n nfm = NMF(n_components=300, random_state=1, alpha=.1)\n\n print(\"NMF_fit_all_concat_300_and_test: fit NMF to all data\")\n t0 = time()\n nfm.fit_transform(X_all)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n with open(features_dir + \"/\" + filename + \".pkl\", 'wb') as handle:\n joblib.dump(nfm, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n vocab = get_vocab(head_and_body)\n with open(features_dir + \"/\" + filename + \".pkl\", 'rb') as handle:\n nfm = joblib.load(handle)\n\n\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n print(\"NMF_fit_all_concat_300_and_test: transform head and body\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n nfm_head_matrix = nfm.transform(X_train_head)\n nfm_body_matrix = nfm.transform(X_train_body)\n\n print('NMF_fit_all_concat_300_and_test: concat head and body')\n # calculate cosine distance between the body and head\n return np.concatenate([nfm_head_matrix, nfm_body_matrix], axis=1)\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n h.extend(h_test)\n b.extend(b_test)\n head_and_body = combine_head_and_body(h, b)\n\n X = get_features(head_and_body)\n\n return X\n\ndef latent_semantic_indexing_gensim_test(headlines, bodies):\n \"\"\"\n Takes all the data (holdout+test+train) and interpretes the headlines and bodies as different\n documents. Instead of combining them, they are appended. Then it tokenizes these ~50k headline-docs and ~50k body-docs,\n builds a Tfidf-Matrix out of them and creates a LSI-Model out of it. In the next step the headlines and\n bodies for the feature generation are also treated as different documents and merely appended. Also, they are tokenized and\n a Tfifd-Matrix is built. This matix is passed to the learned LSI-Model and a Matrix is being returned.\n In this matrix, each document is represented as a vector with length(topics) of (topic-id, distance of this doc to the topic).\n The probabilities are then taken as a feature vector for the document. The first half of the matrix represent the headline docs,\n the latter half represent the body docs. 
In the end, the feature vectors of the headlines get concatenated with its body feature vector.\n\n The differences to the latent_semantic_indexing_gensim are:\n - holdout data is also used\n - a Tfidf matrix is built and used to create the LSI model and also to retrieve the features instead of just a corpus to build the LSI model and\n passing each headline and body separately into the LSI model to retrieve its features (does it make a difference, since dictionary already takes\n tfidf into account?)\n - the vectors are taken fully and not just the cosinus distance between them\n \"\"\"\n from gensim import corpora, models\n\n def combine_and_tokenize_head_and_body(headlines, bodies, file_path=None):\n all_text = []\n all_text.extend(headlines)\n all_text.extend(bodies)\n if file_path != None and (os.path.exists(file_path)):\n with open(file_path, 'rb') as handle:\n return pickle.load(handle)\n\n print(\"head+body appended size should be around 100k and 19/8k: \" + str(len(bodies)))\n head_and_body_tokens = [nltk.word_tokenize(line) for line in all_text]\n\n if file_path != None:\n with open(file_path, 'wb') as handle:\n pickle.dump(head_and_body_tokens, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return head_and_body_tokens\n\n def get_features(n_topics):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n\n filename = \"lsi_gensim_test_\" + str(n_topics) + \"topics\"\n\n h, b = get_head_body_tuples(include_holdout=True)\n head_and_body = combine_and_tokenize_head_and_body(h, b,\n file_path=features_dir + \"/\" + \"lsi_gensim_h_b_tokenized\" + \".pkl\")\n\n if (os.path.exists(features_dir + \"/\" + \"lsi_gensim_holdout\" + \".dict\")):\n print(\"dict found and load\")\n dictionary = corpora.Dictionary.load(features_dir + \"/\" + \"lsi_gensim_all\" + \".dict\")\n else:\n print(\"create new dict\")\n dictionary = corpora.Dictionary(head_and_body)\n dictionary.save(features_dir + \"/\" + \"lsi_gensim_all\" + \".dict\")\n\n if (os.path.exists(features_dir + \"/\" + filename + \".lsi\")):\n print(\"found lsi model\")\n lsi = models.LsiModel.load(features_dir + \"/\" + filename + \".lsi\")\n else:\n print(\"build corpus and tfidf corpus\")\n corpus = [dictionary.doc2bow(text) for text in head_and_body]\n tfidf = models.TfidfModel(corpus) # https://stackoverflow.com/questions/6287411/lsi-using-gensim-in-python\n corpus_tfidf = tfidf[corpus]\n\n print(\"create new lsi model\")\n lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=n_topics)\n lsi.save(features_dir + \"/\" + filename + \".lsi\")\n\n # get tfidf corpus of head and body\n corpus_train = [dictionary.doc2bow(text) for text in combine_and_tokenize_head_and_body(headlines, bodies)]\n tfidf_train = models.TfidfModel(corpus_train)\n corpus_train_tfidf = tfidf_train[corpus_train]\n\n corpus_lsi = lsi[corpus_train_tfidf]\n\n X_head = []\n X_body = []\n i = 0\n for doc in corpus_lsi:\n if i < int(len(corpus_lsi) / 2):\n X_head_vector_filled = np.zeros(n_topics, dtype=np.float64)\n for id, prob in doc:\n X_head_vector_filled[id] = prob\n X_head.append(X_head_vector_filled)\n else:\n X_body_vector_filled = np.zeros(n_topics, dtype=np.float64)\n for id, prob in doc:\n X_body_vector_filled[id] = prob\n X_body.append(X_body_vector_filled)\n i += 1\n\n X = np.concatenate([X_head, X_body], axis=1)\n\n return X\n\n n_topics = 300\n X = get_features(n_topics)\n\n return X\n\ndef NMF_fit_all_concat_300(headlines, bodies):\n 
#http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html#sphx-glr-auto-examples-applications-topics-extraction-with-nmf-lda-py\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n\n from sklearn.externals import joblib\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_all_data(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1,1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n print(\"X_all_length (w Holout round 50k): \" + str(len(head_and_body)))\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_concat_300: complete vocabulary length=\" + str(len(list(vocab.keys()))))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return X_all, vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n vocab = pickle.load(handle)\n vectorizer_all = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n return X_all, vectorizer_all.vocabulary_\n\n def get_vocab(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all_concat_300: complete vocabulary length=\" + str(len(X_all[0])))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n return pickle.load(handle)\n\n\n def get_features(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all_concat_300\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".pkl\")):\n X_all, vocab = get_all_data(head_and_body)\n\n # calculates n most important topics of the bodies. Each topic contains all words but ordered by importance. 
The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n nfm = NMF(n_components=300, random_state=1, alpha=.1)\n\n print(\"NMF_fit_all_concat_300: fit NMF to all data\")\n t0 = time()\n nfm.fit_transform(X_all)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n with open(features_dir + \"/\" + filename + \".pkl\", 'wb') as handle:\n joblib.dump(nfm, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n vocab = get_vocab(head_and_body)\n with open(features_dir + \"/\" + filename + \".pkl\", 'rb') as handle:\n nfm = joblib.load(handle)\n\n\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n print(\"NMF_fit_all_concat_300: transform head and body\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n nfm_head_matrix = nfm.transform(X_train_head)\n nfm_body_matrix = nfm.transform(X_train_body)\n\n print('NMF_fit_all_concat_300: concat head and body')\n # calculate cosine distance between the body and head\n return np.concatenate([nfm_head_matrix, nfm_body_matrix], axis=1)\n\n h, b = get_head_body_tuples(include_holdout=True)\n head_and_body = combine_head_and_body(h, b)\n\n X = get_features(head_and_body)\n\n return X\n\ndef word_ngrams_concat_tf5000_l2_w_holdout(headlines, bodies):\n \"\"\"\n Simple bag of words feature extraction\n \"\"\"\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=False,\n norm=\"l2\", stop_words='english')\n X_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=False,\n norm=\"l2\", stop_words='english')\n X_body = vectorizer_body.fit_transform(bodies)\n\n X = np.concatenate([X_head.toarray(), X_body.toarray()], axis=1)\n\n return X\n\n\n vocab = create_word_ngram_vocabulary(ngram_range=(1,1), max_features=5000,\n lemmatize=False, use_idf=False, term_freq=True, norm='l2',\n include_holdout=True)\n\n X = get_features(vocab)\n\n return X\n\n\ndef word_ngrams_concat_tf5000_l2_w_holdout_and_test(headlines, bodies):\n \"\"\"\n Simple bag of words feature extraction\n \"\"\"\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n return head_and_body\n\n def get_features(vocab):\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, use_idf=True,\n norm=\"l2\", stop_words='english')\n X_body = vectorizer_body.fit_transform(bodies)\n\n X = np.concatenate([X_head.toarray(), X_body.toarray()], axis=1)\n\n return X\n\n h, b = get_head_body_tuples(include_holdout=True)\n h_test, b_test = get_head_body_tuples_test()\n\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of heads: \" + str(len(h)))\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of bodies: \" + str(len(b)))\n h.extend(h_test)\n b.extend(b_test)\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of heads after ext: \" + str(len(h)))\n print(\"word_ngrams_concat_tf5000_l2_w_holdout_and_test length of bodies after ext: \" + str(len(b)))\n\n tfidf = TfidfVectorizer(ngram_range=(1,1), 
stop_words='english', max_features=5000, use_idf=True,\n norm='l2')\n tfidf.fit_transform(combine_head_and_body(h,b))\n vocab = tfidf.vocabulary_\n\n X = get_features(vocab)\n\n return X\n\ndef NMF_fit_all(headlines, bodies):\n #http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html#sphx-glr-auto-examples-applications-topics-extraction-with-nmf-lda-py\n # https://pypi.python.org/pypi/lda on bottom see suggestions like MALLET, hca\n # https://medium.com/@aneesha/topic-modeling-with-scikit-learn-e80d33668730\n # https://www.quora.com/What-are-the-best-features-to-put-into-Latent-Dirichlet-Allocation-LDA-for-topic-modeling-of-short-text\n from sklearn.externals import joblib\n\n def combine_head_and_body(headlines, bodies):\n head_and_body = [headline + \" \" + body for i, (headline, body) in\n enumerate(zip(headlines, bodies))]\n\n return head_and_body\n\n def get_all_data(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1,1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all: complete vocabulary length=\" + str(len(list(vocab.keys()))))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return X_all, vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n vocab = pickle.load(handle)\n vectorizer_all = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n return X_all, vectorizer_all.vocabulary_\n\n def get_vocab(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".vocab\")):\n vectorizer_all = TfidfVectorizer(ngram_range=(1, 1), stop_words='english', use_idf=True, norm='l2')\n X_all = vectorizer_all.fit_transform(head_and_body)\n vocab = vectorizer_all.vocabulary_\n print(\"NMF_fit_all: complete vocabulary length=\" + str(len(X_all[0])))\n\n with open(features_dir + \"/\" + filename + \".vocab\", 'wb') as handle:\n pickle.dump(vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return vocab\n else:\n with open(features_dir + \"/\" + filename + \".vocab\", 'rb') as handle:\n return pickle.load(handle)\n\n def get_features(head_and_body):\n features_dir = \"%s/data/fnc-1/features\" % (path.dirname(path.dirname(path.dirname(path.abspath(__file__)))))\n filename = \"NMF_fit_all\"\n if not (os.path.exists(features_dir + \"/\" + filename + \".pkl\")):\n X_all, vocab = get_all_data(head_and_body)\n\n # calculates n most important topics of the bodies. Each topic contains all words but ordered by importance. 
The\n # more important topic words a body contains of a certain topic, the higher its value for this topic\n nfm = NMF(n_components=50, random_state=1, alpha=.1)\n\n print(\"NMF_fit_all: fit and transform body\")\n t0 = time()\n nfm.fit_transform(X_all)\n print(\"done in %0.3fs.\" % (time() - t0))\n\n with open(features_dir + \"/\" + filename + \".pkl\", 'wb') as handle:\n joblib.dump(nfm, handle, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n vocab = get_vocab(head_and_body)\n with open(features_dir + \"/\" + filename + \".pkl\", 'rb') as handle:\n nfm = joblib.load(handle)\n\n\n vectorizer_head = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_head = vectorizer_head.fit_transform(headlines)\n\n vectorizer_body = TfidfVectorizer(vocabulary=vocab, norm='l2')\n X_train_body = vectorizer_body.fit_transform(bodies)\n\n print(\"NMF_fit_all: transform head and body\")\n # use the lda trained for body topcis on the headlines => if the headlines and bodies share topics\n # their vectors should be similar\n nfm_head_matrix = nfm.transform(X_train_head)\n nfm_body_matrix = nfm.transform(X_train_body)\n\n print('NMF_fit_all: calculating cosine distance between head and body')\n # calculate cosine distance between the body and head\n X = []\n for i in range(len(nfm_head_matrix)):\n X_head_vector = np.array(nfm_head_matrix[i]).reshape((1, -1)) #1d array is deprecated\n X_body_vector = np.array(nfm_body_matrix[i]).reshape((1, -1))\n cos_dist = cosine_distances(X_head_vector, X_body_vector).flatten()\n X.append(cos_dist.tolist())\n return X\n\n h, b = get_head_body_tuples()\n head_and_body = combine_head_and_body(h, b)\n\n X = get_features(head_and_body)\n\n return X\n" ]
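The NMF-based feature functions in the code field above all follow the same recipe: fit a TF-IDF vectorizer and an NMF topic model on the combined headline+body corpus, project headlines and bodies into topic space separately, then either concatenate the two topic vectors or keep only their cosine distance as the feature. The sketch below shows that recipe in miniature using current scikit-learn names; the toy corpus and variable names are invented for illustration, and note that the original code relies on sklearn.externals.joblib (removed in scikit-learn 0.23) and on NMF's alpha argument (split into alpha_W/alpha_H in newer releases), so it will not run unchanged on recent versions.

    # Minimal sketch of the headline/body topic-distance feature pattern above.
    # Assumes a recent scikit-learn; the two-document corpus is purely illustrative.
    import numpy as np
    from sklearn.decomposition import NMF
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_distances

    headlines = ["Banks to cut rates", "New phone released"]
    bodies = [
        "The central bank announced it will cut interest rates next month.",
        "The company unveiled its newest phone at a press event.",
    ]

    # 1) Fit TF-IDF and NMF on the combined headline+body documents.
    combined = [h + " " + b for h, b in zip(headlines, bodies)]
    tfidf = TfidfVectorizer(stop_words="english", norm="l2")
    X_all = tfidf.fit_transform(combined)
    nmf = NMF(n_components=2, random_state=1)
    nmf.fit(X_all)

    # 2) Project headlines and bodies separately into topic space,
    #    reusing the vocabulary learned on the combined corpus.
    topics_head = nmf.transform(tfidf.transform(headlines))
    topics_body = nmf.transform(tfidf.transform(bodies))

    # 3) One cosine distance per headline/body pair is the feature value.
    features = np.array([
        cosine_distances(h.reshape(1, -1), b.reshape(1, -1)).item()
        for h, b in zip(topics_head, topics_body)
    ])
    print(features.shape)  # (2,)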
[ [ "numpy.concatenate", "numpy.std", "numpy.argsort", "numpy.load", "numpy.array" ], [ "sklearn.externals.joblib.dump", "sklearn.decomposition.NMF", "sklearn.externals.joblib.load", "pandas.DataFrame", "numpy.save", "numpy.concatenate", "sklearn.decomposition.LatentDirichletAllocation", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.metrics.pairwise.cosine_distances", "numpy.load", "numpy.array", "numpy.zeros", "sklearn.feature_extraction.text.TfidfVectorizer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
PaullMP/TensorFlowT
[ "b9b3b5b19971671fe24868273ca5274c1ec7169f", "b9b3b5b19971671fe24868273ca5274c1ec7169f" ]
[ "tensorflow/python/__init__.py", "tensorflow/contrib/metrics/python/ops/metric_ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Import core names of TensorFlow.\n\nPrograms that want to build TensorFlow Ops and Graphs without having to import\nthe constructors and utilities individually can import this file:\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\"\"\"\n\nimport ctypes\nimport importlib\nimport inspect\nimport sys\nimport traceback\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top\n\n# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated\n# python library that dynamically loads _pywrap_tensorflow.so. The\n# default mode for loading keeps all the symbol private and not\n# visible to other libraries that may be loaded. Setting the mode to\n# RTLD_GLOBAL to make the symbols visible, so that custom op libraries\n# imported using `tf.load_op_library()` can access symbols defined in\n# _pywrap_tensorflow.so.\nimport numpy as np\ntry:\n if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):\n _default_dlopen_flags = sys.getdlopenflags()\n sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)\n from tensorflow.python import pywrap_tensorflow\n sys.setdlopenflags(_default_dlopen_flags)\n else:\n # TODO(keveman,mrry): Support dynamic op loading on platforms that do not\n # use `dlopen()` for dynamic loading.\n from tensorflow.python import pywrap_tensorflow\nexcept ImportError:\n msg = \"\"\"%s\\n\\nError importing tensorflow. 
Unless you are using bazel,\nyou should not try to import tensorflow from its source directory;\nplease exit the tensorflow source tree, and relaunch your python interpreter\nfrom there.\"\"\" % traceback.format_exc()\n raise ImportError(msg)\n\n# Protocol buffers\nfrom tensorflow.core.framework.graph_pb2 import *\nfrom tensorflow.core.framework.node_def_pb2 import *\nfrom tensorflow.core.framework.summary_pb2 import *\nfrom tensorflow.core.framework.attr_value_pb2 import *\nfrom tensorflow.core.protobuf.config_pb2 import *\nfrom tensorflow.core.util.event_pb2 import *\n\n# Framework\nfrom tensorflow.python.framework.framework_lib import *\nfrom tensorflow.python.framework.versions import *\nfrom tensorflow.python.framework import errors\n\n# Session\nfrom tensorflow.python.client.client_lib import *\n\n# Ops\nfrom tensorflow.python.ops.standard_ops import *\n\n# Bring in subpackages.\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sdca_ops as sdca\nfrom tensorflow.python.ops import image_ops as image\nfrom tensorflow.python.user_ops import user_ops\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.summary import summary\n\n# Import the names from python/training.py as train.Name.\nfrom tensorflow.python.training import training as train\n\n# Sub-package for performing i/o directly instead of via ops in a graph.\nfrom tensorflow.python.lib.io import python_io\n\n# Make some application and test modules available.\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import sysconfig\nfrom tensorflow.python.platform import test\n\nfrom tensorflow.python.util.all_util import remove_undocumented\nfrom tensorflow.python.util.all_util import make_all\n\n# Import modules whose docstrings contribute, for use by remove_undocumented\n# below.\nfrom tensorflow.python.client import client_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import framework_lib\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import histogram_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import script_ops\nfrom tensorflow.python.ops import session_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import tensor_array_ops\n\n# Symbols whitelisted for export without documentation.\n# TODO(cwhipkey): review these and move to contrib, expose through\n# documentation, or remove.\n_allowed_symbols = [\n 'AttrValue',\n 'ConfigProto',\n 'DeviceSpec',\n 'Event',\n 'GPUOptions',\n 'GRAPH_DEF_VERSION',\n 'GRAPH_DEF_VERSION_MIN_CONSUMER',\n 'GRAPH_DEF_VERSION_MIN_PRODUCER',\n 'GraphDef',\n 'GraphOptions',\n 'HistogramProto',\n 'LogMessage',\n 'NameAttrList',\n 'NodeDef',\n 'OptimizerOptions',\n 'RunOptions',\n 'RunMetadata',\n 'SessionLog',\n 'Summary',\n]\n\n# The following symbols are kept for compatibility. 
It is our plan\n# to remove them in the future.\n_allowed_symbols.extend([\n 'arg_max',\n 'arg_min',\n 'create_partitioned_variables',\n 'deserialize_many_sparse',\n 'lin_space',\n 'list_diff', # Use tf.listdiff instead.\n 'parse_single_sequence_example',\n 'serialize_many_sparse',\n 'serialize_sparse',\n 'sparse_matmul', ## use tf.matmul instead.\n])\n\n# This is needed temporarily because we import it explicitly.\n_allowed_symbols.extend([\n 'platform', ## This is included by the tf.learn main template.\n 'pywrap_tensorflow',\n])\n\n# Dtypes exported by framework/dtypes.py.\n# TODO(cwhipkey): expose these through documentation.\n_allowed_symbols.extend([\n 'QUANTIZED_DTYPES',\n 'bfloat16',\n 'bfloat16_ref',\n 'bool',\n 'bool_ref',\n 'complex64',\n 'complex64_ref',\n 'complex128',\n 'complex128_ref',\n 'double',\n 'double_ref',\n 'half',\n 'half_ref',\n 'float16',\n 'float16_ref',\n 'float32',\n 'float32_ref',\n 'float64',\n 'float64_ref',\n 'int16',\n 'int16_ref',\n 'int32',\n 'int32_ref',\n 'int64',\n 'int64_ref',\n 'int8',\n 'int8_ref',\n 'qint16',\n 'qint16_ref',\n 'qint32',\n 'qint32_ref',\n 'qint8',\n 'qint8_ref',\n 'quint16',\n 'quint16_ref',\n 'quint8',\n 'quint8_ref',\n 'string',\n 'string_ref',\n 'uint16',\n 'uint16_ref',\n 'uint8',\n 'uint8_ref',\n 'resource',\n 'resource_ref',\n])\n\n# Export modules and constants.\n_allowed_symbols.extend([\n 'app',\n 'compat',\n 'errors',\n 'flags',\n 'gfile',\n 'image',\n 'logging',\n 'newaxis',\n 'nn',\n 'python_io',\n 'resource_loader',\n 'sdca',\n 'summary',\n 'sysconfig',\n 'test',\n 'train',\n 'user_ops',\n])\n\n# Variables framework.versions:\n_allowed_symbols.extend([\n 'VERSION',\n 'GIT_VERSION',\n 'COMPILER_VERSION',\n])\n\n# Remove all extra symbols that don't have a docstring or are not explicitly\n# referenced in the whitelist.\nremove_undocumented(__name__, _allowed_symbols,\n [framework_lib, array_ops, client_lib, check_ops,\n compat, constant_op, control_flow_ops, functional_ops,\n histogram_ops, io_ops, math_ops, nn, script_ops,\n session_ops, sparse_ops, state_ops, string_ops,\n summary, tensor_array_ops, train])\n\n# Special dunders that we choose to export:\n_exported_dunders = set([\n '__version__',\n '__git_version__',\n '__compiler_version__',\n])\n\n# Expose symbols minus dunders, unless they are whitelisted above.\n# This is necessary to export our dunders.\n__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains metric-computing operations on streamed tensors.\n\nModule documentation, including \"@@\" callouts, should be put in\nthird_party/tensorflow/contrib/metrics/__init__.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.framework import deprecated\nfrom tensorflow.contrib.framework import deprecated_args\nfrom tensorflow.contrib.framework import tensor_util\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.metrics.python.ops import confusion_matrix_ops\nfrom tensorflow.contrib.metrics.python.ops import set_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\n\n\nIGNORE_MASK_DATE = '2016-10-19'\nIGNORE_MASK_INSTRUCTIONS = (\n '`ignore_mask` is being deprecated. Instead use `weights` with values 0.0 '\n 'and 1.0 to mask values. 
For example, `weights=tf.logical_not(mask)`.')\n\n\ndef _mask_weights(mask=None, weights=None):\n \"\"\"Mask a given set of weights.\n\n Elements are included when the corresponding `mask` element is `False`, and\n excluded otherwise.\n\n Args:\n mask: An optional, `bool` `Tensor`.\n weights: An optional `Tensor` whose shape matches `mask` if `mask` is not\n `None`.\n\n Returns:\n Masked weights if `mask` and `weights` are not `None`, weights equivalent to\n `mask` if `weights` is `None`, and otherwise `weights`.\n\n Raises:\n ValueError: If `weights` and `mask` are not `None` and have mismatched\n shapes.\n \"\"\"\n if mask is not None:\n check_ops.assert_type(mask, dtypes.bool)\n if weights is None:\n weights = array_ops.ones_like(mask, dtype=dtypes.float32)\n weights = math_ops.cast(math_ops.logical_not(mask), weights.dtype) * weights\n\n return weights\n\n\ndef _safe_div(numerator, denominator, name):\n \"\"\"Divides two values, returning 0 if the denominator is <= 0.\n\n Args:\n numerator: A real `Tensor`.\n denominator: A real `Tensor`, with dtype matching `numerator`.\n name: Name for the returned op.\n\n Returns:\n 0 if `denominator` <= 0, else `numerator` / `denominator`\n \"\"\"\n return math_ops.select(\n math_ops.greater(denominator, 0),\n math_ops.truediv(numerator, denominator),\n 0,\n name=name)\n\n\ndef _safe_scalar_div(numerator, denominator, name):\n \"\"\"Divides two values, returning 0 if the denominator is 0.\n\n Args:\n numerator: A scalar `float64` `Tensor`.\n denominator: A scalar `float64` `Tensor`.\n name: Name for the returned op.\n\n Returns:\n 0 if `denominator` == 0, else `numerator` / `denominator`\n \"\"\"\n numerator.get_shape().with_rank_at_most(1)\n denominator.get_shape().with_rank_at_most(1)\n return control_flow_ops.cond(\n math_ops.equal(\n array_ops.constant(0.0, dtype=dtypes.float64), denominator),\n lambda: array_ops.constant(0.0, dtype=dtypes.float64),\n lambda: math_ops.div(numerator, denominator),\n name=name)\n\n\ndef _create_local(name, shape, collections=None, validate_shape=True,\n dtype=dtypes.float32):\n \"\"\"Creates a new local variable.\n\n Args:\n name: The name of the new or existing variable.\n shape: Shape of the new or existing variable.\n collections: A list of collection names to which the Variable will be added.\n validate_shape: Whether to validate the shape of the variable.\n dtype: Data type of the variables.\n\n Returns:\n The created variable.\n \"\"\"\n # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES\n collections = list(collections or [])\n collections += [ops.GraphKeys.LOCAL_VARIABLES]\n return variables.Variable(\n initial_value=array_ops.zeros(shape, dtype=dtype),\n name=name,\n trainable=False,\n collections=collections,\n validate_shape=validate_shape)\n\n\ndef _count_condition(values, weights=None, metrics_collections=None,\n updates_collections=None):\n \"\"\"Sums the weights of cases where the given values are True.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n values: A `bool` `Tensor` of arbitrary size.\n weights: An optional `Tensor` whose shape is broadcastable to `values`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n check_ops.assert_type(values, dtypes.bool)\n count = _create_local('count', shape=[])\n\n values = math_ops.to_float(values)\n if weights is not None:\n weights = math_ops.to_float(weights)\n values = math_ops.mul(values, weights)\n\n value_tensor = array_ops.identity(count)\n update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, value_tensor)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return value_tensor, update_op\n\n\ndef _streaming_true_positives(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of true_positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(\n name, 'true_positives', [predictions, labels]):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),\n math_ops.equal(predictions, 1))\n return _count_condition(is_true_positive, weights, metrics_collections,\n updates_collections)\n\n\ndef _streaming_false_positives(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Sum the weights of false positives.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(\n name, 'false_positives', [predictions, labels]):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),\n math_ops.equal(predictions, 1))\n return _count_condition(is_false_positive, weights, metrics_collections,\n updates_collections)\n\n\ndef _streaming_false_negatives(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the total number of false positives.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary\n dimensions.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n value_tensor: A tensor representing the current value of the metric.\n update_op: An operation that accumulates the error from a batch of data.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_scope(\n name, 'false_negatives', [predictions, labels]):\n\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),\n math_ops.equal(predictions, 0))\n return _count_condition(is_false_negative, weights, metrics_collections,\n updates_collections)\n\n\ndef _broadcast_weights(weights, values):\n \"\"\"Broadcast `weights` to the same shape as `values`.\n\n This returns a version of `weights` following the same broadcast rules as\n `mul(weights, values)`. 
When computing a weighted average, use this function\n to broadcast `weights` before summing them; e.g.,\n `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.\n\n Args:\n weights: `Tensor` whose shape is broadcastable to `values`.\n values: `Tensor` of any shape.\n\n Returns:\n `weights` broadcast to `values` shape.\n \"\"\"\n weights_shape = weights.get_shape()\n values_shape = values.get_shape()\n if (weights_shape.is_fully_defined() and\n values_shape.is_fully_defined() and\n weights_shape.is_compatible_with(values_shape)):\n return weights\n return math_ops.mul(\n weights, array_ops.ones_like(values), name='broadcast_weights')\n\n\ndef streaming_mean(values, weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes the (weighted) mean of the given values.\n\n The `streaming_mean` function creates two local variables, `total` and `count`\n that are used to compute the average of `values`. This average is ultimately\n returned as `mean` which is an idempotent operation that simply divides\n `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: An optional `Tensor` whose shape is broadcastable to `values`.\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A tensor representing the current mean, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'mean', [values, weights]):\n values = math_ops.to_float(values)\n\n total = _create_local('total', shape=[])\n count = _create_local('count', shape=[])\n\n if weights is not None:\n weights = math_ops.to_float(weights)\n values = math_ops.mul(values, weights)\n num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))\n else:\n num_values = math_ops.to_float(array_ops.size(values))\n\n total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))\n count_compute_op = state_ops.assign_add(count, num_values)\n\n mean = _safe_div(total, count, 'value')\n with ops.control_dependencies([total_compute_op, count_compute_op]):\n update_op = _safe_div(total, count, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean, update_op\n\n\ndef streaming_mean_tensor(values, weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes the element-wise (weighted) mean of the given tensors.\n\n In contrast to the `streaming_mean` function which returns a scalar with the\n mean, this function returns an average tensor with the same shape as the\n input 
tensors.\n\n The `streaming_mean_tensor` function creates two local variables,\n `total_tensor` and `count_tensor` that are used to compute the average of\n `values`. This average is ultimately returned as `mean` which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean`.\n `update_op` increments `total` with the reduced sum of the product of `values`\n and `weights`, and it increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n values: A `Tensor` of arbitrary dimensions.\n weights: An optional `Tensor` whose shape is broadcastable to `values`.\n metrics_collections: An optional list of collections that `mean`\n should be added to.\n updates_collections: An optional list of collections that `update_op`\n should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean: A float tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_value`.\n\n Raises:\n ValueError: If `weights` is not `None` and its shape doesn't match `values`,\n or if either `metrics_collections` or `updates_collections` are not a list\n or tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'mean', [values, weights]):\n total = _create_local('total_tensor', shape=values.get_shape())\n count = _create_local('count_tensor', shape=values.get_shape())\n\n num_values = array_ops.ones_like(values)\n if weights is not None:\n weights = math_ops.to_float(weights)\n values = math_ops.mul(values, weights)\n num_values = math_ops.mul(num_values, weights)\n\n total_compute_op = state_ops.assign_add(total, values)\n count_compute_op = state_ops.assign_add(count, num_values)\n\n def compute_mean(total, count, name):\n non_zero_count = math_ops.maximum(count,\n array_ops.ones_like(count),\n name=name)\n return math_ops.truediv(total, non_zero_count, name=name)\n\n mean = compute_mean(total, count, 'value')\n with ops.control_dependencies([total_compute_op, count_compute_op]):\n update_op = compute_mean(total, count, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean, update_op\n\n\ndef streaming_accuracy(predictions, labels, weights=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Calculates how often `predictions` matches `labels`.\n\n The `streaming_accuracy` function creates two local variables, `total` and\n `count` that are used to compute the frequency with which `predictions`\n matches `labels`. This frequency is ultimately returned as `accuracy`: an\n idempotent operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `accuracy`.\n Internally, an `is_correct` operation computes a `Tensor` with elements 1.0\n where the corresponding elements of `predictions` and `labels` match and 0.0\n otherwise. 
Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `is_correct`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: The predicted values, a `Tensor` of any shape.\n labels: The ground truth values, a `Tensor` whose shape matches\n `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `accuracy` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n accuracy: A tensor representing the accuracy, the value of `total` divided\n by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `accuracy`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n if labels.dtype != predictions.dtype:\n predictions = math_ops.cast(predictions, labels.dtype)\n is_correct = math_ops.to_float(math_ops.equal(predictions, labels))\n return streaming_mean(is_correct, weights, metrics_collections,\n updates_collections, name or 'accuracy')\n\n\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_precision(predictions, labels, ignore_mask=None, weights=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Computes the precision of the predictions with respect to the labels.\n\n The `streaming_precision` function creates two local variables,\n `true_positives` and `false_positives`, that are used to compute the\n precision. This value is ultimately returned as `precision`, an idempotent\n operation that simply divides `true_positives` by the sum of `true_positives`\n and `false_positives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`. `update_op` weights each prediction by the corresponding value in\n `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, `bool` `Tensor` whose shape matches `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `precision` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: Scalar float `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`, or\n if `weights` is not `None` and its shape doesn't match `predictions`, or\n if either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(\n name, 'precision', [predictions, labels]):\n\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n weights = _mask_weights(ignore_mask, weights)\n true_positives, true_positives_update_op = _streaming_true_positives(\n predictions, labels, weights, metrics_collections=None,\n updates_collections=None, name=None)\n false_positives, false_positives_update_op = _streaming_false_positives(\n predictions, labels, weights, metrics_collections=None,\n updates_collections=None, name=None)\n\n def compute_precision(name):\n return math_ops.select(\n math_ops.greater(true_positives + false_positives, 0),\n math_ops.div(true_positives, true_positives + false_positives),\n 0,\n name)\n\n precision = compute_precision('value')\n with ops.control_dependencies([true_positives_update_op,\n false_positives_update_op]):\n update_op = compute_precision('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, precision)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return precision, update_op\n\n\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_recall(predictions, labels, ignore_mask=None, weights=None,\n metrics_collections=None, updates_collections=None,\n name=None):\n \"\"\"Computes the recall of the predictions with respect to the labels.\n\n The `streaming_recall` function creates two local variables, `true_positives`\n and `false_negatives`, that are used to compute the recall. This value is\n ultimately returned as `recall`, an idempotent operation that simply divides\n `true_positives` by the sum of `true_positives` and `false_negatives`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` that updates these variables and returns the `recall`. `update_op`\n weights each prediction by the corresponding value in `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.\n labels: The ground truth values, a `bool` `Tensor` whose dimensions must\n match `predictions`.\n ignore_mask: An optional, `bool` `Tensor` whose shape matches `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `recall` should\n be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: Scalar float `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`, or\n if `weights` is not `None` and its shape doesn't match `predictions`, or\n if either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'recall', [predictions, labels]):\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n weights = _mask_weights(ignore_mask, weights)\n true_positives, true_positives_update_op = _streaming_true_positives(\n predictions, labels, weights, metrics_collections=None,\n updates_collections=None, name=None)\n false_negatives, false_negatives_update_op = _streaming_false_negatives(\n predictions, labels, weights, metrics_collections=None,\n updates_collections=None, name=None)\n\n def compute_recall(true_positives, false_negatives, name):\n return math_ops.select(\n math_ops.greater(true_positives + false_negatives, 0),\n math_ops.div(true_positives, true_positives + false_negatives),\n 0,\n name)\n\n recall = compute_recall(true_positives, false_negatives, 'value')\n with ops.control_dependencies([true_positives_update_op,\n false_negatives_update_op]):\n update_op = compute_recall(true_positives, false_negatives, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, recall)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return recall, update_op\n\n\ndef _tp_fn_tn_fp(predictions, labels, thresholds, weights=None):\n \"\"\"Computes true_positives, false_negatives, true_negatives, false_positives.\n\n The `_tp_fn_tn_fp` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives`.\n `true_positive[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `false_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `True`.\n `true_negatives[i]` is defined as the total weight of values in `predictions`\n at most `thresholds[i]` whose corresponding entry in `labels` is `False`.\n `false_positives[i]` is defined as the total weight of values in `predictions`\n above `thresholds[i]` whose corresponding entry in `labels` is `False`.\n\n For estimation of these metrics 
over a stream of data, for each metric the\n function respectively creates an `update_op` operation that updates the\n variable and returns its value.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast\n to `bool`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n\n Returns:\n true_positive: A variable of shape [len(thresholds)].\n false_negative: A variable of shape [len(thresholds)].\n true_negatives: A variable of shape [len(thresholds)].\n false_positives: A variable of shape [len(thresholds)].\n true_positives_update_op: An operation that increments the `true_positives`.\n false_negative_update_op: An operation that increments the `false_negative`.\n true_negatives_update_op: An operation that increments the `true_negatives`.\n false_positives_update_op: An operation that increments the\n `false_positives`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n num_thresholds = len(thresholds)\n\n # Reshape predictions and labels.\n predictions_2d = array_ops.reshape(predictions, [-1, 1])\n labels_2d = array_ops.reshape(\n math_ops.cast(labels, dtype=dtypes.bool), [1, -1])\n\n # Use static shape if known.\n num_predictions = predictions_2d.get_shape().as_list()[0]\n\n # Otherwise use dynamic shape.\n if num_predictions is None:\n num_predictions = array_ops.shape(predictions_2d)[0]\n thresh_tiled = array_ops.tile(\n array_ops.expand_dims(array_ops.constant(thresholds), [1]),\n array_ops.pack([1, num_predictions]))\n\n # Tile the predictions after thresholding them across different thresholds.\n pred_is_pos = math_ops.greater(\n array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),\n thresh_tiled)\n pred_is_neg = math_ops.logical_not(pred_is_pos)\n\n # Tile labels by number of thresholds\n label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])\n label_is_neg = math_ops.logical_not(label_is_pos)\n\n true_positives = _create_local('true_positives', shape=[num_thresholds])\n false_negatives = _create_local('false_negatives', shape=[num_thresholds])\n true_negatives = _create_local('true_negatives', shape=[num_thresholds])\n false_positives = _create_local('false_positives', shape=[num_thresholds])\n\n is_true_positive = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_pos))\n is_false_negative = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_neg))\n is_false_positive = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_pos))\n is_true_negative = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_neg))\n\n if weights is not None:\n weights = math_ops.to_float(weights)\n weights_tiled = array_ops.tile(array_ops.reshape(\n _broadcast_weights(weights, predictions), [1, -1]), [num_thresholds, 1])\n thresh_tiled.get_shape().assert_is_compatible_with(\n weights_tiled.get_shape())\n is_true_positive *= weights_tiled\n is_false_negative *= weights_tiled\n is_false_positive *= weights_tiled\n is_true_negative *= 
weights_tiled\n\n true_positives_update_op = state_ops.assign_add(\n true_positives, math_ops.reduce_sum(is_true_positive, 1))\n false_negatives_update_op = state_ops.assign_add(\n false_negatives, math_ops.reduce_sum(is_false_negative, 1))\n true_negatives_update_op = state_ops.assign_add(\n true_negatives, math_ops.reduce_sum(is_true_negative, 1))\n false_positives_update_op = state_ops.assign_add(\n false_positives, math_ops.reduce_sum(is_false_positive, 1))\n\n return (true_positives, false_negatives, true_negatives, false_positives,\n true_positives_update_op, false_negatives_update_op,\n true_negatives_update_op, false_positives_update_op)\n\n\ndef streaming_auc(predictions, labels, weights=None, num_thresholds=200,\n metrics_collections=None, updates_collections=None,\n curve='ROC', name=None):\n \"\"\"Computes the approximate AUC via a Riemann sum.\n\n The `streaming_auc` function creates four local variables, `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` that are used to\n compute the AUC. To discretize the AUC curve, a linearly spaced set of\n thresholds is used to compute pairs of recall and precision values. The area\n under the ROC-curve is therefore computed using the height of the recall\n values by the false positive rate, while the area under the PR-curve is the\n computed using the height of the precision values by the recall.\n\n This value is ultimately returned as `auc`, an idempotent operation that\n computes the area under a discretized curve of precision versus recall values\n (computed using the aforementioned variables). The `num_thresholds` variable\n controls the degree of discretization with larger numbers of thresholds more\n closely approximating the true AUC.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `auc`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n num_thresholds: The number of thresholds to use when discretizing the roc\n curve.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n curve: Specifies the name of the curve to be computed, 'ROC' [default] or\n 'PR' for the Precision-Recall-curve.\n name: An optional variable_scope name.\n\n Returns:\n auc: A scalar tensor representing the current area-under-curve.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `auc`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'auc', [predictions, labels]):\n if curve != 'ROC' and curve != 'PR':\n raise ValueError('curve must be either ROC or PR, %s unknown' %\n (curve))\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)\n for i in range(num_thresholds-2)]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,\n fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)\n\n # Add epsilons to avoid dividing by 0.\n epsilon = 1.0e-6\n assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds\n\n def compute_auc(tp, fn, tn, fp, name):\n \"\"\"Computes the roc-auc or pr-auc based on confusion counts.\"\"\"\n recall = math_ops.div(tp + epsilon, tp + fn + epsilon)\n if curve == 'ROC':\n fp_rate = math_ops.div(fp, fp + tn + epsilon)\n x = fp_rate\n y = recall\n else: # curve == 'PR'.\n precision = math_ops.div(tp + epsilon, tp + fp + epsilon)\n x = recall\n y = precision\n return math_ops.reduce_sum(math_ops.mul(\n x[:num_thresholds - 1] - x[1:],\n (y[:num_thresholds - 1] + y[1:]) / 2.), name=name)\n\n # sum up the areas of all the trapeziums\n auc = compute_auc(tp, fn, tn, fp, 'value')\n update_op = compute_auc(\n tp_update_op, fn_update_op, tn_update_op, fp_update_op, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, auc)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return auc, update_op\n\n\ndef streaming_specificity_at_sensitivity(\n predictions, labels, sensitivity, weights=None, num_thresholds=200,\n metrics_collections=None, updates_collections=None, name=None):\n \"\"\"Computes the the specificity at a given sensitivity.\n\n The `streaming_specificity_at_sensitivity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the specificity at the given\n sensitivity value. 
The threshold for the given sensitivity value is computed\n and used to evaluate the corresponding specificity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `specificity`. `update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n sensitivity: A scalar value in range `[0, 1]`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n num_thresholds: The number of thresholds to use for matching the given\n sensitivity.\n metrics_collections: An optional list of collections that `specificity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n specificity: A scalar tensor representing the specificity at the given\n `specificity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `specificity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `sensitivity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n \"\"\"\n if sensitivity < 0 or sensitivity > 1:\n raise ValueError('`sensitivity` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'specificity_at_sensitivity',\n [predictions, labels]):\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)\n for i in range(num_thresholds-2)]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]\n\n (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,\n fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)\n\n assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds\n\n def compute_specificity_at_sensitivity(name):\n \"\"\"Computes the specificity at the given sensitivity.\n\n Args:\n name: The name of the operation.\n\n Returns:\n The specificity using the aggregated values.\n \"\"\"\n sensitivities = math_ops.div(tp, tp + fn + kepsilon)\n\n # We'll need to use this trick until tf.argmax allows us to specify\n # whether we should use the first or last index in case of ties.\n min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))\n indices_at_minval = math_ops.equal(\n math_ops.abs(sensitivities - sensitivity), min_val)\n indices_at_minval = math_ops.to_int64(indices_at_minval)\n indices_at_minval = math_ops.cumsum(indices_at_minval)\n tf_index = math_ops.argmax(indices_at_minval, 0)\n tf_index = math_ops.cast(tf_index, dtypes.int32)\n\n # Now, we have the implicit threshold, so compute the specificity:\n return math_ops.div(tn[tf_index],\n tn[tf_index] + fp[tf_index] + kepsilon,\n name)\n\n specificity = 
compute_specificity_at_sensitivity('value')\n with ops.control_dependencies(\n [tp_update_op, fn_update_op, tn_update_op, fp_update_op]):\n update_op = compute_specificity_at_sensitivity('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, specificity)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return specificity, update_op\n\n\ndef streaming_sensitivity_at_specificity(\n predictions, labels, specificity, weights=None, num_thresholds=200,\n metrics_collections=None, updates_collections=None, name=None):\n \"\"\"Computes the the specificity at a given sensitivity.\n\n The `streaming_sensitivity_at_specificity` function creates four local\n variables, `true_positives`, `true_negatives`, `false_positives` and\n `false_negatives` that are used to compute the sensitivity at the given\n specificity value. The threshold for the given specificity value is computed\n and used to evaluate the corresponding sensitivity.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,\n `false_positives` and `false_negatives` counts with the weight of each case\n found in the `predictions` and `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n For additional information about specificity and sensitivity, see the\n following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n specificity: A scalar value in range `[0, 1]`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n num_thresholds: The number of thresholds to use for matching the given\n specificity.\n metrics_collections: An optional list of collections that `sensitivity`\n should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n sensitivity: A scalar tensor representing the sensitivity at the given\n `specificity` value.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables\n appropriately and whose value matches `sensitivity`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n `specificity` is not between 0 and 1, or if either `metrics_collections`\n or `updates_collections` are not a list or tuple.\n \"\"\"\n if specificity < 0 or specificity > 1:\n raise ValueError('`specificity` must be in the range [0, 1].')\n\n with variable_scope.variable_scope(name, 'sensitivity_at_specificity',\n [predictions, labels]):\n kepsilon = 1e-7 # to account for floating point imprecisions\n thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)\n for i in range(num_thresholds-2)]\n thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]\n\n (tp, fn, tn, fp, tp_update_op, fn_update_op, tn_update_op,\n fp_update_op) = _tp_fn_tn_fp(predictions, labels, thresholds, weights)\n assert array_ops.squeeze(fp).get_shape().as_list()[0] == num_thresholds\n\n def compute_sensitivity_at_specificity(name):\n specificities = math_ops.div(tn, tn + fp + kepsilon)\n tf_index = 
math_ops.argmin(math_ops.abs(specificities - specificity), 0)\n tf_index = math_ops.cast(tf_index, dtypes.int32)\n\n # Now, we have the implicit threshold, so compute the sensitivity:\n return math_ops.div(tp[tf_index],\n tp[tf_index] + fn[tf_index] + kepsilon,\n name)\n\n sensitivity = compute_sensitivity_at_specificity('value')\n with ops.control_dependencies(\n [tp_update_op, fn_update_op, tn_update_op, fp_update_op]):\n update_op = compute_sensitivity_at_specificity('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, sensitivity)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return sensitivity, update_op\n\n\ndef streaming_precision_at_thresholds(predictions, labels, thresholds,\n weights=None,\n metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes precision values for different `thresholds` on `predictions`.\n\n The `streaming_precision_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `precision[i]` is defined as the total\n weight of values in `predictions` above `thresholds[i]` whose corresponding\n entry in `labels` is `True`, divided by the total weight of values in\n `predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +\n false_positives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `auc` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n precision: A float tensor of shape [len(thresholds)].\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `precision`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'precision_at_thresholds',\n [predictions, labels]):\n\n # TODO(nsilberman): Replace with only tp and fp, this results in unnecessary\n # variable creation. 
b/30842882\n (true_positives, _, _, false_positives, true_positives_compute_op, _, _,\n false_positives_compute_op,) = _tp_fn_tn_fp(\n predictions, labels, thresholds, weights)\n\n # avoid division by zero\n epsilon = 1e-7\n def compute_precision(name):\n precision = math_ops.div(true_positives,\n epsilon + true_positives + false_positives,\n name='precision_' + name)\n return precision\n\n precision = compute_precision('value')\n with ops.control_dependencies([true_positives_compute_op,\n false_positives_compute_op]):\n update_op = compute_precision('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, precision)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return precision, update_op\n\n\ndef streaming_recall_at_thresholds(predictions, labels, thresholds,\n weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes various recall values for different `thresholds` on `predictions`.\n\n The `streaming_recall_at_thresholds` function creates four local variables,\n `true_positives`, `true_negatives`, `false_positives` and `false_negatives`\n for various values of thresholds. `recall[i]` is defined as the total weight\n of values in `predictions` above `thresholds[i]` whose corresponding entry in\n `labels` is `True`, divided by the total weight of `True` values in `labels`\n (`true_positives[i] / (true_positives[i] + false_negatives[i])`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `recall`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n labels: A `bool` `Tensor` whose shape matches `predictions`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `recall` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n recall: A float tensor of shape [len(thresholds)].\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `recall`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'recall_at_thresholds',\n [predictions, labels]):\n (true_positives, false_negatives, _, _, true_positives_compute_op,\n false_negatives_compute_op, _, _,) = _tp_fn_tn_fp(\n predictions, labels, thresholds, weights)\n\n # avoid division by zero\n epsilon = 1e-7\n def compute_recall(name):\n recall = math_ops.div(true_positives,\n epsilon + true_positives + false_negatives,\n name='recall_' + name)\n return recall\n\n recall = compute_recall('value')\n with ops.control_dependencies([true_positives_compute_op,\n false_negatives_compute_op]):\n update_op = compute_recall('update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, recall)\n\n if updates_collections:\n 
ops.add_to_collections(updates_collections, update_op)\n\n return recall, update_op\n\n\ndef _at_k_name(name, k=None, class_id=None):\n if k is not None:\n name = '%s_at_%d' % (name, k)\n else:\n name = '%s_at_k' % (name)\n if class_id is not None:\n name = '%s_class%d' % (name, class_id)\n return name\n\n\n@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '\n 'and reshape labels from [batch_size] to [batch_size, 1].')\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_recall_at_k(predictions, labels, k, ignore_mask=None,\n weights=None, metrics_collections=None,\n updates_collections=None, name=None):\n \"\"\"Computes the recall@k of the predictions with respect to dense labels.\n\n The `streaming_recall_at_k` function creates two local variables, `total` and\n `count`, that are used to compute the recall@k frequency. This frequency is\n ultimately returned as `recall_at_<k>`: an idempotent operation that simply\n divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with\n shape [batch_size] whose elements indicate whether or not the corresponding\n label is in the top `k` `predictions`. Then `update_op` increments `total`\n with the reduced sum of `weights` where `in_top_k` is `True`, and it\n increments `count` with the reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: A floating point tensor of dimension [batch_size, num_classes]\n labels: A tensor of dimension [batch_size] whose type is in `int32`,\n `int64`.\n k: The number of top elements to look at for computing recall.\n ignore_mask: An optional, `bool` `Tensor` whose shape matches `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `recall_at_k`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n recall_at_k: A tensor representing the recall@k, the fraction of labels\n which fall into the top `k` predictions.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `recall_at_k`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`, or\n if `weights` is not `None` and its shape doesn't match `predictions`, or\n if either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))\n return streaming_mean(in_top_k,\n _mask_weights(ignore_mask, weights),\n metrics_collections,\n updates_collections,\n name or _at_k_name('recall', k))\n\n\n# TODO(ptucker): Validate range of values in labels?\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_sparse_recall_at_k(predictions,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes recall@k of the predictions with respect to sparse labels.\n\n If 
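# A small numpy sketch of the in_top_k check that streaming_recall_at_k
# averages; the logits and dense labels below are made-up illustration data.
import numpy as np

predictions = np.array([[0.2, 0.5, 0.3],
                        [0.7, 0.1, 0.2]])      # [batch_size, num_classes]
labels = np.array([1, 2])                      # dense labels, [batch_size]
k = 2

top_k = np.argsort(-predictions, axis=1)[:, :k]
in_top_k = np.array([labels[i] in top_k[i] for i in range(len(labels))],
                    dtype=float)
recall_at_k = in_top_k.mean()                  # total / count with unit weights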
`class_id` is specified, we calculate recall by considering only the\n entries in the batch for which `class_id` is in the label, and computing\n the fraction of them for which `class_id` is in the top-k `predictions`.\n If `class_id` is not specified, we'll calculate recall as how often on\n average a class among the labels of a batch entry is in the top-k\n `predictions`.\n\n `streaming_sparse_recall_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute\n the recall_at_k frequency. This frequency is ultimately returned as\n `recall_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` +\n `false_negative_at_<k>`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false negatives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_negative_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.\n Values should be in range [0, num_classes), where num_classes is the last\n dimension of `predictions`. Values outside this range always count\n towards `false_negative_at_<k>`.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If class_id is outside this range, the method returns NAN.\n ignore_mask: An optional, `bool` `Tensor` whose shape is broadcastable to\n the the first [D1, ... DN] dimensions of `predictions` and `labels`.\n weights: An optional `Tensor` whose shape is broadcastable to the the first\n [D1, ... 
DN] dimensions of `predictions` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n recall: Scalar `float64` `Tensor` with the value of `true_positives` divided\n by the sum of `true_positives` and `false_negatives`.\n update_op: `Operation` that increments `true_positives` and\n `false_negatives` variables appropriately, and whose value matches\n `recall`.\n\n Raises:\n ValueError: If `ignore_mask` is not `None` and its shape doesn't match\n `predictions`, or if `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n default_name = _at_k_name('recall', k, class_id=class_id)\n with ops.name_scope(name, default_name, (predictions, labels)) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n top_k_idx = math_ops.to_int64(top_k_idx)\n weights = _mask_weights(ignore_mask, weights)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n weights=weights)\n fn, fn_update = _streaming_sparse_false_negative_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n weights=weights)\n\n metric = math_ops.div(tp, math_ops.add(tp, fn), name=scope)\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fn_update), name='update')\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\ndef _streaming_sparse_precision_at_k(top_k_idx,\n labels,\n k=None,\n class_id=None,\n ignore_mask=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the top-k indices with respect to sparse labels.\n\n This method contains the code shared by streaming_sparse_precision_at_k and\n streaming_sparse_precision_at_top_k. Refer to those methods for more details.\n\n Args:\n top_k_idx: Integer `Tensor` with shape [D1, ... DN, k] where\n N >= 1. Commonly, N=1 and top_k_idx has shape [batch size, k].\n The final dimension contains the indices of top-k labels. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric or `None`. Only used for default op name.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n ignore_mask: An optional, `bool` `Tensor` whose shape is broadcastable to\n the the first [D1, ... DN] dimensions of `predictions` and `labels`.\n weights: An optional `Tensor` whose shape is broadcastable to the the first\n [D1, ... 
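# A set-based sketch of the recall@k counting described above for
# streaming_sparse_recall_at_k with class_id=None; the ids below are made-up.
top_k_idx = [{1, 2}, {0, 2}]                   # top-k predicted class ids per row
labels = [{2}, {1, 2}]                         # sparse label ids per row

tp = sum(len(pred & lab) for pred, lab in zip(top_k_idx, labels))
fn = sum(len(lab - pred) for pred, lab in zip(top_k_idx, labels))
recall_at_k = tp / (tp + fn)                   # 2 / (2 + 1) for these rows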
DN] dimensions of `predictions` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of the metric and of the enclosing scope.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `ignore_mask` is not `None` and its shape doesn't match\n `predictions`, or if `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n top_k_idx = math_ops.to_int64(top_k_idx)\n weights = _mask_weights(ignore_mask, weights)\n tp, tp_update = _streaming_sparse_true_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n weights=weights)\n fp, fp_update = _streaming_sparse_false_positive_at_k(\n predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id,\n weights=weights)\n\n metric = math_ops.div(tp, math_ops.add(tp, fp), name=name)\n update = math_ops.div(\n tp_update, math_ops.add(tp_update, fp_update), name='update')\n if metrics_collections:\n ops.add_to_collections(metrics_collections, metric)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n return metric, update\n\n\n# TODO(ptucker): Validate range of values in labels?\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_sparse_precision_at_k(predictions,\n labels,\n k,\n class_id=None,\n ignore_mask=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of the predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate precision by considering only the\n entries in the batch for which `class_id` is in the top-k highest\n `predictions`, and computing the fraction of them for which `class_id` is\n indeed a correct label.\n If `class_id` is not specified, we'll calculate precision as how often on\n average a class among the top-k classes with the highest predicted values\n of a batch entry is correct and can be found in the label for that entry.\n\n `streaming_sparse_precision_at_k` creates two local variables,\n `true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute\n the precision@k frequency. This frequency is ultimately returned as\n `precision_at_<k>`: an idempotent operation that simply divides\n `true_positive_at_<k>` by total (`true_positive_at_<k>` +\n `false_positive_at_<k>`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_positive_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... 
DN, num_classes] where\n N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].\n The final dimension contains the logit values for each class. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes], where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n ignore_mask: An optional, `bool` `Tensor` whose shape is broadcastable to\n the the first [D1, ... DN] dimensions of `predictions` and `labels`.\n weights: An optional `Tensor` whose shape is broadcastable to the the first\n [D1, ... DN] dimensions of `predictions` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `ignore_mask` is not `None` and its shape doesn't match\n `predictions`, or if `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n \"\"\"\n default_name = _at_k_name('precision', k, class_id=class_id)\n with ops.name_scope(name, default_name,\n (predictions, labels, ignore_mask, weights)) as scope:\n _, top_k_idx = nn.top_k(predictions, k)\n return _streaming_sparse_precision_at_k(\n top_k_idx=top_k_idx,\n labels=labels,\n k=k,\n class_id=class_id,\n ignore_mask=ignore_mask,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=scope)\n\n\n# TODO(ptucker): Validate range of values in labels?\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_sparse_precision_at_top_k(top_k_predictions,\n labels,\n class_id=None,\n ignore_mask=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes precision@k of top-k predictions with respect to sparse labels.\n\n If `class_id` is specified, we calculate precision by considering only the\n entries in the batch for which `class_id` is in the top-k highest\n `predictions`, and computing the fraction of them for which `class_id` is\n indeed a correct label.\n If `class_id` is not specified, we'll calculate precision as how often on\n average a class among the top-k classes with the highest predicted values\n of a batch entry is correct and can be found in the label for that entry.\n\n `streaming_sparse_precision_at_top_k` creates two local variables,\n `true_positive_at_k` and `false_positive_at_k`, that are used to compute\n the precision@k frequency. 
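# The same made-up rows as a sketch of the precision@k counts described above:
# false positives are retrieved ids that do not appear in the labels.
top_k_idx = [{1, 2}, {0, 2}]
labels = [{2}, {1, 2}]

tp = sum(len(pred & lab) for pred, lab in zip(top_k_idx, labels))
fp = sum(len(pred - lab) for pred, lab in zip(top_k_idx, labels))
precision_at_k = tp / (tp + fp)                # 2 / (2 + 2) for these rows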
This frequency is ultimately returned as\n `precision_at_k`: an idempotent operation that simply divides\n `true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_k`. Internally, set operations applied to `top_k_predictions`\n and `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_k` and\n `false_positive_at_k` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where\n N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].\n The final dimension contains the indices of top-k labels. [D1, ... DN]\n must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `top_k_predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n class_id: Integer class ID for which we want binary metrics. This should be\n in range [0, num_classes), where num_classes is the last dimension of\n `predictions`. If `class_id` is outside this range, the method returns\n NAN.\n ignore_mask: An optional, `bool` `Tensor` whose shape is broadcastable to\n the the first [D1, ... DN] dimensions of `predictions` and `labels`.\n weights: An optional `Tensor` whose shape is broadcastable to the the first\n [D1, ... DN] dimensions of `predictions` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n precision: Scalar `float64` `Tensor` with the value of `true_positives`\n divided by the sum of `true_positives` and `false_positives`.\n update_op: `Operation` that increments `true_positives` and\n `false_positives` variables appropriately, and whose value matches\n `precision`.\n\n Raises:\n ValueError: If `ignore_mask` is not `None` and its shape doesn't match\n `predictions`, or if `weights` is not `None` and its shape doesn't match\n `predictions`, or if either `metrics_collections` or `updates_collections`\n are not a list or tuple.\n ValueError: If `top_k_predictions` has rank < 2.\n \"\"\"\n default_name = _at_k_name('precision', class_id=class_id)\n with ops.name_scope(\n name, default_name,\n (top_k_predictions, labels, ignore_mask, weights)) as scope:\n rank = array_ops.rank(top_k_predictions)\n check_rank_op = control_flow_ops.Assert(\n math_ops.greater_equal(rank, 2),\n ['top_k_predictions must have rank 2 or higher, e.g. 
[batch_size, k].'])\n with ops.control_dependencies([check_rank_op]):\n return _streaming_sparse_precision_at_k(\n top_k_idx=top_k_predictions,\n labels=labels,\n class_id=class_id,\n ignore_mask=ignore_mask,\n weights=weights,\n metrics_collections=metrics_collections,\n updates_collections=updates_collections,\n name=scope)\n\n\ndef num_relevant(labels, k):\n \"\"\"Computes number of relevant values for each row in labels.\n\n For labels with shape [D1, ... DN, num_labels], this is the minimum of\n `num_labels` and `k`.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels].\n k: Integer, k for @k metric.\n\n Returns:\n Integer `Tensor` of shape [D1, ... DN], where each value is the number of\n relevant values for that row.\n\n Raises:\n ValueError: if inputs have invalid dtypes or values.\n \"\"\"\n if k < 1:\n raise ValueError('Invalid k=%s.' % k)\n with ops.name_scope(None, 'num_relevant', (labels,)) as scope:\n # For SparseTensor, calculate separate count for each row.\n if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):\n labels_sizes = set_ops.set_size(labels)\n return math_ops.minimum(labels_sizes, k, name=scope)\n\n # For dense Tensor, calculate scalar count based on last dimension, and\n # tile across labels shape.\n labels_shape = array_ops.shape(labels)\n labels_size = labels_shape[-1]\n num_relevant_scalar = math_ops.minimum(labels_size, k)\n return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)\n\n\ndef expand_and_tile(tensor, multiple, dim=0, name=None):\n \"\"\"Slice `tensor` shape in 2, then tile along the sliced dimension.\n\n A new dimension is inserted in shape of `tensor` before `dim`, then values are\n tiled `multiple` times along the new dimension.\n\n Args:\n tensor: Input `Tensor` or `SparseTensor`.\n multiple: Integer, number of times to tile.\n dim: Integer, dimension along which to tile.\n name: Name of operation.\n\n Returns:\n `Tensor` result of expanding and tiling `tensor`.\n\n Raises:\n ValueError: if `multiple` is less than 1, or `dim` is not in\n `[-rank(tensor), rank(tensor)]`.\n \"\"\"\n if multiple < 1:\n raise ValueError('Invalid multiple %s, must be > 0.' 
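# A minimal numpy sketch of what num_relevant and the dense branch of
# expand_and_tile compute; shapes and values are made-up illustration data.
import numpy as np

labels = np.array([[3, 7, 1],
                   [2, 5, 9]])                 # [batch_size, num_labels]
k = 2
num_rel = np.full(labels.shape[:-1], min(labels.shape[-1], k))   # -> [2, 2]

tensor = np.array([4, 8])
multiple, dim = 3, 0
tiled = np.tile(np.expand_dims(tensor, dim), [multiple, 1])      # shape (3, 2)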
% multiple)\n with ops.name_scope(\n name, 'expand_and_tile', (tensor, multiple, dim)) as scope:\n # Sparse.\n if isinstance(tensor, ops.SparseTensorValue):\n tensor = ops.SparseTensor.from_value(tensor)\n if isinstance(tensor, ops.SparseTensor):\n if dim < 0:\n expand_dims = array_ops.reshape(\n array_ops.size(tensor.shape) + dim, [1])\n else:\n expand_dims = [dim]\n expanded_shape = array_ops.concat(\n 0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],\n array_ops.slice(tensor.shape, expand_dims, [-1])),\n name='expanded_shape')\n expanded = sparse_ops.sparse_reshape(\n tensor, shape=expanded_shape, name='expand')\n if multiple == 1:\n return expanded\n return sparse_ops.sparse_concat(\n dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)\n\n # Dense.\n expanded = array_ops.expand_dims(\n tensor, dim if (dim >= 0) else (dim - 1), name='expand')\n if multiple == 1:\n return expanded\n ones = array_ops.ones_like(array_ops.shape(tensor))\n tile_multiples = array_ops.concat(\n 0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')\n return array_ops.tile(expanded, tile_multiples, name=scope)\n\n\ndef sparse_average_precision_at_k(predictions, labels, k):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula\n for each row is:\n\n AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items\n\n A \"row\" is the elements in dimension [D1, ... DN] of `predictions`, `labels`,\n and the result `Tensors`. In the common case, this is [batch_size]. Each row\n of the results contains the average precision for that row.\n\n Internally, a `top_k` operation computes a `Tensor` indicating the top `k`\n `predictions`. Set operations applied to `top_k` and `labels` calculate the\n true positives, which are used to calculate the precision (\"P_{i}\" term,\n above).\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and `predictions` has shape\n [batch size, num_classes]. The final dimension contains the logit values\n for each class. [D1, ... DN] must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric. This will calculate an average precision for\n range `[1,k]`, as documented above.\n\n Returns:\n `float64` `Tensor` of shape [D1, ... DN], where each value is the average\n precision for that row.\n\n Raises:\n ValueError: if k is invalid.\n \"\"\"\n if k < 1:\n raise ValueError('Invalid k=%s.' % k)\n with ops.name_scope(\n None, 'average_precision', (predictions, labels, k)) as scope:\n # Calculate top k indices to produce [D1, ... DN, k] tensor.\n _, predictions_idx = nn.top_k(predictions, k)\n predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')\n\n # Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate\n # prediction for each k, so we can calculate separate true positive values\n # for each k.\n predictions_idx_per_k = array_ops.expand_dims(\n predictions_idx, -1, name='predictions_idx_per_k')\n\n # Replicate labels k times to produce [D1, ... 
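# A numpy sketch of the per-row AveP formula quoted above,
# sum_{i=1..k} P_i * rel_i / num_relevant_items, on made-up scores.
import numpy as np

scores = np.array([0.5, 0.1, 0.3, 0.2])        # logits for 4 classes, one row
label_set = {0, 3}                             # relevant class ids for that row
k = 3

top_k = np.argsort(-scores)[:k]                               # [0, 2, 3]
rel = np.array([c in label_set for c in top_k], dtype=float)  # rel_i
precision_i = np.cumsum(rel) / np.arange(1, k + 1)            # P_i
ave_p = np.sum(precision_i * rel) / min(len(label_set), k)    # 5/6 here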
DN, k, num_labels] tensor.\n labels_per_k = expand_and_tile(\n labels, multiple=k, dim=-1, name='labels_per_k')\n\n # The following tensors are all of shape [D1, ... DN, k], containing values\n # per row, per k value.\n # `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at\n # that k value is correct, 0 otherwise. This is the \"rel_{i}\" term from\n # the formula above.\n # `tp_per_k` (int32) - True positive counts.\n # `retrieved_per_k` (int32) - Number of predicted values at each k. This is\n # the precision denominator.\n # `precision_per_k` (float64) - Precision at each k. This is the \"P_{i}\"\n # term from the formula above.\n # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,\n # precisions at all k for which relevance indicator is true.\n relevant_per_k = _sparse_true_positive_at_k(\n predictions_idx_per_k, labels_per_k, name='relevant_per_k')\n tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')\n retrieved_per_k = math_ops.cumsum(\n array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')\n precision_per_k = math_ops.div(\n math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),\n name='precision_per_k')\n relevant_precision_per_k = math_ops.mul(\n precision_per_k, math_ops.to_double(relevant_per_k),\n name='relevant_precision_per_k')\n\n # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.\n precision_sum = math_ops.reduce_sum(\n relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')\n\n # Divide by number of relevant items to get average precision. These are\n # the \"num_relevant_items\" and \"AveP\" terms from the formula above.\n num_relevant_items = math_ops.to_double(num_relevant(labels, k))\n return math_ops.div(precision_sum, num_relevant_items, name=scope)\n\n\ndef streaming_sparse_average_precision_at_k(predictions,\n labels,\n k,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes average precision@k of predictions with respect to sparse labels.\n\n See `sparse_average_precision_at_k` for details on formula. `weights` are\n applied to the result of `sparse_average_precision_at_k`\n\n `streaming_sparse_average_precision_at_k` creates two local variables,\n `average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that\n are used to compute the frequency. This frequency is ultimately returned as\n `average_precision_at_<k>`: an idempotent operation that simply divides\n `average_precision_at_<k>/total` by `average_precision_at_<k>/max`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`\n indicating the top `k` `predictions`. Set operations applied to `top_k` and\n `labels` calculate the true positives and false positives weighted by\n `weights`. Then `update_op` increments `true_positive_at_<k>` and\n `false_positive_at_<k>` using these values.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where\n N >= 1. Commonly, N=1 and `predictions` has shape\n [batch size, num_classes]. The final dimension contains the logit values\n for each class. [D1, ... DN] must match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... 
DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_`. Values should be in range [0, num_classes), where\n num_classes is the last dimension of `predictions`. Values outside this\n range are ignored.\n k: Integer, k for @k metric. This will calculate an average precision for\n range `[1,k]`, as documented above.\n weights: An optional `Tensor` whose shape is broadcastable to the the first\n [D1, ... DN] dimensions of `predictions` and `labels`.\n metrics_collections: An optional list of collections that values should\n be added to.\n updates_collections: An optional list of collections that updates should\n be added to.\n name: Name of new update operation, and namespace for other dependent ops.\n\n Returns:\n mean_average_precision: Scalar `float64` `Tensor` with the mean average\n precision values.\n update: `Operation` that increments variables appropriately, and whose\n value matches `metric`.\n \"\"\"\n default_name = _at_k_name('average_precision', k)\n with ops.name_scope(name, default_name, (predictions, labels)) as scope:\n # Calculate per-example average precision, and apply weights.\n average_precision = sparse_average_precision_at_k(\n predictions=predictions, labels=labels, k=k)\n if weights is not None:\n weights = math_ops.to_double(weights)\n average_precision = math_ops.mul(average_precision, weights)\n\n # Create accumulation variables and update ops for max average precision and\n # total average precision.\n with ops.name_scope(None, 'max', (average_precision,)) as max_scope:\n # `max` is the max possible precision. Since max for any row is 1.0:\n # - For the unweighted case, this is just the number of rows.\n # - For the weighted case, it's the sum of the weights broadcast across\n # `average_precision` rows.\n max_var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=max_scope)\n if weights is None:\n batch_max = math_ops.to_double(\n array_ops.size(average_precision, name='batch_max'))\n else:\n # TODO(ptucker): More efficient way to broadcast?\n broadcast_weights = math_ops.mul(\n weights, array_ops.ones_like(average_precision),\n name='broadcast_weights')\n batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max')\n max_update = state_ops.assign_add(max_var, batch_max, name='update')\n with ops.name_scope(None, 'total', (average_precision,)) as total_scope:\n total_var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=total_scope)\n batch_total = math_ops.reduce_sum(average_precision, name='batch_total')\n total_update = state_ops.assign_add(total_var, batch_total, name='update')\n\n # Divide total by max to get mean, for both vars and the update ops.\n mean_average_precision = _safe_scalar_div(total_var, max_var, name='mean')\n update = _safe_scalar_div(total_update, max_update, name=scope)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_average_precision)\n if updates_collections:\n ops.add_to_collections(updates_collections, update)\n\n return mean_average_precision, update\n\n\ndef _select_class_id(ids, selected_id):\n \"\"\"Filter all but `selected_id` out of `ids`.\n\n Args:\n ids: `int64` `Tensor` or `SparseTensor` of IDs.\n selected_id: Int id to select.\n\n Returns:\n `SparseTensor` of same dimensions as `ids`. 
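# A sketch of the total/max accumulation described above for the unweighted
# case: `max` grows by the number of rows per batch, `total` by their AveP sum.
import numpy as np

batch_ave_p = [np.array([0.8, 0.5]), np.array([1.0])]   # made-up per-row AveP values
total = sum(float(b.sum()) for b in batch_ave_p)
max_ = sum(b.size for b in batch_ave_p)
mean_average_precision = total / max_                    # 2.3 / 3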
This contains only the entries\n equal to `selected_id`.\n \"\"\"\n if isinstance(ids, (ops.SparseTensor, ops.SparseTensorValue)):\n return sparse_ops.sparse_retain(\n ids, math_ops.equal(ids.values, selected_id))\n\n # TODO(ptucker): Make this more efficient, maybe add a sparse version of\n # tf.equal and tf.reduce_any?\n\n # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.\n ids_shape = array_ops.shape(ids, out_type=dtypes.int64)\n ids_last_dim = array_ops.size(ids_shape) - 1\n filled_selected_id_shape = math_ops.reduced_shape(\n ids_shape, array_ops.reshape(ids_last_dim, [1]))\n\n # Intersect `ids` with the selected ID.\n filled_selected_id = array_ops.fill(\n filled_selected_id_shape, math_ops.to_int64(selected_id))\n result = set_ops.set_intersection(filled_selected_id, ids)\n return ops.SparseTensor(\n indices=result.indices, values=result.values, shape=ids_shape)\n\n\ndef _maybe_select_class_id(labels, predictions_idx, selected_id=None):\n \"\"\"If class ID is specified, filter all other classes.\n\n Args:\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]\n where N >= 1. Commonly, N=1 and `predictions_idx` has shape\n [batch size, k].\n selected_id: Int id to select.\n\n Returns:\n Tuple of `labels` and `predictions_idx`, possibly with classes removed.\n \"\"\"\n if selected_id is None:\n return labels, predictions_idx\n return (_select_class_id(labels, selected_id),\n _select_class_id(predictions_idx, selected_id))\n\n\ndef _sparse_true_positive_at_k(predictions_idx,\n labels,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates true positives for recall@k and precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n name: Name of operation.\n\n Returns:\n A [D1, ... 
DN] `Tensor` of true positive counts.\n \"\"\"\n with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):\n labels, predictions_idx = _maybe_select_class_id(\n labels, predictions_idx, class_id)\n tp = set_ops.set_size(set_ops.set_intersection(predictions_idx, labels))\n tp = math_ops.to_double(tp)\n if weights is not None:\n weights = math_ops.to_double(weights)\n tp = math_ops.mul(tp, weights)\n return tp\n\n\ndef _streaming_sparse_true_positive_at_k(predictions_idx,\n labels,\n k=None,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step true positives for recall@k and precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incomptable shape.\n \"\"\"\n default_name = _at_k_name('true_positive', k, class_id=class_id)\n with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:\n tp = _sparse_true_positive_at_k(\n predictions_idx=predictions_idx, labels=labels, class_id=class_id,\n weights=weights)\n batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_tp, name='update')\n\n\ndef _sparse_false_positive_at_k(predictions_idx,\n labels,\n class_id=None,\n weights=None):\n \"\"\"Calculates false positives for precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n\n Returns:\n A [D1, ... 
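# A set-based sketch of the class_id filtering described above: both the
# predictions and the labels are reduced to the selected id before counting.
top_k_idx = [{1, 2}, {0, 2}]                   # made-up top-k ids per row
labels = [{2}, {1, 2}]
class_id = 2

tp = sum((class_id in pred) and (class_id in lab) for pred, lab in zip(top_k_idx, labels))
fp = sum((class_id in pred) and (class_id not in lab) for pred, lab in zip(top_k_idx, labels))
fn = sum((class_id not in pred) and (class_id in lab) for pred, lab in zip(top_k_idx, labels))
# tp=2, fp=0, fn=0 for these rows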
DN] `Tensor` of false positive counts.\n \"\"\"\n with ops.name_scope(None, 'false_positives', (predictions_idx, labels)):\n labels, predictions_idx = _maybe_select_class_id(labels,\n predictions_idx,\n class_id)\n fp = set_ops.set_size(set_ops.set_difference(\n predictions_idx, labels, aminusb=True))\n fp = math_ops.to_double(fp)\n if weights is not None:\n weights = math_ops.to_double(weights)\n fp = math_ops.mul(fp, weights)\n return fp\n\n\ndef _streaming_sparse_false_positive_at_k(predictions_idx,\n labels,\n k=None,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step false positives for precision@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incomptable shape.\n \"\"\"\n default_name = _at_k_name('false_positive', k, class_id=class_id)\n with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:\n fp = _sparse_false_positive_at_k(\n predictions_idx=predictions_idx, labels=labels, class_id=class_id,\n weights=weights)\n batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_fp, name='update')\n\n\ndef _sparse_false_negative_at_k(predictions_idx,\n labels,\n class_id=None,\n weights=None):\n \"\"\"Calculates false negatives for recall@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels_sparse`.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n\n Returns:\n A [D1, ... 
DN] `Tensor` of false negative counts.\n \"\"\"\n with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):\n labels, predictions_idx = _maybe_select_class_id(labels,\n predictions_idx,\n class_id)\n fn = set_ops.set_size(set_ops.set_difference(predictions_idx,\n labels,\n aminusb=False))\n fn = math_ops.to_double(fn)\n if weights is not None:\n weights = math_ops.to_double(weights)\n fn = math_ops.mul(fn, weights)\n return fn\n\n\ndef _streaming_sparse_false_negative_at_k(predictions_idx,\n labels,\n k,\n class_id=None,\n weights=None,\n name=None):\n \"\"\"Calculates weighted per step false negatives for recall@k.\n\n If `class_id` is specified, calculate binary true positives for `class_id`\n only.\n If `class_id` is not specified, calculate metrics for `k` predicted vs\n `n` label classes, where `n` is the 2nd dimension of `labels`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,\n top `k` predicted classes. For rank `n`, the first `n-1` dimensions must\n match `labels`.\n labels: `int64` `Tensor` or `SparseTensor` with shape\n [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of\n target classes for the associated prediction. Commonly, N=1 and `labels`\n has shape [batch_size, num_labels]. [D1, ... DN] must match\n `predictions_idx`.\n k: Integer, k for @k metric. This is only used for default op name.\n class_id: Class for which we want binary metrics.\n weights: `Tensor` whose shape is broadcastable to the the first [D1, ... DN]\n dimensions of `predictions_idx` and `labels`.\n name: Name of new variable, and namespace for other dependent ops.\n\n Returns:\n A tuple of `Variable` and update `Operation`.\n\n Raises:\n ValueError: If `weights` is not `None` and has an incomptable shape.\n \"\"\"\n default_name = _at_k_name('false_negative', k, class_id=class_id)\n with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:\n fn = _sparse_false_negative_at_k(\n predictions_idx=predictions_idx, labels=labels, class_id=class_id,\n weights=weights)\n batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))\n\n var = contrib_variables.local_variable(\n array_ops.zeros([], dtype=dtypes.float64), name=scope)\n return var, state_ops.assign_add(var, batch_total_fn, name='update')\n\n\ndef streaming_mean_absolute_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean absolute error between the labels and predictions.\n\n The `streaming_mean_absolute_error` function creates two local variables,\n `total` and `count` that are used to compute the mean absolute error. This\n average is weighted by `weights`, and it is ultimately returned as\n `mean_absolute_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_absolute_error`. Internally, an `absolute_errors` operation computes the\n absolute value of the differences between `predictions` and `labels`. Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `absolute_errors`, and it increments `count` with the reduced\n sum of `weights`\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that\n `mean_absolute_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_absolute_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_absolute_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n absolute_errors = math_ops.abs(predictions - labels)\n return streaming_mean(absolute_errors, weights, metrics_collections,\n updates_collections, name or 'mean_absolute_error')\n\n\ndef streaming_mean_relative_error(predictions, labels, normalizer, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean relative error by normalizing with the given values.\n\n The `streaming_mean_relative_error` function creates two local variables,\n `total` and `count` that are used to compute the mean relative absolute error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_relative_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_reative_error`. Internally, a `relative_errors` operation divides the\n absolute value of the differences between `predictions` and `labels` by the\n `normalizer`. Then `update_op` increments `total` with the reduced sum of the\n product of `weights` and `relative_errors`, and it increments `count` with the\n reduced sum of `weights`.\n\n If `weights` is `None`, weights default to 1. 
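# A numpy sketch of the weighted total/count bookkeeping described above for
# streaming_mean_absolute_error; the arrays are made-up, and a weight of 0
# masks the middle value exactly as the docstring suggests.
import numpy as np

predictions = np.array([2.0, 4.0, 6.0])
labels = np.array([1.0, 4.0, 8.0])
weights = np.array([1.0, 0.0, 1.0])

absolute_errors = np.abs(predictions - labels)
total = np.sum(weights * absolute_errors)      # what update_op adds to `total`
count = np.sum(weights)                        # what update_op adds to `count`
mean_absolute_error = total / count            # (1 + 2) / 2 = 1.5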
Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n normalizer: A `Tensor` of the same shape as `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that\n `mean_relative_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_relative_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_relative_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n predictions, normalizer = tensor_util.remove_squeezable_dimensions(\n predictions, normalizer)\n predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())\n relative_errors = math_ops.select(\n math_ops.equal(normalizer, 0.0),\n array_ops.zeros_like(labels),\n math_ops.div(math_ops.abs(labels - predictions), normalizer))\n return streaming_mean(relative_errors, weights, metrics_collections,\n updates_collections, name or 'mean_relative_error')\n\n\ndef streaming_mean_squared_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the mean squared error between the labels and predictions.\n\n The `streaming_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `mean_squared_error`: an idempotent operation that simply divides `total` by\n `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_squared_error`. Internally, a `squared_error` operation computes the\n element-wise square of the difference between `predictions` and `labels`. Then\n `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. 
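# A numpy sketch of the relative_errors computation described above; entries
# with a zero normalizer are defined to contribute zero error. Values are
# made-up illustration data.
import numpy as np

predictions = np.array([2.0, 4.0, 0.5])
labels = np.array([1.0, 5.0, 0.5])
normalizer = np.array([1.0, 5.0, 0.0])

safe_norm = np.where(normalizer == 0.0, 1.0, normalizer)
relative_errors = np.where(normalizer == 0.0, 0.0,
                           np.abs(labels - predictions) / safe_norm)
mean_relative_error = relative_errors.mean()   # (1.0 + 0.2 + 0.0) / 3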
Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that\n `mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_squared_error: A tensor representing the current mean, the value of\n `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n squared_error = math_ops.square(labels - predictions)\n return streaming_mean(squared_error, weights, metrics_collections,\n updates_collections, name or 'mean_squared_error')\n\n\ndef streaming_root_mean_squared_error(predictions, labels, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the root mean squared error between the labels and predictions.\n\n The `streaming_root_mean_squared_error` function creates two local variables,\n `total` and `count` that are used to compute the root mean squared error.\n This average is weighted by `weights`, and it is ultimately returned as\n `root_mean_squared_error`: an idempotent operation that takes the square root\n of the division of `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `root_mean_squared_error`. Internally, a `squared_error` operation computes\n the element-wise square of the difference between `predictions` and `labels`.\n Then `update_op` increments `total` with the reduced sum of the product of\n `weights` and `squared_error`, and it increments `count` with the reduced sum\n of `weights`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of arbitrary shape.\n labels: A `Tensor` of the same shape as `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that\n `root_mean_squared_error` should be added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\n Returns:\n root_mean_squared_error: A tensor representing the current mean, the value\n of `total` divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately and whose value matches `root_mean_squared_error`.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n value_tensor, update_op = streaming_mean_squared_error(\n predictions, labels, weights, None, None,\n name or 'root_mean_squared_error')\n\n root_mean_squared_error = math_ops.sqrt(value_tensor)\n with ops.control_dependencies([update_op]):\n update_op = math_ops.sqrt(update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, root_mean_squared_error)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return root_mean_squared_error, update_op\n\n\ndef streaming_covariance(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the unbiased sample covariance between `predictions` and `labels`.\n\n The `streaming_covariance` function creates four local variables,\n `comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to\n compute the sample covariance between predictions and labels across multiple\n batches of data. The covariance is ultimately returned as an idempotent\n operation that simply divides `comoment` by `count` - 1. We use `count` - 1\n in order to get an unbiased estimate.\n\n The algorithm used for this online computation is described in\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.\n Specifically, the formula used to combine two sample comoments is\n `C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`\n The comoment for a single batch of data is simply\n `sum((x - E[x]) * (y - E[y]))`, optionally weighted.\n\n If `weights` is not None, then it is used to compute weighted comoments,\n means, and count. NOTE: these weights are treated as \"frequency weights\", as\n opposed to \"reliability weights\". See discussion of the difference on\n https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance\n\n To facilitate the computation of covariance across multiple batches of data,\n the function creates an `update_op` operation, which updates underlying\n variables and returns the updated covariance.\n\n Args:\n predictions: A `Tensor` of arbitrary size.\n labels: A `Tensor` of the same size as `predictions`.\n weights: An optional set of weights which indicates the frequency with which\n an example is sampled. 
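# A numpy sketch of the relation between the two metrics above: the root is
# taken after the streaming division of `total` by `count`, not per example.
# The arrays are made-up illustration data.
import numpy as np

predictions = np.array([2.0, 4.0, 6.0])
labels = np.array([1.0, 4.0, 8.0])

squared_error = np.square(labels - predictions)
mse = squared_error.mean()                     # total / count with unit weights
rmse = np.sqrt(mse)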
Must be broadcastable with `labels`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n covariance: A `Tensor` representing the current unbiased sample covariance,\n `comoment` / (`count` - 1).\n update_op: An operation that updates the local variables appropriately.\n\n Raises:\n ValueError: If labels and predictions are of different sizes or if either\n `metrics_collections` or `updates_collections` are not a list or tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'covariance', [predictions, labels]):\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n count = _create_local('count', [])\n mean_prediction = _create_local('mean_prediction', [])\n mean_label = _create_local('mean_label', [])\n comoment = _create_local('comoment', []) # C_A in update equation\n\n if weights is None:\n batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn\n weighted_predictions = predictions\n weighted_labels = labels\n else:\n batch_count = math_ops.reduce_sum(\n _broadcast_weights(weights, labels)) # n_B in eqn\n weighted_predictions = predictions * weights\n weighted_labels = labels * weights\n\n update_count = state_ops.assign_add(count, batch_count) # n_AB in eqn\n prev_count = update_count - batch_count # n_A in update equation\n\n # We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)\n # batch_mean_prediction is E[x_B] in the update equation\n batch_mean_prediction = _safe_div(\n math_ops.reduce_sum(weighted_predictions), batch_count,\n 'batch_mean_prediction')\n delta_mean_prediction = _safe_div(\n (batch_mean_prediction - mean_prediction) * batch_count, update_count,\n 'delta_mean_prediction')\n update_mean_prediction = state_ops.assign_add(mean_prediction,\n delta_mean_prediction)\n # prev_mean_prediction is E[x_A] in the update equation\n prev_mean_prediction = update_mean_prediction - delta_mean_prediction\n\n # batch_mean_label is E[y_B] in the update equation\n batch_mean_label = _safe_div(\n math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')\n delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,\n update_count, 'delta_mean_label')\n update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)\n # prev_mean_label is E[y_A] in the update equation\n prev_mean_label = update_mean_label - delta_mean_label\n\n unweighted_batch_coresiduals = (\n (predictions - batch_mean_prediction) * (labels - batch_mean_label))\n # batch_comoment is C_B in the update equation\n if weights is None:\n batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)\n else:\n batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals *\n weights)\n\n # View delta_comoment as = C_AB - C_A in the update equation above.\n # Since C_A is stored in a var, by how much do we need to increment that var\n # to make the var = C_AB?\n delta_comoment = (batch_comoment +\n (prev_mean_prediction - batch_mean_prediction) *\n (prev_mean_label - batch_mean_label) *\n (prev_count * batch_count / update_count))\n update_comoment = state_ops.assign_add(comoment, delta_comoment)\n\n covariance = _safe_div(comoment, count - 1, 'covariance')\n with ops.control_dependencies([update_comoment]):\n update_op = 
_safe_div(comoment, count - 1, 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, covariance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return covariance, update_op\n\n\ndef streaming_pearson_correlation(predictions,\n labels,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes Pearson correlation coefficient between `predictions`, `labels`.\n\n The `streaming_pearson_correlation` function delegates to\n `streaming_covariance` the tracking of three [co]variances:\n\n - `streaming_covariance(predictions, labels)`, i.e. covariance\n - `streaming_covariance(predictions, predictions)`, i.e. variance\n - `streaming_covariance(labels, labels)`, i.e. variance\n\n The product-moment correlation ultimately returned is an idempotent operation\n `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To\n facilitate correlation computation across multiple batches, the function\n groups the `update_op`s of the underlying streaming_covariance and returns an\n `update_op`.\n\n If `weights` is not None, then it is used to compute a weighted correlation.\n NOTE: these weights are treated as \"frequency weights\", as opposed to\n \"reliability weights\". See discussion of the difference on\n https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance\n\n Args:\n predictions: A `Tensor` of arbitrary size.\n labels: A `Tensor` of the same size as predictions.\n weights: An optional set of weights which indicates the frequency with which\n an example is sampled. Must be broadcastable with `labels`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n pearson_r: A tensor representing the current Pearson product-moment\n correlation coefficient, the value of\n `cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.\n update_op: An operation that updates the underlying variables appropriately.\n\n Raises:\n ValueError: If `labels` and `predictions` are of different sizes, or if\n `weights` is the wrong size, or if either `metrics_collections` or\n `updates_collections` are not a `list` or `tuple`.\n \"\"\"\n with variable_scope.variable_scope(name, 'pearson_r', [predictions, labels]):\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n cov, update_cov = streaming_covariance(\n predictions, labels, weights=weights, name='covariance')\n var_predictions, update_var_predictions = streaming_covariance(\n predictions, predictions, weights=weights, name='variance_predictions')\n var_labels, update_var_labels = streaming_covariance(\n labels, labels, weights=weights, name='variance_labels')\n\n pearson_r = _safe_div(\n cov,\n math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),\n 'pearson_r')\n with ops.control_dependencies(\n [update_cov, update_var_predictions, update_var_labels]):\n update_op = _safe_div(update_cov, math_ops.mul(\n math_ops.sqrt(update_var_predictions),\n math_ops.sqrt(update_var_labels)), 'update_op')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, pearson_r)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return pearson_r, 
update_op\n\n\n# TODO(nsilberman): add a 'normalized' flag so that the user can request\n# normalization if the inputs are not normalized.\ndef streaming_mean_cosine_distance(predictions, labels, dim, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the cosine distance between the labels and predictions.\n\n The `streaming_mean_cosine_distance` function creates two local variables,\n `total` and `count` that are used to compute the average cosine distance\n between `predictions` and `labels`. This average is weighted by `weights`,\n and it is ultimately returned as `mean_distance`, which is an idempotent\n operation that simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `mean_distance`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\n Args:\n predictions: A `Tensor` of the same shape as `labels`.\n labels: A `Tensor` of arbitrary shape.\n dim: The dimension along which the cosine distance is computed.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`,\n and whose dimension `dim` is 1.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_distance: A tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n predictions, labels = tensor_util.remove_squeezable_dimensions(\n predictions, labels)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n radial_diffs = math_ops.mul(predictions, labels)\n radial_diffs = math_ops.reduce_sum(radial_diffs,\n reduction_indices=[dim,],\n keep_dims=True)\n mean_distance, update_op = streaming_mean(radial_diffs, weights,\n None,\n None,\n name or 'mean_cosine_distance')\n mean_distance = math_ops.sub(1.0, mean_distance)\n update_op = math_ops.sub(1.0, update_op)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_distance)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_distance, update_op\n\n\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_percentage_less(values, threshold, ignore_mask=None, weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Computes the percentage of values less than the given threshold.\n\n The `streaming_percentage_less` function creates two local variables,\n `total` and `count` that are used to compute the percentage of `values` that\n fall below `threshold`. This rate is weighted by `weights`, and it is\n ultimately returned as `percentage` which is an idempotent operation that\n simply divides `total` by `count`.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the\n `percentage`.\n\n If `weights` is `None`, weights default to 1. 
Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n values: A numeric `Tensor` of arbitrary size.\n threshold: A scalar threshold.\n ignore_mask: An optional, `bool` `Tensor` whose shape matches `values`.\n weights: An optional `Tensor` whose shape is broadcastable to `values`.\n metrics_collections: An optional list of collections that the metric\n value variable should be added to.\n updates_collections: An optional list of collections that the metric update\n ops should be added to.\n name: An optional variable_scope name.\n\n Returns:\n percentage: A tensor representing the current mean, the value of `total`\n divided by `count`.\n update_op: An operation that increments the `total` and `count` variables\n appropriately.\n\n Raises:\n ValueError: If `ignore_mask` is not `None` and its shape doesn't match\n `values`, or if `weights` is not `None` and its shape doesn't match\n `values`, or if either `metrics_collections` or `updates_collections` are\n not a list or tuple.\n \"\"\"\n is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))\n return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights),\n metrics_collections,\n updates_collections,\n name or 'percentage_below_threshold')\n\n\n@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')\ndef streaming_mean_iou(predictions,\n labels,\n num_classes,\n ignore_mask=None,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Calculate per-step mean Intersection-Over-Union (mIOU).\n\n Mean Intersection-Over-Union is a common evaluation metric for\n semantic image segmentation, which first computes the IOU for each\n semantic class and then computes the average over classes.\n IOU is defined as follows:\n IOU = true_positive / (true_positive + false_positive + false_negative).\n The predictions are accumulated in a confusion matrix, weighted by `weights`,\n and mIOU is then calculated from it.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that updates these variables and returns the `mean_iou`.\n\n If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n Alternatively, if `ignore_mask` is not `None`, then mask values where\n `ignore_mask` is `True`.\n\n Args:\n predictions: A tensor of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened, if its rank > 1.\n labels: A tensor of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened, if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. 
This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n ignore_mask: An optional, `bool` `Tensor` whose shape matches `predictions`.\n weights: An optional `Tensor` whose shape is broadcastable to `predictions`.\n metrics_collections: An optional list of collections that `mean_iou`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n mean_iou: A tensor representing the mean intersection-over-union.\n update_op: An operation that increments the confusion matrix.\n\n Raises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `ignore_mask` is not `None` and its shape doesn't match `predictions`, or\n if `weights` is not `None` and its shape doesn't match `predictions`, or\n if either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n \"\"\"\n with variable_scope.variable_scope(name, 'mean_iou', [predictions, labels]):\n # Check if shape is compatible.\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n # Local variable to accumulate the predictions in the confusion matrix.\n cm_dtype = dtypes.int64 if weights is not None else dtypes.float64\n total_cm = _create_local('total_confusion_matrix',\n shape=[num_classes, num_classes], dtype=cm_dtype)\n\n # Cast the type to int64 required by confusion_matrix_ops.\n predictions = math_ops.to_int64(predictions)\n labels = math_ops.to_int64(labels)\n num_classes = math_ops.to_int64(num_classes)\n\n # Flatten the input if its rank > 1.\n predictions_rank = predictions.get_shape().ndims\n if predictions_rank > 1:\n predictions = array_ops.reshape(predictions, [-1])\n\n labels_rank = labels.get_shape().ndims\n if labels_rank > 1:\n labels = array_ops.reshape(labels, [-1])\n\n weights = _mask_weights(ignore_mask, weights)\n if weights is not None:\n weights_rank = weights.get_shape().ndims\n if weights_rank > 1:\n weights = array_ops.reshape(weights, [-1])\n\n # Accumulate the prediction to current confusion matrix.\n current_cm = confusion_matrix_ops.confusion_matrix(\n predictions, labels, num_classes, weights=weights, dtype=cm_dtype)\n update_op = state_ops.assign_add(total_cm, current_cm)\n\n def compute_mean_iou(name):\n \"\"\"Compute the mean intersection-over-union via the confusion matrix.\"\"\"\n sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))\n sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))\n cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))\n denominator = sum_over_row + sum_over_col - cm_diag\n\n # If the value of the denominator is 0, set it to 1 to avoid\n # zero division.\n denominator = math_ops.select(\n math_ops.greater(denominator, 0),\n denominator,\n array_ops.ones_like(denominator))\n iou = math_ops.div(cm_diag, denominator)\n return math_ops.reduce_mean(iou, name=name)\n\n mean_iou = compute_mean_iou('mean_iou')\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, mean_iou)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return mean_iou, update_op\n\n\ndef _next_array_size(required_size, growth_factor=1.5):\n \"\"\"Calculate the next size for reallocating a dynamic array.\n\n Args:\n required_size: number or tf.Tensor specifying required array capacity.\n growth_factor: optional number or tf.Tensor specifying the growth factor\n between subsequent allocations.\n\n Returns:\n 
tf.Tensor with dtype=int32 giving the next array size.\n \"\"\"\n exponent = math_ops.ceil(\n math_ops.log(math_ops.cast(required_size, dtypes.float32))\n / math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))\n return math_ops.cast(math_ops.ceil(growth_factor ** exponent), dtypes.int32)\n\n\ndef streaming_concat(values,\n axis=0,\n max_size=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n \"\"\"Concatenate values along an axis across batches.\n\n The function `streaming_concat` creates two local variables, `array` and\n `size`, that are used to store concatenated values. Internally, `array` is\n used as storage for a dynamic array (if `maxsize` is `None`), which ensures\n that updates can be run in amortized constant time.\n\n For estimation of the metric over a stream of data, the function creates an\n `update_op` operation that appends the values of a tensor and returns the\n `value` of the concatenated tensors.\n\n This op allows for evaluating metrics that cannot be updated incrementally\n using the same framework as other streaming metrics.\n\n Args:\n values: tensor to concatenate. Rank and the shape along all axes other than\n the axis to concatenate along must be statically known.\n axis: optional integer axis to concatenate along.\n max_size: optional integer maximum size of `value` along the given axis.\n Once the maximum size is reached, further updates are no-ops. By default,\n there is no maximum size: the array is resized as necessary.\n metrics_collections: An optional list of collections that `value`\n should be added to.\n updates_collections: An optional list of collections `update_op` should be\n added to.\n name: An optional variable_scope name.\n\n Returns:\n value: A tensor representing the concatenated values.\n update_op: An operation that concatenates the next values.\n\n Raises:\n ValueError: if `values` does not have a statically known rank, `axis` is\n not in the valid range or the size of `values` is not statically known\n along any axis other than `axis`.\n \"\"\"\n with variable_scope.variable_scope(name, 'streaming_concat', [values]):\n # pylint: disable=invalid-slice-index\n values_shape = values.get_shape()\n if values_shape.dims is None:\n raise ValueError('`values` must have known statically known rank')\n\n ndim = len(values_shape)\n if axis < 0:\n axis += ndim\n if not 0 <= axis < ndim:\n raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))\n\n fixed_shape = [dim.value for n, dim in enumerate(values_shape)\n if n != axis]\n if any(value is None for value in fixed_shape):\n raise ValueError('all dimensions of `values` other than the dimension to '\n 'concatenate along must have statically known size')\n\n # We move `axis` to the front of the internal array so assign ops can be\n # applied to contiguous slices\n init_size = 0 if max_size is None else max_size\n init_shape = [init_size] + fixed_shape\n array = _create_local(\n 'array', shape=init_shape, validate_shape=False, dtype=values.dtype)\n size = _create_local('size', shape=[], dtype=dtypes.int32)\n\n perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]\n valid_array = array[:size]\n valid_array.set_shape([None] + fixed_shape)\n value = array_ops.transpose(valid_array, perm, name='concat')\n\n values_size = array_ops.shape(values)[axis]\n if max_size is None:\n batch_size = values_size\n else:\n batch_size = math_ops.minimum(values_size, max_size - size)\n\n perm = [axis] + [n for n in range(ndim) if n != axis]\n batch_values = 
array_ops.transpose(values, perm)[:batch_size]\n\n def reallocate():\n next_size = _next_array_size(new_size)\n next_shape = array_ops.pack([next_size] + fixed_shape)\n new_value = array_ops.zeros(next_shape, dtype=values.dtype)\n old_value = array.value()\n assign_op = state_ops.assign(array, new_value, validate_shape=False)\n with ops.control_dependencies([assign_op]):\n copy_op = array[:size].assign(old_value[:size])\n # return value needs to be the same dtype as no_op() for cond\n with ops.control_dependencies([copy_op]):\n return control_flow_ops.no_op()\n\n new_size = size + batch_size\n array_size = array_ops.shape_internal(array, optimize=False)[0]\n maybe_reallocate_op = control_flow_ops.cond(\n new_size > array_size, reallocate, control_flow_ops.no_op)\n with ops.control_dependencies([maybe_reallocate_op]):\n append_values_op = array[size:new_size].assign(batch_values)\n with ops.control_dependencies([append_values_op]):\n update_op = size.assign(new_size)\n\n if metrics_collections:\n ops.add_to_collections(metrics_collections, value)\n\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n\n return value, update_op\n # pylint: enable=invalid-slice-index\n\n\ndef aggregate_metrics(*value_update_tuples):\n \"\"\"Aggregates the metric value tensors and update ops into two lists.\n\n Args:\n *value_update_tuples: a variable number of tuples, each of which contain the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n a list of value tensors and a list of update ops.\n\n Raises:\n ValueError: if `value_update_tuples` is empty.\n \"\"\"\n if not value_update_tuples:\n raise ValueError('Expected at least one value_tensor/update_op pair')\n value_ops, update_ops = zip(*value_update_tuples)\n return list(value_ops), list(update_ops)\n\n\ndef aggregate_metric_map(names_to_tuples):\n \"\"\"Aggregates the metric names to tuple dictionary.\n\n This function is useful for pairing metric names with their associated value\n and update ops when the list of metrics is long. 
For example:\n\n ```python\n metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({\n 'Mean Absolute Error': new_slim.metrics.streaming_mean_absolute_error(\n predictions, labels, weights),\n 'Mean Relative Error': new_slim.metrics.streaming_mean_relative_error(\n predictions, labels, labels, weights),\n 'RMSE Linear': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n 'RMSE Log': new_slim.metrics.streaming_root_mean_squared_error(\n predictions, labels, weights),\n })\n ```\n\n Args:\n names_to_tuples: a map of metric names to tuples, each of which contain the\n pair of (value_tensor, update_op) from a streaming metric.\n\n Returns:\n A dictionary from metric names to value ops and a dictionary from metric\n names to update ops.\n \"\"\"\n metric_names = names_to_tuples.keys()\n value_ops, update_ops = zip(*names_to_tuples.values())\n return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))\n\n\n__all__ = [\n 'aggregate_metric_map',\n 'aggregate_metrics',\n 'streaming_accuracy',\n 'streaming_auc',\n 'streaming_mean',\n 'streaming_mean_absolute_error',\n 'streaming_mean_cosine_distance',\n 'streaming_mean_iou',\n 'streaming_mean_relative_error',\n 'streaming_mean_squared_error',\n 'streaming_mean_tensor',\n 'streaming_percentage_less',\n 'streaming_precision',\n 'streaming_precision_at_thresholds',\n 'streaming_recall',\n 'streaming_recall_at_k',\n 'streaming_recall_at_thresholds',\n 'streaming_root_mean_squared_error',\n 'streaming_sensitivity_at_specificity',\n 'streaming_sparse_average_precision_at_k',\n 'streaming_sparse_precision_at_k',\n 'streaming_sparse_recall_at_k',\n 'streaming_specificity_at_sensitivity',\n]\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ], [ "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.nn.in_top_k", "tensorflow.python.ops.math_ops.ceil", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.ops.math_ops.to_double", "tensorflow.python.ops.math_ops.greater", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.math_ops.sqrt", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.python.ops.array_ops.diag_part", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.math_ops.sub", "tensorflow.python.ops.math_ops.logical_and", "tensorflow.python.ops.math_ops.logical_not", "tensorflow.contrib.metrics.python.ops.set_ops.set_intersection", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.math_ops.to_int64", "tensorflow.python.ops.math_ops.abs", "tensorflow.python.ops.array_ops.fill", "tensorflow.contrib.metrics.python.ops.set_ops.set_size", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.sparse_ops.sparse_concat", "tensorflow.contrib.metrics.python.ops.set_ops.set_difference", "tensorflow.python.ops.math_ops.add", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.pack", "tensorflow.python.ops.sparse_ops.sparse_reshape", "tensorflow.contrib.framework.deprecated_args", "tensorflow.python.ops.math_ops.cast", "tensorflow.contrib.framework.tensor_util.remove_squeezable_dimensions", "tensorflow.python.ops.math_ops.minimum", "tensorflow.contrib.metrics.python.ops.confusion_matrix_ops.confusion_matrix", "tensorflow.python.ops.array_ops.tile", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.framework.ops.SparseTensor", "tensorflow.python.ops.math_ops.square", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.math_ops.truediv", "tensorflow.python.ops.math_ops.equal", "tensorflow.python.ops.math_ops.div", "tensorflow.python.ops.math_ops.mul", "tensorflow.python.ops.math_ops.reduce_mean", "tensorflow.python.ops.nn.top_k", "tensorflow.python.ops.check_ops.assert_type", "tensorflow.python.framework.ops.SparseTensor.from_value", "tensorflow.python.framework.ops.add_to_collections", "tensorflow.contrib.framework.deprecated", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape_internal", "tensorflow.python.ops.math_ops.cumsum", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.math_ops.reduce_sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
KOLANICH-ML/rbfopt
[ "2243135f7307b4cb9a99292220e2381a1e776fbf" ]
[ "tests/test_rbfopt_degree0_models.py" ]
[ "\"\"\"Test the successful creation of Pyomo 0-degree models in RBFOpt.\n\nThis module contains unit tests for the module rbfopt_degree0_models.\n\nLicensed under Revised BSD license, see LICENSE.\n(C) Copyright International Business Machines Corporation 2016.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport unittest\nimport numpy as np\nimport pyomo.environ\nimport rbfopt\nimport rbfopt.rbfopt_utils as ru\nimport rbfopt.rbfopt_degree0_models as d0\nfrom rbfopt.rbfopt_settings import RbfoptSettings\n\nclass TestMultiquadricModels(unittest.TestCase):\n \"\"\"Test the rbfopt_degree0_models module using multiquadric RBF.\"\"\"\n\n def setUp(self):\n \"\"\"Generate data to simulate an optimization problem.\"\"\"\n np.random.seed(71294123)\n self.settings = RbfoptSettings(rbf = 'multiquadric')\n self.n = 3\n self.k = 5\n self.var_lower = np.array([i for i in range(self.n)])\n self.var_upper = np.array([i + 10 for i in range(self.n)])\n self.node_pos = np.array([self.var_lower, self.var_upper,\n [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12]])\n self.node_val = np.array([2*i for i in range(self.k)])\n Amat = [[1.0, 17.349351572897476, 1.9999999999999998,\n 12.009995836801943, 12.932517156377562, 1.0],\n [17.349351572897476, 1.0, 15.620499351813308,\n 6.945502141674135, 6.103277807866851, 1.0],\n [1.9999999999999998, 15.620499351813308, 1.0,\n 10.374969879474351, 11.280514172678478, 1.0],\n [12.009995836801943, 6.945502141674135, 10.374969879474351,\n 1.0, 5.243090691567331, 1.0], \n [12.932517156377562, 6.103277807866851, 11.280514172678478,\n 5.243090691567331, 1.0, 1.0], \n [1.0, 1.0, 1.0, 1.0, 1.0, 0.0]]\n self.Amat = np.matrix(Amat)\n self.Amatinv = self.Amat.getI()\n self.rbf_lambda = np.array([1.981366489986409, 0.6262004309283905,\n -1.8477896263093248, -0.10028069928913483,\n -0.65949659531634])\n self.rbf_h = np.array([0.5833631458309435])\n self.integer_vars = np.array([1])\n # -- end function \n\n def test_create_min_rbf_model(self):\n \"\"\"Test the create_min_rbf_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_rbf_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None, self.node_pos,\n self.rbf_lambda, self.rbf_h)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n model = d0.create_min_rbf_model(\n self.settings, 10, 20, np.array([0] * 10),np.array([1] * 10),\n np.array([i for i in range(10)]),\n (np.array([0]), np.array([]),\n [(0, 0, np.array([i for i in range(10)]))]),\n np.random.randint(0, 2, size=(20, 10)),\n np.random.uniform(size=20), np.array([-1]))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n \n\n def test_create_max_one_over_mu_model(self):\n \"\"\"Test the create_max_one_over_mu_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_one_over_mu_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.Amat)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_max_h_k_model(self):\n \"\"\"Test the create_max_h_k_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_h_k_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n 
self.node_pos, self.rbf_lambda,\n self.rbf_h, self.Amat, -1)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_bump_model(self):\n \"\"\"Test the create_min_bump_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n Phimat = self.Amat[:self.k, :self.k]\n Pmat = self.Amat[:self.k, self.k:]\n node_err_bounds = np.array([[- 2, + 2] for i in range(self.k)])\n model = d0.create_min_bump_model(self.settings, self.n, self.k, \n Phimat, Pmat, self.node_val,\n node_err_bounds)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_maximin_dist_model(self):\n \"\"\"Test the create_maximin_dist_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_maximin_dist_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper, \n self.integer_vars, None,\n self.node_pos)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_msrsm_model(self):\n \"\"\"Test the create_min_msrsm_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_msrsm_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, 0.5, 0.0, 1.0,\n min(self.node_val),\n max(self.node_val))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n# -- end class\n\nclass TestLinearModels(unittest.TestCase):\n \"\"\"Test the rbfopt_degree0_models module using linear RBF.\"\"\"\n\n def setUp(self):\n \"\"\"Generate data to simulate an optimization problem.\"\"\"\n self.settings = RbfoptSettings(rbf = 'linear')\n self.n = 3\n self.k = 5\n self.var_lower = np.array([i for i in range(self.n)])\n self.var_upper = np.array([i + 10 for i in range(self.n)])\n self.node_pos = np.array([self.var_lower, self.var_upper,\n [1, 2, 3], [9, 5, 8.8], [5.5, 7, 12]])\n self.node_val = np.array([2*i for i in range(self.k)])\n Amat = [[0.0, 17.320508075688775, 1.7320508075688772,\n 11.968291440301744, 12.893796958227627, 1.0],\n [17.320508075688775, 0.0, 15.588457268119896,\n 6.873136110975833, 6.020797289396148, 1.0],\n [1.7320508075688772, 15.588457268119896, 0.0,\n 10.32666451474047, 11.236102527122116, 1.0],\n [11.968291440301744, 6.873136110975833, \n 10.32666451474047, 0.0, 5.146843692983108, 1.0],\n [12.893796958227627, 6.020797289396148,\n 11.236102527122116, 5.146843692983108, 0.0, 1.0], \n [1.0, 1.0, 1.0, 1.0, 1.0, 0.0]]\n self.Amat = np.matrix(Amat)\n self.Amatinv = self.Amat.getI()\n self.rbf_lambda = np.array([1.1704846814048488, 0.5281643269521171,\n -0.9920149389974761, -0.1328847504999134,\n -0.5737493188595765])\n self.rbf_h = np.array([1.5583564301976252])\n self.integer_vars = np.array([1])\n # -- end function \n\n def test_create_min_rbf_model(self):\n \"\"\"Test the create_min_rbf_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_rbf_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None, self.node_pos,\n self.rbf_lambda, self.rbf_h)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n model = d0.create_min_rbf_model(\n self.settings, 10, 20, np.array([0] * 10),np.array([1] * 10),\n np.array([i for i in range(10)]),\n (np.array([0]), np.array([]),\n [(0, 0, np.array([i for i in 
range(10)]))]),\n np.random.randint(0, 2, size=(20, 10)),\n np.random.uniform(size=20), np.array([-1]))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n\n def test_create_max_one_over_mu_model(self):\n \"\"\"Test the create_max_one_over_mu_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_one_over_mu_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.Amat)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_max_h_k_model(self):\n \"\"\"Test the create_max_h_k_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_max_h_k_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, self.Amat, -1)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_bump_model(self):\n \"\"\"Test the create_min_bump_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n Phimat = self.Amat[:self.k, :self.k]\n Pmat = self.Amat[:self.k, self.k:]\n node_err_bounds = np.array([[- 2, + 2] for i in range(self.k)])\n model = d0.create_min_bump_model(self.settings, self.n, self.k, \n Phimat, Pmat, self.node_val,\n node_err_bounds)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_maximin_dist_model(self):\n \"\"\"Test the create_maximin_dist_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_maximin_dist_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper, \n self.integer_vars, None,\n self.node_pos)\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n def test_create_min_msrsm_model(self):\n \"\"\"Test the create_min_msrsm_model function.\n\n This test simply checks whether the function returns a valid\n pyomo.ConcreteModel object.\n \"\"\"\n model = d0.create_min_msrsm_model(self.settings, self.n, self.k,\n self.var_lower, self.var_upper,\n self.integer_vars, None,\n self.node_pos, self.rbf_lambda,\n self.rbf_h, 0.5, 0.0, 1.0,\n min(self.node_val),\n max(self.node_val))\n self.assertIsInstance(model, pyomo.environ.ConcreteModel)\n\n# -- end class\n" ]
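The hard-coded `Amat` in both test fixtures above is the interpolation matrix of a degree-0 RBF model: pairwise RBF values between the five nodes, bordered by a row and column of ones for the constant polynomial term. A NumPy sketch that reproduces the multiquadric fixture from `node_pos`, assuming the form `sqrt(r**2 + gamma**2)` with `gamma = 1` (the listed values match this choice); the linear fixture uses `phi(r) = r` instead:

```python
import numpy as np

node_pos = np.array([[0.0, 1.0, 2.0], [10.0, 11.0, 12.0],
                     [1.0, 2.0, 3.0], [9.0, 5.0, 8.8], [5.5, 7.0, 12.0]])
k = len(node_pos)

r = np.linalg.norm(node_pos[:, None, :] - node_pos[None, :, :], axis=-1)  # pairwise distances
phi = np.sqrt(r ** 2 + 1.0)  # multiquadric with gamma = 1 (assumed); use phi = r for the linear case

amat = np.zeros((k + 1, k + 1))
amat[:k, :k] = phi   # Phi block
amat[:k, k] = 1.0    # column of ones: constant polynomial tail of the degree-0 model
amat[k, :k] = 1.0    # matching row; the bottom-right entry stays 0

print(amat[0])  # first fixture row: 1.0, 17.34935..., 2.0, 12.00999..., 12.93251..., 1.0
```

The `rbf_lambda` and `rbf_h` fixtures then look like the solution of the standard RBF interpolation system `amat @ concat([lambda, [h]]) = concat([node_val, [0]])` (their components sum to roughly zero, as the last row requires), which is what the models under test consume.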
[ [ "numpy.matrix", "numpy.random.seed", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
derdon/sunpy
[ "619102cd48c73a326c45263369446be9b74366e8", "619102cd48c73a326c45263369446be9b74366e8", "619102cd48c73a326c45263369446be9b74366e8" ]
[ "sunpy/wcs/wcs.py", "sunpy/lightcurve/sources/fermi_gbm.py", "sunpy/lightcurve/sources/logical.py" ]
[ "from __future__ import absolute_import\n\nimport numpy as np\nimport sunpy.sun as sun\n\nimport astropy.units as u\n\nrsun_meters = sun.constants.radius.si.value\n\n__all__ = ['_convert_angle_units', 'convert_pixel_to_data', 'convert_hpc_hg',\n 'convert_data_to_pixel', 'convert_hpc_hcc', 'convert_hcc_hpc',\n 'convert_hcc_hg', 'convert_hg_hcc', 'proj_tan',\n 'convert_hg_hpc', 'convert_to_coord',\n 'get_center']\n\ndef _convert_angle_units(unit='arcsec'):\n \"\"\"Determine the conversion factor between the data units and radians.\"\"\"\n if unit == 'degrees':\n return np.deg2rad(1)\n elif unit == 'arcmin':\n return np.deg2rad(1) / 60.0\n elif unit == 'arcsec':\n return np.deg2rad(1) / (60 * 60.0)\n elif unit == 'mas':\n return np.deg2rad(1) / (60 * 60 * 1000.0)\n else:\n raise ValueError(\"The units specified are either invalid or is not supported at this time.\")\n\ndef convert_pixel_to_data(size, scale, reference_pixel,\n reference_coordinate, x=None, y=None):\n \"\"\"Calculate the data coordinate for particular pixel indices.\n\n Parameters\n ----------\n size : 2d ndarray\n Number of pixels in width and height.\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n x,y : int or ndarray\n The pixel values at which data coordinates are requested. If none are given,\n returns coordinates for every pixel.\n\n Returns\n -------\n out : ndarray\n The data coordinates at pixel (x,y).\n\n Notes\n -----\n This function assumes a gnomic projection which is correct for a detector at the focus\n of an optic observing the Sun.\n\n Examples\n --------\n\n \"\"\"\n cdelt = np.array(scale)\n crpix = np.array(reference_pixel)\n crval = np.array(reference_coordinate)\n\n # first assume that coord is just [x,y]\n if (x is None) and (y is None):\n x, y = np.meshgrid(np.arange(size[0]), np.arange(size[1]))\n\n # note that crpix[] counts pixels starting at 1\n\n coordx = (x - (crpix[0] - 1)) * cdelt[0] + crval[0]\n coordy = (y - (crpix[1] - 1)) * cdelt[1] + crval[1]\n\n # Correct for Gnomic projection\n coordx, coordy = proj_tan(coordx, coordy)\n\n return coordx, coordy\n\ndef get_center(size, scale, reference_pixel, reference_coordinate):\n \"\"\"Returns the center of the image in data coordinates.\n\n Parameters\n ----------\n size : 2d ndarray\n Number of pixels in width and height.\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n\n Returns\n -------\n out : ndarray\n The data coordinates\n\n Examples\n --------\n\n \"\"\"\n return scale * (size - 1 * u.pix) / 2. 
+ reference_coordinate - (reference_pixel - 1 * u.pix) * scale\n\ndef convert_data_to_pixel(x, y, scale, reference_pixel, reference_coordinate):\n \"\"\"Calculate the pixel indices for a given data coordinate.\n\n Parameters\n ----------\n x, y : float\n Data coordinate in same units as reference coordinate\n scale : 2d ndarray\n The size of a pixel (dx,dy) in data coordinates (equivalent to WCS/CDELT)\n reference_pixel : 2d ndarray\n The reference pixel (x,y) at which the reference coordinate is given (equivalent to WCS/CRPIX)\n reference_coordinate : 2d ndarray\n The data coordinate (x, y) as measured at the reference pixel (equivalent to WCS/CRVAL)\n\n Returns\n -------\n out : ndarray\n The pixel coordinates (x,y) at that data coordinate.\n\n Examples\n --------\n\n \"\"\"\n\n # TODO: Needs to check what coordinate system the data is given in\n cdelt = np.array(scale)\n crpix = np.array(reference_pixel)\n crval = np.array(reference_coordinate)\n # De-apply any tabular projections.\n # coord = inv_proj_tan(coord)\n\n # note that crpix[] counts pixels starting at 1\n pixelx = (x - crval[0]) / cdelt[0] + (crpix[1] - 1)\n pixely = (y - crval[1]) / cdelt[1] + (crpix[1] - 1)\n\n return pixelx, pixely\n\ndef convert_hpc_hcc(x, y, dsun_meters=None, angle_units='arcsec', z=False):\n \"\"\"Converts from Helioprojective-Cartesian (HPC) coordinates into\n Heliocentric-Cartesian (HCC) coordinates. Returns all three dimensions, x, y, z in\n meters.\n\n Parameters\n ----------\n x, y : float\n Data coordinate in angle units (default is arcsec)\n dsun_meters : float\n Distance from the observer to the Sun in meters. Default is 1 AU.\n angle_units : str\n Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.\n z : Bool\n If true return the z coordinate as well.\n\n Returns\n -------\n out : ndarray\n The data coordinates (x,y,z) in heliocentric cartesian coordinates in meters.\n\n Notes\n -----\n Implements Eq. (15) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hpc_hcc(40.0, 32.0, z=True)\n (28876152.176423457, 23100922.071266972, 694524220.8157959)\n\n \"\"\"\n c = np.array([_convert_angle_units(unit=angle_units),\n _convert_angle_units(unit=angle_units)])\n\n cosx = np.cos(x * c[0])\n sinx = np.sin(x * c[0])\n cosy = np.cos(y * c[1])\n siny = np.sin(y * c[1])\n\n if dsun_meters is None:\n dsun_meters = sun.constants.au.si.value\n elif isinstance(dsun_meters, u.Quantity):\n dsun_meters = dsun_meters.si.value\n\n q = dsun_meters * cosy * cosx\n distance = q ** 2 - dsun_meters ** 2 + rsun_meters ** 2\n # distance[np.where(distance < 0)] = np.sqrt(-1)\n distance = q - np.sqrt(distance)\n\n rx = distance * cosy * sinx\n ry = distance * siny\n rz = dsun_meters - distance * cosy * cosx\n\n\n if np.all(z == True):\n return rx, ry, rz\n else:\n return rx, ry\n\ndef convert_hcc_hpc(x, y, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Convert Heliocentric-Cartesian (HCC) to angular\n Helioprojective-Cartesian (HPC) coordinates (in degrees).\n\n Parameters\n ----------\n x, y : float (meters)\n Data coordinate in meters.\n dsun_meters : float\n Distance from the observer to the Sun in meters. Default is 1 AU.\n angle_units : str\n Units of the data coordinates (e.g. arcsec, arcmin, degrees). Default is arcsec.\n\n Returns\n -------\n out : ndarray\n The data coordinates (x,y) in helioprojective cartesian coordinates in arcsec.\n\n Notes\n -----\n Implements Eq. 
(16) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hcc_hpc(28748691, 22998953)\n (39.823439773829705, 31.858751644835717)\n\n \"\"\"\n\n # Calculate the z coordinate by assuming that it is on the surface of the Sun\n z = np.sqrt(rsun_meters ** 2 - x ** 2 - y ** 2)\n\n if dsun_meters is None:\n dsun_meters = sun.constants.au.si.value\n elif isinstance(dsun_meters, u.Quantity):\n dsun_meters = dsun_meters.si.value\n\n zeta = dsun_meters - z\n distance = np.sqrt(x**2 + y**2 + zeta**2)\n hpcx = np.rad2deg(np.arctan2(x, zeta))\n hpcy = np.rad2deg(np.arcsin(y / distance))\n\n if angle_units == 'arcsec':\n hpcx = 60 * 60 * hpcx\n hpcy = 60 * 60 * hpcy\n elif angle_units == 'arcmin':\n hpcx = 60 * hpcx\n hpcy = 60 * hpcy\n\n return hpcx, hpcy\n\ndef convert_hcc_hg(x, y, z=None, b0_deg=0, l0_deg=0, radius=False):\n \"\"\"Convert from Heliocentric-Cartesian (HCC) (given in meters) to\n Stonyhurst Heliographic coordinates (HG) given in degrees, with\n radial output in meters.\n\n Parameters\n ----------\n x, y : float (meters)\n Data coordinate in meters.\n z : float (meters)\n Data coordinate in meters. If None, then the z-coordinate is assumed\n to be on the Sun.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n radius : Bool\n If true, forces the output to return a triple of (lon, lat, r). If\n false, return (lon, lat) only.\n\n Returns\n -------\n out : ndarray (degrees, meters)\n if radius is false, return the data coordinates (lon, lat). If\n radius=True, return the data coordinates (lon, lat, r). The quantities\n (lon, lat) are the heliographic coordinates in degrees. The quantity\n 'r' is the heliographic radius in meters.\n\n Notes\n -----\n Implements Eq. (12) of Thompson (2006), A&A, 449, 791.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hcc_hg(230000.0,45000000.0,\n ... z=695508000.0 + 8000000.0, radius=True)\n (0.01873188196651189, 3.6599471896203317, 704945784.41465974)\n \"\"\"\n if z is None:\n z = np.sqrt(rsun_meters**2 - x**2 - y**2)\n\n cosb = np.cos(np.deg2rad(b0_deg))\n sinb = np.sin(np.deg2rad(b0_deg))\n\n hecr = np.sqrt(x**2 + y**2 + z**2)\n hgln = np.arctan2(x, z * cosb - y * sinb) + np.deg2rad(l0_deg)\n hglt = np.arcsin((y * cosb + z * sinb) / hecr)\n\n if radius:\n return np.rad2deg(hgln), np.rad2deg(hglt), hecr\n else:\n return np.rad2deg(hgln), np.rad2deg(hglt)\n\ndef convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, occultation=False,\n z=False, r=rsun_meters):\n \"\"\"Convert from Stonyhurst Heliographic coordinates (given in degrees) to\n Heliocentric-Cartesian coordinates (given in meters).\n\n Parameters\n ----------\n hglon_deg, hglat_deg : float (degrees)\n Heliographic longitude and latitude in degrees.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n occultation : Bool\n If true set all points behind the Sun (e.g. 
not visible) to Nan.\n z : Bool\n If true return the z coordinate as well.\n r : float (meters)\n Heliographic radius\n\n Returns\n -------\n out : ndarray (meters)\n The data coordinates in Heliocentric-Cartesian coordinates.\n\n Notes\n -----\n Implements Eq. (11) of Thompson (2006), A&A, 449, 791, with the default\n assumption that the value 'r' in Eq. (11) is identical to the radius of the\n Sun.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hg_hcc(0.01873188196651189, 3.6599471896203317,\n ... r=704945784.41465974, z=True)\n (230000.0, 45000000.0, 703508000.0)\n \"\"\"\n lon = np.deg2rad(hglon_deg)\n lat = np.deg2rad(hglat_deg)\n\n cosb = np.cos(np.deg2rad(b0_deg))\n sinb = np.sin(np.deg2rad(b0_deg))\n\n lon = lon - np.deg2rad(l0_deg)\n\n cosx = np.cos(lon)\n sinx = np.sin(lon)\n cosy = np.cos(lat)\n siny = np.sin(lat)\n\n # Perform the conversion.\n x = r * cosy * sinx\n y = r * (siny * cosb - cosy * cosx * sinb)\n zz = r * (siny * sinb + cosy * cosx * cosb)\n\n if occultation:\n x[zz < 0] = np.nan\n y[zz < 0] = np.nan\n\n if np.all(z == True):\n return x, y, zz\n else:\n return x, y\n\ndef convert_hg_hpc(hglon_deg, hglat_deg, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec',\n occultation=False):\n \"\"\"Convert from Heliographic coordinates (HG) to Helioprojective-Cartesian\n (HPC).\n\n Parameters\n ----------\n hglon_deg, hglat_deg : float (degrees)\n Heliographic longitude and latitude in degrees.\n b0_deg : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0_deg : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n occultation : Bool\n If true set all points behind the Sun (e.g. not visible) to Nan.\n dsun_meters : float (meters)\n Distance between the observer and the Sun.\n angle_units : str\n\n\n Returns\n -------\n out : ndarray (arcsec)\n The data coordinates (x,y) in Helioprojective-Cartesian coordinates.\n\n Notes\n -----\n Uses equations 11 and 16 in Thompson (2006), A&A, 449, 791-803.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hg_hpc(34.0, 45.0, b0_deg=-7.064078, l0_deg=0.0)\n (380.05656560308898, 743.78281283290016)\n \"\"\"\n\n tempx, tempy = convert_hg_hcc(hglon_deg, hglat_deg, b0_deg=b0_deg, l0_deg=l0_deg, occultation=occultation)\n x, y = convert_hcc_hpc(tempx, tempy, dsun_meters=dsun_meters, angle_units=angle_units)\n return x, y\n\ndef convert_hpc_hg(x, y, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Convert from Helioprojective-Cartesian (HPC) to Heliographic coordinates\n (HG) in degrees.\n\n Parameters\n ----------\n x, y : float ()\n Data coordinate in angle units.\n b0 : float (degrees)\n Tilt of the solar North rotational axis toward the observer\n (heliographic latitude of the observer). Usually given as SOLAR_B0,\n HGLT_OBS, or CRLT_OBS. Default is 0.\n l0 : float (degrees)\n Carrington longitude of central meridian as seen from Earth. Default is 0.\n dsun_meters : float (meters)\n Distance between the observer and the Sun.\n angle_units : str\n Units used for input x and y. 
Default is arcsec.\n\n Returns\n -------\n out : ndarray (degrees)\n The data coordinates (hglongitude, hglatitude) in Heliographic coordinates.\n\n Notes\n -----\n Uses equations 15 and 12 in Thompson (2006), A&A, 449, 791-803.\n\n Examples\n --------\n >>> import sunpy.wcs\n >>> sunpy.wcs.convert_hpc_hg(382, 748, b0_deg=-7.064078, l0_deg=0.0)\n (34.504653439914669, 45.443143275518182)\n \"\"\"\n tempx, tempy = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n lon, lat = convert_hcc_hg(tempx, tempy, b0_deg=b0_deg, l0_deg=l0_deg)\n return lon, lat\n\ndef proj_tan(x, y, force=False):\n \"\"\"Applies the gnomonic (TAN) projection to intermediate relative\n coordinates. This function is not currently implemented!\"\"\"\n # if pixels are within 3 degrees of the Sun then skip the calculation unless\n # force is True. This applies to all sdo images so this function is just\n # here as a place holder for the future\n # TODO: write proj_tan function\n return x, y\n\ndef convert_to_coord(x, y, from_coord, to_coord, b0_deg=0, l0_deg=0, dsun_meters=None, angle_units='arcsec'):\n \"\"\"Apply a coordinate transform to coordinates. Right now can only do hpc\n to hcc to hg\"\"\"\n\n if (from_coord == 'hcc') and (to_coord == 'hg'):\n rx, ry = convert_hcc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg)\n elif (from_coord == 'hpc') and (to_coord == 'hg'):\n rx, ry = convert_hpc_hg(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hg') and (to_coord == 'hcc'):\n rx, ry = convert_hg_hcc(x, y, b0_deg=b0_deg, l0_deg=l0_deg)\n elif (from_coord == 'hcc') and (to_coord == 'hpc'):\n rx, ry = convert_hcc_hpc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hg') and (to_coord == 'hpc'):\n rx, ry = convert_hg_hpc(x, y, b0_deg=b0_deg, l0_deg=l0_deg, dsun_meters=dsun_meters, angle_units=angle_units)\n elif (from_coord == 'hpc') and (to_coord == 'hcc'):\n rx, ry = convert_hpc_hcc(x, y, dsun_meters=dsun_meters, angle_units=angle_units)\n\n return rx, ry\n", "\"\"\"Provides programs to process and analyse Fermi/GBM lightcurve data.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\n\nimport urlparse\nfrom collections import OrderedDict\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas\n\nfrom sunpy.io.fits import fits\nfrom sunpy.instr import fermi\nfrom sunpy.lightcurve import LightCurve\n\n\n__all__ = ['GBMSummaryLightCurve']\n\n\nclass GBMSummaryLightCurve(LightCurve):\n \"\"\"\n Fermi/GBM Summary Lightcurve.\n\n The Gamma-ray Burst Monitor (GBM) is an instrument aboard Fermi. It is meant\n to detect gamma-ray bursts but also detects solar flares. It consists of\n 12 Sodium Iodide (NaI) scintillation detectors and 2 Bismuth Germanate (BGO)\n scintillation detectors. The NaI detectors cover from a few keV to about 1 MeV\n and provide burst triggers and locations. The BGO detectors cover the energy range from\n about 150 keV to about 30 MeV.\n\n This summary lightcurve makes use of the CSPEC (daily version) data set which\n consists of the counts accumulated every 4.096 seconds in 128 energy channels\n for each of the 14 detectors. 
Note that the data is re-binned from the\n original 128 into the following 8 pre-determined energy channels.\n\n * 4-15 keV\n * 15-25 keV\n * 25-50 keV\n * 50-100 keV\n * 100-300 keV\n * 300-800 keV\n * 800-2000 keV\n\n Examples\n --------\n >>> from sunpy.lightcurve import GBMSummaryLightCurve\n >>> gbm = GBMSummaryLightCurve.create('2011-06-07') # doctest: +SKIP\n >>> gbm.peek() # doctest: +SKIP\n\n References\n ----------\n * `Fermi Mission Homepage <http://fermi.gsfc.nasa.gov>`_\n * `Fermi GBM Homepage <http://gammaray.nsstc.nasa.gov/gbm/>`_\n * `Fermi Science Support Center <http://fermi.gsfc.nasa.gov/ssc/>`_\n * `Fermi Data Product <http://fermi.gsfc.nasa.gov/ssc/data/access/>`_\n * `GBM Instrument Papers <http://gammaray.msfc.nasa.gov/gbm/publications/>`_\n \"\"\"\n\n def peek(self, **kwargs):\n \"\"\"Plots the GBM lightcurve. An example can be seen below.\n\n .. plot::\n\n from sunpy.lightcurve import GBMSummaryLightCurve\n from sunpy.data.sample import GBM_LIGHTCURVE\n gbm = GBMSummaryLightCurve.create(GBM_LIGHTCURVE)\n gbm.peek()\n\n Parameters\n ----------\n **kwargs : dict\n Any additional plot arguments that should be used\n when plotting.\n \"\"\"\n figure=plt.figure()\n axes = plt.gca()\n data_lab=self.data.columns.values\n\n for d in data_lab:\n axes.plot(self.data.index,self.data[d],label=d)\n\n axes.set_yscale(\"log\")\n axes.set_title('Fermi GBM Summary data ' + self.meta['DETNAM'])\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime('%Y-%m-%d %H:%M:%S UT'))\n axes.set_ylabel('Counts/s/keV')\n axes.legend()\n figure.autofmt_xdate()\n\n plt.show()\n\n @classmethod\n def _get_url_for_date(cls,date, **kwargs):\n \"\"\"Returns the url for Fermi/GBM data for the given date.\"\"\"\n baseurl='http://heasarc.gsfc.nasa.gov/FTP/fermi/data/gbm/daily/'\n #date is a datetime object\n if 'detector' in kwargs:\n det=_parse_detector(kwargs['detector'])\n final_url=urlparse.urljoin(baseurl, date.strftime('%Y/%m/%d/' + 'current/' +\n 'glg_cspec_'+det+'_%y%m%d_v00.pha'))\n else:\n # if user doesn't specify a detector, find the one pointing closest to the Sun.'\n # OR: maybe user should have to specify detector or fail.\n det = cls._get_closest_detector_for_date(date)\n print('No detector specified. Detector with smallest mean angle '\n 'to Sun is ' + str(det))\n print('Using Detector ' + str(det))\n print('For Fermi detector pointing information, use tools in '\n 'sunpy/instr/fermi')\n final_url = urlparse.urljoin(\n baseurl, date.strftime('%Y/%m/%d/' + 'current/' +\n 'glg_cspec_' + det + '_%y%m%d_v00.pha'))\n\n return final_url\n\n @classmethod\n def _get_closest_detector_for_date(cls, date, **kwargs):\n \"\"\"Returns the GBM detector with the smallest mean angle to the Sun\n for the given date\"\"\"\n pointing_file = fermi.download_weekly_pointing_file(date)\n det_angles = fermi.get_detector_sun_angles_for_date(date,pointing_file)\n det_angle_means=[]\n for n in det_angles.keys():\n if not n == 'time':\n det_angle_values=[]\n for angle in det_angles[n]:\n det_angle_values.append(angle.value)\n\n det_angle_means.append(np.mean(det_angle_values))\n\n best_det = 'n' +str(np.argmin(det_angle_means))\n return best_det\n\n\n @staticmethod\n def _parse_fits(filepath):\n \"\"\"Parses GBM CSPEC data files to create summary lightcurves.\"\"\"\n hdulist=fits.open(filepath)\n header=OrderedDict(hdulist[0].header)\n #these GBM files have three FITS extensions.\n #extn1 - this gives the energy range for each of the 128 energy bins\n #extn2 - this contains the data, e.g. 
counts, exposure time, time of observation\n #extn3 - eclipse times?\n energy_bins=hdulist[1].data\n count_data=hdulist[2].data\n misc=hdulist[3].data\n\n\n #rebin the 128 energy channels into some summary ranges\n #4-15 keV, 15 - 25 keV, 25-50 keV, 50-100 keV, 100-300 keV, 300-800 keV, 800 - 2000 keV\n #put the data in the units of counts/s/keV\n summary_counts=_bin_data_for_summary(energy_bins,count_data)\n\n gbm_times=[]\n #get the time information in datetime format with the correct MET adjustment\n for t in count_data['time']:\n gbm_times.append(fermi.met_to_utc(t))\n column_labels=['4-15 keV','15-25 keV','25-50 keV','50-100 keV','100-300 keV',\n '300-800 keV','800-2000 keV']\n return header, pandas.DataFrame(summary_counts, columns=column_labels, index=gbm_times)\n\n\ndef _bin_data_for_summary(energy_bins,count_data):\n \"\"\"Missing doc string\"\"\"\n #find the indices corresponding to some standard summary energy bins\n ebands=[4,15,25,50,100,300,800,2000]\n indices=[]\n for e in ebands:\n indices.append(np.searchsorted(energy_bins['e_max'],e))\n\n #rebin the 128 energy channels into some summary ranges\n #4-15 keV, 15 - 25 keV, 25-50 keV, 50-100 keV, 100-300 keV, 300-800 keV, 800 - 2000 keV\n #put the data in the units of counts/s/keV\n summary_counts=[]\n for i in range(0,len(count_data['counts'])):\n counts_in_bands=[]\n for j in range(1,len(ebands)):\n counts_in_bands.append(np.sum(count_data['counts'][i][indices[j-1]:indices[j]]) /\n (count_data['exposure'][i] * (energy_bins['e_max'][indices[j]] -\n energy_bins['e_min'][indices[j-1]])))\n\n summary_counts.append(counts_in_bands)\n\n return summary_counts\n\n\ndef _parse_detector(detector):\n \"\"\"Missing Doc String\"\"\"\n oklist=['n0','n1','n2','n3','n4','n5','n6','n7','n8','n9','n10','n11']\n altlist = [str(i) for i in range(12)]\n if detector in oklist:\n return detector\n elif detector in altlist:\n return 'n'+detector\n else:\n raise ValueError('Detector string could not be interpreted')\n", "# -*- coding: utf-8 -*-\n\"\"\"Provides a logical lightcurve. Only two values are allowed - True or False.\nUseful for keeping track of when an event occurred, usually labeled as\n\"True\".\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\n\nfrom sunpy.lightcurve import LightCurve\nfrom scipy.ndimage import label\nfrom sunpy.time import TimeRange\n\n__all__ = ['LogicalLightCurve']\n\n#\n#\n# Logical Lightcurve\n# TODO\n# Change the init to accept a list of TimeRange objects. 
Durations between the\n# start and end time of each TimeRange object are labeled 'True'.\nclass LogicalLightCurve(LightCurve):\n \"\"\"\n Logical LightCurve with only True and False values.\n\n Examples\n --------\n >>> import sunpy.lightcurve as lightcurve\n >>> import datetime\n >>> base = datetime.datetime.today()\n >>> dates = [base - datetime.timedelta(minutes=x) for x in range(0, 24 * 60)]\n >>> z = [True for x in range(0, 24 * 60)]\n >>> light_curve = lightcurve.LogicalLightCurve.create({\"param1\": z}, index=dates)\n \"\"\"\n\n def complement(self):\n \"\"\"Return the logical complement of the original lightcurve.\"\"\"\n return LogicalLightCurve.create(np.invert(self.data),\n header = self.header)\n\n def times(self):\n \"\"\"Returns a list of time ranges where values are True.\n\n Returns\n -------\n outtr : `~sunpy.time.TimeRange` array\n An array of time ranges\n \"\"\"\n\n labeling = label(self.data)\n timeranges = []\n for i in xrange(1, labeling[1]+1):\n eventindices = (labeling[0] == i).nonzero()\n timeranges.append( TimeRange(self.data.index[ eventindices[0][0] ],\n self.data.index[ eventindices[0][-1] ]) )\n return timeranges\n" ]
[ [ "numpy.sqrt", "numpy.arcsin", "numpy.arange", "numpy.cos", "numpy.rad2deg", "numpy.sin", "numpy.all", "numpy.arctan2", "numpy.deg2rad", "numpy.array" ], [ "matplotlib.pyplot.gca", "pandas.DataFrame", "numpy.argmin", "numpy.mean", "numpy.searchsorted", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.figure" ], [ "scipy.ndimage.label", "numpy.invert" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Julio-Felix/socket-python
[ "93b6ce44dd88c2af49e7702bb16c69bc4f55240d" ]
[ "transmissor.py" ]
[ "import socket\r\nimport numpy as np # pip install numpy\r\n\r\n\r\nsocketUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\ntransmissor = (\"127.0.0.1\", 2020)\r\nreceptor = (\"127.0.0.1\", 3030)\r\nsocketUDP.bind(transmissor)\r\nbuff_size = 10000\r\n\r\nnext_sequence_number = 0\r\n\r\n\r\ndef calculate_checksum(data):\r\n data_sum = np.uint16(0)\r\n for element in data:\r\n data_sum += element\r\n return np.invert(data_sum)\r\n\r\n\r\ndef verify_checksum(data):\r\n data_sum = np.uint16(0)\r\n for element in data:\r\n data_sum += element\r\n return data_sum == 0xFFFF\r\n\r\n\r\ndef udt_send(packet):\r\n socketUDP.sendto(packet.tobytes(), receptor)\r\n\r\n\r\ndef rdt_rcv():\r\n while True:\r\n message, source = socketUDP.recvfrom(buff_size)\r\n if source == receptor:\r\n return np.frombuffer(message, dtype=np.uint16)\r\n\r\n\r\ndef rdt_send(data):\r\n global next_sequence_number\r\n\r\n sndpkt = np.array([], np.uint16)\r\n sndpkt = np.append(sndpkt, np.uint16(next_sequence_number))\r\n sndpkt = np.append(sndpkt, np.uint16(0)) # checksum\r\n sndpkt = np.concatenate((sndpkt, data))\r\n\r\n sndpkt[1] = calculate_checksum(sndpkt)\r\n udt_send(sndpkt)\r\n\r\n while True:\r\n rcvpkt = rdt_rcv()\r\n is_corrupt = not verify_checksum(rcvpkt)\r\n is_ack = rcvpkt[2] == True\r\n is_nack = rcvpkt[2] == False\r\n print(\"Dados recebidos \", rcvpkt)\r\n print(\"Está corrompido? \", is_corrupt)\r\n print(\"Está Nack? \", is_nack)\r\n print(\"Está Ack? \", is_ack)\r\n print(\"Seq Num? \", next_sequence_number)\r\n \r\n if is_corrupt or is_nack:\r\n udt_send(sndpkt)\r\n if is_ack and not is_corrupt:\r\n break\r\n\r\n if next_sequence_number == 0:\r\n next_sequence_number = 1\r\n else:\r\n next_sequence_number = 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n i = 1\r\n while i <= 3:\r\n dados = np.random.randint(5, size=10, dtype=np.uint16)\r\n print(f'Dados a serem enviados {dados}')\r\n rdt_send(dados)\r\n i+=1\r\n" ]
[ [ "numpy.invert", "numpy.concatenate", "numpy.frombuffer", "numpy.uint16", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UKPLab/linspector
[ "46a7cca6ad34dc673feb47c4d452f1248d5e635b" ]
[ "intrinsic/evaluation/classifiers/embeddings/sentence_embedding.py" ]
[ "import codecs\nfrom collections import defaultdict\n\nimport torch\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules.token_embedders.token_embedder import TokenEmbedder\n\n\[email protected](\"sentence_embedding\")\nclass SentenceEmbedding(TokenEmbedder):\n \"\"\"\n Embedder for contextual embeddings. which reads a file of the format 'sentence TAB index TAB vector'.\n \"\"\"\n\n def read_file(self, path):\n self.embs = defaultdict(lambda: defaultdict())\n with codecs.open(path, encoding='utf-8') as f:\n for line in f:\n # Read sentence, index and word vector\n sp = line.split(\"\\t\")\n vector_str = sp[2]\n vector = []\n for n in vector_str.split(\" \"):\n try:\n vector.append(float(n))\n except ValueError:\n break\n index = int(sp[1])\n sentence = sp[0]\n\n # Save vector in a dict\n self.embs[sentence][index] = vector\n\n def get_output_dim(self) -> int:\n return self.output_dim\n\n def forward(self, # pylint: disable=arguments-differ\n inputs: torch.Tensor,\n word_inputs: torch.Tensor = None) -> torch.Tensor:\n \"\"\"\n\n :param inputs: list of sentences (sentence = list of token indices)\n :param word_inputs: not used\n :return: tensor which contains a list of embedded sentences (every sentence is a list of word vectors)\n \"\"\"\n if self.output_dim is None or self.output_dim == 0:\n raise NotImplementedError\n\n # Get tokens from token indices\n max_sentences_length = len(inputs[0].tolist())\n sentences = []\n for i in inputs:\n token_list = []\n for j in i:\n if j.item() != 0:\n token = self.vocab.get_token_from_index(j.item())\n token_list += [token]\n sentences += [token_list]\n\n sentence_emb = []\n\n # Read the embeddings from the dict\n for sentence_list in sentences:\n sentence = \" \".join(sentence_list[0:-1])\n index = int(sentence_list[-1])\n\n try:\n word_embedding = self.embs[sentence][index]\n except KeyError:\n print(\"KEY ERROR \" + sentence + \" INDEX \" + str(index))\n word_embedding = [0] * self.output_dim\n\n vector_list = []\n\n # Add zeros to the returning tensor for all tokens without vectors. AllenNLP wants an embedding for every token\n if index != 0:\n for i in range(0, index):\n vector_list += [[0] * self.output_dim]\n vector_list += [word_embedding]\n\n for i in range(0, max_sentences_length - index - 1):\n vector_list += [[0] * self.output_dim]\n\n sentence_emb += [vector_list]\n\n # Create tensor\n device = inputs.device\n # print(sentence_emb)\n tensor = torch.tensor(sentence_emb, device=device)\n\n return tensor\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> 'SentenceEmbedding':\n cls.vocab = vocab\n embedding_dim = params[\"embedding_dim\"]\n pretrained_file = params[\"pretrained_vector_file\"]\n return cls(pretrained_file, embedding_dim)\n\n def __init__(self, file, vector_size) -> None:\n super().__init__()\n self.embs = {}\n self.output_dim = vector_size\n self.read_file(file)\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
spatchcock/models
[ "b97eef75d080c903cc6280b1d5955033d14bcf84", "b97eef75d080c903cc6280b1d5955033d14bcf84" ]
[ "normal.py", "foraminifera/foraminiferal_test_accumulation_time_evolution.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 24 22:11:12 2014\n\n@author: spatchcock\n\"\"\"\n\nimport math\nimport numpy\nimport matplotlib.pyplot as plt\n\n# Plot the normal distribution function as well as its first and second derivatives\n# \n# Use the numpy.vectorize function to handle array manupulation\n\n# http://statistics.about.com/od/Mathstat/a/Inflection-Points-Of-The-Probability-Density-Function-Of-A-Normal-Distribution.htm\n\ndef norm(x, mean, sd):\n var = sd**2\n pi = 3.1415926\n denom = (2*pi*var)**.5\n num = math.exp(-(x-mean)**2/(2*var))\n return num/denom\n \ndef norm_first_deriv(x, mean, std):\n return -(x-mean)*norm(x, mean, std)/std**2\n\n \ndef norm_second_deriv(x, mean, std):\n return -norm(x, mean, std)/std**2 + (x-mean)**2*norm(x, mean, std)/std**4\n \n \nv_norm = numpy.vectorize(norm)\nv_norm_first_deriv = numpy.vectorize(norm_first_deriv)\nv_norm_second_deriv = numpy.vectorize(norm_second_deriv)\n\n\nmean = 0\nstd = 1.9\na = numpy.arange(-5,5,0.1)\nb = v_norm(a, mean, std)\nc = v_norm_first_deriv(a, mean, std)\nd = v_norm_second_deriv(a, mean, std)\n\nfig = plt.figure()\n\nnorm = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nnorm.grid()\n\nline, = norm.plot([], [], lw=3, color='r')\nline.set_data(a,b)\n\nfirst = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = first.plot([], [], lw=3, color='b')\nline.set_data(a,c)\n\nsecond = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = second.plot([], [], lw=3, color='g')\nline.set_data(a,d)\n\n\nstddev = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = stddev.plot([], [], lw=3, color='y')\nline.set_data([-std, -std],[-1,1])\n\n\nconstant = fig.add_subplot(111, xlim=(-6, 6), ylim=(-1, 1))\nline, = constant.plot([], [], lw=3, color='b')\nline.set_data([-6, 6],[0.1,0.1])", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 15 22:52:11 2014\n\n@author: spatchcock\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# Advection - diffusion - Decay - production\n#\n# Differential equation\n#\n# dC/dt = D(d^2C/dx^2) - w(dC/dx) - uC + Ra(x)\n# \n# Difference equation\n#\n# (C{x,t} = C{x,t-1} + dt * [D*(C{x+1,t-1} - 2C{x, t-1} + C{x-1,t-1})/dx^2 - w(C{x+1,t-1} - C{x-1,t-1})/2dx - u*C{x,t-1} + Ra{x}]\n#\n# Initial conditions\n#\n# C(x,0) = R{x}\n#\n\n# %% DEFINE NUMERICAL SCHEME\n\nmax_depth = 30.0 # maximum depth of domain of interest\nN_x = 101 # number of nodes across 1D domain\ndx = max_depth/N_x # cell size (regular/uniform)\nsigma = 0.1 # CFL sigma value. Scales the timestep according to the depth step. \n # Ensures timestep is sufficiently smaller that distance step to provide\n # stability (although this also depends on the sedimentation rate)\ndt = sigma*dx # time step\n\n\n# %% SET UP PLACEHOLDER ARRAYS FOR PARAMETERS AND VARIABLES\n\n# Each parameter and variable will be represented by a value at each node across\n# the 1D domain.\n\n# Dependent and independent variables (C, x)\nx = np.linspace(0.0,max_depth,N_x) # depth\nC = np.zeros(N_x) # concentration\n\n# Parameters - Each parameter can, in principle vary with depth, x. 
Initialise arrays \n# for each, although we can set a constant value for all x if required.\nRa = np.zeros(N_x) # production (product of standing crop, a, and reproduction rate, R)\nD = np.zeros(N_x) # diffusion (mixing rate)\nu = np.zeros(N_x) # taphonomic decay rate\nw = np.zeros(N_x) # advection speed (sedimentation rate)\nCu = np.zeros(N_x) # placeholder for memoizing previous timestep concentrations\n\n\n# %% DEFINE DEPTH-DEPENDENT FUNCTION FOR TAPHONOMIC DECAY\n\n# It is likely that taphonomic decay decreases with depth so most circumstances probably\n# require a function for the taphonomic decay rate that decrease through the domain. In\n# some circumstances, considering decay rates to be constant across some or all of the domain\n# might be appropriate. Three choices are presented below. Comment/uncomment as required\n# or set u[] to another appropriate function of depth.\n\n# Constant function\n\n# This simply sets the same decay rate for all values of x.\n\n# u[:] = 0.005\n\n\n# Decreasing function\n\n# This drescribes taphonic decay rate as decreasing exponential with depth frmom\n# some maximum value at the surface. This is the simplest decreasing function that\n# asymptotes with depth.\n\nu_0 = 0.005 # value at surface, i.e. x = 0\nu_attenuation = 0.5 # rate at which decay rate decreases with depth\nu[0:] = u_0 * np.exp(-u_attenuation*x[0:]) # exponentially decreasing taphonomic decay rate\n\n\n# Step function\n\n# This sets the decay rate as a constant across some limited upper interval of the\n# sediment. This resembles the commonly invoked concept of the Taphonomically Active Zone\n# (the \"TAZ\"). Of course, any other more complicated step function could be defined in a \n# similar way.\n\n# max_depth_decay = 10.0 # Maximum depth of decay\n# max_x_decay = int(max_depth_decay/max_depth*N_x) # Index of maximum decay depth\n# u[0:max_x_decay] = 0.005 # Step function\n\n\n# %% DEFINE DEPTH DEPENDENT FUNCTION FOR SEDIMENTATION RATE\n\n# In principle, sedimentation rate may have varied during the time in which a given\n# sediment interval has accumulated. 
For now, we'll just assume that it is constant.\n\n# Constant function\nw[:] = 0.6 \n\n\n# %% DEFINE DEPTH DEPENDENT FUNCTION FOR MIXING/BIOTURBATION\n\n# constant in upper mixed zone, zero below\nmax_depth_mixing = 15.0\nmax_x_mixing = int(max_depth_mixing/max_depth*N_x)\nD[0:max_x_mixing] = 0.2399 \n\n\n# %% DEFINE DEPTH-DEPENDENT FUNCTION FOR TEST PRODUCTION\n\nRa_0 = 30.0\nRa_attenuation = 0.05\nRa_peak_depth = 2\nRa_gamma = 4\nmax_x_Ra = int(Ra_peak_depth/max_depth*N_x)\n\n#Ra[0:max_x_Ra] = Ra_0 # constant over interval\n#Ra[0:] = Ra_0 * np.exp(-Ra_attenuation*x[0:]) # exponential decrease\nRa[0:] = Ra_0 * np.exp(-Ra_attenuation*(x[0:]-Ra_peak_depth)**Ra_gamma) # subsurface peak, normally distributed\n\n\n# %% IMPLEMENT DISCRETIZED EQUATION AS INVOKABLE TIMESTEP FUNCTION\n\ndef step():\n # memoize last timestep\n Cu[:] = C[:]\n \n # boundary, surficial layer (x=0)\n C[0] = dt * Ra[0]\n \n # Interior points \n C[1:-1] = Cu[1:-1] + dt * (D[1:-1]*(Cu[2:] - 2.0*Cu[1:-1] + Cu[0:-2])/dx**2.0 - w[1:-1]*(Cu[2:] - Cu[0:-2])/2.0*dx - u[1:-1]*Cu[1:-1] + Ra[1:-1])\n \n # boundary, bottomost layer (x=max_depth)\n C[-1] = C[-2] \n\n\n# %% SET UP PLOTS\n\nfig = plt.figure()\n\nRa_plot = fig.add_subplot(151, ylim=(max_depth, 0), xlim=(0, max(Ra)*1.5))\nRa_line, = Ra_plot.plot([], [], lw=3)\nRa_plot.grid()\nRa_plot.axes.get_xaxis().set_ticks([0.0, max(Ra)])\nRa_plot.set_xlabel('Ra')\n\nD_plot = fig.add_subplot(152, ylim=(max_depth, 0), xlim=(0, max(D)*1.5))\nD_line, = D_plot.plot([], [], lw=3)\nD_plot.grid()\nD_plot.axes.get_yaxis().set_ticklabels([])\nD_plot.axes.get_xaxis().set_ticks([0.0, max(D)])\nD_plot.set_xlabel('D')\n\nw_plot = fig.add_subplot(153, ylim=(max_depth, 0), xlim=(0, max(w)*1.5))\nw_line, = w_plot.plot([], [], lw=3)\nw_plot.grid()\nw_plot.axes.get_yaxis().set_ticklabels([])\nw_plot.axes.get_xaxis().set_ticks([0.0, max(w)])\nw_plot.set_xlabel('w')\n\nu_plot = fig.add_subplot(154, ylim=(max_depth, 0), xlim=(0, max(u)*1.5))\nu_line, = u_plot.plot([], [], lw=3)\nu_plot.grid()\nu_plot.axes.get_yaxis().set_ticklabels([])\nu_plot.axes.get_xaxis().set_ticks([0.0, max(u)])\nu_plot.set_xlabel('u')\n\nC_plot = fig.add_subplot(155, ylim=(max_depth, 0), xlim=(0, 1000))\nC_line, = C_plot.plot([], [], lw=3)\nstep_text = C_plot.text(0.2, 0.02, '', transform=C_plot.transAxes)\nC_plot.grid()\nC_plot.axes.get_yaxis().set_ticklabels([])\nC_plot.set_xlabel('C')\n\nplt.subplots_adjust(wspace=0.1)\n\n# %% SET ANIMATION\n\n# Clear frame on each interation\ndef init():\n # Reset each line\n Ra_line.set_data([], [])\n D_line.set_data([], [])\n w_line.set_data([], [])\n u_line.set_data([], [])\n C_line.set_data([], [])\n \n return Ra_line,D_line,w_line,u_line,C_line, \n\n\n# Invoke model timestep and replot data on each iteration\ndef animate(i):\n # Iterate model\n step()\n \n # Update each line\n Ra_line.set_data(Ra, x)\n D_line.set_data(D, x)\n w_line.set_data(w, x)\n u_line.set_data(u, x)\n C_line.set_data(C, x)\n\n step_text.set_text('iter: %.1f' % i)\n\n return Ra_line,D_line,w_line,u_line,C_line,step_text\n\n\n# %% RUN ANIMATION\nani = animation.FuncAnimation(fig, animate, frames=10000000, interval=1, blit=True, init_func=init)\n\n\n\n " ]
[ [ "numpy.arange", "numpy.vectorize", "matplotlib.pyplot.figure" ], [ "numpy.linspace", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.subplots_adjust", "numpy.exp", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
magood/MarkeplacePredict
[ "f74ea035d6b861b9594ec2b91b38adad18e1bb00" ]
[ "eda.py" ]
[ "# Exploratory data analysis\n# py 3, using \"mplace\" conda env.\n\nimport numpy as np\nimport pandas as pd\nimport pickle, itertools, os\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nfrom yahoofinancials import YahooFinancials as YF\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nimport utils\n\nmusic_fn = 'music.csv'\nsp_ticker = '^GSPC'\ndow_ticker = '^DJI'\nnasdaq_ticker = '^IXIC'\nall_tickers = [sp_ticker, dow_ticker, nasdaq_ticker]\nnice_markers = ('o', 'v', '^', '<', '>', '1', 's', 'P', '*', '+', 'X', 'D', '_', '|')\nrf_outpath = os.path.join('.', 'output', 'RF')\nrf_feature_imp_fn = 'rf_feature_imp.csv'\n\n\ndef download(ticker, start_date='2018-02-15', end_date='2015-08-19'):\n yf = YF(ticker)\n # this worked but looks like the dates are reversed?\n # hst = yf.get_historical_price_data('2015-08-19', '2018-02-15', 'daily')\n hst = yf.get_historical_price_data(start_date, end_date, 'daily')\n pricelist = hst[ticker]['prices']\n # example: [{'date': 1439991000, 'high': 2096.169921875, 'low': 2070.530029296875, 'open': 2095.68994140625, 'close': 2079.610107421875, 'volume': 3512920000, 'adjclose': 2079.610107421875, 'formatted_date': '2015-08-19'}]\n df = pd.DataFrame(pricelist)\n df['date'] = pd.to_datetime(df['formatted_date'])\n df.set_index('date', inplace=True)\n df.drop('formatted_date', axis=1, inplace=True)\n return df\n\n\ndef get_ticker_data(ticker, start_date, end_date):\n try:\n df = pd.read_pickle(f\"./{ticker}.pkl\")\n return df\n except FileNotFoundError:\n df = download(ticker, start_date, end_date)\n df.to_pickle(f\"./{ticker}.pkl\")\n return df\n\n\ndef augment_financials(df):\n df['swing'] = df['high'] - df['low']\n df['return'] = 0.\n df['return'] = (df['adjclose'] / df['adjclose'].shift(1)) - 1\n\n\ndef get_index_music(ticker):\n sp = get_ticker_data(ticker)\n augment_financials(sp)\n df = pd.DataFrame(index=sp.index)\n mdf = pd.read_csv(music_fn)\n mdf['date'] = pd.to_datetime(mdf['Date'])\n mdf.set_index('date', inplace=True)\n mdf.drop('Date', axis=1, inplace=True)\n mdf = mdf[mdf['Music'].isnull() == False]\n df = sp.join(mdf, how='inner')\n return df\n\n\ndef get_music_df():\n mdf = pd.read_csv(music_fn)\n mdf['date'] = pd.to_datetime(mdf['Date'])\n mdf.set_index('date', inplace=True)\n mdf.drop('Date', axis=1, inplace=True)\n mdf = mdf[mdf['Music'].isnull() == False]\n return mdf\n\n\ndef build_index_df(tickers, mindate, maxdate):\n df = None\n for ticker in tickers:\n idx_df = get_ticker_data(ticker, mindate, maxdate)\n augment_financials(idx_df)\n # rename columns with index postfix\n idx_df = idx_df.add_suffix('_' + ticker)\n if df is None:\n df = pd.DataFrame(index=idx_df.index)\n df = idx_df.join(df, how='inner')\n # Now possibly do any inter-index calculations.\n # What is the difference in return across indices from highest to lowest?\n df['max_return'] = df[['return_^GSPC', 'return_^IXIC', 'return_^DJI']].max(axis=1)\n df['min_return'] = df[['return_^GSPC', 'return_^IXIC', 'return_^DJI']].min(axis=1)\n df['return_diff'] = df['max_return'] - df['min_return']\n df = df.dropna()\n return df\n\n\ndef get_all_df(tickers):\n mdf = get_music_df()\n mindate = mdf.index.min().strftime('%Y-%m-%d')\n maxdate = mdf.index.max().strftime('%Y-%m-%d')\n df = build_index_df(tickers, mindate, maxdate)\n df = df.join(mdf, how='inner')\n return df\n\n\ndef scatter_markers(df, xcol, ycol):\n # ensure we have markers for each music selection, looping 
if necessary.\n music = list(df.Music.unique())\n # ensure we have markers for each music selection, looping if necessary.\n infmarkers = itertools.cycle(nice_markers)\n markers = list(itertools.islice(infmarkers, len(music)))\n for tune, symbol in zip(music, markers):\n df_tune = df[df['Music'] == tune]\n x = df_tune[xcol]\n y = df_tune[ycol]\n plt.scatter(x, y, marker=symbol, label=tune)\n plt.legend()\n plt.xlabel(xcol)\n plt.ylabel(ycol)\n plt.title(\"Marketplace Music Selection\")\n\n\ndef rf_feature_imp(X, y, columns):\n np.random.seed(0)\n X = StandardScaler().fit_transform(X)\n dims = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 35, 40, 45] # grid of dimensions to select\n dims = [d for d in dims if d < X.shape[1]]\n # Always include the actual number of features, too, as a baseline\n if X.shape[1] not in dims:\n dims += [X.shape[1]]\n\n rfc = RandomForestClassifier(n_estimators=100, class_weight='balanced', random_state=0, n_jobs=8)\n fs = rfc.fit(X, y).feature_importances_\n fi = dict(zip(columns, list(fs)))\n ordered_fi = [(k, fi[k]) for k in sorted(fi, key=fi.get, reverse=True)]\n ordered_fi_df = pd.DataFrame(ordered_fi)\n ordered_fi_df.columns = ['feature','importance']\n ordered_fi_df.to_csv(os.path.join(rf_outpath, rf_feature_imp_fn))\n return rfc\n\n\ndef plot_feature_imp(columns):\n plt.close()\n df = pd.read_csv(os.path.join(rf_outpath, rf_feature_imp_fn))\n ax = df.plot.bar(x='feature', y='importance', rot=0, figsize=(40, 10))\n plt.ylabel('Importance')\n plt.title('Feature Importances by Randomized Forest')\n plt.savefig(os.path.join(rf_outpath, 'full_feature_imp.png'), bbox_inches='tight')\n\n\ndef plot_correlations(df):\n plt.close()\n f = plt.figure(figsize=(25, 25))\n df2 = pd.get_dummies(df)\n sns.heatmap(df2.corr(), cmap=sns.diverging_palette(220, 10, as_cmap=True), center=0, linewidths=.5, square=True)\n plt.yticks(rotation=0)\n plt.xticks(rotation=90)\n plt.title('Correlation Matrix', fontsize=16)\n plt.savefig(os.path.join(rf_outpath, 'corr_matrix.png'), bbox_inches='tight')\n return f\n\n\ndef drop_useless_dim_prefixes(df, remove_field_prefixes):\n \"\"\"\n Drops dimensions/columns from the df that do not appear to be useful.\n Provide a list of prefixes for useless columns (remove_field_prefixes).\n \"\"\"\n droplist = []\n for t in all_tickers:\n for pfx in remove_field_prefixes:\n droplist.append(f'{pfx}_{t}')\n df.drop(droplist, axis=1, inplace=True)\n return df\n\n\nif __name__ == '__main__':\n df = get_all_df(all_tickers)\n # should get rid of a bunch of stuff we don't think will be predictive before doing a bunch of plots because it's confusing.\n target_field = 'Music'\n columns = df.drop(target_field, 1).columns\n X = df.drop(target_field, 1).copy().values\n y_categorical = df[target_field].copy().values\n le = LabelEncoder()\n le.fit(y_categorical)\n y = le.transform(y_categorical)\n # Scikit learn really wants floats or the scaler will complain\n X = X.astype(np.float64)\n rfc = rf_feature_imp(X, y, columns)\n plot_feature_imp(columns)\n\n # Look at some correlations here...\n cf = plot_correlations(df)\n \n # Items that are correlated to the music are:\n # Volume, return, swing, return diff, max return, min return.\n # We can see that there are many highly-correlated features, so we can remove many of those.\n # High, low, open, close, adjclose all worthless.\n remove_field_prefixes = ['adjclose', 'close', 'high', 'low', 'open']\n df = drop_useless_dim_prefixes(df, remove_field_prefixes)\n\n df.to_csv(utils.ix.ds_csv_file_name)\n\n # 
print(df.describe())\n # scatter_markers(df, 'return_^GSPC', 'swing_^GSPC')\n # df.groupby('Music').hist()\n # plt.show()\n # some other nice data vis examples: https://machinelearningmastery.com/quick-and-dirty-data-analysis-with-pandas/\n # Also, conda install -c conda-forge pandas-profiling, then import pandas_profiling, df.profile_report()" ]
[ [ "matplotlib.pyplot.legend", "pandas.to_datetime", "pandas.read_csv", "pandas.read_pickle", "matplotlib.pyplot.title", "numpy.random.seed", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.figure", "matplotlib.pyplot.scatter", "pandas.DataFrame", "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "pandas.get_dummies", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ongchinkiat/CarND-Capstone
[ "abd768450825a03975f2b7b87f1379285357347b" ]
[ "ros/src/visual/visual.py" ]
[ "#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nimport matplotlib\nmatplotlib.use('Qt5Agg')\n\nfrom matplotlib import pyplot as plt\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import TwistStamped\nfrom sensor_msgs.msg import Image\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom scipy.spatial import KDTree\nfrom cv_bridge import CvBridge\nimport cv2\n\nimport math\n\n'''\nThis node is for visualizing data\n\npip install -U matplotlib\n\napt-get install x11-apps\napt-get install gnome-calculator\napt-get install qtbase5-dev\napt-get install python-tk\napt-get install python-gtk2-dev\nexport DISPLAY=:0\n'''\n\nLOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number\n\n\nclass Visual(object):\n def __init__(self):\n rospy.init_node('visual')\n\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.fig = plt.figure(figsize=(15, 15))\n plt.ion()\n plt.show()\n self.table1 = self.fig.add_subplot(2,2,1)\n # x1,x2,y1,y2\n self.table1.axis([0, 2500, 1000, 3100])\n self.table2 = self.fig.add_subplot(2,2,2)\n self.table2.axis([0, 2500, 1000, 3100])\n self.table3 = self.fig.add_subplot(2,2,3)\n self.table4 = self.fig.add_subplot(2,2,4)\n self.waypoints_x = []\n self.waypoints_y = []\n self.pose_x = []\n self.pose_y = []\n self.recent_pose_x = []\n self.recent_pose_y = []\n self.vel_x = []\n self.vel_y = []\n self.lights_x = []\n self.lights_y = []\n self.final_waypoints_x = []\n self.final_waypoints_y = []\n\n self.has_image = False\n self.cv_image = None\n self.bridge = CvBridge()\n self.lights = None\n\t\t\n # prevent refreshing graph while updating pose array\n self.updatelock = 0\n self.start_seconds = rospy.get_time()\n self.last_image_time = rospy.get_time()\n self.image_update = 0\n self.pose_update = 0\n self.vel_update = 0\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)\n sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n\n rospy.Subscriber('/final_waypoints', Lane, self.final_waypoints_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n\n # TODO: Add other member variables you need below\n self.loop()\n\n def loop(self):\n rate = rospy.Rate(2) # can go as low as 30Hz\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints and self.table1:\n #table1.clear()\n self.table1.plot(self.waypoints_x, self.waypoints_y)\n if self.updatelock == 0:\n self.table1.plot(self.pose_x, self.pose_y)\n if self.updatelock == 0:\n self.table1.plot(self.lights_x, self.lights_y)\n if self.updatelock == 0:\n self.table4.plot(self.vel_x, self.vel_y)\n if (self.updatelock == 0) and self.has_image and (self.image_update == 1):\n self.table3.cla()\n self.table3.imshow(self.cv_image)\n self.image_update = 0\n\n if self.updatelock == 0:\n self.table2.cla()\n if self.updatelock == 0:\n self.table2.plot(self.recent_pose_x, self.recent_pose_y)\n if self.updatelock == 0:\n self.table2.plot(self.final_waypoints_x, self.final_waypoints_y)\n\n plt.draw()\n plt.pause(0.001)\n #print(\"loop\")\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 
1)[1]\n\n # check if closest is ahead or behind vehilcle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self):\n if self.waypoint_tree:\n closest_wp_idx = self.get_closest_waypoint_idx()\n farthest_wp_idx = closest_wp_idx + LOOKAHEAD_WPS\n\n lane = Lane()\n lane.header = self.base_waypoints.header\n lane.waypoints = self.base_waypoints.waypoints[closest_wp_idx:farthest_wp_idx]\n self.final_waypoints_pub.publish(lane)\n\n def traffic_cb(self, msg):\n self.lights = msg.lights\n self.updatelock = 1\n self.lights_x = []\n self.lights_y = []\t\t\n for light in self.lights:\n self.lights_x.append(light.pose.pose.position.x);\n self.lights_y.append(light.pose.pose.position.y);\n self.updatelock = 0\n\t\t\t\n def image_cb(self, msg):\n \"\"\"\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n now_time = rospy.get_time()\n if (self.image_update == 0) and (now_time > (self.last_image_time + 1)):\n self.last_image_time = now_time\n self.has_image = True\n self.updatelock = 1\n self.cv_image = cv2.cvtColor(self.bridge.imgmsg_to_cv2(msg, \"bgr8\"), cv2.COLOR_BGR2RGB)\n\n self.cv_image = self.cv_image.astype(int)\n self.cv_image = cv2.normalize(self.cv_image, None, 255,0, cv2.NORM_MINMAX, cv2.CV_8UC1)\n self.updatelock = 0\n self.image_update = 1\n\t\t\n\t\t\t\n def pose_cb(self, msg):\n self.pose = msg # around 50 Hz\n self.updatelock = 1\n self.pose_x.append(msg.pose.position.x)\n self.pose_y.append(msg.pose.position.y)\n if len(self.pose_x) > 21:\n self.recent_pose_x = self.pose_x[-20:]\n self.recent_pose_y = self.pose_y[-20:]\n else:\n self.recent_pose_x = self.pose_x\n self.recent_pose_y = self.pose_y\n self.updatelock = 0\n #print(\"new pose\")\n\n def velocity_cb(self, msg):\n self.updatelock = 1\n timenow = rospy.get_time() - self.start_seconds\n self.vel_x.append(timenow)\n self.vel_y.append(msg.twist.linear.x)\n self.updatelock = 0\n #print(\"new vel\")\n\t\t\n def waypoints_cb(self, waypoints):\n # load base waypoints\n print(\"new waypoints\")\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n # convert waypoints to (x,y) list\n self.waypoints_2d = [\n [\n waypoint.pose.pose.position.x,\n waypoint.pose.pose.position.y\n ] for waypoint in waypoints.waypoints\n ]\n maxdist = 0\n prev_x = -1\n prev_y = -1\n for waypoint in waypoints.waypoints:\n self.waypoints_x.append(waypoint.pose.pose.position.x);\n self.waypoints_y.append(waypoint.pose.pose.position.y);\n if prev_x >= 0:\n x = waypoint.pose.pose.position.x - prev_x\n y = waypoint.pose.pose.position.y - prev_y\n\n dist = math.sqrt((x*x) + (y*y))\n if dist > maxdist:\n maxdist = dist\n prev_x = waypoint.pose.pose.position.x\n prev_y = waypoint.pose.pose.position.y\n # build KDTree\n self.waypoint_tree = KDTree(self.waypoints_2d)\n # for Highway map, maxdist = 2.6486\n print(\"Waypoints max distance between points = \",maxdist)\n\n def final_waypoints_cb(self, waypoints):\n self.updatelock = 1\n self.final_waypoints_x = []\n self.final_waypoints_y = []\n for waypoint in waypoints.waypoints:\n self.final_waypoints_x.append(waypoint.pose.pose.position.x);\n self.final_waypoints_y.append(waypoint.pose.pose.position.y);\n self.updatelock = 
0\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n for i in range(wp1, wp2 + 1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n Visual()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start visual node.')\n" ]
[ [ "numpy.dot", "matplotlib.use", "matplotlib.pyplot.show", "matplotlib.pyplot.draw", "scipy.spatial.KDTree", "numpy.array", "matplotlib.pyplot.pause", "matplotlib.pyplot.ion", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
aphedges/pytorch-lightning
[ "160e7e128909abc8489261287a562777cf1ada02" ]
[ "pytorch_lightning/loops/utilities.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Generator, Iterator, Mapping, Optional, Sequence, Tuple\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.plugins import ParallelPlugin\nfrom pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher\nfrom pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\n\n\ndef check_finite_loss(loss: Optional[torch.Tensor]) -> None:\n \"\"\"Checks for finite loss value.\n\n Args:\n loss: the loss value to check to be finite\n \"\"\"\n if loss is not None and not torch.isfinite(loss).all():\n raise ValueError(f\"The loss returned in `training_step` is {loss}.\")\n\n\ndef _check_training_step_output(model: \"pl.LightningModule\", training_step_output: STEP_OUTPUT) -> None:\n \"\"\"Sanity checks that training produced a valid output and optimizer step has already been called in manual\n optimization.\n\n Args:\n model: a reference to the trainer\n training_step_output: the output of the training step (before wrapping in an AttributeDict)\n \"\"\"\n if isinstance(training_step_output, torch.Tensor) and not model.automatic_optimization:\n if training_step_output.grad_fn is None:\n # TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...\n raise MisconfigurationException(\"In manual optimization, `training_step` should not return a Tensor\")\n elif model.automatic_optimization:\n if not any(\n (\n isinstance(training_step_output, torch.Tensor),\n (isinstance(training_step_output, Mapping) and \"loss\" in training_step_output),\n training_step_output is None,\n )\n ):\n raise MisconfigurationException(\n \"In automatic optimization, `training_step` must either return a Tensor, \"\n \"a dict with key 'loss' or None (where the step will be skipped).\"\n )\n\n\ndef _process_training_step_output(\n trainer: \"pl.Trainer\", training_step_output: STEP_OUTPUT\n) -> Tuple[Optional[ResultCollection], Optional[Any]]:\n \"\"\"Adds the :param:`training_step_output` to the trainer's results.\n\n Args:\n trainer: a reference to the trainer\n training_step_output: the output of the training step (before wrapping into an AttributeDict)\n\n Returns:\n the updated results (None if the training_step's output was None) and hiddens exract from the results\n \"\"\"\n if training_step_output is None:\n return None, None\n\n results = trainer._results\n\n loss = None\n hiddens = None\n\n # handle dict return\n if isinstance(training_step_output, dict):\n # this should not modify the 
`training_step_output`, as the user could be using it after `training_step_end`\n loss = training_step_output.get(\"loss\")\n hiddens = training_step_output.get(\"hiddens\")\n # detach hiddens to avoid `RuntimeError: Trying to backward through the graph a second time`\n hiddens = apply_to_collection(hiddens, torch.Tensor, lambda t: t.detach())\n # use the setter instead of `dict.update` because it calls `detach` on the tensor items\n results.extra = {k: v for k, v in training_step_output.items() if k not in (\"loss\", \"hiddens\")}\n\n # handle scalar return\n elif isinstance(training_step_output, torch.Tensor):\n loss = training_step_output\n\n if trainer.terminate_on_nan:\n check_finite_loss(loss)\n\n # the loss shouldn't be moved to cpu.\n if trainer.move_metrics_to_cpu:\n results.cpu()\n\n # map to results under the hood\n results.minimize = loss\n\n return results, hiddens\n\n\ndef _build_training_step_kwargs(\n lightning_module: \"pl.LightningModule\",\n optimizers: Sequence[Optimizer],\n batch: Any,\n batch_idx: int,\n opt_idx: Optional[int],\n hiddens: Optional[Any],\n) -> Dict[str, Any]:\n \"\"\"Builds the keyword arguments for training_step.\n\n Args:\n lightning_module: the LightningModule with a `training_step` hook implementation\n optimizers: the list of optimizers from the Trainer\n batch: the batch to train on\n batch_idx: the index of the current batch\n opt_idx: the index of the current optimizer\n hiddens: the hidden state of the previous RNN iteration\n\n Returns:\n the keyword arguments for the training step\n \"\"\"\n # enable not needing to add opt_idx to training_step\n step_kwargs = OrderedDict([(\"batch\", batch)])\n\n training_step_fx = getattr(lightning_module, \"training_step\")\n\n if is_param_in_hook_signature(training_step_fx, \"batch_idx\", min_args=2):\n step_kwargs[\"batch_idx\"] = batch_idx\n\n if len(optimizers) > 1:\n has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, \"optimizer_idx\")\n if has_opt_idx_in_train_step:\n if not lightning_module.automatic_optimization:\n raise ValueError(\n \"Your `LightningModule.training_step` signature contains an `optimizer_idx` argument but\"\n \" in manual optimization optimizers must be handled by the user. Remove the optimizer_idx\"\n \" argument or set `self.automatic_optimization = True`.\"\n )\n step_kwargs[\"optimizer_idx\"] = opt_idx\n elif not has_opt_idx_in_train_step and lightning_module.automatic_optimization:\n raise ValueError(\n f\"Your LightningModule defines {len(optimizers)} optimizers but\"\n \" `training_step` is missing the `optimizer_idx` argument.\"\n )\n\n # pass hiddens if using tbptt\n if lightning_module.truncated_bptt_steps > 0:\n step_kwargs[\"hiddens\"] = hiddens\n\n return step_kwargs\n\n\ndef _prepare_dataloader_iter(data_fetcher: AbstractDataFetcher, batch_idx: int) -> Iterator:\n \"\"\"Attach the dataloader.\"\"\"\n if not isinstance(data_fetcher, DataLoaderIterDataFetcher):\n # restore iteration\n dataloader_iter = enumerate(data_fetcher, batch_idx)\n else:\n dataloader_iter = iter(data_fetcher)\n return dataloader_iter\n\n\n@contextmanager\ndef _block_parallel_sync_behavior(trainer: \"pl.Trainer\", block: bool = True) -> Generator[None, None, None]:\n \"\"\"Blocks synchronization in :class:`~pytorch_lightning.plugins.training_type.parallel.ParallelPlugin`. 
This is\n useful for example when accumulating gradients to reduce communication when it is not needed.\n\n Args:\n trainer: the trainer instance with a reference to a training type plugin\n block: whether the context manager is enabled or not\n\n Returns:\n context manager with sync behaviour off\n \"\"\"\n if isinstance(trainer.training_type_plugin, ParallelPlugin) and block:\n with trainer.training_type_plugin.block_backward_sync():\n yield None\n else:\n yield None\n" ]
[ [ "torch.isfinite" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fengtony686/workspace
[ "9e382a02439cb510df5fb2c278ae4e206d830336" ]
[ "MachineLearning/MINST/CNN.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport torchvision\n\n\nEPOCH = 1\nBATCH_SIZE = 50\nLR = 0.001\nDOWNLOAD_MNIST = False\n\n\nif not(os.path.exists('./mnist/')) or not os.listdir('./mnist/'):\n DOWNLOAD_MNIST = True\n\n\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist/',\n train=True,\n transform=torchvision.transforms.ToTensor(),\n download=DOWNLOAD_MNIST,\n)\n\n\nprint(train_data.data.size())\nprint(train_data.targets.size())\n\n\ntrain_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True) # batch shape:(50,1,28,28)\n\n\ntest_data = torchvision.datasets.MNIST(root='./mnist/', train=False)\ntest_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)[:2000]/255. # shape:(2000,1,28,28)\ntest_y = test_data.targets[:2000]\n\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(1, 16, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.out = nn.Linear(32*7*7, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n out = self.out(x)\n return out, x\n\n\ncnn = CNN()\nprint(cnn)\noptimizer = torch.optim.Adam(cnn.parameters(), lr=LR)\nloss_func = nn.CrossEntropyLoss()\n\n\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(train_loader):\n output = cnn(b_x)[0]\n loss = loss_func(output, b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 50 == 0:\n test_output, last_layer = cnn(test_x)\n pred_y = torch.max(test_output, 1)[1].data.numpy()\n accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum())/float(test_y.size(0))\n print('Epoch: ', epoch, '| Training Loss: %.4f' % loss.data.numpy(), '| Test Accuracy: %.2f' % accuracy)\n\n\ntest_output, _ = cnn(test_x[:20])\npred_y = torch.max(test_output, 1)[1].data.numpy()\nprint(pred_y, 'Prediction Number')\nprint(test_y[:20].numpy(), 'Real Number')" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.unsqueeze", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kundajelab/bias_correction
[ "521678ea8739473f793b0ce85e22e622d13df6fe" ]
[ "genomewide_gc/get_gc_content.py" ]
[ "import pandas as pd\nimport pysam\nimport argparse\ndef parse_args():\n parser=argparse.ArgumentParser(description=\"get gc content from a bed file\")\n parser.add_argument(\"--input_bed\")\n parser.add_argument(\"--ref_fasta\")\n parser.add_argument(\"--split_chroms\",action=\"store_true\",default=False)\n parser.add_argument(\"--out_prefix\")\n parser.add_argument(\"--center_summit\",action=\"store_true\",default=False)\n parser.add_argument(\"--flank_size\",type=int,default=500)\n parser.add_argument(\"--store_seq\",action=\"store_true\",default=False) \n return parser.parse_args()\ndef get_line_narrowPeak(row,args):\n chrom=row[0]\n start=row[1]\n end=row[2] \n if args.center_summit==True:\n summit=start+row[9]\n start=summit-args.flank_size\n end=summit+args.flank_size\n return chrom,start,end\n\ndef get_line_hdf5(index):\n chrom=index[0]\n start=index[1]\n end=index[2]\n return chrom, start,end \n\n\ndef main():\n args=parse_args()\n ref=pysam.FastaFile(args.ref_fasta)\n outputs=dict()\n outf=None\n is_narrowPeak=True\n if args.input_bed.endswith('.hdf5'):\n #load as hdf5\n is_narrowPeak=False\n data=pd.read_hdf(args.input_bed,header=0,sep='\\t')\n else:\n #load csv\n data=pd.read_csv(args.input_bed,header=0,sep='\\t')\n print(\"loaded bed file\")\n num_rows=str(data.shape[0])\n print(\"num_rows:\"+num_rows) \n cur_row=0 \n for index,row in data.iterrows():\n if cur_row%1000==0:\n print(str(cur_row)+\"/\"+num_rows)\n cur_row+=1\n if is_narrowPeak is True:\n chrom,start,end=get_line_narrowPeak(row,args)\n else:\n chrom,start,end=get_line_hdf5(index)\n #extract fasta\n seq=ref.fetch(chrom,start,end).upper()\n g=seq.count('G')\n c=seq.count('C')\n gc=g+c\n gc_fract=round(gc/len(seq),2)\n if args.split_chroms is True:\n if chrom not in outputs:\n outputs[chrom]=open(args.out_prefix+'.'+chrom,'w')\n print(\"created:\"+str(args.out_prefix+'.'+chrom))\n outputs[chrom].write(chrom+'\\t'+str(start)+'\\t'+str(end)+'\\t'+str(gc_fract))\n if args.store_seq is True:\n outputs[chrom].write('\\t'+seq+'\\n')\n else:\n outputs[chrom].write('\\n')\n else:\n if outf is None:\n outf=open(args.out_prefix,'w')\n print(\"created:\"+str(args.out_prefix))\n outf.write(chrom+'\\t'+str(start)+'\\t'+str(end)+'\\t'+str(gc_fract))\n if args.store_seq is True:\n outf.write('\\t'+seq+'\\n')\n else:\n outf.write('\\n')\n #close files\n if args.split_chroms is True:\n for chrom in outputs:\n outputs[chrom].close()\n else:\n outf.close()\n \nif __name__==\"__main__\":\n main()\n" ]
[ [ "pandas.read_hdf", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
repos-cl/akshare
[ "94fa42fb095ac4bfa5d8d58673b805d36cc0128e", "94fa42fb095ac4bfa5d8d58673b805d36cc0128e" ]
[ "akshare/index/index_eri.py", "akshare/energy/energy_carbon.py" ]
[ "# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/5/9 16:16\nDesc: 浙江省排污权交易指数\nhttps://zs.zjpwq.net/\n\"\"\"\nimport requests\nimport pandas as pd\n\n\ndef index_eri() -> pd.DataFrame:\n \"\"\"\n 浙江省排污权交易指数\n https://zs.zjpwq.net\n :return: 浙江省排污权交易指数\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"https://zs.zjpwq.net/zhe-jiang-pwq-webapi/indexData\"\n params = {\n \"indexId\": \"1\",\n \"areaCode\": \"330000\",\n \"cycle\": \"MONTH\",\n \"structCode\": \"01\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"])\n del temp_df[\"id\"]\n del temp_df[\"indexId\"]\n del temp_df[\"stageId\"]\n del temp_df[\"structCode\"]\n del temp_df[\"areaCode\"]\n del temp_df[\"rawValue\"]\n temp_df.columns = [\n \"value\",\n \"date\",\n ]\n temp_df = temp_df[\n [\n \"date\",\n \"value\",\n ]\n ]\n big_df = temp_df\n url = \"https://zs.zjpwq.net/zhe-jiang-pwq-webapi/rawValueStatistics\"\n params = {\n \"orderBy\": \"-date\",\n \"pageSize\": \"1000\",\n \"quotaType\": \"0\",\n \"index\": \"TOTAL_QUANTITY\",\n \"areaCode\": \"330000\",\n }\n r = requests.get(url, params=params)\n data_json = r.json()\n temp_df = pd.DataFrame(data_json[\"data\"])\n del temp_df[\"id\"]\n del temp_df[\"quotaType\"]\n del temp_df[\"index\"]\n temp_df.columns = [\n \"date\",\n \"value\",\n \"update\",\n ]\n big_df = big_df.merge(temp_df, on=\"date\")\n big_df.columns = [\n \"日期\",\n \"交易指数\",\n \"成交量\",\n \"更新时间\",\n ]\n return big_df\n\n\nif __name__ == \"__main__\":\n index_eri_df = index_eri()\n print(index_eri_df)\n", "# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/4/22 16:05\nDesc: 碳排放交易\n北京市碳排放权电子交易平台-北京市碳排放权公开交易行情\nhttps://www.bjets.com.cn/article/jyxx/\n\n深圳碳排放交易所-国内碳情\nhttp://www.cerx.cn/dailynewsCN/index.htm\n\n深圳碳排放交易所-国际碳情\nhttp://www.cerx.cn/dailynewsOuter/index.htm\n\n湖北碳排放权交易中心-现货交易数据-配额-每日概况\nhttp://www.cerx.cn/dailynewsOuter/index.htm\n\n广州碳排放权交易中心-行情信息\nhttp://www.cnemission.com/article/hqxx/\n\"\"\"\nimport re\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\n\ndef energy_carbon_bj() -> pd.DataFrame:\n \"\"\"\n 北京市碳排放权电子交易平台-北京市碳排放权公开交易行情\n https://www.bjets.com.cn/article/jyxx/\n :return: 北京市碳排放权公开交易行情\n :rtype: pandas.DataFrame\n \"\"\"\n url = 'https://www.bjets.com.cn/article/jyxx/'\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"lxml\")\n total_page = soup.find('table').find('script').string.split('=')[-1].strip().strip(';').strip('\"')\n temp_df = pd.DataFrame()\n for i in tqdm(range(1, int(total_page)+1), desc=\"Please wait for a moment\"):\n if i == 1:\n i = \"\"\n url = f\"https://www.bjets.com.cn/article/jyxx/?{i}\"\n res = requests.get(url)\n res.encoding = \"utf-8\"\n df = pd.read_html(res.text)[0]\n temp_df = temp_df.append(df, ignore_index=True)\n temp_df.columns = [\"日期\", \"成交量\", \"成交均价\", \"成交额\"]\n return temp_df\n\n\ndef energy_carbon_sz():\n \"\"\"\n 深圳碳排放交易所-国内碳情\n http://www.cerx.cn/dailynewsCN/index.htm\n :return: 国内碳情每日行情数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.cerx.cn/dailynewsCN/index.htm\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"lxml\")\n page_num = int(soup.find(attrs={\"class\": \"pagebar\"}).find_all(\"option\")[-1].text)\n big_df = pd.read_html(r.text, header=0)[0]\n for page in tqdm(range(2, page_num+1), desc=\"Please wait for a moment\"):\n url = f\"http://www.cerx.cn/dailynewsCN/index_{page}.htm\"\n r = requests.get(url)\n temp_df = pd.read_html(r.text, header=0)[0]\n big_df = 
big_df.append(temp_df, ignore_index=True)\n return big_df\n\n\ndef energy_carbon_eu():\n \"\"\"\n 深圳碳排放交易所-国际碳情\n http://www.cerx.cn/dailynewsOuter/index.htm\n :return: 国际碳情每日行情数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.cerx.cn/dailynewsOuter/index.htm\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"lxml\")\n page_num = int(soup.find(attrs={\"class\": \"pagebar\"}).find_all(\"option\")[-1].text)\n big_df = pd.read_html(r.text, header=0)[0]\n for page in tqdm(range(2, page_num+1), desc=\"Please wait for a moment\"):\n url = f\"http://www.cerx.cn/dailynewsOuter/index_{page}.htm\"\n r = requests.get(url)\n temp_df = pd.read_html(r.text, header=0)[0]\n big_df = big_df.append(temp_df, ignore_index=True)\n return big_df\n\n\ndef energy_carbon_hb():\n \"\"\"\n 湖北碳排放权交易中心-现货交易数据-配额-每日概况\n http://www.hbets.cn/list/13.html?page=42\n :return: 现货交易数据-配额-每日概况行情数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://www.hbets.cn/list/13.html\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"lxml\")\n page_string = soup.find('div', attrs={'class': 'page'}).find_all('span')[-1].text\n page_num = int(re.findall(r'\\d+', page_string)[-1])\n columns = [item.text for item in soup.find('ul', attrs={\"class\": \"title\"}).find_all('li')]\n big_df = pd.DataFrame()\n for page in tqdm(range(1, page_num+1), desc=\"Please wait for a moment\"):\n url = f\"http://www.hbets.cn/list/13.html\"\n params = {\n 'page': page\n }\n r = requests.get(url, params=params)\n soup = BeautifulSoup(r.text, \"lxml\")\n page_node = [item for item in soup.find(attrs={\"class\": \"future_table\"}).find_all(attrs={\"class\": \"cont\"})]\n temp_list = []\n for item in page_node:\n temp_inner_list = []\n for inner_item in item.find_all(\"li\"):\n temp_inner_list.append(inner_item.text)\n temp_list.append(temp_inner_list)\n temp_df = pd.DataFrame(temp_list)\n big_df = big_df.append(temp_df, ignore_index=True)\n big_df.columns = columns\n return big_df\n\n\ndef energy_carbon_gz():\n \"\"\"\n 广州碳排放权交易中心-行情信息\n http://www.cnemission.com/article/hqxx/\n :return: 行情信息数据\n :rtype: pandas.DataFrame\n \"\"\"\n url = \"http://ets.cnemission.com/carbon/portalIndex/markethistory\"\n params = {\n \"Top\": \"1\",\n \"beginTime\": \"2010-01-01\",\n \"endTime\": \"2021-09-12\",\n }\n r = requests.get(url, params=params)\n temp_df = pd.read_html(r.text, header=0)[1]\n return temp_df\n\n\nif __name__ == '__main__':\n energy_carbon_bj_df = energy_carbon_bj()\n print(energy_carbon_bj_df)\n\n energy_carbon_sz_df = energy_carbon_sz()\n print(energy_carbon_sz_df)\n\n energy_carbon_eu_df = energy_carbon_eu()\n print(energy_carbon_eu_df)\n\n energy_carbon_hb_df = energy_carbon_hb()\n print(energy_carbon_hb_df)\n\n energy_carbon_gz_df = energy_carbon_gz()\n print(energy_carbon_gz_df)\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame", "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
abc4pwm/abc4pwm
[ "29c9e833b076f8ce7e3e206c5ae8b560eff02b9e" ]
[ "build/lib/abc4pwm/clustering.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 23 14:43:24 2019\n\n@author: omerali\n\"\"\"\n\nimport numpy as np\nimport os, shutil\nfrom pathlib import Path\nfrom time import gmtime, strftime\nimport json\nfrom glob import glob\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom distutils.dir_util import copy_tree\nimport multiprocessing as mp\nfrom abc4pwm.convert_count_to_pwm import motif_weight2p\nfrom abc4pwm.similarity_score import compute_similarity_score4alignment\nfrom abc4pwm.energy_to_p import read_energy_matrix\nfrom abc4pwm.non_dbd_clustering import non_dbd_ClusteringPwm\n\nfrom functools import partial\n\n\nfrom sklearn.cluster import AffinityPropagation\n\nclass ClusteringPwm():\n\n def __init__(self, input_folder_path, output_folder_path, in_dbd = True, minimum_pwms_in_dbd = 5, max_no_processors = 5):\n \"\"\"\n\n :param input_folder_path: this should point to folder which contain DBD folders\n :param output_folder_path: this should point to folder which contain clustered DBD folders\n :param in_dbd: this should be true if you want clustering inside dbd. Otherwise false.\n :param minimum_pwms_in_dbd: a dbd having less than this number of pwm will not be clustered\n :param max_no_processors: for parallel processing, select maximum number of processors. Default is 5\n\n \"\"\"\n print(\"\\nTask: Clustering of TFs based on their DNA Binding Domain\")\n\n self.output_folder_path = output_folder_path\n leaf_folder = Path(self.output_folder_path)\n out_dir = leaf_folder.parent\n\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path, exist_ok=True)\n\n if not in_dbd:\n clusteringClassobj = non_dbd_ClusteringPwm(input_folder_path, output_folder_path)\n exit()\n self.empty_dir(output_folder_path)\n self.minimum_pwms_in_dbd = minimum_pwms_in_dbd\n self.max_processors = max_no_processors\n self.total_clusters = 0\n self.unclustered_dbds = 0\n self.unclustered_pwms = 0\n\n copy_tree(input_folder_path,output_folder_path)\n input_folder_path = output_folder_path\n\n dbds = sorted(os.listdir(input_folder_path))\n for ind, i in enumerate(dbds):\n if i.startswith('.DS'):\n dbds.pop(ind)\n for i in dbds:\n path_to_dbd = os.path.join(input_folder_path, i)\n self.drive_clustering(self, path_to_dbd)\n\n\n path_to_text_reports = os.path.join(out_dir, 'reports_in_text/')\n\n if os.path.exists(os.path.join(path_to_text_reports,\"clusterSummary.txt\")):\n os.remove(os.path.join(path_to_text_reports,\"clusterSummary.txt\"))\n os.makedirs(path_to_text_reports, exist_ok=True)\n with open(os.path.join(path_to_text_reports,\"clusterSummary.txt\"),'w') as cs:\n cs.writelines(\"Clustering Time: \" + str(strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime()))+ \"\\n\"\n \"Clustering Technique: Affinity Propagation Clustering \\n\"\n \"Minimum PWMs in a DBD considered for Clustering: \" + str(self.minimum_pwms_in_dbd) + \"\\n\\n\"\n \"Total DBDs : \" + str(len(dbds)) + \"\\n\"\n \"Total Clusters made : \" + str(self.total_clusters) + \"\\n\"\n \"UnClustered DBDs due to less than threshold : \" + str(self.unclustered_dbds) + \"\\n\"\n \"UnClustered pwms in DBDs less than threshold: \" + str(self.unclustered_pwms) + \"\\n\")\n\n\n print(\"Task completed. 
\\n \"\n \"Please see clusters in : \", input_folder_path, \"<dbd_folder>/out \\n\"\n \"Clustering summary in data/out/reports_in_text\")\n\n\n @staticmethod\n def drive_clustering(self, inputdir):\n #this function prepares a similiarity matrix in parallel and send to a function for clustering\n #rename in files according to their clusters and put in respective cluster folder\n #also call representative motif function at the end\n\n leaf_folder = Path(self.output_folder_path)\n out_dir = leaf_folder.parent\n\n pwms = [i.split('/')[-1] for i in glob(os.path.join(inputdir, \"*.mlp\"))]\n\n if len(pwms) < int(self.minimum_pwms_in_dbd):\n # for x in pwms:\n # shutil.move(os.path.join(inputdir,x),dst_for_bad_pwms)\n\n\n\n self.unclustered_dbds+=1\n self.unclustered_pwms+=len(pwms)\n\n return 1\n else:\n n_processors = int(np.ceil(len(pwms)/30))\n if n_processors > self.max_processors:\n n_processors = self.max_processors\n pool = mp.Pool(processes=n_processors)\n\n start = 0\n processor_capacity = int(np.ceil(len(pwms) / n_processors))\n end = processor_capacity\n chunks_of_pwms = []\n for i in range(n_processors):\n chunks_of_pwms.append(pwms[start:end])\n start = end\n end = end + processor_capacity\n calculate_similarity = partial(self.calculate_similarity_matrix, inputdir=inputdir)\n chunks_similarity_matrix = pool.map(calculate_similarity, chunks_of_pwms)\n similarity_matrix = np.concatenate((chunks_similarity_matrix), axis=0)\n\n clusters_labels = self.clustering(similarity_matrix)\n\n\n\n self.renaming_mlp(self, inputdir, clusters_labels,pwms)\n self.total_clusters += len(np.unique(clusters_labels))\n\n self.folderizeclusters(os.path.join(inputdir,'out/'))\n\n @staticmethod\n def folderizeclusters(folder_path):\n # folder_path = \"test_out/\"\n\n mlpfiles = [i.split('/')[-1] for i in glob(os.path.join(folder_path, \"*.mlp\"))]\n\n for pwm in mlpfiles:\n folder_name = pwm.split('_')[0]\n\n new_path = os.path.join(folder_path, folder_name)\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n\n old_mlp_path = os.path.join(folder_path, pwm)\n new_mlp_path = os.path.join(new_path, pwm)\n shutil.move(old_mlp_path, new_mlp_path)\n\n @staticmethod\n def calculate_similarity_matrix(pwms_full, inputdir):\n #this function extracts matrices from files, convert them to probablity and return similarity matrix\n\n\n\n full_pwms = [i.split('/')[-1] for i in glob(os.path.join(inputdir, \"*.mlp\"))]\n\n df = np.zeros((len(pwms_full), len(full_pwms)))\n for index1, i in enumerate(pwms_full):\n matrix1, matrix_string1, maximum_feq1, total_maximum1, info1 = read_energy_matrix(os.path.join(inputdir, i))\n matrix1 = motif_weight2p(matrix1)\n\n for index2, j in enumerate(full_pwms):\n matrix2, matrix_string2, maximum_feq2, total_maximum2, info2 = read_energy_matrix(os.path.join(inputdir, j))\n matrix2 = motif_weight2p(matrix2)\n\n df[index1, index2] = compute_similarity_score4alignment(matrix1, matrix2)\n\n\n similarityMatrix = np.asarray(df)\n return similarityMatrix\n\n\n\n @staticmethod\n def read_similarity_matrix():\n #funciton for reading similariy matrix stores in a json file\n with open('similarityMatrix.json') as f:\n similarityMatrix = np.array(json.load(f))\n return similarityMatrix\n\n @staticmethod\n def clustering(similarityMatrix):\n #function for clustering algorithm\n clusters = AffinityPropagation(damping=0.5, max_iter=400, convergence_iter=30, preference=None, affinity='precomputed', verbose=False).fit(similarityMatrix)\n return clusters.labels_\n\n\n @staticmethod\n def 
renaming_mlp(self, inputdir, clusters,pwms):\n #this function add cluster number to every file\n\n\n if not os.path.exists(os.path.join(inputdir,'out/')):\n os.mkdir(os.path.join(inputdir,'out/'))\n outputdir = os.path.join(inputdir,'out/')\n self.empty_dir(outputdir)\n\n for ind, i in enumerate(pwms):\n src = os.path.join(inputdir,i) # renaming\n dst = str(clusters[ind]) + '_' + str(i)\n dst = os.path.join(outputdir,dst)\n\n os.rename(src, dst)\n\n @staticmethod\n def empty_dir(folder):\n #function for deleting files from a folder\n\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(('Failed to delete %s. Reason: %s' % (file_path, e)))\n\n\n @staticmethod\n def moving_files_input(src_copy, dest_copy):\n #moving files to input folder from original files for the next run\n\n\n if not os.path.exists(dest_copy):\n os.mkdir(dest_copy,mode=0o777)\n\n pwms = os.listdir(src_copy)\n\n for indexpwm, i in enumerate(pwms):\n if i.startswith('.'):\n pwms.pop(indexpwm)\n\n for file_name in pwms:\n full_file_name = os.path.join(src_copy, file_name)\n if os.path.isfile(full_file_name):\n shutil.move(full_file_name, dest_copy)\n\n\n\nif __name__ == \"__main__\":\n clusteringClassobj = ClusteringPwm('../data/out/classification_out', '../data/out/clustering_out/')\n # clusteringClassobj = ClusteringPwm('../data/in/in_pwms', '../data/out/non_dbd_clustering_out/', False)" ]
[ [ "numpy.asarray", "sklearn.cluster.AffinityPropagation", "numpy.concatenate", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ezavesky/metadata-flatten-extractor
[ "5e81713424970087492b7835195235575f0024e2" ]
[ "contentai_metadata_flatten/parsers/yolo3.py" ]
[ "#! python\n# ===============LICENSE_START=======================================================\n# metadata-flatten-extractor Apache-2.0\n# ===================================================================================\n# Copyright (C) 2017-2020 AT&T Intellectual Property. All rights reserved.\n# ===================================================================================\n# This software file is distributed by AT&T \n# under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# This file is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============LICENSE_END=========================================================\n# -*- coding: utf-8 -*-\n\nfrom os import path\nimport json\nfrom pandas import DataFrame\n\nfrom contentai_metadata_flatten.parsers import Flatten\n\nclass Parser(Flatten):\n def __init__(self, path_content, logger=None):\n super().__init__(path_content, logger=logger)\n self.EXTRACTOR = \"yolo3\"\n\n @staticmethod\n def known_types():\n \"\"\"Return the output types for this generator\n :return: list. List of output types (file types) for this generator\n \"\"\"\n return ['tag']\n\n def parse(self, run_options):\n \"\"\"Flatten Yolo Classifier\n - https://pjreddie.com/darknet/yolo/\n\n :param: run_options (dict): specific runtime information\n :returns: (DataFrame): DataFrame on successful decoding and export, None (or exception) otherwise\n \"\"\"\n list_items = []\n\n dict_data = self.get_extractor_results(self.EXTRACTOR, \"data.json\")\n\n for local_obj in dict_data: # traverse items\n if \"results\" in local_obj or \"milliseconds\" in local_obj:\n # { \"milliseconds\": 5872.539205872539, \"frameNumber\": 176,\n # \"results\": [ { \"objects\": [ \n # { \"name\": \"person\", \"confidence\": 0.9987912774085999,\n # \"boundingBox\": { \"left\": 0.559375, \"top\": 0.03611, \"width\": 0.3, \"height\": 0.9611 } },\n # { \"name\": \"person\", \"confidence\": 0.9953850507736206,\n # \"boundingBox\": { \"left\": 0.134375, \"top\": 0.175, \"width\": 0.3078, \"height\": 0.80277 } }\n # ] } ] },\n\n time_frame = round(float(local_obj[\"milliseconds\"]) / 1000.0, self.ROUND_DIGITS)\n base_obj = { \"time_begin\": time_frame, \"time_event\": time_frame, \"time_end\": time_frame,\n \"tag_type\": \"tag\", \"source_event\": \"image\", \"extractor\": self.EXTRACTOR }\n for obj_result in local_obj[\"results\"]: # iterate through result sets\n if \"objects\" in obj_result:\n for instance_obj in obj_result[\"objects\"]: # iterate through objects\n details_obj = { 'box': {'w': round(instance_obj['boundingBox']['width'], self.ROUND_DIGITS), \n 'h': round(instance_obj['boundingBox']['height'], self.ROUND_DIGITS),\n 'l': round(instance_obj['boundingBox']['left'], self.ROUND_DIGITS), \n 't': round(instance_obj['boundingBox']['top'], self.ROUND_DIGITS) } }\n score_frame = round(float(instance_obj[\"confidence\"]), self.ROUND_DIGITS)\n obj_insert = { \"tag\": instance_obj[\"name\"], \"score\": score_frame, \n \"details\": json.dumps(details_obj) }\n obj_insert.update(base_obj)\n list_items.append(obj_insert)\n\n if len(list_items) > 0: # return the whole thing as dataframe\n return DataFrame(list_items)\n\n if run_options[\"verbose\"]:\n 
self.logger.critical(f\"No tag entries found in source '{self.EXTRACTOR}'\")\n return None\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tiagotouso/TALENTOS_HUMANOS
[ "c391f7d7a331d5f8b186b27af6a9b61448620cc6" ]
[ "IMPORTAMBIENTE.py" ]
[ "'''\nARQUIVO PARA IMPORTAR OS AMBIENTES DOS SERVIDORES\n\nOSB: COLOCAR A LISTA DE SERVIDORES (XLSX) COM OS CAMPOS [SIAPE - AMBIENTE - SETOR EXERCÍCIO]\n'''\nimport os\nimport pandas as pd\n\nfrom SQL import sqlexecute\nfrom MENSAGEM import mensagemErro, mensagemInformacao\n\ndef importarAmbienteServidores():\n '''\n FUNÇÃO IMPORTAR AMBIENTE E EXERCÍCIO DOS SERVIDORES PARA O BANCO DE DADOS\n ENTRA\n PLANILHA DOS SERVIDORES DO SISTEMA INTEGRADO (RELATÓRIO)\n SAI\n BANCO DE DADOS ATUALIZADO COM AMBIENTE E EXERCÍCIO DOS SERVIDORES\n '''\n\n listdir = os.listdir('DADOS_EXTRATOR\\\\')\n if 'servidores.xlsx' in listdir:\n\n xls = 'DADOS_EXTRATOR\\\\servidores.xlsx'\n folha = 'Servidores'\n\n arq = pd.read_excel(xls, folha)\n dados = arq[['Siape', 'Ambiente', 'Exercício']]\n dados = dados[dados['Siape'].notnull()]\n dados['Siape'] = dados['Siape'].apply(lambda x: str(x).rjust(7, '0'))\n dados = dados.dropna(thresh=2)\n dados = dados.fillna('null')\n dados = dados[dados['Siape'].duplicated() == False]\n\n sql = '''delete from ts_sis_ambientes;'''\n sqlexecute(sql)\n\n sql = '''INSERT INTO ts_sis_ambientes\\n(GR_MATRICULA, AMBIENTE, EXERCICIO)\\nvalues\\n'''\n lx = ''\n for i in dados.values:\n if len(i[0]) == 7:\n lx = '''( '{0}', '{1}', '{2}' ),\\n'''.format(i[0], i[1], i[2])\n sql += lx\n sql = sql[:-2] + ';'\n sql = sql.replace('\\'null\\'', 'null')\n sqlexecute(sql)\n\n mensagemInformacao('Importação do AMBIENTE concluída.')\n else:\n mensagemErro('Arquivo \"servidores.xlsx\" não encontrado. (AMBIENTE)')\n\n\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
treid5/probnum
[ "1c5499883672cfa029c12045848ea04491c69e08", "1fed705b2443a14d08419e16f98f6ef815ae9ffa" ]
[ "src/probnum/quad/solvers/stopping_criteria/_rel_mean_change.py", "tests/test_diffeq/test_perturbed/test_step/test_perturbedstepsolution.py" ]
[ "\"\"\"Stopping criterion based on the relative change of the successive integral estimators.\"\"\"\n\nimport numpy as np\n\nfrom probnum.quad.solvers.bq_state import BQState\nfrom probnum.quad.solvers.stopping_criteria import BQStoppingCriterion\nfrom probnum.typing import FloatArgType\n\n# pylint: disable=too-few-public-methods\n\n\nclass RelativeMeanChange(BQStoppingCriterion):\n \"\"\"Stop once the relative change of consecutive integral estimates are smaller than\n a tolerance.\n\n The stopping criterion is: :math:`|\\\\hat{F}_{c} - \\\\hat{F}_{p}|/ |\\\\hat{F}_{c}| \\\\leq r`\n where :math:`\\\\hat{F}_{c}` and :math:`\\\\hat{F}_{p}` are the integral estimates of the current and previous iteration\n respectively, and :math:`r` is the relative tolerance.\n\n Parameters\n ----------\n rel_tol:\n Relative error tolerance on consecutive integral mean values.\n \"\"\"\n\n def __init__(self, rel_tol: FloatArgType):\n self.rel_tol = rel_tol\n\n def __call__(self, bq_state: BQState) -> bool:\n integral_belief = bq_state.integral_belief\n return (\n np.abs(\n (integral_belief.mean - bq_state.previous_integral_beliefs[-1].mean)\n / integral_belief.mean\n )\n <= self.rel_tol\n )\n", "import numpy as np\nimport pytest\nfrom scipy.integrate._ivp import rk\n\nimport probnum.problems.zoo.diffeq as diffeq_zoo\nfrom probnum import diffeq, randvars\n\n\[email protected]\ndef steprule():\n return diffeq.stepsize.AdaptiveSteps(0.1, atol=1e-4, rtol=1e-4)\n\n\[email protected]\ndef perturbed_solution(steprule):\n y0 = np.array([0.1, 0.1])\n ode = diffeq_zoo.lotkavolterra(t0=0.0, tmax=1.0, y0=y0)\n rng = np.random.default_rng(seed=1)\n testsolver = diffeq.perturbed.scipy_wrapper.WrappedScipyRungeKutta(\n rk.RK45, steprule=steprule\n )\n sol = diffeq.perturbed.step.PerturbedStepSolver(\n rng=rng,\n solver=testsolver,\n noise_scale=0.1,\n perturb_function=diffeq.perturbed.step.perturb_uniform,\n )\n return sol.solve(ode)\n\n\ndef test_states(perturbed_solution):\n assert isinstance(perturbed_solution.states, randvars._RandomVariableList)\n\n\ndef test_call(perturbed_solution):\n \"\"\"Test for continuity of the dense output.\n\n Small changes of the locations should come with small changes of the states.\n \"\"\"\n np.testing.assert_allclose(\n perturbed_solution(perturbed_solution.locations[0:]).mean,\n perturbed_solution.states[0:].mean,\n atol=1e-14,\n rtol=1e-14,\n )\n np.testing.assert_allclose(\n perturbed_solution(perturbed_solution.locations[0:-1] + 1e-14).mean,\n perturbed_solution(perturbed_solution.locations[0:-1]).mean,\n atol=1e-12,\n rtol=1e-12,\n )\n np.testing.assert_allclose(\n perturbed_solution(perturbed_solution.locations[1:] - 1e-14).mean,\n perturbed_solution(perturbed_solution.locations[1:]).mean,\n atol=1e-12,\n rtol=1e-12,\n )\n\n\ndef test_len(perturbed_solution):\n np.testing.assert_allclose(\n len(perturbed_solution),\n len(perturbed_solution.locations),\n atol=1e-14,\n rtol=1e-14,\n )\n\n\ndef test_getitem(perturbed_solution):\n np.testing.assert_allclose(\n perturbed_solution.interpolants[1](perturbed_solution.locations[1]),\n perturbed_solution[1].mean,\n atol=1e-14,\n rtol=1e-14,\n )\n" ]
[ [ "numpy.abs" ], [ "numpy.array", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiangBowen0008/bop_toolkit
[ "375da05664c1b9b4249b191378f25d5815c305f9" ]
[ "bop_toolkit_lib/renderer_py.py" ]
[ "# Author: Tomas Hodan ([email protected])\n# Center for Machine Perception, Czech Technical University in Prague\n\n\"\"\"A Python based renderer.\"\"\"\n\nimport os\nimport numpy as np\nfrom glumpy import app, gloo, gl\n\nfrom bop_toolkit_lib import inout\nfrom bop_toolkit_lib import misc\nfrom bop_toolkit_lib import renderer\n\n# Set glumpy logging level.\nfrom glumpy.log import log\nimport logging\nlog.setLevel(logging.WARNING) # Options: ERROR, WARNING, DEBUG, INFO.\n\n# Set backend (http://glumpy.readthedocs.io/en/latest/api/app-backends.html).\n# app.use('glfw') # Options: 'glfw', 'qt5', 'pyside', 'pyglet'.\n\n\n# RGB vertex shader.\n_rgb_vertex_code = \"\"\"\nuniform mat4 u_mv;\nuniform mat4 u_nm;\nuniform mat4 u_mvp;\nuniform vec3 u_light_eye_pos;\n\nattribute vec3 a_position;\nattribute vec3 a_normal;\nattribute vec3 a_color;\nattribute vec2 a_texcoord;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\nvarying vec3 v_normal;\n\nvoid main() {\n gl_Position = u_mvp * vec4(a_position, 1.0);\n v_color = a_color;\n v_texcoord = a_texcoord;\n \n // The following points/vectors are expressed in the eye coordinates.\n v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // Vertex.\n v_L = normalize(u_light_eye_pos - v_eye_pos); // Vector to the light.\n v_normal = normalize(u_nm * vec4(a_normal, 1.0)).xyz; // Normal vector.\n}\n\"\"\"\n\n# RGB fragment shader - flat shading.\n_rgb_fragment_flat_code = \"\"\"\nuniform float u_light_ambient_w;\nuniform sampler2D u_texture;\nuniform int u_use_texture;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\n\nvoid main() {\n // Face normal in eye coords.\n vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos)));\n\n float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0);\n float light_w = u_light_ambient_w + light_diffuse_w;\n if(light_w > 1.0) light_w = 1.0;\n\n if(bool(u_use_texture)) {\n gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));\n }\n else {\n gl_FragColor = vec4(light_w * v_color, 1.0);\n }\n}\n\"\"\"\n\n# RGB fragment shader - Phong shading.\n_rgb_fragment_phong_code = \"\"\"\nuniform float u_light_ambient_w;\nuniform sampler2D u_texture;\nuniform int u_use_texture;\n\nvarying vec3 v_color;\nvarying vec2 v_texcoord;\nvarying vec3 v_eye_pos;\nvarying vec3 v_L;\nvarying vec3 v_normal;\n\nvoid main() {\n float light_diffuse_w = max(dot(normalize(v_L), normalize(v_normal)), 0.0);\n float light_w = u_light_ambient_w + light_diffuse_w;\n if(light_w > 1.0) light_w = 1.0;\n\n if(bool(u_use_texture)) {\n gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord));\n }\n else {\n gl_FragColor = vec4(light_w * v_color, 1.0);\n }\n}\n\"\"\"\n\n# Depth vertex shader.\n# Ref: https://github.com/julienr/vertex_visibility/blob/master/depth.py\n#\n# Getting the depth from the depth buffer in OpenGL is doable, see here:\n# http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real\n# http://web.archive.org/web/20130426093607/http://www.songho.ca/opengl/gl_projectionmatrix.html\n# http://stackoverflow.com/a/6657284/116067\n# but it is difficult to achieve high precision, as explained in this article:\n# http://dev.theomader.com/depth-precision/\n#\n# Once the vertex is in the view coordinates (view * model * v), its depth is\n# simply the Z axis. 
Hence, instead of reading from the depth buffer and undoing\n# the projection matrix, we store the Z coord of each vertex in the color\n# buffer. OpenGL allows for float32 color buffer components.\n_depth_vertex_code = \"\"\"\nuniform mat4 u_mv;\nuniform mat4 u_mvp;\nattribute vec3 a_position;\nattribute vec3 a_color;\nvarying float v_eye_depth;\n\nvoid main() {\n gl_Position = u_mvp * vec4(a_position, 1.0);\n vec3 v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // In eye coords.\n\n // OpenGL Z axis goes out of the screen, so depths are negative\n v_eye_depth = -v_eye_pos.z;\n}\n\"\"\"\n\n# Depth fragment shader.\n_depth_fragment_code = \"\"\"\nvarying float v_eye_depth;\n\nvoid main() {\n gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0);\n}\n\"\"\"\n\n\n# Functions to calculate transformation matrices.\n# Note that OpenGL expects the matrices to be saved column-wise.\n# (Ref: http://www.songho.ca/opengl/gl_transform.html)\n\n\ndef _calc_model_view(model, view):\n \"\"\"Calculates the model-view matrix.\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :return: 4x4 ndarray with the model-view matrix.\n \"\"\"\n return np.dot(model, view)\n\n\ndef _calc_model_view_proj(model, view, proj):\n \"\"\"Calculates the model-view-projection matrix.\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :param proj: 4x4 ndarray with the projection matrix.\n :return: 4x4 ndarray with the model-view-projection matrix.\n \"\"\"\n return np.dot(np.dot(model, view), proj)\n\n\ndef _calc_normal_matrix(model, view):\n \"\"\"Calculates the normal matrix.\n\n Ref: http://www.songho.ca/opengl/gl_normaltransform.html\n\n :param model: 4x4 ndarray with the model matrix.\n :param view: 4x4 ndarray with the view matrix.\n :return: 4x4 ndarray with the normal matrix.\n \"\"\"\n return np.linalg.inv(np.dot(model, view)).T\n\n\ndef _calc_calib_proj(K, x0, y0, w, h, nc, fc, window_coords='y_down'):\n \"\"\"Conversion of Hartley-Zisserman intrinsic matrix to OpenGL proj. 
matrix.\n\n Ref:\n 1) https://strawlab.org/2011/11/05/augmented-reality-with-OpenGL\n 2) https://github.com/strawlab/opengl-hz/blob/master/src/calib_test_utils.py\n\n :param K: 3x3 ndarray with the intrinsic camera matrix.\n :param x0 The X coordinate of the camera image origin (typically 0).\n :param y0: The Y coordinate of the camera image origin (typically 0).\n :param w: Image width.\n :param h: Image height.\n :param nc: Near clipping plane.\n :param fc: Far clipping plane.\n :param window_coords: 'y_up' or 'y_down'.\n :return: 4x4 ndarray with the OpenGL projection matrix.\n \"\"\"\n depth = float(fc - nc)\n q = -(fc + nc) / depth\n qn = -2 * (fc * nc) / depth\n\n # Draw our images upside down, so that all the pixel-based coordinate\n # systems are the same.\n if window_coords == 'y_up':\n proj = np.array([\n [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],\n [0, -2 * K[1, 1] / h, (-2 * K[1, 2] + h + 2 * y0) / h, 0],\n [0, 0, q, qn], # Sets near and far planes (glPerspective).\n [0, 0, -1, 0]\n ])\n\n # Draw the images upright and modify the projection matrix so that OpenGL\n # will generate window coords that compensate for the flipped image coords.\n else:\n assert window_coords == 'y_down'\n proj = np.array([\n [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0],\n [0, 2 * K[1, 1] / h, (2 * K[1, 2] - h + 2 * y0) / h, 0],\n [0, 0, q, qn], # Sets near and far planes (glPerspective).\n [0, 0, -1, 0]\n ])\n return proj.T\n\n\nclass RendererPython(renderer.Renderer):\n \"\"\"A Python based renderer.\"\"\"\n\n def __init__(self, width, height, mode='rgb+depth', shading='phong',\n bg_color=(0.0, 0.0, 0.0, 0.0)):\n \"\"\"Constructor.\n\n :param width: Width of the rendered image.\n :param height: Height of the rendered image.\n :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth').\n :param shading: Type of shading ('flat', 'phong').\n :param bg_color: Color of the background (R, G, B, A).\n \"\"\"\n super(RendererPython, self).__init__(width, height)\n\n self.mode = mode\n self.shading = shading\n self.bg_color = bg_color\n\n # Indicators whether to render RGB and/or depth image.\n self.render_rgb = self.mode in ['rgb', 'rgb+depth']\n self.render_depth = self.mode in ['depth', 'rgb+depth']\n\n # Structures to store object models and related info.\n self.models = {}\n self.model_bbox_corners = {}\n self.model_textures = {}\n\n # Rendered images.\n self.rgb = None\n self.depth = None\n\n # Window for rendering.\n self.window = app.Window(visible=False)\n\n # Per-object vertex and index buffer.\n self.vertex_buffers = {}\n self.index_buffers = {}\n\n # Per-object OpenGL programs for rendering of RGB and depth images.\n self.rgb_programs = {}\n self.depth_programs = {}\n\n # The frame buffer object.\n rgb_buf = np.zeros(\n (self.height, self.width, 4), np.float32).view(gloo.TextureFloat2D)\n depth_buf = np.zeros(\n (self.height, self.width), np.float32).view(gloo.DepthTexture)\n self.fbo = gloo.FrameBuffer(color=rgb_buf, depth=depth_buf)\n\n # Activate the created frame buffer object.\n self.fbo.activate()\n\n def add_object(self, obj_id, model_path, **kwargs):\n \"\"\"See base class.\"\"\"\n # Color of the object model (the original color saved with the object model\n # will be used if None).\n surf_color = None\n if 'surf_color' in kwargs:\n surf_color = kwargs['surf_color']\n \n if 'scale' in kwargs:\n scale = kwargs['scale']\n\n # Load the object model.\n model = inout.load_ply(model_path)\n model['pts'] = model['pts'] * scale\n 
self.models[obj_id] = model\n\n # Calculate the 3D bounding box of the model (will be used to set the near\n # and far clipping plane).\n bb = misc.calc_3d_bbox(\n model['pts'][:, 0], model['pts'][:, 1], model['pts'][:, 2])\n self.model_bbox_corners[obj_id] = np.array([\n [bb[0], bb[1], bb[2]],\n [bb[0], bb[1], bb[2] + bb[5]],\n [bb[0], bb[1] + bb[4], bb[2]],\n [bb[0], bb[1] + bb[4], bb[2] + bb[5]],\n [bb[0] + bb[3], bb[1], bb[2]],\n [bb[0] + bb[3], bb[1], bb[2] + bb[5]],\n [bb[0] + bb[3], bb[1] + bb[4], bb[2]],\n [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]],\n ])\n\n # Set texture/color of vertices.\n self.model_textures[obj_id] = None\n\n # Use the specified uniform surface color.\n if surf_color is not None:\n colors = np.tile(list(surf_color) + [1.0], [model['pts'].shape[0], 1])\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Use the model texture.\n elif 'texture_file' in self.models[obj_id].keys():\n model_texture_path = os.path.join(\n os.path.dirname(model_path), self.models[obj_id]['texture_file'])\n model_texture = inout.load_im(model_texture_path)\n\n # Normalize the texture image.\n if model_texture.max() > 1.0:\n model_texture = model_texture.astype(np.float32) / 255.0\n model_texture = np.flipud(model_texture)\n self.model_textures[obj_id] = model_texture\n\n # UV texture coordinates.\n texture_uv = model['texture_uv']\n\n # Set the per-vertex color to dummy values.\n colors = np.zeros((model['pts'].shape[0], 3), np.float32)\n\n # Use the original model color.\n elif 'colors' in model.keys():\n assert (model['pts'].shape[0] == model['colors'].shape[0])\n colors = model['colors']\n if colors.max() > 1.0:\n colors /= 255.0 # Color values are expected in range [0, 1].\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Set the model color to gray.\n else:\n colors = np.ones((model['pts'].shape[0], 3), np.float32) * 0.5\n\n # Set UV texture coordinates to dummy values.\n texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32)\n\n # Set the vertex data.\n if self.mode == 'depth':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_color', np.float32, colors.shape[1])\n ]\n vertices = np.array(list(zip(model['pts'], colors)), vertices_type)\n else:\n if self.shading == 'flat':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_color', np.float32, colors.shape[1]),\n ('a_texcoord', np.float32, 2)\n ]\n vertices = np.array(list(zip(model['pts'], colors, texture_uv)),\n vertices_type)\n elif self.shading == 'phong':\n vertices_type = [\n ('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, colors.shape[1]),\n ('a_texcoord', np.float32, 2)\n ]\n vertices = np.array(list(zip(model['pts'], model['normals'],\n colors, texture_uv)), vertices_type)\n else:\n raise ValueError('Unknown shading type.')\n\n # Create vertex and index buffer for the loaded object model.\n self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer)\n self.index_buffers[obj_id] = \\\n model['faces'].flatten().astype(np.uint32).view(gloo.IndexBuffer)\n\n # Set shader for the selected shading.\n if self.shading == 'flat':\n rgb_fragment_code = _rgb_fragment_flat_code\n elif self.shading == 'phong':\n rgb_fragment_code = _rgb_fragment_phong_code\n else:\n raise ValueError('Unknown shading type.')\n\n # Prepare the RGB OpenGL program.\n rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code)\n 
rgb_program.bind(self.vertex_buffers[obj_id])\n if self.model_textures[obj_id] is not None:\n rgb_program['u_use_texture'] = int(True)\n rgb_program['u_texture'] = self.model_textures[obj_id]\n else:\n rgb_program['u_use_texture'] = int(False)\n rgb_program['u_texture'] = np.zeros((1, 1, 4), np.float32)\n self.rgb_programs[obj_id] = rgb_program\n\n # Prepare the depth OpenGL program.\n depth_program = gloo.Program(_depth_vertex_code,_depth_fragment_code)\n depth_program.bind(self.vertex_buffers[obj_id])\n self.depth_programs[obj_id] = depth_program\n\n def remove_object(self, obj_id):\n \"\"\"See base class.\"\"\"\n del self.models[obj_id]\n del self.model_bbox_corners[obj_id]\n if obj_id in self.model_textures:\n del self.model_textures[obj_id]\n del self.vertex_buffers[obj_id]\n del self.index_buffers[obj_id]\n del self.rgb_programs[obj_id]\n del self.depth_programs[obj_id]\n\n def render_object(self, obj_id, R, t, fx, fy, cx, cy):\n \"\"\"See base class.\"\"\"\n\n # Define the following variables as global so their latest values are always\n # seen in function on_draw below.\n global curr_obj_id, mat_model, mat_view, mat_proj\n curr_obj_id = obj_id\n\n # Model matrix (from object space to world space).\n mat_model = np.eye(4, dtype=np.float32)\n\n # View matrix (from world space to eye space; transforms also the coordinate\n # system from OpenCV to OpenGL camera space).\n mat_view_cv = np.eye(4, dtype=np.float32)\n mat_view_cv[:3, :3], mat_view_cv[:3, 3] = R, t.squeeze()\n yz_flip = np.eye(4, dtype=np.float32)\n yz_flip[1, 1], yz_flip[2, 2] = -1, -1\n mat_view = yz_flip.dot(mat_view_cv) # OpenCV to OpenGL camera system.\n mat_view = mat_view.T # OpenGL expects column-wise matrix format.\n\n # Calculate the near and far clipping plane from the 3D bounding box.\n bbox_corners = self.model_bbox_corners[obj_id]\n bbox_corners_ht = np.concatenate(\n (bbox_corners, np.ones((bbox_corners.shape[0], 1))), axis=1).transpose()\n bbox_corners_eye_z = mat_view_cv[2, :].reshape((1, 4)).dot(bbox_corners_ht)\n clip_near = bbox_corners_eye_z.min()\n clip_far = bbox_corners_eye_z.max()\n\n # Projection matrix.\n K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])\n mat_proj = _calc_calib_proj(\n K, 0, 0, self.width, self.height, clip_near, clip_far)\n\n @self.window.event\n def on_draw(dt):\n self.window.clear()\n global curr_obj_id, mat_model, mat_view, mat_proj\n\n # Render the RGB image.\n if self.render_rgb:\n self.rgb = self._draw_rgb(\n curr_obj_id, mat_model, mat_view, mat_proj)\n\n # Render the depth image.\n if self.render_depth:\n self.depth = self._draw_depth(\n curr_obj_id, mat_model, mat_view, mat_proj)\n\n # The on_draw function is called framecount+1 times.\n app.run(framecount=0)\n\n if self.mode == 'rgb':\n return {'rgb': self.rgb}\n elif self.mode == 'depth':\n return {'depth': self.depth}\n elif self.mode == 'rgb+depth':\n return {'rgb': self.rgb, 'depth': self.depth}\n\n def _draw_rgb(self, obj_id, mat_model, mat_view, mat_proj):\n \"\"\"Renders an RGB image.\n\n :param obj_id: ID of the object model to render.\n :param mat_model: 4x4 ndarray with the model matrix.\n :param mat_view: 4x4 ndarray with the view matrix.\n :param mat_proj: 4x4 ndarray with the projection matrix.\n :return: HxWx3 ndarray with the rendered RGB image.\n \"\"\"\n # Update the OpenGL program.\n program = self.rgb_programs[obj_id]\n program['u_light_eye_pos'] = list(self.light_cam_pos)\n program['u_light_ambient_w'] = self.light_ambient_weight\n program['u_mv'] = _calc_model_view(mat_model, 
mat_view)\n program['u_nm'] = _calc_normal_matrix(mat_model, mat_view)\n program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)\n\n # OpenGL setup.\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glClearColor(\n self.bg_color[0], self.bg_color[1], self.bg_color[2], self.bg_color[3])\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glViewport(0, 0, self.width, self.height)\n\n # Keep the back-face culling disabled because of objects which do not have\n # well-defined surface (e.g. the lamp from the lm dataset).\n gl.glDisable(gl.GL_CULL_FACE)\n\n # Rendering.\n program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])\n\n # Get the content of the FBO texture.\n rgb = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, rgb)\n rgb.shape = (self.height, self.width, 4)\n rgb = rgb[::-1, :]\n rgb = np.round(rgb[:, :, :3] * 255).astype(np.uint8) # Convert to [0, 255].\n\n return rgb\n\n def _draw_depth(self, obj_id, mat_model, mat_view, mat_proj):\n \"\"\"Renders a depth image.\n\n :param obj_id: ID of the object model to render.\n :param mat_model: 4x4 ndarray with the model matrix.\n :param mat_view: 4x4 ndarray with the view matrix.\n :param mat_proj: 4x4 ndarray with the projection matrix.\n :return: HxW ndarray with the rendered depth image.\n \"\"\"\n # Update the OpenGL program.\n program = self.depth_programs[obj_id]\n program['u_mv'] = _calc_model_view(mat_model, mat_view)\n program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj)\n\n # OpenGL setup.\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glClearColor(0.0, 0.0, 0.0, 0.0)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n gl.glViewport(0, 0, self.width, self.height)\n\n # Keep the back-face culling disabled because of objects which do not have\n # well-defined surface (e.g. the lamp from the lm dataset).\n gl.glDisable(gl.GL_CULL_FACE)\n\n # Rendering.\n program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id])\n\n # Get the content of the FBO texture.\n depth = np.zeros((self.height, self.width, 4), dtype=np.float32)\n gl.glReadPixels(\n 0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, depth)\n depth.shape = (self.height, self.width, 4)\n depth = depth[::-1, :]\n depth = depth[:, :, 0] # Depth is saved in the first channel\n\n return depth\n" ]
[ [ "numpy.dot", "numpy.eye", "numpy.flipud", "numpy.ones", "numpy.round", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
falabrasil/kaldi-br
[ "2b11eb937c485941c2209f577af38c2f21bf9017" ]
[ "utils/clustering/cluster.py" ]
[ "#!/usr/bin/env python3\n#\n# author: dec 2020\n# cassio batista - https://cassota.gitlab.io\n#\n# sponsored by MidiaClip (Salvador - BA)\n\n\nimport sys\nimport os\nimport shutil\nimport glob\nimport argparse\nimport logging\nfrom collections import OrderedDict\n\nimport torch\nimport numpy as np\n\nfrom pyannote.pipeline.blocks.clustering import (\n HierarchicalAgglomerativeClustering\n)\n\n\nlogging.basicConfig(format=\"[%(filename)s] %(levelname)s: %(message)s\",\n level=logging.INFO)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Cluster audio files by speaker\")\n parser.add_argument(\"in_dir\", help=\"input dir\")\n parser.add_argument(\"out_dir\", help=\"output dir\")\n\n # parse args and minimally validate input\n args = parser.parse_args()\n if not os.path.isdir(args.in_dir):\n logging.error(\"input dir does not exist: '%s'\" % args.in_dir)\n sys.exit(1)\n if os.path.isdir(args.out_dir):\n logging.warning(\"output dir '%s' exists and *WILL NOT* \"\n \"be overwritten \" % args.out_dir)\n else:\n logging.info(\"creating output dir: '%s'\" % args.out_dir)\n os.mkdir(args.out_dir)\n\n # input dir is expected to contain only two subdirectories,\n # one for a male and another for a female speaker\n subdirs = []\n for d in os.listdir(args.in_dir):\n d = os.path.join(args.in_dir, d) # readlink -f\n if os.path.isdir(d):\n subdirs.append(d)\n\n if len(subdirs) < 1:\n logging.warning(\"expected at least one subdir in '%s'\" % args.in_dir)\n sys.exit(1)\n\n logging.info(\"loading pyannote's speaker embedding model\")\n model = torch.hub.load(\"pyannote/pyannote-audio\", \"emb\")\n clustering = HierarchicalAgglomerativeClustering()\n\n for d in subdirs:\n # get broadcaster name and gender tag + transmission date from dir name\n broadcaster, gtx = d.split(\"/\")[-1].split(\"_\")\n gender, txdate = gtx[0].upper(), gtx[1:]\n\n # sanity check on gender tag\n if gender != \"M\" and gender != \"F\":\n logging.error(\"gender flag expected to be either M or F. \"\n \"got '%s' instead\" % gender)\n sys.exit(1)\n\n # scan subdirs looking for wav and txt files\n # later check if the numbers match, abort if it doesn't\n wavlist = sorted(glob.glob(os.path.join(d, \"*.wav\")))\n txtlist = sorted(glob.glob(os.path.join(d, \"*.txt\")))\n if len(wavlist) != len(txtlist):\n logging.error(\"number of audio and transcription files do not \"\n \"match: %d vs %d\" % (len(wavlist), len(txtlist)))\n sys.exit(1)\n\n # clustering: check `_turn_level()` method from `SpeechTurnClustering`\n # https://github.com/pyannote/pyannote-audio/blob/master/pyannote/audio/pipeline/speech_turn_clustering.py#L162\n X, labels, num_emb = [], [], 0\n for i, wavfile in enumerate(wavlist):\n # label = re.sub('[/.-]', ' ', wavfile).split()[-2]\n label = os.path.basename(wavfile)\n\n logging.info(\"extracting embeddings from '%s'\" % wavfile)\n embedding = model(current_file={'audio': wavfile})\n num_emb += 1\n\n # I'm doing this because I found no way on earth to set a goddamn\n # `speech_turns` variable, which in turn contains a `Timeline`\n # object used for cropping\n # https://github.com/pyannote/pyannote-audio-hub#speaker-embedding\n # https://github.com/pyannote/pyannote-core/blob/develop/pyannote/core/timeline.py#L114\n for window, emb in embedding:\n x = embedding.crop(window)\n\n # TODO could I ignore this break and add multiple embedding\n # vectors for the same label? 
I know for a fact the mapping\n # label-cluster would be kept 1:1 if I moved in both `labels`\n # and `X` appends below...\n if len(x) > 0:\n break\n\n # FIXME skip labels so small we don't have any embedding for it\n if len(x) < 1:\n logging.warning(\"well, we'll have to think of something for \"\n \"utterances like '%s'\" % wavfile)\n continue\n\n labels.append(label)\n X.append(np.mean(x, axis=0))\n\n # apply clustering of label embeddings\n logging.info(\"clustering files from '%s' subdir\" % d)\n clusters = clustering(np.vstack(X)) # int indices\n\n # map each clustered label to its cluster (between 1 and N_CLUSTERS)\n # https://stackoverflow.com/questions/16772071/sort-dict-by-value-python\n mapping = {label: cluster for label, cluster in zip(labels, clusters)}\n mapping = OrderedDict(sorted(mapping.items(), key=lambda x:x[1]))\n\n # https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python/11101867#11101867\n for fileid, (label, cluster) in enumerate(mapping.items()):\n # dir names store tag and speaker id information\n tag = \"%s%s\" % (broadcaster, txdate) # andaiafm20201105\n spk = \"%s-%s%04d\" % (tag, gender, cluster) # andaiafm20201105-F0001\n\n src = os.path.join(d, label.replace(\".wav\", \"\"))\n dst = os.path.join(args.out_dir, tag, spk)\n if not os.path.isdir(dst):\n os.makedirs(dst, exist_ok=True)\n\n # andaiafm20201105/andaiafm20201105-F0001/andaiafm20201105F0001_000001.{wav,txt}\n dst = os.path.join(dst, \"%s_%06d\" % (spk.replace(\"-\", \"\"), fileid))\n logging.info(\"copy: '%s'.{wav,txt} -> '%s'.{wav,txt}\" % (src, dst))\n for ext in (\"wav\", \"txt\"):\n f = \"%s.%s\" % (src, ext) # from source\n t = \"%s.%s\" % (dst, ext) # to destination\n if os.path.isfile(t):\n logging.warning(\"dst file '%s' exists, that's odd\" % t)\n shutil.copy2(f, t)\n\n logging.info(\"done scanning subdir %s: %d embeddings extracted, \"\n \"%d embeddings processed\" % (d, num_emb, len(X)))\n" ]
[ [ "torch.hub.load", "numpy.mean", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RainingComputers/pykitml
[ "1c3e50cebcdb6c4da63979ef9a812b44d23a4857", "1c3e50cebcdb6c4da63979ef9a812b44d23a4857" ]
[ "tests/test_mnist.py", "tests/test_banknote.py" ]
[ "import sys\nimport os.path\n\nimport numpy as np\nimport pykitml as pk\nfrom pykitml.datasets import mnist\nfrom pykitml.testing import pktest_graph, pktest_nograph\n\ndef test_download():\n # Download the mnist data set\n mnist.get()\n # Test ran successfully\n assert True\n\n@pktest_graph\ndef test_adagrad():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Adagrad(learning_rate=0.07, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_nesterov():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Nesterov(learning_rate=0.1, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_relu_nesterov():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10], config='relu-softmax-cross_entropy')\n\n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Nesterov(learning_rate=0.1, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=10\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, 
training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_momentum():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Momentum(learning_rate=0.1, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=20\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 94\n\n@pktest_graph\ndef test_gradient_descent():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.GradientDescent(learning_rate=0.2, decay_rate=0.99), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=20\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 92\n\n@pktest_graph\ndef test_RMSprop():\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.RMSprop(learning_rate=0.012, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=15\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 95\n\n@pktest_graph\ndef test_adam():\n import os.path\n\n 
import numpy as np\n import pykitml as pk\n from pykitml.datasets import mnist\n \n # Download dataset\n if(not os.path.exists('mnist.pkl')): mnist.get()\n\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n \n # Create a new neural network\n digit_classifier = pk.NeuralNetwork([784, 100, 10])\n \n # Train it\n digit_classifier.train(\n training_data=training_data,\n targets=training_targets, \n batch_size=50, \n epochs=1200, \n optimizer=pk.Adam(learning_rate=0.012, decay_rate=0.95), \n testing_data=testing_data, \n testing_targets=testing_targets,\n testing_freq=30,\n decay_freq=15\n )\n \n # Save it\n pk.save(digit_classifier, 'digit_classifier_network.pkl')\n\n # Show performance\n accuracy = digit_classifier.accuracy(training_data, training_targets)\n print('Train Accuracy:', accuracy) \n accuracy = digit_classifier.accuracy(testing_data, testing_targets)\n print('Test Accuracy:', accuracy)\n \n # Plot performance graph\n digit_classifier.plot_performance()\n\n # Show confusion matrix\n digit_classifier.confusion_matrix(training_data, training_targets)\n\n # Assert if it has enough accuracy\n assert digit_classifier.accuracy(training_data, training_targets) > 95\n\n@pktest_graph\ndef test_predict_mnist_adam():\n import random\n\n import numpy as np\n import matplotlib.pyplot as plt\n import pykitml as pk\n from pykitml.datasets import mnist\n\n # Load dataset\n training_data, training_targets, testing_data, testing_targets = mnist.load()\n\n # Load the trained network\n digit_classifier = pk.load('digit_classifier_network.pkl')\n\n # Pick a random example from testing data\n index = random.randint(0, 9999)\n\n # Show the test data and the label\n plt.imshow(training_data[index].reshape(28, 28))\n plt.show()\n print('Label: ', training_targets[index])\n\n # Show prediction\n digit_classifier.feed(training_data[index])\n model_output = digit_classifier.get_output_onehot()\n print('Predicted: ', model_output)\n\nif __name__ == '__main__':\n # List of optimizers\n optimizers = [\n 'gradient_descent', 'momentum', 'nesterov',\n 'adagrad', 'RMSprop', 'adam' \n ]\n # Check if arguments passed to the script is correct\n if(len(sys.argv) != 2 or sys.argv[1] not in optimizers):\n print('Usage: python3 test_mnist.py OPTIMIZER')\n print('List of available optimizers:')\n print(str(optimizers))\n exit()\n \n # If the dataset is not available then download it\n if(not os.path.exists('mnist.pkl')): mnist.get()\n\n # Run the requested optimizer test function\n try:\n locals()['test_'+sys.argv[1]].__wrapped__()\n test_predict_mnist_adam.__wrapped__()\n except AssertionError:\n pass\n", "from pykitml.testing import pktest_graph, pktest_nograph\n\n@pktest_graph\ndef test_banknote():\n import os.path\n\n import numpy as np\n import pykitml as pk\n from pykitml.datasets import banknote\n\n # Download the dataset \n if(not os.path.exists('banknote.pkl')): banknote.get()\n\n # Load banknote data set\n inputs_train, outputs_train, inputs_test, outputs_test = banknote.load()\n\n # Normalize dataset\n array_min, array_max = pk.get_minmax(inputs_train)\n inputs_train = pk.normalize_minmax(inputs_train, array_min, array_max)\n inputs_test = pk.normalize_minmax(inputs_test, array_min, array_max)\n\n # Create polynomial features\n inputs_train_poly = pk.polynomial(inputs_train)\n inputs_test_poly = pk.polynomial(inputs_test)\n\n # Create model\n banknote_classifier = pk.LogisticRegression(inputs_train_poly.shape[1], 1)\n\n # Train the model\n 
banknote_classifier.train(\n training_data=inputs_train_poly,\n targets=outputs_train, \n batch_size=10, \n epochs=1500, \n optimizer=pk.Adam(learning_rate=0.06, decay_rate=0.99),\n testing_data=inputs_test_poly,\n testing_targets=outputs_test, \n testing_freq=30,\n decay_freq=40\n )\n\n # Save it\n pk.save(banknote_classifier, 'banknote_classifier.pkl') \n\n # Plot performance\n banknote_classifier.plot_performance()\n \n # Print accuracy\n accuracy = banknote_classifier.accuracy(inputs_train_poly, outputs_train)\n print('Train accuracy:', accuracy)\n accuracy = banknote_classifier.accuracy(inputs_test_poly, outputs_test)\n print('Test accuracy:', accuracy)\n\n # Plot confusion matrix\n banknote_classifier.confusion_matrix(inputs_test_poly, outputs_test)\n\n # Assert if it has enough accuracy\n assert banknote_classifier.accuracy(inputs_test_poly, outputs_test) >= 99\n\n@pktest_nograph\ndef test_predict_banknote():\n import os.path\n\n import numpy as np\n import pykitml as pk\n from pykitml.datasets import banknote\n\n # Predict banknote validity with variance, skewness, curtosis, entropy\n # of -2.3, -9.3, 9.37, -0.86\n\n # Load banknote data set\n inputs_train, outputs_train, inputs_test, outputs_test = banknote.load()\n\n # Load the model\n banknote_classifier = pk.load('banknote_classifier.pkl')\n\n # Normalize the inputs\n array_min, array_max = pk.get_minmax(inputs_train)\n input_data = pk.normalize_minmax(np.array([-2.3, -9.3, 9.37, -0.86]), array_min, array_max)\n\n # Create polynomial features\n input_data_poly = pk.polynomial(input_data)\n\n # Get output\n banknote_classifier.feed(input_data_poly)\n model_output = banknote_classifier.get_output()\n\n # Print result\n print(model_output) \n\nif __name__ == '__main__':\n try:\n test_banknote.__wrapped__()\n test_predict_banknote.__wrapped__()\n except AssertionError:\n pass" ]
[ [ "matplotlib.pyplot.show" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
testinground/Proctoring-AI
[ "27b04739fa8f126e3c796ea5e9a21bdfbf48debf" ]
[ "face_detector.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 29 17:52:00 2020\n\n@author: hp\n\"\"\"\n\nimport cv2\nimport numpy as np\n\ndef get_face_detector(modelFile = \"models/res10_300x300_ssd_iter_140000.caffemodel\",\n configFile = \"models/deploy.prototxt\"):\n \"\"\"\n Get the face detection caffe model of OpenCV's DNN module\n \n Parameters\n ----------\n modelFile : string, optional\n Path to model file. The default is \"models/res10_300x300_ssd_iter_140000.caffemodel\".\n configFile : string, optional\n Path to config file. The default is \"models/deploy.prototxt\".\n\n Returns\n -------\n model : dnn_Net\n\n \"\"\"\n modelFile = \"models/res10_300x300_ssd_iter_140000.caffemodel\"\n configFile = \"models/deploy.prototxt\"\n model = cv2.dnn.readNetFromCaffe(configFile, modelFile)\n return model\n\ndef find_faces(img, model):\n \"\"\"\n Find the faces in an image\n \n Parameters\n ----------\n img : np.uint8\n Image to find faces from\n model : dnn_Net\n Face detection model\n\n Returns\n -------\n faces : list\n List of coordinates of the faces detected in the image\n\n \"\"\"\n h, w = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,\n\t(300, 300), (104.0, 177.0, 123.0))\n model.setInput(blob)\n res = model.forward()\n faces = []\n for i in range(res.shape[2]):\n confidence = res[0, 0, i, 2]\n if confidence > 0.5:\n box = res[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, x1, y1) = box.astype(\"int\")\n faces.append([x, y, x1, y1])\n return faces\n\ndef draw_faces(img, faces):\n \"\"\"\n Draw faces on image\n\n Parameters\n ----------\n img : np.uint8\n Image to draw faces on\n faces : List of face coordinates\n Coordinates of faces to draw\n\n Returns\n -------\n None.\n\n \"\"\"\n for x, y, x1, y1 in faces:\n cv2.rectangle(img, (x, y), (x1, y1), (0, 0, 255), 3)\n " ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
heikoschmidt1187/CarND-Advanced-Lane-Lines
[ "671c8d9a08853b4a9c00995a2ace6d25eb478e8f" ]
[ "threshold_par.py" ]
[ "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\n\ndef abs_sobel_threshold(img, orientation='x', kernel_size=3, threshold=(0, 255)):\n \"\"\"\n `orientation` Input for setting the sobel operator gradient orientation (x, y)\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold\n\n This function calculates a binary image mask according to the absolute\n sobel operation on a given gradient, based on a lower and upper\n threshold.\n\n returns a binary image\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel depending on the orientation\n if orientation == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, \\\n ksize=kernel_size))\n elif orientation == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, \\\n ksize=kernel_size))\n else:\n abs_sobel = np.zeros_like(gray)\n print(\"None\")\n\n # rescale the sobel to uint8 type\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= threshold[0]) & (scaled_sobel <= threshold[1])] = 1\n\n return binary_output\n\ndef direction_sobel_threshold(img, kernel_size=3, threshold=(0, np.pi / 2)):\n \"\"\"\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold in rad\n\n This function calculates the gradients and thresholds the direction based\n on given angles\n\n returns a binary image based on the given thresholds\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n # calculate the gradient direction\n absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(absgraddir)\n binary_output[(absgraddir >= threshold[0]) & (absgraddir <= threshold[1])] = 1\n\n # Return the binary image\n return binary_output\n\ndef mag_sobel_threshold(img, kernel_size=3, threshold=(0, 255)):\n \"\"\"\n `kernel_size` Input for kernel size of sobel operator\n `threshold` Input tuple for lower and upper threshold\n\n This function calculates the magnitude of the gradients detected by the\n sobel operator in X and Y direction.\n\n returns a binary image based on the given thresholds\n \"\"\"\n gray = cv2.GaussianBlur(img, (5, 5), 0)\n\n # calculate the sobel\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n # calculate the gradient magnitude\n magnitude = np.sqrt(sobelx**2 + sobely**2)\n\n # rescale to 8 bit\n scale = np.max(magnitude)/255\n magnitude = (magnitude / scale).astype(np.uint8)\n\n # calculate the binary output with respect to thresholds\n binary_output = np.zeros_like(magnitude)\n binary_output[(magnitude >= threshold[0]) & (magnitude <= threshold[1])] = 1\n return binary_output\n\ndef nothing(x):\n pass\n\ncv2.namedWindow('image')\n\"\"\"\ncv2.createTrackbar('Low', 'image', 0, 255, nothing)\ncv2.createTrackbar('High', 'image', 0, 255, nothing)\n\"\"\"\ncv2.createTrackbar('Low', 'image', 0, 255, nothing)\ncv2.createTrackbar('High', 'image', 0, 255, nothing)\n\n#testimages = glob.glob('test_images/*.jpg')\ntestimages = glob.glob('output_images/debug/*.png')\n\nfor 
curImage in testimages:\n\n print(curImage)\n\n img = cv2.imread(curImage)\n img = img[:,:,:3]\n img = cv2.pyrDown(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:,:,1]\n\n lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)[:,:,2]\n\n \"\"\"\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(24, 9))\n ax1.imshow(img)\n ax1.set_title(\"RGB\")\n ax2.imshow(lab[:,:,0], cmap='gray')\n ax2.set_title(\"L\")\n ax3.imshow(lab[:,:,1], cmap='gray')\n ax3.set_title(\"A\")\n ax4.imshow(lab[:,:,2], cmap='gray')\n ax4.set_title(\"B\")\n plt.show()\n \"\"\"\n\n debug_image = np.zeros((360, 640 * 2, 3), dtype=np.uint8)\n debug_image[0:img.shape[0], 0:img.shape[1]] = img\n\n gray = cv2.equalizeHist(gray)\n\n\n while(1):\n\n k = cv2.waitKey(1) & 0xFF\n if k == 27:\n break\n\n low = cv2.getTrackbarPos('Low', 'image')\n high = cv2.getTrackbarPos('High', 'image')\n\n #binary = abs_sobel_threshold(gray, 'y', kernel_size=3, threshold=(low, high))\n #binary = mag_sobel_threshold(gray, kernel_size=3, threshold=(low, high))\n \"\"\"\n binary = np.zeros_like(hls)\n binary[(lab > low) & (lab < high)] = 1\n \"\"\"\n ret, binary = cv2.threshold(gray, thresh=low, maxval=high, type=cv2.THRESH_BINARY)\n bin = np.dstack((binary, binary, binary))\n debug_image[0:bin.shape[0], img.shape[1]:] = bin\n\n cv2.imshow('window', debug_image)\n\n\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.absolute", "numpy.sqrt", "numpy.dstack", "numpy.max", "numpy.zeros_like", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
clebouteiller/landlab
[ "e6f47db76ea0814c4c5a24e695bbafb74c722ff7", "e6f47db76ea0814c4c5a24e695bbafb74c722ff7", "a5dd80b8ebfd03d1ba87ef6c4368c409485f222c", "a5dd80b8ebfd03d1ba87ef6c4368c409485f222c", "e6f47db76ea0814c4c5a24e695bbafb74c722ff7" ]
[ "landlab/components/overland_flow/generate_overland_flow_deAlmeida.py", "tests/graph/hex/test_hex.py", "landlab/components/erosion_deposition/generalized_erosion_deposition.py", "tests/components/space/test_space_large_scale_eroder.py", "landlab/grid/hex.py" ]
[ "\"\"\"Landlab component that simulates overland flow.\n\nThis component simulates overland flow using the 2-D numerical model of\nshallow-water flow over topography using the de Almeida et al., 2012\nalgorithm for storage-cell inundation modeling.\n\n.. codeauthor:: Jordan Adams\n\nExamples\n--------\n>>> import numpy as np\n>>> from landlab import RasterModelGrid\n>>> from landlab.components.overland_flow import OverlandFlow\n\nCreate a grid on which to calculate overland flow.\n\n>>> grid = RasterModelGrid((4, 5))\n\nThe grid will need some data to provide the overland flow component. To\ncheck the names of the fields that provide input to the overland flow\ncomponent use the *input_var_names* class property.\n\n>>> OverlandFlow.input_var_names\n('surface_water__depth', 'topographic__elevation')\n\nCreate fields of data for each of these input variables.\n\n>>> grid.at_node['topographic__elevation'] = np.array([\n... 0., 0., 0., 0., 0.,\n... 1., 1., 1., 1., 1.,\n... 2., 2., 2., 2., 2.,\n... 3., 3., 3., 3., 3.])\n>>> grid.at_node['surface_water__depth'] = np.array([\n... 0. , 0. , 0. , 0. , 0. ,\n... 0. , 0. , 0. , 0. , 0. ,\n... 0. , 0. , 0. , 0. , 0. ,\n... 0.1, 0.1, 0.1, 0.1, 0.1])\n\nInstantiate the `OverlandFlow` component to work on this grid, and run it.\n\n>>> of = OverlandFlow(grid, steep_slopes=True)\n>>> of.run_one_step()\n\nAfter calculating the overland flow, new fields have been added to the\ngrid. Use the *output_var_names* property to see the names of the fields that\nhave been changed.\n\n>>> of.output_var_names\n('surface_water__depth', 'surface_water__discharge', 'water_surface__gradient')\n\nThe `surface_water__depth` field is defined at nodes.\n\n>>> of.var_loc('surface_water__depth')\n'node'\n>>> grid.at_node['surface_water__depth'] # doctest: +NORMALIZE_WHITESPACE\narray([ 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 1.00000000e-05,\n 1.00000000e-05, 1.00000000e-05, 2.00100000e-02,\n 2.00100000e-02, 2.00100000e-02, 1.00000000e-05,\n 1.00010000e-01, 1.00010000e-01, 1.00010000e-01,\n 1.00010000e-01, 1.00010000e-01])\n\nThe `surface_water__discharge` field is defined at links. Because our initial\ntopography was a dipping plane, there is no water discharge in the horizontal\ndirection, only toward the bottom of the grid.\n\n>>> of.var_loc('surface_water__discharge')\n'link'\n>>> q = grid.at_link['surface_water__discharge'] # doctest: +NORMALIZE_WHITESPACE\n>>> np.all(q[grid.horizontal_links] == 0.)\nTrue\n>>> np.all(q[grid.vertical_links] <= 0.)\nTrue\n\nThe *water_surface__gradient* is also defined at links.\n\n>>> of.var_loc('water_surface__gradient')\n'link'\n>>> grid.at_link['water_surface__gradient'] # doctest: +NORMALIZE_WHITESPACE\narray([ 0. , 0. , 0. , 0. ,\n 0. , 1. , 1. , 1. , 0. ,\n 0. , 0. , 0. , 0. ,\n 0. , 1. , 1. , 1. , 0. ,\n 0. , 0. , 0. , 0. ,\n 0. , 1.1, 1.1, 1.1, 0. ,\n 0. , 0. , 0. , 0. ])\n\"\"\"\nimport numpy as np\nimport scipy.constants\n\nfrom landlab import Component, FieldError\n\nfrom . import _links as links\n\n_SEVEN_OVER_THREE = 7.0 / 3.0\n\n\ndef _active_links_at_node(grid, *args):\n \"\"\"_active_links_at_node([node_ids]) Active links of a node.\n\n .. 
note::\n\n This function returns links that are in *clockwise* order,\n rather than the standard *counterclockwise* ordering that\n landlab uses everywhere else.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A grid.\n node_ids : int or list of ints\n ID(s) of node(s) for which to find connected active links\n\n Returns\n -------\n (4, N) ndarray\n The ids of active links attached to grid nodes with\n *node_ids*. If *node_ids* is not given, return links for all of the\n nodes in the grid. Link ids are listed in clockwise order starting\n with the south link. Diagonal links are never returned.\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> from landlab.components.overland_flow.generate_overland_flow_deAlmeida import _active_links_at_node\n >>> grid = RasterModelGrid((3, 4))\n >>> grid.links_at_node[5]\n array([ 8, 11, 7, 4])\n >>> _active_links_at_node(grid, (5, 6))\n array([[ 4, 5],\n [ 7, 8],\n [11, 12],\n [ 8, 9]])\n >>> _active_links_at_node(grid)\n array([[-1, -1, -1, -1, -1, 4, 5, -1, -1, 11, 12, -1],\n [-1, -1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1],\n [-1, 4, 5, -1, -1, 11, 12, -1, -1, -1, -1, -1],\n [-1, -1, -1, -1, 7, 8, 9, -1, -1, -1, -1, -1]])\n\n LLCATS: DEPR LINF NINF\n \"\"\"\n active_links_at_node = grid.links_at_node.copy()\n active_links_at_node[grid.active_link_dirs_at_node == 0] = -1\n active_links_at_node = active_links_at_node[:, (3, 2, 1, 0)]\n\n if len(args) == 0:\n return active_links_at_node.T\n elif len(args) == 1:\n node_ids = np.broadcast_arrays(args[0])[0]\n return active_links_at_node[node_ids, :].T\n else:\n raise ValueError(\"only zero or one arguments accepted\")\n\n\nclass OverlandFlow(Component):\n\n \"\"\"Simulate overland flow using de Almeida approximations.\n\n Landlab component that simulates overland flow using the de Almeida\n et al., 2012 approximations of the 1D shallow water equations to be used\n for 2D flood inundation modeling.\n\n This component calculates discharge, depth and shear stress after some\n precipitation event across any raster grid. Default input file is named\n \"overland_flow_input.txt' and is contained in the\n landlab.components.overland_flow folder.\n\n The primary method of this class is :func:`run_one_step`.\n\n References\n ----------\n **Required Software Citation(s) Specific to this Component**\n\n Adams, J., Gasparini, N., Hobley, D., Tucker, G., Hutton, E., Nudurupati,\n S., Istanbulluoglu, E. (2017). The Landlab v1. 0 OverlandFlow component:\n a Python tool for computing shallow-water flow across watersheds.\n Geoscientific Model Development 10(4), 1645.\n https://dx.doi.org/10.5194/gmd-10-1645-2017\n\n **Additional References**\n\n de Almeida, G., Bates, P., Freer, J., Souvignet, M. (2012). Improving the\n stability of a simple formulation of the shallow water equations for 2-D\n flood modeling. Water Resources Research 48(5)\n https://dx.doi.org/10.1029/2011wr011570\n\n \"\"\"\n\n _name = \"OverlandFlow\"\n\n _unit_agnostic = False\n\n _cite_as = \"\"\"@article{adams2017landlab,\n title={The Landlab v1. 
0 OverlandFlow component: a Python\n tool for computing shallow-water flow across watersheds},\n author={Adams, Jordan M and Gasparini, Nicole M and\n Hobley, Daniel EJ and Tucker, Gregory E and\n Hutton, Eric WH and Nudurupati, Sai S and\n Istanbulluoglu, Erkan},\n journal={Geoscientific Model Development},\n volume={10},\n number={4},\n pages={1645},\n year={2017},\n publisher={Copernicus GmbH}\n }\n \"\"\"\n\n _info = {\n \"surface_water__depth\": {\n \"dtype\": float,\n \"intent\": \"inout\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Depth of water on the surface\",\n },\n \"surface_water__discharge\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m3/s\",\n \"mapping\": \"link\",\n \"doc\": \"Volumetric discharge of surface water\",\n },\n \"topographic__elevation\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Land surface topographic elevation\",\n },\n \"water_surface__gradient\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"link\",\n \"doc\": \"Downstream gradient of the water surface.\",\n },\n }\n\n def __init__(\n self,\n grid,\n default_fixed_links=False,\n h_init=0.00001,\n alpha=0.7,\n mannings_n=0.03,\n g=scipy.constants.g,\n theta=0.8,\n rainfall_intensity=0.0,\n steep_slopes=False,\n ):\n \"\"\"Create an overland flow component.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A landlab grid.\n h_init : float, optional\n Thicknes of initial thin layer of water to prevent divide by zero\n errors (m).\n alpha : float, optional\n Time step coeffcient, described in Bates et al., 2010 and\n de Almeida et al., 2012.\n mannings_n : float, optional\n Manning's roughness coefficient.\n g : float, optional\n Acceleration due to gravity (m/s^2).\n theta : float, optional\n Weighting factor from de Almeida et al., 2012.\n rainfall_intensity : float, optional\n Rainfall intensity. Default is zero.\n steep_slopes : bool, optional\n Modify the algorithm to handle steeper slopes at the expense of\n speed. 
If model runs become unstable, consider setting to True.\n \"\"\"\n super().__init__(grid)\n\n # First we copy our grid\n\n self._h_init = h_init\n self._alpha = alpha\n\n if isinstance(mannings_n, str):\n self._mannings_n = self._grid.at_link[mannings_n]\n else:\n self._mannings_n = mannings_n\n\n self._g = g\n self._theta = theta\n self.rainfall_intensity = rainfall_intensity\n self._steep_slopes = steep_slopes\n\n # Now setting up fields at the links...\n # For water discharge\n try:\n self._q = grid.add_zeros(\n \"surface_water__discharge\",\n at=\"link\",\n units=self._info[\"surface_water__discharge\"][\"units\"],\n )\n\n except FieldError:\n # Field was already set; still, fill it with zeros\n self._q = grid.at_link[\"surface_water__discharge\"]\n self._q.fill(0.0)\n\n # For water depths calculated at links\n try:\n self._h_links = grid.add_zeros(\n \"surface_water__depth\",\n at=\"link\",\n units=self._info[\"surface_water__depth\"][\"units\"],\n )\n except FieldError:\n self._h_links = grid.at_link[\"surface_water__depth\"]\n self._h_links.fill(0.0)\n self._h_links += self._h_init\n\n self._h = grid.at_node[\"surface_water__depth\"]\n self._h += self._h_init\n\n # For water surface slopes at links\n try:\n self._water_surface_slope = grid.add_zeros(\n \"water_surface__gradient\", at=\"link\"\n )\n except FieldError:\n self._water_surface_slope = grid.at_link[\"water_surface__gradient\"]\n self._water_surface_slope.fill(0.0)\n\n # Start time of simulation is at 1.0 s\n self._elapsed_time = 1.0\n\n self._dt = None\n self._dhdt = grid.zeros()\n\n # When we instantiate the class we recognize that neighbors have not\n # been found. After the user either calls self.set_up_neighbor_array\n # or self.overland_flow this will be set to True. This is done so\n # that every iteration of self.overland_flow does NOT need to\n # reinitalize the neighbors and saves computation time.\n self._neighbor_flag = False\n\n # When looking for neighbors, we automatically ignore inactive links\n # by default. However, what about when we want to look at fixed links\n # too? 
By default, we ignore these, but if they are important to your\n # model and will be updated in your driver loop, they can be used by\n # setting the flag in the initialization of the class to 'True'\n self._default_fixed_links = default_fixed_links\n\n # Assiging a class variable to the elevation field.\n self._z = self._grid.at_node[\"topographic__elevation\"]\n\n @property\n def h(self):\n \"\"\"The depth of water at each node.\"\"\"\n return self._h\n\n @property\n def dt(self):\n \"\"\"dt: Component timestep.\"\"\"\n return self._dt\n\n @dt.setter\n def dt(self, dt):\n assert dt > 0\n self._dt = dt\n\n @property\n def rainfall_intensity(self):\n \"\"\"rainfall_intensity: the rainfall rate [m/s]\n\n Must be positive.\n \"\"\"\n return self._rainfall_intensity\n\n @rainfall_intensity.setter\n def rainfall_intensity(self, rainfall_intensity):\n if rainfall_intensity >= 0:\n self._rainfall_intensity = rainfall_intensity\n else:\n raise ValueError(\"Rainfall intensity must be positive\")\n\n def calc_time_step(self):\n \"\"\"Calculate time step.\n\n Adaptive time stepper from Bates et al., 2010 and de Almeida et\n al., 2012\n \"\"\"\n self._dt = (\n self._alpha\n * self._grid.dx\n / np.sqrt(self._g * np.amax(self._grid.at_node[\"surface_water__depth\"]))\n )\n\n return self._dt\n\n def set_up_neighbor_arrays(self):\n \"\"\"Create and initialize link neighbor arrays.\n\n Set up arrays of neighboring horizontal and vertical links that\n are needed for the de Almeida solution.\n \"\"\"\n # First we identify all active links\n\n self._active_ids = links.active_link_ids(\n self._grid.shape, self._grid.status_at_node\n )\n\n self._active_links_at_open_bdy = _active_links_at_node(\n self.grid, self.grid.open_boundary_nodes\n ).transpose()\n\n self._active_links_at_open_bdy = self._active_links_at_open_bdy[\n np.where(self._active_links_at_open_bdy > -1)\n ]\n\n # And then find all horizontal link IDs (Active and Inactive)\n self._horizontal_ids = links.horizontal_link_ids(self._grid.shape)\n\n # And make the array 1-D\n self._horizontal_ids = self._horizontal_ids.flatten()\n\n # Find all horizontal active link ids\n self._horizontal_active_link_ids = links.horizontal_active_link_ids(\n self._grid.shape, self._active_ids\n )\n\n # Now we repeat this process for the vertical links.\n # First find the vertical link ids and reshape it into a 1-D array\n self._vertical_ids = links.vertical_link_ids(self._grid.shape).flatten()\n\n # Find the *active* verical link ids\n self._vertical_active_link_ids = links.vertical_active_link_ids(\n self._grid.shape, self._active_ids\n )\n\n if self._default_fixed_links is True:\n fixed_link_ids = links.fixed_link_ids(\n self._grid.shape, self._grid.status_at_node\n )\n fixed_horizontal_links = links.horizontal_fixed_link_ids(\n self._grid.shape, fixed_link_ids\n )\n fixed_vertical_links = links.vertical_fixed_link_ids(\n self._grid.shape, fixed_link_ids\n )\n self._horizontal_active_link_ids = np.maximum(\n self._horizontal_active_link_ids, fixed_horizontal_links\n )\n self._vertical_active_link_ids = np.maximum(\n self._vertical_active_link_ids, fixed_vertical_links\n )\n self._active_neighbors = find_active_neighbors_for_fixed_links(self._grid)\n\n self._vert_bdy_ids = self._active_links_at_open_bdy[\n links.is_vertical_link(self._grid.shape, self._active_links_at_open_bdy)\n ]\n\n self._vert_bdy_ids = links.nth_vertical_link(\n self._grid.shape, self._vert_bdy_ids\n )\n\n self._horiz_bdy_ids = self._active_links_at_open_bdy[\n 
links.is_horizontal_link(self._grid.shape, self._active_links_at_open_bdy)\n ]\n\n self._horiz_bdy_ids = links.nth_horizontal_link(\n self._grid.shape, self._horiz_bdy_ids\n )\n\n # Using the active vertical link ids we can find the north\n # and south vertical neighbors\n self._north_neighbors = links.vertical_north_link_neighbor(\n self._grid.shape, self._vertical_active_link_ids\n )\n self._south_neighbors = links.vertical_south_link_neighbor(\n self._grid.shape, self._vertical_active_link_ids\n )\n\n # Using the horizontal active link ids, we can find the west and\n # east neighbors\n self._west_neighbors = links.horizontal_west_link_neighbor(\n self._grid.shape, self._horizontal_active_link_ids\n )\n self._east_neighbors = links.horizontal_east_link_neighbor(\n self._grid.shape, self._horizontal_active_link_ids\n )\n\n # replace bdy condition links\n (ids,) = np.where(self._west_neighbors[self._horiz_bdy_ids] == -1)\n ids = self._horiz_bdy_ids[ids]\n self._west_neighbors[ids] = self._horizontal_active_link_ids[ids]\n\n (ids,) = np.where(self._east_neighbors[self._horiz_bdy_ids] == -1)\n ids = self._horiz_bdy_ids[ids]\n self._east_neighbors[ids] = self._horizontal_active_link_ids[ids]\n\n (ids,) = np.where(self._north_neighbors[self._vert_bdy_ids] == -1)\n ids = self._vert_bdy_ids[ids]\n self._north_neighbors[ids] = self._vertical_active_link_ids[ids]\n\n (ids,) = np.where(self._south_neighbors[self._vert_bdy_ids] == -1)\n ids = self._vert_bdy_ids[ids]\n self._south_neighbors[ids] = self._vertical_active_link_ids[ids]\n\n # Set up arrays for discharge in the horizontal & vertical directions.\n self._q_horizontal = np.zeros(\n links.number_of_horizontal_links(self._grid.shape)\n )\n self._q_vertical = np.zeros(links.number_of_vertical_links(self._grid.shape))\n\n # Once the neighbor arrays are set up, we change the flag to True!\n self._neighbor_flag = True\n\n def overland_flow(self, dt=None):\n \"\"\"Generate overland flow across a grid.\n\n For one time step, this generates 'overland flow' across a given grid\n by calculating discharge at each node.\n\n Using the depth slope product, shear stress is calculated at every\n node.\n\n Outputs water depth, discharge and shear stress values through time at\n every point in the input grid.\n \"\"\"\n # DH adds a loop to enable an imposed tstep while maintaining stability\n local_elapsed_time = 0.0\n if dt is None:\n dt = np.inf # to allow the loop to begin\n while local_elapsed_time < dt:\n dt_local = self.calc_time_step()\n # Can really get into trouble if nothing happens but we still run:\n if not dt_local < np.inf:\n break\n if local_elapsed_time + dt_local > dt:\n dt_local = dt - local_elapsed_time\n self._dt = dt_local\n\n # First, we check and see if the neighbor arrays have been\n # initialized\n if self._neighbor_flag is False:\n self.set_up_neighbor_arrays()\n\n # In case another component has added data to the fields, we just\n # reset our water depths, topographic elevations and water\n # discharge variables to the fields.\n self._h = self._grid[\"node\"][\"surface_water__depth\"]\n self._z = self._grid[\"node\"][\"topographic__elevation\"]\n self._q = self._grid[\"link\"][\"surface_water__discharge\"]\n self._h_links = self._grid[\"link\"][\"surface_water__depth\"]\n\n # Here we identify the core nodes and active links for later use.\n self._core_nodes = self._grid.core_nodes\n self._active_links = self._grid.active_links\n\n # Per Bates et al., 2010, this solution needs to find difference\n # between the highest water surface 
in the two cells and the\n # highest bed elevation\n zmax = self._grid.map_max_of_link_nodes_to_link(self._z)\n w = self._h + self._z\n wmax = self._grid.map_max_of_link_nodes_to_link(w)\n hflow = wmax[self._grid.active_links] - zmax[self._grid.active_links]\n\n # Insert this water depth into an array of water depths at the\n # links.\n self._h_links[self._active_links] = hflow\n\n # Now we calculate the slope of the water surface elevation at\n # active links\n self._water_surface__gradient = self._grid.calc_grad_at_link(w)[\n self._grid.active_links\n ]\n\n # And insert these values into an array of all links\n self._water_surface_slope[\n self._active_links\n ] = self._water_surface__gradient\n # If the user chooses to set boundary links to the neighbor value,\n # we set the discharge array to have the boundary links set to\n # their neighbor value\n if self._default_fixed_links is True:\n self._q[self._grid.fixed_links] = self._q[self._active_neighbors]\n\n # Now we can calculate discharge. To handle links with neighbors\n # that do not exist, we will do a fancy indexing trick. Non-\n # existent links or inactive links have an index of '-1', which in\n # Python, looks to the end of a list or array. To accommodate these\n # '-1' indices, we will simply insert an value of 0.0 discharge (in\n # units of L^2/T) to the end of the discharge array.\n self._q = np.append(self._q, [0])\n\n horiz = self._horizontal_ids\n vert = self._vertical_ids\n # Now we calculate discharge in the horizontal direction\n try:\n self._q[horiz] = (\n self._theta * self._q[horiz]\n + (1.0 - self._theta)\n / 2.0\n * (self._q[self._west_neighbors] + self._q[self._east_neighbors])\n - self._g\n * self._h_links[horiz]\n * self._dt\n * self._water_surface_slope[horiz]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n**2.0\n * abs(self._q[horiz])\n / self._h_links[horiz] ** _SEVEN_OVER_THREE\n )\n\n # ... and in the vertical direction\n self._q[vert] = (\n self._theta * self._q[vert]\n + (1 - self._theta)\n / 2.0\n * (self._q[self._north_neighbors] + self._q[self._south_neighbors])\n - self._g\n * self._h_links[vert]\n * self._dt\n * self._water_surface_slope[vert]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n**2.0\n * abs(self._q[vert])\n / self._h_links[vert] ** _SEVEN_OVER_THREE\n )\n\n except ValueError:\n self._mannings_n = self._grid[\"link\"][\"mannings_n\"]\n # if manning's n in a field\n # calc discharge in horizontal\n self._q[horiz] = (\n self._theta * self._q[horiz]\n + (1.0 - self._theta)\n / 2.0\n * (self._q[self._west_neighbors] + self._q[self._east_neighbors])\n - self._g\n * self._h_links[horiz]\n * self._dt\n * self._water_surface_slope[horiz]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n[horiz] ** 2.0\n * abs(self._q[horiz])\n / self._h_links[horiz] ** _SEVEN_OVER_THREE\n )\n\n # ... 
and in the vertical direction\n self._q[vert] = (\n self._theta * self._q[vert]\n + (1 - self._theta)\n / 2.0\n * (self._q[self._north_neighbors] + self._q[self._south_neighbors])\n - self._g\n * self._h_links[vert]\n * self._dt\n * self._water_surface_slope[self._vertical_ids]\n ) / (\n 1\n + self._g\n * self._dt\n * self._mannings_n[vert] ** 2.0\n * abs(self._q[vert])\n / self._h_links[vert] ** _SEVEN_OVER_THREE\n )\n\n # Now to return the array to its original length (length of number\n # of all links), we delete the extra 0.0 value from the end of the\n # array.\n self._q = np.delete(self._q, len(self._q) - 1)\n\n # Updating the discharge array to have the boundary links set to\n # their neighbor\n if self._default_fixed_links is True:\n self._q[self._grid.fixed_links] = self._q[self._active_neighbors]\n\n if self._steep_slopes is True:\n # To prevent water from draining too fast for our time steps...\n # Our Froude number.\n Fr = 1.0\n # Our two limiting factors, the froude number and courant\n # number.\n # Looking a calculated q to be compared to our Fr number.\n calculated_q = (self._q / self._h_links) / np.sqrt(\n self._g * self._h_links\n )\n\n # Looking at our calculated q and comparing it to Courant no.,\n q_courant = self._q * self._dt / self._grid.dx\n\n # Water depth split equally between four links..\n water_div_4 = self._h_links / 4.0\n\n # IDs where water discharge is positive...\n (positive_q,) = np.where(self._q > 0)\n\n # ... and negative.\n (negative_q,) = np.where(self._q < 0)\n\n # Where does our calculated q exceed the Froude number? If q\n # does exceed the Froude number, we are getting supercritical\n # flow and discharge needs to be reduced to maintain stability.\n (Froude_logical,) = np.where((calculated_q) > Fr)\n (Froude_abs_logical,) = np.where(abs(calculated_q) > Fr)\n\n # Where does our calculated q exceed the Courant number and\n # water depth divided amongst 4 links? If the calculated q\n # exceeds the Courant number and is greater than the water\n # depth divided by 4 links, we reduce discharge to maintain\n # stability.\n (water_logical,) = np.where(q_courant > water_div_4)\n (water_abs_logical,) = np.where(abs(q_courant) > water_div_4)\n\n # Where are these conditions met? For positive and negative q,\n # there are specific rules to reduce q. 
This step finds where\n # the discharge values are positive or negative and where\n # discharge exceeds the Froude or Courant number.\n self._if_statement_1 = np.intersect1d(positive_q, Froude_logical)\n self._if_statement_2 = np.intersect1d(negative_q, Froude_abs_logical)\n self._if_statement_3 = np.intersect1d(positive_q, water_logical)\n self._if_statement_4 = np.intersect1d(negative_q, water_abs_logical)\n\n # Rules 1 and 2 reduce discharge by the Froude number.\n self._q[self._if_statement_1] = self._h_links[self._if_statement_1] * (\n np.sqrt(self._g * self._h_links[self._if_statement_1]) * Fr\n )\n\n self._q[self._if_statement_2] = 0.0 - (\n self._h_links[self._if_statement_2]\n * np.sqrt(self._g * self._h_links[self._if_statement_2])\n * Fr\n )\n\n # Rules 3 and 4 reduce discharge by the Courant number.\n self._q[self._if_statement_3] = (\n (self._h_links[self._if_statement_3] * self._grid.dx) / 5.0\n ) / self._dt\n\n self._q[self._if_statement_4] = (\n 0.0\n - (self._h_links[self._if_statement_4] * self._grid.dx / 5.0)\n / self._dt\n )\n\n # Once stability has been restored, we calculate the change in\n # water depths on all core nodes by finding the difference between\n # inputs (rainfall) and the inputs/outputs (flux divergence of\n # discharge)\n self._dhdt = self._rainfall_intensity - self._grid.calc_flux_div_at_node(\n self._q\n )\n\n # Updating our water depths...\n self._h[self._core_nodes] = (\n self._h[self._core_nodes] + self._dhdt[self._core_nodes] * self._dt\n )\n\n # To prevent divide by zero errors, a minimum threshold water depth\n # must be maintained. To reduce mass imbalances, this is set to\n # find locations where water depth is smaller than h_init (default\n # is 0.001) and the new value is self._h_init * 10^-3. This was set\n # as it showed the smallest amount of mass creation in the grid\n # during testing.\n if self._steep_slopes is True:\n self._h[self._h < self._h_init] = self._h_init * 10.0**-3\n\n # And reset our field values with the newest water depth and\n # discharge.\n self._grid.at_node[\"surface_water__depth\"] = self._h\n self._grid.at_link[\"surface_water__discharge\"] = self._q\n #\n #\n # self._helper_q = self._grid.map_upwind_node_link_max_to_node(self._q)\n # self._helper_s = self._grid.map_upwind_node_link_max_to_node(\n # self._water_surface_slope)\n #\n # self._helper_q = self._grid.map_max_of_link_nodes_to_link(self._helper_q)\n # self._helper_s = self._grid.map_max_of_link_nodes_to_link(self._helper_s)\n #\n # self._grid['link']['surface_water__discharge'][\n # self._active_links_at_open_bdy] = self._helper_q[\n # self._active_links_at_open_bdy]\n #\n # self._grid['link']['water_surface__gradient'][\n # self._active_links_at_open_bdy] = self._helper_s[\n # self._active_links_at_open_bdy]\n # Update nodes near boundary locations - nodes adjacent to\n # boundaries may have discharge and water surface slopes\n # artifically reduced due to boundary effects. 
This step removes\n # those errors.\n\n if dt is np.inf:\n break\n local_elapsed_time += self._dt\n\n def run_one_step(self, dt=None):\n \"\"\"Generate overland flow across a grid.\n\n For one time step, this generates 'overland flow' across a given grid\n by calculating discharge at each node.\n\n Using the depth slope product, shear stress is calculated at every\n node.\n\n Outputs water depth, discharge and shear stress values through time at\n every point in the input grid.\n \"\"\"\n self.overland_flow(dt=dt)\n\n def discharge_mapper(self, input_discharge, convert_to_volume=False):\n \"\"\"Maps discharge value from links onto nodes.\n\n This method takes the discharge values on links and determines the\n links that are flowing INTO a given node. The fluxes moving INTO a\n given node are summed.\n\n This method ignores all flow moving OUT of a given node.\n\n This takes values from the OverlandFlow component (by default) in\n units of [L^2/T]. If the convert_to_cms flag is raised as True, this\n method converts discharge to units [L^3/T] - as of Aug 2016, only\n operates for square RasterModelGrid instances.\n\n The output array is of length grid.number_of_nodes and can be used\n with the Landlab imshow_grid plotter.\n\n Returns a numpy array (discharge_vals)\n \"\"\"\n\n discharge_vals = np.zeros(self._grid.number_of_links)\n discharge_vals[:] = input_discharge[:]\n\n if convert_to_volume:\n discharge_vals *= self._grid.dx\n\n discharge_vals = (\n discharge_vals[self._grid.links_at_node] * self._grid.link_dirs_at_node\n )\n\n discharge_vals = discharge_vals.flatten()\n\n discharge_vals[np.where(discharge_vals < 0)] = 0.0\n\n discharge_vals = discharge_vals.reshape(self._grid.number_of_nodes, 4)\n\n discharge_vals = discharge_vals.sum(axis=1)\n\n return discharge_vals\n\n\ndef find_active_neighbors_for_fixed_links(grid):\n \"\"\"Find active link neighbors for every fixed link.\n\n Specialized link ID function used to ID the active links that neighbor\n fixed links in the vertical and horizontal directions.\n\n If the user wants to assign fixed gradients or values to the fixed\n links dynamically, this function identifies the nearest active_link\n neighbor.\n\n Each fixed link can either have 0 or 1 active neighbor. 
This function\n finds if and where that active neighbor is and stores those IDs in\n an array.\n\n Parameters\n ----------\n grid : RasterModelGrid\n A landlab grid.\n\n Returns\n -------\n ndarray of int, shape `(*, )`\n Flat array of links.\n\n\n Examples\n --------\n >>> from landlab import NodeStatus, RasterModelGrid\n >>> from landlab.components.overland_flow.generate_overland_flow_deAlmeida import find_active_neighbors_for_fixed_links\n\n >>> grid = RasterModelGrid((4, 5))\n >>> grid.status_at_node[:5] = NodeStatus.FIXED_GRADIENT\n >>> grid.status_at_node[::5] = NodeStatus.FIXED_GRADIENT\n >>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE\n array([2, 2, 2, 2, 2,\n 2, 0, 0, 0, 1,\n 2, 0, 0, 0, 1,\n 2, 1, 1, 1, 1], dtype=uint8)\n\n >>> grid.fixed_links\n array([ 5, 6, 7, 9, 18])\n >>> grid.active_links\n array([10, 11, 12, 14, 15, 16, 19, 20, 21, 23, 24, 25])\n\n >>> find_active_neighbors_for_fixed_links(grid)\n array([14, 15, 16, 10, 19])\n\n >>> rmg = RasterModelGrid((4, 7))\n\n >>> rmg.at_node['topographic__elevation'] = rmg.zeros(at='node')\n >>> rmg.at_link['topographic__slope'] = rmg.zeros(at='link')\n >>> rmg.status_at_node[rmg.perimeter_nodes] = rmg.BC_NODE_IS_FIXED_GRADIENT\n >>> find_active_neighbors_for_fixed_links(rmg)\n array([20, 21, 22, 23, 24, 14, 17, 27, 30, 20, 21, 22, 23, 24])\n \"\"\"\n neighbors = links.neighbors_at_link(grid.shape, grid.fixed_links).flat\n return neighbors[np.in1d(neighbors, grid.active_links)]\n", "import numpy as np\nimport pytest\nfrom hypothesis import given\nfrom hypothesis.strategies import integers, lists\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nfrom pytest import approx\n\nfrom landlab.graph import TriGraph\nfrom landlab.graph.hex.hex import (\n HorizontalHexTriGraph,\n HorizontalRectTriGraph,\n VerticalHexTriGraph,\n VerticalRectTriGraph,\n)\n\n\ndef test_number_of_nodes_horizontal_rect():\n assert HorizontalRectTriGraph.number_of_nodes((1, 2)) == 2\n assert HorizontalRectTriGraph.number_of_nodes((1, 3)) == 3\n assert HorizontalRectTriGraph.number_of_nodes((2, 2)) == 4\n assert HorizontalRectTriGraph.number_of_nodes((2, 3)) == 6\n assert HorizontalRectTriGraph.number_of_nodes((3, 2)) == 6\n assert HorizontalRectTriGraph.number_of_nodes((3, 3)) == 9\n\n\ndef test_number_of_nodes_vertical_rect():\n assert VerticalRectTriGraph.number_of_nodes((1, 2)) == 2\n assert VerticalRectTriGraph.number_of_nodes((1, 3)) == 3\n assert VerticalRectTriGraph.number_of_nodes((2, 2)) == 4\n assert VerticalRectTriGraph.number_of_nodes((2, 3)) == 6\n assert VerticalRectTriGraph.number_of_nodes((3, 2)) == 6\n assert VerticalRectTriGraph.number_of_nodes((3, 3)) == 9\n\n\ndef test_number_of_nodes_horizontal_hex():\n assert HorizontalHexTriGraph.number_of_nodes((1, 2)) == 2\n assert HorizontalHexTriGraph.number_of_nodes((1, 3)) == 3\n assert HorizontalHexTriGraph.number_of_nodes((2, 2)) == 5\n assert HorizontalHexTriGraph.number_of_nodes((2, 3)) == 7\n assert HorizontalHexTriGraph.number_of_nodes((3, 2)) == 7\n assert HorizontalHexTriGraph.number_of_nodes((3, 3)) == 10\n\n\ndef test_number_of_nodes_vertical_hex():\n assert VerticalHexTriGraph.number_of_nodes((1, 2)) == 3\n assert VerticalHexTriGraph.number_of_nodes((1, 3)) == 4\n assert VerticalHexTriGraph.number_of_nodes((2, 2)) == 5\n assert VerticalHexTriGraph.number_of_nodes((2, 3)) == 7\n assert VerticalHexTriGraph.number_of_nodes((3, 2)) == 7\n assert VerticalHexTriGraph.number_of_nodes((3, 3)) == 10\n\n\n@given(shape=lists(integers(min_value=3, max_value=1024), 
min_size=2, max_size=2))\ndef test_number_of_nodes_symetric_rect(shape):\n assert VerticalRectTriGraph.number_of_nodes(\n shape\n ) == HorizontalRectTriGraph.number_of_nodes(shape[::-1])\n\n\n@given(shape=lists(integers(min_value=3, max_value=1024), min_size=2, max_size=2))\ndef test_number_of_nodes_symetric_hex(shape):\n assert VerticalHexTriGraph.number_of_nodes(\n shape\n ) == HorizontalHexTriGraph.number_of_nodes(shape[::-1])\n\n\[email protected](\"n_rows\", (3,))\[email protected](\"node_layout\", (\"rect\", \"hex\"))\[email protected](\"orientation\", (\"horizontal\", \"vertical\"))\[email protected](\"at\", (\"nodes\", \"links\", \"patches\"))\ndef test_create_hex_graph(n_rows, node_layout, orientation, at):\n expected = {\n \"rect\": {\n \"horizontal\": {\"nodes\": 6, \"links\": 9, \"patches\": 4},\n \"vertical\": {\"nodes\": 6, \"links\": 9, \"patches\": 4},\n },\n \"hex\": {\n \"horizontal\": {\"nodes\": 7, \"links\": 12, \"patches\": 6},\n \"vertical\": {\"nodes\": 7, \"links\": 12, \"patches\": 6},\n },\n }\n if orientation == \"vertical\":\n shape = (2, n_rows)\n else:\n shape = (n_rows, 2)\n graph = TriGraph(shape, node_layout=node_layout, orientation=orientation, sort=True)\n assert (\n getattr(graph, \"number_of_{at}\".format(at=at))\n == expected[node_layout][orientation][at]\n )\n\n\ndef test_create_rect():\n \"\"\"Test creating a hex graph with rectangular layout.\"\"\"\n graph = TriGraph((3, 2), node_layout=\"rect\", sort=True)\n\n assert graph.number_of_nodes == 6\n assert graph.number_of_links == 9\n assert graph.number_of_patches == 4\n\n\ndef test_create_hex():\n \"\"\"Test creating a hex graph with hex layout.\"\"\"\n graph = TriGraph((3, 2), node_layout=\"hex\", sort=True)\n\n assert graph.number_of_nodes == 7\n assert graph.number_of_links == 12\n assert graph.number_of_patches == 6\n\n\n@given(shape=lists(integers(min_value=3, max_value=32), min_size=2, max_size=2))\ndef test_spacing(shape):\n \"\"\"Test spacing of nodes.\"\"\"\n graph = TriGraph(shape)\n assert_array_almost_equal(graph.length_of_link, 1.0)\n\n graph = TriGraph(shape, spacing=2)\n assert_array_almost_equal(graph.length_of_link, 2.0)\n\n\n@given(shape=lists(integers(min_value=3, max_value=32), min_size=2, max_size=2))\[email protected](\"orientation\", (\"horizontal\", \"vertical\"))\[email protected](\"node_layout\", (\"hex\", \"rect\"))\ndef test_origin_keyword(node_layout, orientation, shape):\n \"\"\"Test setting the origin.\"\"\"\n graph = TriGraph(shape)\n\n assert np.min(graph.x_of_node) == approx(0.0)\n assert np.min(graph.y_of_node) == approx(0.0)\n\n graph = TriGraph(shape, xy_of_lower_left=(0.5, 0.25))\n\n assert np.min(graph.x_of_node[0]) == approx(0.5)\n assert np.min(graph.y_of_node[0]) == approx(0.25)\n\n\ndef test_orientation():\n \"\"\"Test vertical and horizontal orientation.\"\"\"\n graph = TriGraph((3, 3), orientation=\"vertical\")\n assert_array_almost_equal(\n graph.y_of_node, [0.0, 0.0, 0.5, 1.0, 1.0, 1.5, 2.0, 2.0, 2.5]\n )\n\n graph = TriGraph((3, 3), orientation=\"horizontal\")\n assert_array_almost_equal(\n graph.x_of_node, [0.0, 1.0, 2.0, 0.5, 1.5, 2.5, 0.0, 1.0, 2.0]\n )\n\n\ndef test_perimeter_nodes_rect():\n graph = TriGraph((3, 4), node_layout=\"rect\")\n assert_array_equal(graph.perimeter_nodes, [3, 7, 11, 10, 9, 8, 4, 0, 1, 2])\n\n\ndef test_perimeter_nodes_hex():\n graph = TriGraph((4, 2), node_layout=\"hex\")\n assert_array_equal(graph.perimeter_nodes, [8, 11, 10, 9, 5, 2, 0, 1, 4])\n\n\ndef test_adjacent_nodes_at_node():\n graph = TriGraph((3, 3), 
node_layout=\"hex\", sort=True)\n assert_array_equal(\n graph.adjacent_nodes_at_node,\n [\n [1, 4, 3, -1, -1, -1],\n [2, 5, 4, 0, -1, -1],\n [6, 5, 1, -1, -1, -1],\n [4, 7, 0, -1, -1, -1],\n [5, 8, 7, 3, 0, 1],\n [6, 9, 8, 4, 1, 2],\n [9, 5, 2, -1, -1, -1],\n [8, 3, 4, -1, -1, -1],\n [9, 7, 4, 5, -1, -1],\n [8, 5, 6, -1, -1, -1],\n ],\n )\n\n\ndef test_patches_at_node():\n grid = TriGraph((3, 3), node_layout=\"hex\", sort=True)\n assert_array_equal(\n grid.patches_at_node,\n [\n [0, 2, -1, -1, -1, -1],\n [1, 3, 0, -1, -1, -1],\n [4, 1, -1, -1, -1, -1],\n [5, 2, -1, -1, -1, -1],\n [6, 8, 5, 2, 0, 3],\n [7, 9, 6, 3, 1, 4],\n [7, 4, -1, -1, -1, -1],\n [5, 8, -1, -1, -1, -1],\n [8, 6, 9, -1, -1, -1],\n [9, 7, -1, -1, -1, -1],\n ],\n )\n\n\[email protected](\"n_cols\", (2, 3))\[email protected](\"n_rows\", (2, 3))\ndef test_xy_of_node_rect_vertical(n_rows, n_cols):\n expected = {\n (2, 2): ([0, 1, 0, 1], [0, 0.5, 1, 1.5]),\n (2, 3): ([0, 2, 1, 0, 2, 1], [0, 0, 0.5, 1, 1, 1.5]),\n (3, 2): ([0, 1, 0, 1, 0, 1], [0, 0.5, 1, 1.5, 2, 2.5]),\n (3, 3): ([0, 2, 1, 0, 2, 1, 0, 2, 1], [0, 0, 0.5, 1, 1, 1.5, 2, 2, 2.5]),\n }\n x_of_node, y_of_node = VerticalRectTriGraph.xy_of_node((n_rows, n_cols))\n\n assert np.all(\n x_of_node / np.sin(np.pi / 3.0) == approx(expected[(n_rows, n_cols)][0])\n )\n assert np.all(y_of_node == approx(expected[(n_rows, n_cols)][1]))\n\n\[email protected](\"n_cols\", (2, 3))\[email protected](\"n_rows\", (1, 2, 3))\ndef test_xy_of_node_hex_vertical(n_rows, n_cols):\n expected = {\n (1, 2): ([1.0, 0, 1.0], [0, 0.5, 1]),\n (1, 3): ([1.0, 0, 2, 1.0], [0, 0.5, 0.5, 1]),\n (2, 2): ([1.0, 0, 1.0, 0, 1.0], [0, 0.5, 1, 1.5, 2]),\n (2, 3): ([1.0, 0, 2, 1.0, 0, 2, 1.0], [0, 0.5, 0.5, 1, 1.5, 1.5, 2]),\n (3, 2): ([1.0, 0, 1.0, 0, 1.0, 0.0, 1.0], [0, 0.5, 1, 1.5, 2, 2.5, 3]),\n (3, 3): (\n [1.0, 0, 2, 1.0, 0, 2, 1.0, 0, 2, 1.0],\n [0, 0.5, 0.5, 1, 1.5, 1.5, 2, 2.5, 2.5, 3],\n ),\n }\n x_of_node, y_of_node = VerticalHexTriGraph.xy_of_node((n_rows, n_cols))\n\n assert np.all(\n x_of_node / np.sin(np.pi / 3.0) == approx(expected[(n_rows, n_cols)][0])\n )\n assert np.all(y_of_node == approx(expected[(n_rows, n_cols)][1]))\n\n\ndef test_xy_of_node_spacing(hex_layout):\n x_of_node_expected, y_of_node_expected = hex_layout.xy_of_node((3, 4))\n x_of_node, y_of_node = hex_layout.xy_of_node((3, 4), spacing=2.0)\n\n assert_array_almost_equal(x_of_node / 2.0, x_of_node_expected)\n assert_array_almost_equal(y_of_node / 2.0, y_of_node_expected)\n\n\[email protected](\"n_cols\", (2, 3))\[email protected](\"n_rows\", (1, 2, 3))\ndef test_xy_of_node_lower_left(hex_layout, n_rows, n_cols):\n (x_of_node, y_of_node) = hex_layout.xy_of_node((n_rows, n_cols))\n\n assert np.min(x_of_node) == approx(0.0)\n assert np.min(y_of_node) == approx(0.0)\n", "import numpy as np\n\nfrom landlab import Component, RasterModelGrid\nfrom landlab.utils.return_array import return_array_at_node\n\nfrom ..depression_finder.lake_mapper import _FLOODED\n\nDEFAULT_MINIMUM_TIME_STEP = 0.001 # default minimum time step duration\n\n\nclass _GeneralizedErosionDeposition(Component):\n \"\"\"Base class for erosion-deposition type components.\n\n More documenation here.\n \"\"\"\n\n _name = \"_GeneralizedErosionDeposition\"\n\n _unit_agnostic = True\n\n _info = {\n \"flow__link_to_receiver_node\": {\n \"dtype\": int,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"ID of link downstream of each node, which carries the discharge\",\n },\n \"flow__receiver_node\": {\n \"dtype\": int,\n 
\"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"Node array of receivers (node that receives flow from current node)\",\n },\n \"flow__upstream_node_order\": {\n \"dtype\": int,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"Node array containing downstream-to-upstream ordered list of node IDs\",\n },\n \"sediment__influx\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m3/s\",\n \"mapping\": \"node\",\n \"doc\": \"Sediment flux (volume per unit time of sediment entering each node)\",\n },\n \"sediment__outflux\": {\n \"dtype\": float,\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m3/s\",\n \"mapping\": \"node\",\n \"doc\": \"Sediment flux (volume per unit time of sediment leaving each node)\",\n },\n \"surface_water__discharge\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"m**2/s\",\n \"mapping\": \"node\",\n \"doc\": \"Volumetric discharge of surface water\",\n },\n \"topographic__elevation\": {\n \"dtype\": float,\n \"intent\": \"inout\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Land surface topographic elevation\",\n },\n \"topographic__steepest_slope\": {\n \"dtype\": float,\n \"intent\": \"in\",\n \"optional\": False,\n \"units\": \"-\",\n \"mapping\": \"node\",\n \"doc\": \"The steepest *downhill* slope\",\n },\n }\n\n def __init__(\n self,\n grid,\n m_sp,\n n_sp,\n F_f,\n v_s,\n discharge_field=\"surface_water__discharge\",\n dt_min=DEFAULT_MINIMUM_TIME_STEP,\n ):\n \"\"\"Initialize the GeneralizedErosionDeposition model.\n\n Parameters\n ----------\n grid : ModelGrid\n Landlab ModelGrid object\n m_sp : float\n Discharge exponent (units vary)\n n_sp : float\n Slope exponent (units vary)\n F_f : float\n Fraction of eroded material that turns into \"fines\" that do not\n contribute to (coarse) sediment load. Defaults to zero.\n v_s : float\n Effective settling velocity for chosen grain size metric [L/T].\n discharge_field : float, field name, or array\n Discharge [L^2/T].\n dt_min : float, optional\n Only applies when adaptive solver is used. Minimum timestep that\n adaptive solver will use when subdividing unstable timesteps.\n Default values is 0.001. 
[T].\n \"\"\"\n super().__init__(grid)\n\n self._flow_receivers = grid.at_node[\"flow__receiver_node\"]\n self._stack = grid.at_node[\"flow__upstream_node_order\"]\n self._topographic__elevation = grid.at_node[\"topographic__elevation\"]\n self._slope = grid.at_node[\"topographic__steepest_slope\"]\n self._link_to_reciever = grid.at_node[\"flow__link_to_receiver_node\"]\n self._cell_area_at_node = grid.cell_area_at_node\n\n if isinstance(grid, RasterModelGrid):\n self._link_lengths = grid.length_of_d8\n else:\n self._link_lengths = grid.length_of_link\n\n self.initialize_output_fields()\n\n self._qs = grid.at_node[\"sediment__outflux\"]\n self._q = return_array_at_node(grid, discharge_field)\n\n # For backward compatibility (remove in 3.0.0+)\n grid.at_node[\"sediment__flux\"] = grid.at_node[\"sediment__outflux\"]\n\n self._Q_to_the_m = np.zeros(grid.number_of_nodes)\n self._S_to_the_n = np.zeros(grid.number_of_nodes)\n self._depo_rate = np.zeros(grid.number_of_nodes)\n\n # store other constants\n self._m_sp = float(m_sp)\n self._n_sp = float(n_sp)\n self._v_s = float(v_s)\n self._dt_min = dt_min\n self._F_f = float(F_f)\n\n if F_f > 1.0:\n raise ValueError(\"Fraction of fines must be <= 1.0\")\n\n if F_f < 0.0:\n raise ValueError(\"Fraction of fines must be > 0.0\")\n\n @property\n def sediment_influx(self):\n \"\"\"Volumetric sediment influx to each node.\"\"\"\n return self.grid.at_node[\"sediment__influx\"]\n\n def _update_flow_link_slopes(self):\n \"\"\"Updates gradient between each core node and its receiver.\n\n Used to update slope values between sub-time-steps, when we do not\n re-run flow routing.\n\n Examples\n --------\n >>> from landlab import RasterModelGrid\n >>> from landlab.components import FlowAccumulator\n >>> from landlab.components.erosion_deposition.generalized_erosion_deposition import _GeneralizedErosionDeposition\n >>> rg = RasterModelGrid((3, 4))\n >>> z = rg.add_zeros('node', 'topographic__elevation')\n >>> z[:] = rg.x_of_node + rg.y_of_node\n >>> fa = FlowAccumulator(rg, flow_director='FlowDirectorD8')\n >>> fa.run_one_step()\n >>> rg.at_node['topographic__steepest_slope'][5:7]\n array([ 1.41421356, 1.41421356])\n >>> sp = _GeneralizedErosionDeposition(rg, v_s=0.001,\n ... 
m_sp=0.5, n_sp=1.0, F_f=0)\n >>> z *= 0.1\n >>> sp._update_flow_link_slopes()\n >>> rg.at_node['topographic__steepest_slope'][5:7]\n array([ 0.14142136, 0.14142136])\n \"\"\"\n self._slope[:] = (\n self._topographic__elevation\n - self._topographic__elevation[self._flow_receivers]\n ) / self._link_lengths[self._link_to_reciever]\n\n def _calc_hydrology(self):\n self._Q_to_the_m[:] = np.power(self._q, self._m_sp)\n\n def _depressions_are_handled(self):\n \"\"\"Return True if a depression-handling component is present.\"\"\"\n return \"flood_status_code\" in self._grid.at_node\n\n def _get_flooded_core_nodes(self):\n \"\"\"Return boolean node array\n\n True where core node is flooded or self-draining.\n \"\"\"\n is_core = self._grid.status_at_node == self._grid.BC_NODE_IS_CORE\n if self._depressions_are_handled():\n is_flooded_core = is_core & (\n self._grid.at_node[\"flood_status_code\"] == _FLOODED\n )\n else:\n is_flooded_core = is_core & (\n self._flow_receivers == self._grid.nodes.flatten()\n )\n return np.asarray(is_flooded_core)\n", "import copy as cp\n\nimport numpy as np\nimport pytest\nfrom numpy import testing\n\nfrom landlab import FieldError, HexModelGrid, RasterModelGrid\nfrom landlab.components import (\n FlowAccumulator,\n PriorityFloodFlowRouter,\n SpaceLargeScaleEroder,\n)\n\n\ndef test_inputFields_flowRouter():\n \"\"\"\n SpaceLargeScaleEroder should throw an error when topograhy is not equal to the sum of\n bedrock and soil thickness\n \"\"\"\n # %%\n # Make a raster model grid and create a plateau\n mg = RasterModelGrid((5, 5))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n br[:] = mg.x_of_node + mg.y_of_node\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n z[:] = br + soil\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n\n # make plateau at 10m\n br += 10\n\n # Instanciate the slider\n with pytest.raises(AssertionError):\n _ = SpaceLargeScaleEroder(mg)\n\n\n# %%\ndef test_inputFields_soil():\n \"\"\"\n SpaceLargeScaleEroder should throw an error when the soil__depth field is not provided\n \"\"\"\n # %%\n mg = RasterModelGrid((5, 5))\n _ = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n _ = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n\n # Instanciate the slider\n with pytest.raises(FieldError):\n _ = SpaceLargeScaleEroder(mg)\n\n\n# %%\ndef test_inputFields_bedrock():\n \"\"\"\n SpaceLargeScaleEroder should instanciate the bedrock__elevation field\n when it is not provided\n \"\"\"\n # %%\n mg = RasterModelGrid((5, 5))\n _ = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n _ = mg.add_zeros(\"soil__depth\", at=\"node\")\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n _ = SpaceLargeScaleEroder(mg)\n\n assert \"bedrock__elevation\" in mg.at_node.keys()\n\n\n# %%\ndef test_properties_phi_fraction_fines_LS():\n \"\"\"\n SpaceLargeScaleEroder should throw an error when phi/fraction_fines_LS < 0 or phi > 0\n \"\"\"\n # %%\n mg = RasterModelGrid((5, 5))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n br[:] = mg.x_of_node + mg.y_of_node\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n z[:] = br + soil\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n fa.run_one_step()\n\n # Instanciate the slider\n with pytest.raises(ValueError):\n _ = SpaceLargeScaleEroder(mg, phi=-0.2)\n # Instanciate the 
slider\n with pytest.raises(ValueError):\n _ = SpaceLargeScaleEroder(mg, phi=1.2)\n # Instanciate the slider\n with pytest.raises(ValueError):\n _ = SpaceLargeScaleEroder(mg, F_f=-0.2)\n # Instanciate the slider\n with pytest.raises(ValueError):\n _ = SpaceLargeScaleEroder(mg, F_f=1.2)\n\n\n# %%\n\n\ndef test_route_to_multiple_error_raised():\n # %%\n mg = RasterModelGrid((10, 10))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n br[:] = mg.x_of_node + mg.y_of_node\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n z[:] = br + soil\n fa = FlowAccumulator(mg, flow_director=\"MFD\")\n fa.run_one_step()\n\n with pytest.raises(NotImplementedError):\n SpaceLargeScaleEroder(\n mg,\n K_sed=0.1,\n K_br=0.1,\n F_f=0.5,\n phi=0.1,\n H_star=1.0,\n v_s=0.001,\n m_sp=1.0,\n n_sp=0.5,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n\n# %%\ndef test_soil_field_already_on_grid():\n # %%\n \"\"\"\n Test that an existing soil grid field is not changed by instantiating\n SpaceLargeScaleEroder.\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n soil += 1.0 # add 1m of soil everywehre\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n br[:] = z - soil\n\n # Create a D8 flow handler\n FlowAccumulator(mg, flow_director=\"D8\")\n\n # Instantiate SpaceLargeScaleEroder\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.01,\n K_br=0.01,\n F_f=0.0,\n phi=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ensure that 'soil__depth' field is everywhere equal to 1.0 m.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n sp._soil__depth,\n err_msg=\"SpaceLargeScaleEroder soil depth field test failed\",\n verbose=True,\n )\n\n # %% Check getters\n testing.assert_array_equal(\n 0.01,\n sp.K_br,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n testing.assert_array_equal(\n 0.01,\n sp.K_sed,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n # sediment erosion is zero before running the component\n testing.assert_array_equal(\n np.zeros(mg.number_of_nodes),\n sp.Es,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n # rock erosion is zero before running the component\n testing.assert_array_equal(\n np.zeros(mg.number_of_nodes),\n sp.Er,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n # %% Check setters\n sp.K_br = 0.02\n testing.assert_array_equal(\n 0.02,\n sp.K_br,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n sp.K_sed = 0.02\n testing.assert_array_equal(\n 0.02,\n sp.K_sed,\n err_msg=\"Parameter value issue\",\n verbose=True,\n )\n\n with pytest.raises(AttributeError):\n sp.Es = np.zeros(mg.number_of_nodes)\n\n with pytest.raises(AttributeError):\n sp.Er = np.zeros(mg.number_of_nodes)\n\n\n# %%\n\n\ndef test_br_field_already_on_grid():\n # %%\n \"\"\"\n Test that an existing bedrock elevation grid field is not changed by\n instantiating SpaceLargeScaleEroder.\n \"\"\"\n\n 
# set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n br += 1.0 # make bedrock elevation 5m below surface\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n z[:] = br + soil\n\n # Create a D8 flow handler\n FlowAccumulator(mg, flow_director=\"D8\")\n\n # Instantiate SpaceLargeScaleEroder\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.01,\n K_br=0.01,\n F_f=0.0,\n phi=0.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ensure that 'bedrock__elevation' field is everywhere equal to 1.0 m.\n testing.assert_array_equal(\n np.ones(mg.number_of_nodes),\n sp._bedrock__elevation,\n err_msg=\"SpaceLargeScaleEroder bedrock field test failed\",\n verbose=True,\n )\n\n\n# %%\ndef test_matches_detachment_solution():\n # %%\n \"\"\"\n Test that model matches the detachment-limited analytical solution\n for slope/area relationship at steady state: S=(U/K_br)^(1/n)*A^(-m/n).\n \"\"\"\n\n # %% set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n br[:] = z[:] - soil[:]\n\n # Create a D8 flow handler\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n\n # Parameter values for detachment-limited test\n K_br = 0.01\n U = 0.0001\n dt = 1.0\n F_f = 1.0 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.00001,\n K_br=K_br,\n F_f=F_f,\n phi=0.1,\n H_star=1.0,\n v_s=0.001,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... 
and run it to steady state (2000x1-year timesteps).\n for _ in range(2000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n z[mg.core_nodes] += U * dt # m\n br[mg.core_nodes] = z[mg.core_nodes] - soil[mg.core_nodes]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(U / K_br, 1.0 / n_sp) * np.power(\n mg.at_node[\"drainage_area\"][mg.core_nodes], -m_sp / n_sp\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder detachment-limited test failed\",\n verbose=True,\n )\n\n\n# %%\ndef test_matches_detachment_solution_n_gr_1():\n # %%\n \"\"\"\n Test that model matches the detachment-limited analytical solution\n for slope/area relationship at steady state: S=(U/K_br)^(1/n)*A^(-m/n).\n \"\"\"\n\n # %% set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n br[:] = z[:] - soil[:]\n\n # Create a D8 flow handler\n fa = FlowAccumulator(mg, flow_director=\"D8\")\n\n # Parameter values for detachment-limited test\n K_br = 0.01\n U = 0.0001\n dt = 1.0\n F_f = 1.0 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.1\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.00001,\n K_br=K_br,\n F_f=F_f,\n phi=0.1,\n H_star=1.0,\n v_s=0.001,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... 
and run it to steady state (2000x1-year timesteps).\n for _ in range(4000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n z[mg.core_nodes] += U * dt # m\n br[mg.core_nodes] = z[mg.core_nodes] - soil[mg.core_nodes]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(U / K_br, 1.0 / n_sp) * np.power(\n mg.at_node[\"drainage_area\"][mg.core_nodes], -m_sp / n_sp\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder detachment-limited test failed\",\n verbose=True,\n )\n\n\n# %%\n\n\[email protected]\ndef test_matches_transport_solution():\n # %%\n \"\"\"\n Test that model matches the transport-limited analytical solution\n for slope/area relationship at steady state: S=((U * v_s) / (K_sed * A^m)\n + U / (K_sed * A^m))^(1/n).\n\n Also test that model matches the analytical solution for steady-state\n sediment flux: Qs = U * A * (1 - phi).\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n soil[:] += 100.0 # initial soil depth of 100 m\n br[:] = z[:]\n z[:] += soil[:]\n\n # Create a D8 flow handler\n fa = FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Parameter values for detachment-limited test\n K_sed = 0.01\n U = 0.0001\n dt = 1.0\n F_f = 1.0 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n v_s = 0.5\n phi = 0.5\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=K_sed,\n K_br=0.01,\n F_f=F_f,\n phi=phi,\n H_star=1.0,\n v_s=v_s,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... 
and run it to steady state (5000x1-year timesteps).\n for _ in range(5000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n br[mg.core_nodes] += U * dt # m\n soil[\n 0\n ] = 100.0 # enforce constant soil depth at boundary to keep lowering steady\n z[:] = br[:] + soil[:]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(\n (\n (U * v_s * (1 - phi))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n )\n + (\n (U * (1 - phi))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n ),\n 1.0 / n_sp,\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder transport-limited slope-area test failed\",\n verbose=True,\n )\n\n # compare numerical and analytical sediment flux solutions\n num_sedflux = mg.at_node[\"sediment__outflux\"][mg.core_nodes]\n analytical_sedflux = U * mg.at_node[\"drainage_area\"][mg.core_nodes] * (1 - phi)\n\n # test for match with anakytical sediment flux\n testing.assert_array_almost_equal(\n num_sedflux,\n analytical_sedflux,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder transport-limited sediment flux test failed\",\n verbose=True,\n )\n # %%\n\n\[email protected]\ndef test_matches_bedrock_alluvial_solution():\n # %%\n \"\"\"\n Test that model matches the bedrock-alluvial analytical solution\n for slope/area relationship at steady state:\n S=((U * v_s * (1 - F_f)) / (K_sed * A^m) + U / (K_br * A^m))^(1/n).\n\n Also test that the soil depth everywhere matches the bedrock-alluvial\n analytical solution at steady state:\n H = -H_star * ln(1 - (v_s / (K_sed / (K_br * (1 - F_f)) + v_s))).\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n soil[:] += 0.0 # initial condition of no soil depth.\n br[:] = z[:]\n z[:] += soil[:]\n\n # Create a D8 flow handler\n fa = FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Parameter values for detachment-limited test\n K_br = 0.002\n K_sed = 0.002\n U = 0.0001\n dt = 10.0\n F_f = 0.2 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n v_s = 0.25\n H_star = 0.1\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=K_sed,\n K_br=K_br,\n F_f=F_f,\n phi=0.0,\n H_star=H_star,\n v_s=v_s,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... 
and run it to steady state (10000x1-year timesteps).\n for _ in range(10000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n br[mg.core_nodes] += U * dt # m\n soil[0] = 0.0 # enforce 0 soil depth at boundary to keep lowering steady\n z[:] = br[:] + soil[:]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(\n (\n (U * v_s * (1 - F_f))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n )\n + (U / (K_br * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))),\n 1.0 / n_sp,\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder bedrock-alluvial slope-area test failed\",\n verbose=True,\n )\n\n # compare numerical and analytical soil depth solutions\n num_h = mg.at_node[\"soil__depth\"][mg.core_nodes]\n analytical_h = -H_star * np.log(1 - (v_s / (K_sed / (K_br * (1 - F_f)) + v_s)))\n\n # test for match with analytical sediment depth\n testing.assert_array_almost_equal(\n num_h,\n analytical_h,\n decimal=5,\n err_msg=\"SpaceLargeScaleEroder bedrock-alluvial soil thickness test failed\",\n verbose=True,\n )\n # %%\n\n\ndef test_can_run_with_hex():\n \"\"\"Test that model can run with hex model grid.\"\"\"\n # %%\n # Set up a 5x5 grid with open boundaries and low initial elevations.\n mg = HexModelGrid((7, 7))\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n _ = mg.add_zeros(\"soil__depth\", at=\"node\")\n z[:] = 0.01 * mg.x_of_node\n\n # Create a D8 flow handler\n fa = FlowAccumulator(mg, flow_director=\"FlowDirectorSteepest\")\n\n # Parameter values for test 1\n U = 0.001\n dt = 10.0\n\n # Create the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.00001,\n K_br=0.00000000001,\n F_f=0.5,\n phi=0.1,\n H_star=1.0,\n v_s=0.001,\n m_sp=0.5,\n n_sp=1.0,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... 
and run it to steady state.\n for _ in range(2000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n z[mg.core_nodes] += U * dt\n\n\n# %%\ndef test_matches_detachment_solution_PF():\n # %%\n \"\"\"\n Test that model matches the detachment-limited analytical solution\n for slope/area relationship at steady state: S=(U/K_br)^(1/n)*A^(-m/n).\n \"\"\"\n\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 10000 + mg.node_x / 10000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n br[:] = z[:] - soil[:]\n\n fa = PriorityFloodFlowRouter(\n mg, surface=\"topographic__elevation\", flow_metric=\"D8\", suppress_out=True\n )\n fa.run_one_step()\n\n # Parameter values for detachment-limited test\n K_br = 0.01\n U = 0.0001\n dt = 1.0\n F_f = 1.0 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=0.00001,\n K_br=K_br,\n F_f=F_f,\n phi=0.1,\n H_star=1.0,\n v_s=0.001,\n v_s_lake=0.001,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... and run it to steady state (2000x1-year timesteps).\n for _ in range(2000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n z[mg.core_nodes] += U * dt # m\n br[mg.core_nodes] = z[mg.core_nodes] - soil[mg.core_nodes]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(U / K_br, 1.0 / n_sp) * np.power(\n mg.at_node[\"drainage_area\"][mg.core_nodes], -m_sp / n_sp\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder detachment-limited test failed\",\n verbose=True,\n )\n\n\n# %%\[email protected]\ndef test_matches_transport_solution_PF():\n\n \"\"\"\n Test that model matches the transport-limited analytical solution\n for slope/area relationship at steady state: S=((U * v_s) / (K_sed * A^m)\n + U / (K_sed * A^m))^(1/n).\n\n Also test that model matches the analytical solution for steady-state\n sediment flux: Qs = U * A * (1 - phi).\n \"\"\"\n # %%\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n soil[:] += 100.0 # initial soil depth of 100 m\n br[:] = z[:]\n z[:] += soil[:]\n\n # Create a D8 flow 
handler\n fa = PriorityFloodFlowRouter(\n mg, surface=\"topographic__elevation\", flow_metric=\"D8\", suppress_out=True\n )\n fa.run_one_step()\n\n # Parameter values for detachment-limited test\n K_sed = 0.01\n U = 0.0001\n dt = 1.0\n F_f = 1.0 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n v_s = 0.5\n phi = 0.5\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=K_sed,\n K_br=0.01,\n F_f=F_f,\n phi=phi,\n H_star=1.0,\n v_s=v_s,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... and run it to steady state (5000x1-year timesteps).\n for _ in range(5000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n br[mg.core_nodes] += U * dt # m\n soil[\n 0\n ] = 100.0 # enforce constant soil depth at boundary to keep lowering steady\n z[:] = br[:] + soil[:]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(\n (\n (U * v_s * (1 - phi))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n )\n + (\n (U * (1 - phi))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n ),\n 1.0 / n_sp,\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder transport-limited slope-area test failed\",\n verbose=True,\n )\n\n # compare numerical and analytical sediment flux solutions\n num_sedflux = mg.at_node[\"sediment__outflux\"][mg.core_nodes]\n analytical_sedflux = U * mg.at_node[\"drainage_area\"][mg.core_nodes] * (1 - phi)\n\n # test for match with anakytical sediment flux\n testing.assert_array_almost_equal(\n num_sedflux,\n analytical_sedflux,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder transport-limited sediment flux test failed\",\n verbose=True,\n )\n\n\n# %%\[email protected]\ndef test_matches_bedrock_alluvial_solution_PF():\n \"\"\"\n Test that model matches the bedrock-alluvial analytical solution\n for slope/area relationship at steady state:\n S=((U * v_s * (1 - F_f)) / (K_sed * A^m) + U / (K_br * A^m))^(1/n).\n\n Also test that the soil depth everywhere matches the bedrock-alluvial\n analytical solution at steady state:\n H = -H_star * ln(1 - (v_s / (K_sed / (K_br * (1 - F_f)) + v_s))).\n \"\"\"\n # %%\n # set up a 5x5 grid with one open outlet node and low initial elevations.\n nr = 5\n nc = 5\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n soil[:] += 0.0 # initial condition of no soil depth.\n br[:] = z[:]\n z[:] += soil[:]\n\n # Create a D8 flow handler\n fa = PriorityFloodFlowRouter(\n mg, surface=\"topographic__elevation\", flow_metric=\"D8\", suppress_out=True\n )\n fa.run_one_step()\n\n # Parameter values for detachment-limited test\n K_br = 0.002\n K_sed = 0.002\n U = 0.0001\n dt = 10.0\n F_f = 0.2 # all detached rock disappears; detachment-ltd end-member\n m_sp = 
0.5\n n_sp = 1.0\n v_s = 0.25\n H_star = 0.1\n\n # Instantiate the SpaceLargeScaleEroder component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=K_sed,\n K_br=K_br,\n F_f=F_f,\n phi=0.0,\n H_star=H_star,\n v_s=v_s,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n\n # ... and run it to steady state (10000x1-year timesteps).\n for _ in range(10000):\n fa.run_one_step()\n sp.run_one_step(dt=dt)\n br[mg.core_nodes] += U * dt # m\n soil[0] = 0.0 # enforce 0 soil depth at boundary to keep lowering steady\n z[:] = br[:] + soil[:]\n\n # compare numerical and analytical slope solutions\n num_slope = mg.at_node[\"topographic__steepest_slope\"][mg.core_nodes]\n analytical_slope = np.power(\n (\n (U * v_s * (1 - F_f))\n / (K_sed * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))\n )\n + (U / (K_br * np.power(mg.at_node[\"drainage_area\"][mg.core_nodes], m_sp))),\n 1.0 / n_sp,\n )\n\n # test for match with analytical slope-area relationship\n testing.assert_array_almost_equal(\n num_slope,\n analytical_slope,\n decimal=8,\n err_msg=\"SpaceLargeScaleEroder bedrock-alluvial slope-area test failed\",\n verbose=True,\n )\n\n # compare numerical and analytical soil depth solutions\n num_h = mg.at_node[\"soil__depth\"][mg.core_nodes]\n analytical_h = -H_star * np.log(1 - (v_s / (K_sed / (K_br * (1 - F_f)) + v_s)))\n\n # test for match with analytical sediment depth\n testing.assert_array_almost_equal(\n num_h,\n analytical_h,\n decimal=5,\n err_msg=\"SpaceLargeScaleEroder bedrock-alluvial soil thickness test failed\",\n verbose=True,\n )\n # %%\n\n\ndef test_MassBalance():\n # %%\n # set up a 15x15 grid with one open outlet node and low initial elevations.\n nr = 15\n nc = 15\n mg = RasterModelGrid((nr, nc), xy_spacing=10.0)\n\n z = mg.add_zeros(\"topographic__elevation\", at=\"node\")\n br = mg.add_zeros(\"bedrock__elevation\", at=\"node\")\n soil = mg.add_zeros(\"soil__depth\", at=\"node\")\n\n mg[\"node\"][\"topographic__elevation\"] += (\n mg.node_y / 100000 + mg.node_x / 100000 + np.random.rand(len(mg.node_y)) / 10000\n )\n mg.set_closed_boundaries_at_grid_edges(\n bottom_is_closed=True,\n left_is_closed=True,\n right_is_closed=True,\n top_is_closed=True,\n )\n mg.set_watershed_boundary_condition_outlet_id(\n 0, mg[\"node\"][\"topographic__elevation\"], -9999.0\n )\n soil[:] += 0.0 # initial condition of no soil depth.\n br[:] = z[:]\n z[:] += soil[:]\n\n # Create a D8 flow handler\n # fa = PriorityFloodFlowRouter(mg, surface=\"topographic__elevation\", flow_metric = 'D8',suppress_out=True)\n # fa.run_one_step()\n\n # Create a D8 flow handler\n fa = FlowAccumulator(\n mg, flow_director=\"D8\", depression_finder=\"DepressionFinderAndRouter\"\n )\n\n # Parameter values for detachment-limited test\n K_br = 0.002\n K_sed = 0.002\n U = 0.0001\n dt = 10.0\n F_f = 0.2 # all detached rock disappears; detachment-ltd end-member\n m_sp = 0.5\n n_sp = 1.0\n v_s = 0.25\n H_star = 0.1\n\n # Instantiate the Space component...\n sp = SpaceLargeScaleEroder(\n mg,\n K_sed=K_sed,\n K_br=K_br,\n F_f=F_f,\n phi=0.0,\n H_star=H_star,\n v_s=v_s,\n m_sp=m_sp,\n n_sp=n_sp,\n sp_crit_sed=0,\n sp_crit_br=0,\n )\n # Get values before run\n z = mg.at_node[\"topographic__elevation\"]\n br = mg.at_node[\"bedrock__elevation\"]\n H = mg.at_node[\"soil__depth\"]\n cores = mg.core_nodes\n area = mg.cell_area_at_node\n # ... 
and run it to steady state (10000x1-year timesteps).\n for _ in range(10000):\n fa.run_one_step()\n soil_B = cp.deepcopy(H)\n bed_B = cp.deepcopy(br)\n vol_SSY_riv, V_leaving_riv = sp.run_one_step(dt=dt)\n diff_MB = (\n np.sum((bed_B[cores] - br[cores]) * area[cores])\n + np.sum((soil_B[cores] - H[cores]) * area[cores]) * (1 - sp._phi)\n - vol_SSY_riv * dt\n - V_leaving_riv\n )\n\n br[mg.core_nodes] += U * dt # m\n soil[0] = 0.0 # enforce 0 soil depth at boundary to keep lowering steady\n z[:] = br[:] + soil[:]\n\n # Test Every iteration\n testing.assert_array_almost_equal(\n z[cores],\n br[cores] + H[cores],\n decimal=5,\n err_msg=\"Topography does not equal sum of bedrock and soil! Decrease timestep\",\n verbose=True,\n )\n testing.assert_array_less(\n abs(diff_MB),\n 1e-8 * mg.number_of_nodes,\n err_msg=\"Mass balance error SpaceLargeScaleEroder! Try to resolve by becreasing timestep\",\n verbose=True,\n )\n", "#! /usr/env/python\n\"\"\"Python implementation of HexModelGrid, a grid class used to create and\nmanage structured Voronoi-Delaunay grids for 2D numerical models.\n\nDo NOT add new documentation here. Grid documentation is now built in a\nsemi- automated fashion. To modify the text seen on the web, edit the\nfiles `docs/text_for_[gridfile].py.txt`.\n\"\"\"\n\nimport numpy\nimport xarray as xr\n\nfrom ..core.utils import as_id_array\nfrom ..graph import DualHexGraph\nfrom .base import ModelGrid\n\n\nclass HexModelGrid(DualHexGraph, ModelGrid):\n \"\"\"A grid of hexagonal cells.\n\n This inherited class implements a regular 2D grid with hexagonal cells and\n triangular patches. It is a special type of VoronoiDelaunay grid in which\n the initial set of points is arranged in a triangular/hexagonal lattice.\n\n Examples\n --------\n Create a hex grid with 2 rows of nodes. The first and third rows will\n have 2 nodes, and the second nodes.\n\n >>> from landlab import HexModelGrid\n >>> grid = HexModelGrid((3, 2), spacing=1.0)\n >>> grid.number_of_nodes\n 7\n\n >>> grid = HexModelGrid((3, 3), node_layout=\"rect\", spacing=2.0)\n >>> grid.status_at_node\n array([1, 1, 1, 1, 0, 1, 1, 1, 1], dtype=uint8)\n >>> grid = HexModelGrid((3, 3), node_layout=\"rect\", orientation=\"vertical\")\n >>> grid.status_at_node\n array([1, 1, 1, 1, 1, 0, 1, 1, 1], dtype=uint8)\n >>> grid = HexModelGrid((4, 4), node_layout='rect', orientation=\"vertical\")\n >>> grid.status_at_node\n array([1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1], dtype=uint8)\n >>> grid.boundary_nodes\n array([ 0, 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 15])\n >>> grid = HexModelGrid((3, 4), node_layout=\"rect\")\n >>> grid.status_at_node\n array([1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1], dtype=uint8)\n \"\"\"\n\n def __init__(\n self,\n shape,\n spacing=1.0,\n xy_of_lower_left=(0.0, 0.0),\n orientation=\"horizontal\",\n node_layout=\"hex\",\n reorient_links=True,\n xy_of_reference=(0.0, 0.0),\n xy_axis_name=(\"x\", \"y\"),\n xy_axis_units=\"-\",\n ):\n \"\"\"Create a grid of hexagonal cells.\n\n Create a regular 2D grid with hexagonal cells and triangular patches.\n It is a special type of VoronoiDelaunay grid in which the initial set\n of points is arranged in a triangular/hexagonal lattice.\n\n Parameters\n ----------\n shape : tuple of int\n Number of rows and columns of nodes.\n spacing : float, optional\n Node spacing.\n xy_of_lower_left : tuple, optional\n Minimum x-of-node and y-of-node values. Depending on the grid\n no node may be present at this coordinate. 
Default is (0., 0.).\n xy_of_reference : tuple, optional\n Coordinate value in projected space of the reference point,\n `xy_of_lower_left`. Default is (0., 0.)\n orientation : string, optional\n One of the 3 cardinal directions in the grid, either 'horizontal'\n (default) or 'vertical'\n node_layout : {\"hex\", \"rect\"}\n The grid layout of nodes.\n reorient_links : bool, optional\n Whether or not to re-orient all links to point between -45 deg\n and +135 deg clockwise from \"north\" (i.e., along y axis). default\n is True.\n\n Returns\n -------\n HexModelGrid\n A newly-created grid.\n\n Examples\n --------\n Create a hex grid with 2 rows of nodes. The first and third rows will\n have 2 nodes, and the second nodes.\n\n >>> from landlab import HexModelGrid\n >>> hmg = HexModelGrid((3, 2), spacing=1.0)\n >>> hmg.number_of_nodes\n 7\n \"\"\"\n self._xy_of_lower_left = tuple(numpy.asfarray(xy_of_lower_left))\n\n DualHexGraph.__init__(\n self,\n shape,\n spacing=spacing,\n xy_of_lower_left=self.xy_of_lower_left,\n orientation=orientation,\n node_layout=node_layout,\n sort=True,\n )\n ModelGrid.__init__(\n self,\n xy_axis_name=xy_axis_name,\n xy_axis_units=xy_axis_units,\n xy_of_reference=xy_of_reference,\n )\n\n self._node_status = numpy.full(\n self.number_of_nodes, self.BC_NODE_IS_CORE, dtype=numpy.uint8\n )\n self._node_status[self.perimeter_nodes] = self.BC_NODE_IS_FIXED_VALUE\n\n @classmethod\n def from_dict(cls, kwds):\n args = (kwds.pop(\"shape\"),)\n return cls(*args, **kwds)\n\n @classmethod\n def from_dataset(cls, dataset):\n return cls(\n tuple(dataset[\"shape\"].values),\n spacing=dataset[\"spacing\"],\n xy_of_lower_left=dataset[\"xy_of_lower_left\"],\n orientation=dataset.attrs[\"orientation\"],\n node_layout=dataset.attrs[\"node_layout\"],\n )\n\n def as_dataset(self, include=\"*\", exclude=None, time=None):\n dataset = xr.Dataset(\n {\n \"shape\": ((\"dim\",), list(self.shape)),\n \"spacing\": self.spacing,\n \"xy_of_lower_left\": ((\"dim\",), list(self.xy_of_lower_left)),\n },\n attrs={\n \"grid_type\": \"triangular\",\n \"node_layout\": self.node_layout,\n \"orientation\": self.orientation,\n },\n )\n return dataset.update(\n super(HexModelGrid, self).as_dataset(\n include=include, exclude=exclude, time=None\n )\n )\n\n @property\n def xy_of_lower_left(self):\n \"\"\"Return (x, y) of the reference point.\"\"\"\n return self._xy_of_lower_left\n\n @xy_of_lower_left.setter\n def xy_of_lower_left(self, xy_of_lower_left):\n \"\"\"Set a new value for the xy_of_lower_left.\"\"\"\n dx = self.xy_of_lower_left[0] - xy_of_lower_left[0]\n dy = self.xy_of_lower_left[1] - xy_of_lower_left[1]\n # self._xy_of_node -= (dx, dy)\n with self.thawed():\n self.x_of_node[:] -= dx\n self.y_of_node[:] -= dy\n\n self._xy_of_lower_left = tuple(xy_of_lower_left)\n\n @property\n def number_of_node_columns(self):\n \"\"\"Number of node columns hex grid.\n\n Number of node columns in a rectangular-shaped and/or\n vertically oriented hex grid.\n\n Returns the number of columns, including boundaries.\n\n Notes\n -----\n Will generate an error if called with a hex-shaped, horizontally\n aligned grid.\n\n Examples\n --------\n >>> from landlab import HexModelGrid\n >>> grid = HexModelGrid((5, 5), node_layout=\"rect\")\n >>> grid.number_of_node_columns\n 5\n\n LLCATS: GINF NINF\n \"\"\"\n return self.shape[1]\n\n @property\n def number_of_node_rows(self):\n \"\"\"Number of node rows in a rectangular-shaped and/or horizontally\n oriented hex grid.\n\n Returns the number of rows, including boundaries.\n\n Notes\n 
-----\n Will generate an error if called with a hex-shaped, vertically\n aligned grid.\n\n Examples\n --------\n >>> from landlab import HexModelGrid\n >>> grid = HexModelGrid((5, 5), node_layout=\"rect\")\n >>> grid.number_of_node_rows\n 5\n\n LLCATS: GINF NINF\n \"\"\"\n return self._shape[0]\n\n def node_row_and_column(self, node_id):\n \"\"\"Row and column from node ID, FOR VERT RECT CONFIGURATION ONLY.\n\n Examples\n --------\n >>> from landlab import HexModelGrid\n >>> grid = HexModelGrid((3, 4), node_layout='rect', orientation=\"vertical\")\n >>> grid.node_row_and_column(5)\n (1, 2)\n >>> grid = HexModelGrid((3, 5), node_layout='rect', orientation=\"vertical\")\n >>> grid.node_row_and_column(13)\n (2, 1)\n \"\"\"\n assert self.orientation[0] == \"v\", \"grid orientation must be vertical\"\n try:\n (nr, nc) = self._shape\n except AttributeError:\n raise AttributeError(\n \"Only rectangular Hex grids have defined rows and columns.\"\n )\n\n row = node_id // nc\n n_mod_nc = node_id % nc\n half_nc = (nc + 1) // 2\n col = 2 * (n_mod_nc % half_nc) + n_mod_nc // half_nc\n return (row, col)\n\n def _configure_hexplot(self, data, data_label=None, color_map=None):\n \"\"\"Sets up necessary information for making plots of the hexagonal grid\n colored by a given data element.\n\n Parameters\n ----------\n data : str OR node array (1d numpy array with number_of_nodes entries)\n Data field to be colored\n data_label : str, optional\n Label for colorbar\n color_map : matplotlib colormap object, None\n Color map to apply (defaults to \"jet\")\n\n Returns\n -------\n (none)\n\n Notes\n -----\n Creates and stores a PatchCollection representing the hexagons. Also\n stores a handle to the current plotting axis. Both of these are then\n used by hexplot().\n \"\"\"\n import matplotlib\n from matplotlib.collections import PatchCollection\n from matplotlib.patches import Polygon\n from numpy import array, sqrt, zeros\n\n # color\n if color_map is None:\n color_map = matplotlib.cm.jet\n\n # geometry\n apothem = self.spacing / 2.0\n # distance from node to each hexagon cell vertex\n radius = 2.0 * apothem / sqrt(3.0)\n\n # offsets from node x,y position\n offsets = zeros((6, 2))\n poly_verts = zeros((6, 2))\n\n # Figure out whether the orientation is horizontal or vertical\n if self.orientation[0] == \"h\": # horizontal\n offsets[:, 0] = array([0.0, apothem, apothem, 0.0, -apothem, -apothem])\n offsets[:, 1] = array(\n [\n radius,\n radius / 2.0,\n -radius / 2.0,\n -radius,\n -radius / 2.0,\n radius / 2.0,\n ]\n )\n else: # vertical\n offsets[:, 0] = array(\n [\n radius / 2.0,\n radius,\n radius / 2.0,\n -radius / 2.0,\n -radius,\n -radius / 2.0,\n ]\n )\n offsets[:, 1] = array([apothem, 0.0, -apothem, -apothem, 0.0, apothem])\n\n patches = []\n for i in range(self.number_of_nodes):\n poly_verts[:, 0] = self.node_x[i] + offsets[:, 0]\n poly_verts[:, 1] = self.node_y[i] + offsets[:, 1]\n p = Polygon(poly_verts, True)\n patches.append(p)\n\n self._hexplot_pc = PatchCollection(\n patches, cmap=color_map, edgecolor=\"none\", linewidth=0.0\n )\n\n self._hexplot_configured = True\n\n def hexplot(self, data, data_label=None, color_map=None):\n \"\"\"Create a plot of the grid elements.\n\n Creates a plot of the grid and one node-data field, showing hexagonal\n cells colored by values in the field.\n\n Parameters\n ----------\n data : str or node array (1d numpy array with number_of_nodes entries)\n Data field to be colored.\n data_label : str, optional\n Label for colorbar.\n color_map : matplotlib colormap object, 
None\n Color map to apply (defaults to \"jet\")\n\n See also\n --------\n plot.imshow_grid\n Another Landlab function capable of producing hexplots, with a\n fuller-featured set of options.\n\n LLCATS: GINF\n \"\"\"\n import copy\n\n import matplotlib.pyplot as plt\n from numpy import amax, amin, array\n\n try:\n self._hexplot_configured\n except AttributeError:\n self._configure_hexplot(data, data_label, color_map)\n else:\n if self._hexplot_pc.cmap != color_map:\n self._configure_hexplot(data, data_label, color_map)\n\n # Handle *data*: if it's a numpy array, then we consider it the\n # data to be plotted. If it's a string, we consider it the name of the\n # node-field to plot, and we fetch it.\n if type(data) is str:\n data_label = data\n data = self.at_node[data]\n\n ax = plt.gca()\n self._hexplot_pc.set_array(array(data))\n copy_of_pc = copy.copy(self._hexplot_pc)\n ax.add_collection(copy_of_pc)\n plt.xlim([amin(self.node_x) - self.spacing, amax(self.node_x) + self.spacing])\n plt.ylim([amin(self.node_y) - self.spacing, amax(self.node_y) + self.spacing])\n\n return ax\n\n def set_watershed_boundary_condition_outlet_id(\n self, outlet_id, node_data, nodata_value=-9999.0\n ):\n \"\"\"Set the boundary conditions for a watershed on a HexModelGrid.\n\n All nodes with nodata_value are set to BC_NODE_IS_CLOSED.\n All nodes with data values are set to BC_NODE_IS_CORE, with the\n exception that the outlet node is set to a BC_NODE_IS_FIXED_VALUE.\n\n Note that the outer ring of the HexModelGrid is set to BC_NODE_IS_CLOSED, even\n if there are nodes that have values. The only exception to this would\n be if the outlet node is on the boundary, which is acceptable.\n\n Assumes that the id of the outlet is already known.\n\n This assumes that the grid has a single watershed. If this is not\n the case this will not work.\n\n Parameters\n ----------\n outlet_id : integer\n id of the outlet node\n node_data : field name or ndarray\n At-node field name or at-node data values to use for identifying\n watershed location.\n nodata_value : float, optional\n Value that indicates an invalid value.\n\n Examples\n --------\n The example will use a *HexModelGrid* with node data values\n as illustrated::\n\n 1. , 2. , 3. , 4. ,\n 0.5, 1.5, 2.5, 3.5, 4.5,\n 0. , 1. , 2. , 3. , 4. , 5.,\n 0.5, 1.5, 2.5, 3.5, 4.5,\n 1. , 2. , 3. , 4.\n\n >>> from landlab import HexModelGrid\n >>> hmg = HexModelGrid((5, 4))\n >>> z = hmg.add_zeros(\"topographic__elevation\", at=\"node\")\n >>> z += hmg.x_of_node + 1.0\n\n >>> hmg.status_at_node\n array([1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,\n 1], dtype=uint8)\n\n >>> outlet = hmg.set_watershed_boundary_condition_outlet_id(9, z, -9999.)\n >>> hmg.status_at_node\n array([4, 4, 4, 4, 4, 0, 0, 0, 4, 1, 0, 0, 0, 0, 4, 4, 0, 0, 0, 4, 4, 4, 4,\n 4], dtype=uint8)\n\n LLCATS: BC\n \"\"\"\n # get node_data if a field name\n node_data = self.return_array_or_field_values(\"node\", node_data)\n\n # make ring of no data nodes\n self.status_at_node[self.boundary_nodes] = self.BC_NODE_IS_CLOSED\n\n # set no data nodes to inactive boundaries\n self.set_nodata_nodes_to_closed(node_data, nodata_value)\n\n # set the boundary condition (fixed value) at the outlet_node\n self.status_at_node[outlet_id] = self.BC_NODE_IS_FIXED_VALUE\n\n def set_watershed_boundary_condition(\n self, node_data, nodata_value=-9999.0, return_outlet_id=False\n ):\n \"\"\"Finds the node adjacent to a boundary node with the smallest value.\n This node is set as the outlet. 
The outlet node must have a data\n value. Can return the outlet id as a one element numpy array if\n return_outlet_id is set to True.\n\n All nodes with nodata_value are set to `NodeStatus.CLOSED`\n (grid.status_at_node == 4). All nodes with data values are set to\n `NodeStatus.CORE` (grid.status_at_node == 0), with the exception that the\n outlet node is set to a `NodeStatus.FIXED_VALUE` (grid.status_at_node == 1).\n\n Note that the outer ring (perimeter) of the grid is set to\n `NodeStatus.CLOSED`, even if there are nodes that have values. The only\n exception to this would be if the outlet node is on the perimeter, which\n is acceptable.\n\n This routine assumes that all of the nodata_values are on the outside of\n the data values. In other words, there are no islands of nodata_values\n surrounded by nodes with data.\n\n This also assumes that the grid has a single watershed (that is a single\n outlet node).\n\n Parameters\n ----------\n node_data : field name or ndarray\n At-node field name or at-node data values to use for identifying\n watershed location.\n nodata_value : float, optional\n Value that indicates an invalid value.\n return_outlet_id : boolean, optional\n Indicates whether or not to return the id of the found outlet\n\n Examples\n --------\n The example will use a HexModelGrid with node data values\n as illustrated::\n\n 1. , 2. , 3. , 4. ,\n 0.5, 1.5, 2.5, 3.5, 4.5,\n 0. , 1. , 2. , 3. , 4. , 5.,\n 0.5, 1.5, 2.5, 3.5, 4.5,\n 1. , 2. , 3. , 4.\n\n >>> from landlab import HexModelGrid\n >>> hmg = HexModelGrid((5, 4))\n >>> z = hmg.add_zeros(\"topographic__elevation\", at=\"node\")\n >>> z += hmg.x_of_node + 1.0\n >>> out_id = hmg.set_watershed_boundary_condition(z, -9999., True)\n >>> out_id\n array([9])\n >>> hmg.status_at_node\n array([4, 4, 4, 4, 4, 0, 0, 0, 4, 1, 0, 0, 0, 0, 4, 4, 0, 0, 0, 4, 4, 4, 4,\n 4], dtype=uint8)\n\n LLCATS: BC\n \"\"\"\n # get node_data if a field name\n node_data = self.return_array_or_field_values(\"node\", node_data)\n\n # make ring of no data nodes\n self.status_at_node[self.boundary_nodes] = self.BC_NODE_IS_CLOSED\n\n # set no data nodes to inactive boundaries\n self.set_nodata_nodes_to_closed(node_data, nodata_value)\n\n # locs is a list that contains locations where\n # node data is not equal to the nodata value\n locs = numpy.where(node_data != nodata_value)\n if len(locs) < 1:\n raise ValueError(\"All data values are no_data values\")\n\n # now find minimum of the data values\n min_val = numpy.min(node_data[locs])\n\n # now find where minimum values are\n min_locs = numpy.where(node_data == min_val)[0]\n\n # check all the locations with the minimum value to see if one\n not_found = True\n while not_found:\n # now check the min locations to see if any are next to\n # a boundary node\n local_not_found = True\n # next_to_boundary = []\n\n # check all nodes rather than selecting the first node that meets\n # the criteria\n # for i in range(len(min_locs)):\n # next_to_boundary.append(self.node_has_boundary_neighbor()[min_locs[i])]\n next_to_boundary = self.node_has_boundary_neighbor()[(min_locs,)]\n # if any of those nodes were adjacent to the boundary, check\n # that there is only one. 
If only one, set as outlet loc, else,\n # raise a value error\n if numpy.any(next_to_boundary):\n local_not_found = False\n if sum(next_to_boundary) > 1:\n potential_locs = min_locs[\n numpy.where(numpy.asarray(next_to_boundary))[0]\n ]\n raise ValueError(\n (\n \"Grid has two potential outlet nodes.\"\n \"They have the following node IDs: \\n\"\n + str(potential_locs)\n + \"\\nUse the method set_watershed_boundary_condition_outlet_id \"\n \"to explicitly select one of these \"\n \"IDs as the outlet node.\"\n )\n )\n else:\n outlet_loc = min_locs[numpy.where(next_to_boundary)[0][0]]\n\n # checked all of the min vals, (so done with inner while)\n # and none of the min values were outlet candidates\n if local_not_found:\n # need to find the next largest minimum value\n # first find the locations of all values greater\n # than the old minimum\n # not done with outer while\n locs = numpy.where((node_data > min_val) & (node_data != nodata_value))\n # now find new minimum of these values\n min_val = numpy.min(node_data[locs])\n min_locs = numpy.where(node_data == min_val)[0]\n else:\n # if locally found, it is also globally found\n # so done with outer while\n not_found = False\n\n # set outlet boundary condition\n self.status_at_node[outlet_loc] = self.BC_NODE_IS_FIXED_VALUE\n\n if return_outlet_id:\n return as_id_array(numpy.array([outlet_loc]))\n" ]
[ [ "numpy.amax", "numpy.maximum", "numpy.sqrt", "numpy.in1d", "numpy.intersect1d", "numpy.append", "numpy.broadcast_arrays", "numpy.where", "numpy.zeros" ], [ "numpy.testing.assert_array_equal", "numpy.min", "numpy.sin", "numpy.testing.assert_array_almost_equal" ], [ "numpy.asarray", "numpy.zeros", "numpy.power" ], [ "numpy.log", "numpy.power", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.zeros", "numpy.sum", "numpy.testing.assert_array_almost_equal" ], [ "matplotlib.pyplot.gca", "numpy.amax", "matplotlib.collections.PatchCollection", "numpy.sqrt", "matplotlib.patches.Polygon", "numpy.min", "numpy.amin", "numpy.asarray", "numpy.full", "numpy.asfarray", "numpy.any", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MouvementMondial/OccupancyGridSLAM
[ "6473c2c33025933b937a8ed5b04fb1bcb563ebe0" ]
[ "lib/mapping.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Thorsten\n\"\"\"\n\nimport numpy as np\nfrom numba import jit\n\nimport os\nimport sys\nnb_dir = os.path.split(os.getcwd())[0]\nif nb_dir not in sys.path:\n sys.path.append(nb_dir)\n\nfrom lib import bresenham\n\n@jit\ndef addMeasurement(grid, x, y, pos_sensor, offset, resolution, l_occupied, l_free, l_min, l_max):\n \n for i in range(x.size):\n # round points to cells \n xi=int( (x[i,0]-offset[0]) / resolution )\n yi=int( (y[i,0]-offset[1]) / resolution )\n\n # set beam endpoint-cells as occupied\n grid[xi,yi] += l_occupied\n \n # value > threshold? -> clamping \n if grid[xi,yi] > l_max:\n grid[xi,yi] = l_max\n\n # calculate cells between sensor and endpoint as free\n path = bresenham.bresenham2D( ((pos_sensor-offset)/resolution).astype(int), np.array([[xi,yi]]))\n \n # set cells between sensor and endpoint as free\n updateFree(path,grid,l_free,l_min)\n \n@jit(nopython=True)\ndef updateFree(path,grid,l_free,l_min):\n for nr in range(path.shape[0]):\n path_x = int(path[nr,0])\n path_y = int(path[nr,1])\n \n grid[path_x, path_y] += l_free\n \n # value < threshold? -> clamping\n if grid[path_x, path_y] < l_min:\n grid[path_x, path_y] = l_min\n \n@jit(nopython=True)\ndef scan2mapDistance(grid,pcl,offset,resolution):\n distance = 0;\n for i in range(pcl.shape[0]):\n # round points to cells\n xi = int ( (pcl[i,0]-offset[0]) / resolution )\n yi = int ( (pcl[i,1]-offset[1]) / resolution ) \n distance += grid[xi,yi]\n return distance" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
i-aki-y/librosa
[ "a464b336c23a94e00943fc50e936180f503367eb" ]
[ "tests/test_onset.py" ]
[ "#!/usr/bin/env python\n# CREATED:2013-03-11 18:14:30 by Brian McFee <[email protected]>\n# unit tests for librosa.onset\n\nfrom __future__ import print_function\nimport pytest\nfrom contextlib2 import nullcontext as dnr\n\n# Disable cache\nimport os\n\ntry:\n os.environ.pop(\"LIBROSA_CACHE_DIR\")\nexcept:\n pass\n\n\nimport warnings\n\nimport numpy as np\nimport librosa\n\nfrom test_core import srand\n\n__EXAMPLE_FILE = os.path.join(\"tests\", \"data\", \"test1_22050.wav\")\n\n\[email protected](scope=\"module\")\ndef ysr():\n return librosa.load(__EXAMPLE_FILE)\n\n\[email protected](\n \"feature\", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]\n)\[email protected](\"n_fft\", [512, 2048])\[email protected](\"hop_length\", [256, 512])\[email protected](\"lag\", [1, 2])\[email protected](\"max_size\", [1, 2])\[email protected](\"detrend\", [False, True])\[email protected](\"center\", [False, True])\[email protected](\"aggregate\", [None, np.mean, np.max])\ndef test_onset_strength_audio(\n ysr, feature, n_fft, hop_length, lag, max_size, detrend, center, aggregate\n):\n\n y, sr = ysr\n oenv = librosa.onset.onset_strength(\n y=y,\n sr=sr,\n S=None,\n detrend=detrend,\n center=center,\n aggregate=aggregate,\n feature=feature,\n n_fft=n_fft,\n hop_length=hop_length,\n lag=lag,\n max_size=max_size,\n )\n\n assert oenv.ndim == 1\n\n S = librosa.feature.melspectrogram(y=y, n_fft=n_fft, hop_length=hop_length)\n\n target_shape = S.shape[-1]\n\n if not detrend:\n assert np.all(oenv >= 0)\n\n assert oenv.shape[-1] == target_shape\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badlag(ysr):\n y, sr = ysr\n librosa.onset.onset_strength(y=y, sr=sr, lag=0)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badmax(ysr):\n y, sr = ysr\n librosa.onset.onset_strength(y=y, sr=sr, max_size=0)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_noinput():\n librosa.onset.onset_strength(y=None, S=None)\n\n\[email protected](scope=\"module\")\ndef melspec_sr(ysr):\n y, sr = ysr\n S = librosa.feature.melspectrogram(y=y, sr=sr)\n return S, sr\n\n\[email protected](\n \"feature\", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]\n)\[email protected](\"n_fft\", [512, 2048])\[email protected](\"hop_length\", [256, 512])\[email protected](\"detrend\", [False, True])\[email protected](\"center\", [False, True])\[email protected](\"aggregate\", [None, np.mean, np.max])\ndef test_onset_strength_spectrogram(\n melspec_sr, feature, n_fft, hop_length, detrend, center, aggregate\n):\n S, sr = melspec_sr\n oenv = librosa.onset.onset_strength(\n y=None,\n sr=sr,\n S=S,\n detrend=detrend,\n center=center,\n aggregate=aggregate,\n feature=feature,\n n_fft=n_fft,\n hop_length=hop_length,\n )\n\n assert oenv.ndim == 1\n\n target_shape = S.shape[-1]\n\n if not detrend:\n assert np.all(oenv >= 0)\n\n assert oenv.shape[-1] == target_shape\n\n\[email protected](\"lag\", [1, 2, 3])\[email protected](\"aggregate\", [np.mean, np.max])\ndef test_onset_strength_multi_noagg(melspec_sr, lag, aggregate):\n\n S, sr = melspec_sr\n # We only test with max_size=1 here to make the sub-band slicing test simple\n odf_multi = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, aggregate=False\n )\n odf_mean = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, aggregate=aggregate\n )\n\n # With no aggregation, output shape should = input shape\n assert odf_multi.shape == S.shape\n\n # Result should average 
out to the same as mean aggregation\n assert np.allclose(odf_mean, aggregate(odf_multi, axis=0))\n\n\[email protected](scope=\"module\")\ndef channels(melspec_sr):\n S, _ = melspec_sr\n return np.linspace(0, S.shape[0], num=5, dtype=int)\n\n\[email protected](\"lag\", [1, 2, 3])\ndef test_onset_strength_multi(melspec_sr, lag, channels):\n\n S, sr = melspec_sr\n # We only test with max_size=1 here to make the sub-band slicing test simple\n odf_multi = librosa.onset.onset_strength_multi(\n S=S, lag=lag, max_size=1, channels=channels\n )\n\n assert len(odf_multi) == len(channels) - 1\n\n for i, (s, t) in enumerate(zip(channels, channels[1:])):\n odf_single = librosa.onset.onset_strength(S=S[s:t], lag=lag, max_size=1)\n assert np.allclose(odf_single, odf_multi[i])\n\n\[email protected](scope=\"module\", params=[64, 512, 2048])\ndef hop(request):\n return request.param\n\n\[email protected](scope=\"module\", params=[False, True], ids=[\"audio\", \"oenv\"])\ndef oenv(ysr, hop, request):\n\n if request.param:\n y, sr = ysr\n return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)\n else:\n return None\n\n\[email protected](\"bt\", [False, True])\[email protected](\"normalize\", [False, True])\ndef test_onset_detect_real(ysr, oenv, hop, bt, normalize):\n\n y, sr = ysr\n onsets = librosa.onset.onset_detect(\n y=y,\n sr=sr,\n onset_envelope=oenv,\n hop_length=hop,\n backtrack=bt,\n normalize=normalize,\n )\n if bt:\n assert np.all(onsets >= 0)\n else:\n assert np.all(onsets > 0)\n\n assert np.all(onsets < len(y) * sr // hop)\n if oenv is not None:\n assert np.all(onsets < len(oenv))\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_detect_nosignal():\n librosa.onset.onset_detect(y=None, onset_envelope=None)\n\n\[email protected](\"sr\", [4000])\[email protected](\"y\", [np.zeros(4000), np.ones(4000), -np.ones(4000)])\[email protected](\"hop_length\", [64, 512, 2048])\ndef test_onset_detect_const(y, sr, hop_length):\n\n # Disable padding here\n onsets = librosa.onset.onset_detect(\n y=y, sr=sr, onset_envelope=None, hop_length=hop_length,\n )\n\n # We'll allow one onset at the start of the signal for these examples\n # when y is all-ones, zero-padding induces an onset at the beginning of the\n # signal\n assert len(onsets) == 0 or (y[0] != 0 and len(onsets) == 1)\n\n\[email protected](\n \"units, ctx\",\n [\n (\"frames\", dnr()),\n (\"time\", dnr()),\n (\"samples\", dnr()),\n (\"bad units\", pytest.raises(librosa.ParameterError)),\n ],\n)\[email protected](\"hop_length\", [512, 1024])\ndef test_onset_units(ysr, hop_length, units, ctx):\n\n y, sr = ysr\n\n with ctx:\n b1 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length)\n b2 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length, units=units)\n\n t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)\n\n if units == \"time\":\n t2 = b2\n\n elif units == \"samples\":\n t2 = librosa.samples_to_time(b2, sr=sr)\n\n elif units == \"frames\":\n t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)\n\n assert np.allclose(t1, t2)\n\n\[email protected](scope=\"module\", params=[False, True], ids=[\"oenv\", \"rms\"])\ndef energy(ysr, hop, request):\n y, sr = ysr\n if request.param:\n return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)\n else:\n return librosa.feature.rms(y=y, hop_length=hop)\n\n\ndef test_onset_backtrack(ysr, oenv, hop, energy):\n y, sr = ysr\n\n onsets = librosa.onset.onset_detect(\n y=y, sr=sr, onset_envelope=oenv, hop_length=hop, backtrack=False\n )\n\n # Test 
backtracking\n onsets_bt = librosa.onset.onset_backtrack(onsets, energy)\n\n # Make sure there are no negatives\n assert np.all(onsets_bt >= 0)\n\n # And that we never roll forward\n assert np.all(onsets_bt <= onsets)\n\n # And that the detected peaks are actually minima\n assert np.all(energy[onsets_bt] <= energy[np.maximum(0, onsets_bt - 1)])\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_noagg():\n S = np.zeros((3, 3))\n librosa.onset.onset_strength(S=S, aggregate=False)\n\n\[email protected](raises=librosa.ParameterError)\ndef test_onset_strength_badref():\n S = np.zeros((3, 3))\n librosa.onset.onset_strength(S=S, ref=S[:, :2])\n\n\ndef test_onset_strength_multi_ref():\n srand()\n\n # Make a random positive spectrum\n S = 1 + np.abs(np.random.randn(1025, 10))\n\n # Test with a null reference\n null_ref = np.zeros_like(S)\n\n onsets = librosa.onset.onset_strength_multi(\n S=S, ref=null_ref, aggregate=False, center=False\n )\n\n # since the reference is zero everywhere, S - ref = S\n # past the setup phase (first frame)\n assert np.allclose(onsets[:, 1:], S[:, 1:])\n\n\ndef test_onset_detect_inplace_normalize():\n\n # This test will fail if the in-place normalization modifies\n # the input onset envelope\n oenv_in = np.ones(50)\n oenv_in[10] = 2\n oenv_orig = oenv_in.copy()\n\n librosa.onset.onset_detect(onset_envelope=oenv_in, normalize=True)\n\n assert np.allclose(oenv_in, oenv_orig) and oenv_in is not oenv_orig\n" ]
[ [ "numpy.maximum", "numpy.allclose", "numpy.linspace", "numpy.ones", "numpy.all", "numpy.zeros_like", "numpy.random.randn", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
littlejgogo/MDCPE-co-training-method-for-hyperspectral-image-classification
[ "b7d367abd97ada77adc45a1120149cf247f9713c", "b7d367abd97ada77adc45a1120149cf247f9713c", "b7d367abd97ada77adc45a1120149cf247f9713c", "b7d367abd97ada77adc45a1120149cf247f9713c", "b7d367abd97ada77adc45a1120149cf247f9713c", "b7d367abd97ada77adc45a1120149cf247f9713c" ]
[ "training code/paviau/rnn/test/logitsmulti.py", "training code/paviac/rf/code/rf_traintest.py", "training code/paviac/dcpe/test/logitsmulti.py", "training code/paviac/cnn/code/cnn_indices.py", "training code/salinas/mdcpe/code/predict_unlabel.py", "training code/salinas/dcpe/code/overall_test.py" ]
[ "\nimport tensorflow as tf\nimport cnn_indices\n\ndata = cnn_indices.read_data_sets()\nimport final_index\nimport numpy as np\nsaver = tf.train.import_meta_graph('/home/asdf/Documents/juyan/paper/salinas/cnn/model/NEW/'\n 'CNN0507.ckpt.meta')\nbatch_size = data.valid._num_examples\nwith tf.Session() as sess:\n saver.restore(sess, '/home/asdf/Documents/juyan/paper/salinas/cnn/model/NEW/'\n 'CNN0507.ckpt')\n y = sess.graph.get_tensor_by_name('Softmax:0')\n X = sess.graph.get_operation_by_name('X').outputs[0]\n keep_prob = sess.graph.get_operation_by_name('keep_prob').outputs[0]\n\n batch, Y = data.valid.next_batch_test(batch_size)\n predict_label = sess.run(y, feed_dict={X: batch, keep_prob: 1.0})\npredict_label = np.argmax(predict_label, 1) + 1\ntrue_label = np.argmax(Y, 1) + 1\nevery_class, confusion_mat = final_index.test_data_index(true_label, predict_label, 16)\nnp.savez('/home/asdf/Documents/juyan/paper/salinas/cnn/test/zhibiao0513.npz',\n every_class=every_class, confusion_mat=confusion_mat)\nprint(\"ok\")\n\n\n", "# coding=utf-8\nimport skimage.io\nimport numpy as np\nimport time\nimport pickle\nfrom sklearn.ensemble import RandomForestClassifier\nimport final_index\nimport rnn_indices\ndata = rnn_indices.read_data_sets()\ntrue_label = np.zeros((1, ), dtype=np.int32)\npre_label = np.zeros((1, ), dtype=np.int32)\n\nbatch_size1 = data.train.num_examples\ntrain_x, train_y = data.train.next_batch(batch_size1)\ntrain_x = train_x[:600]\ntrain_y = train_y[:600]\nbatch_size2 = data.valid.num_examples\nvalid_x, valid_y = data.valid.next_batch(batch_size2)\nbest = 0\n# for i in range(20, 301):\nrf = RandomForestClassifier(criterion=\"gini\", max_features=\"sqrt\",\n n_estimators=50, min_samples_leaf=2, n_jobs=-1, oob_score=False)\nrf.fit(train_x, train_y)\nvalid_acc = rf.score(valid_x, valid_y)\nall_sets_index = np.load(\"/home/asdf/Documents/juyan/paper/paviac/cnn/data/all_index.npy\")\n# test_batch = 5000\n# for index in range((data.test._num_examples // test_batch) + 1):\n# test_data, true_lab = data.test.next_batch_test(test_batch)\n# pre_lab = rf.predict(test_data)\n# true_label = np.concatenate((true_label, true_lab), axis=0)\n# pre_label = np.concatenate((pre_label, pre_lab), axis=0)\n#\n# true_label = true_label[1:]\n# pre_label = pre_label[1:]\n#\n# every_class, confusion_mat = final_index.test_data_index(true_label, pre_label, 9)\n# np.savez('/home/asdf/Documents/juyan/paper/paviac/rf/test/zhibiao0521.npz',\n# every_class=every_class, confusion_mat=confusion_mat)\n\n\ntest_batch = 5000\nfor index in range((data.all._num_examples // test_batch) + 1):\n test_data = data.all.next_batch_testall(test_batch)\n pre_lab = rf.predict(test_data)\n # true_label = np.concatenate((true_label, true_lab), axis=0)\n pre_label = np.concatenate((pre_label, pre_lab), axis=0)\n\npre_label = pre_label[1:]\nnp.save(\"/home/asdf/Documents/juyan/paper/paviac/rf/test/pre_alllabel.npy\", pre_label)\nprint(\"ok\")", "import tensorflow as tf\nimport cnn_indices\n\ndata = cnn_indices.read_data_sets()\nimport final_index\nimport numpy as np\nsaver = tf.train.import_meta_graph('/home/asdf/Documents/juyan/paper/paviac/dcpe/model/7_cnn/'\n 'CNN05117.ckpt.meta')\nbatch_size = 5000\n# prediction = np.zeros((1, 9), dtype=np.int32)\n# true_label = np.zeros((1, 9), dtype=np.int32)\ncnnlogits = np.zeros((1, 9), dtype=np.float64)\nwith tf.Session() as sess:\n saver.restore(sess, '/home/asdf/Documents/juyan/paper/paviac/dcpe/model/7_cnn/'\n 'CNN05117.ckpt')\n y = sess.graph.get_tensor_by_name('Softmax:0')\n X = 
sess.graph.get_operation_by_name('X').outputs[0]\n keep_prob = sess.graph.get_operation_by_name('keep_prob').outputs[0]\n proba = sess.graph.get_tensor_by_name('Add_1:0')\n for index in range((data.all._num_examples // batch_size) + 1):\n batch = data.all.next_batch_test(batch_size)\n cnn_logits = sess.run(proba, feed_dict={X: batch, keep_prob: 1.0})\n # prediction = np.concatenate((prediction, pre_pro), axis=0)\n # true_label = np.concatenate((true_label, Y), axis=0)\n cnnlogits = np.concatenate((cnnlogits, cnn_logits), axis=0)\n# predict_label = np.argmax(prediction[1:], 1) + 1\n# true_label = np.argmax(true_label[1:], 1) + 1\n# prediction = prediction[1:]\ncnnlogits = cnnlogits[1:]\nrnnlogtis = np.load(\"/home/asdf/Documents/juyan/paper/paviac/dcpe/testresult/all_logits.npy\")\n\nnorm_rnn = np.zeros((cnnlogits.shape[0], cnnlogits.shape[1]), dtype=np.float32)\nnorm_cnn = np.zeros((cnnlogits.shape[0], cnnlogits.shape[1]), dtype=np.float32)\nmax_cnn = np.amax(cnnlogits, axis=1)\nmin_cnn = np.amin(cnnlogits, axis=1)\nsubstract_cnn = [x-y for x, y in zip(max_cnn, min_cnn)]\nmax_rnn = np.amax(rnnlogtis, axis=1)\nmin_rnn = np.amin(rnnlogtis, axis=1)\nsubstract_rnn = [x-y for x, y in zip(max_rnn, min_rnn)]\nfor i in range(cnnlogits.shape[0]):\n for j in range(cnnlogits.shape[1]):\n norm_cnn[i][j] = (cnnlogits[i][j] - min_cnn[i]) / substract_cnn[i]\n norm_rnn[i][j] = (rnnlogtis[i][j] - min_rnn[i]) / substract_rnn[i]\n\n\nalllogits = [x * y for x, y in zip(norm_cnn, norm_rnn)]\n\npredict_label = np.argmax(alllogits, 1) + 1\n\n# every_class, confusion_mat = final_index.test_data_index(true_label, predict_label, 9)\n# np.savez('/home/asdf/Documents/juyan/paper/paviac/dcpe/testresult/zhibiao0517.npz',\n# every_class=every_class, confusion_mat=confusion_mat)\n# print(\"ok\")\nnp.save(\"/home/asdf/Documents/juyan/paper/paviac/dcpe/test/pre_alllabel.npy\", predict_label)\n# # zhibiao = np.load('/home/asdf/Documents/juyan/paper/data/salinas/0418_15each_class/zhibiao0421_cnnco.npz')\n# # every_class = zhibiao['every_class']\n# # confusion_mat = zhibiao['confusion_mat']\n#\n\n# import tensorflow as tf\n# import cnn_indices\n#\n# data = cnn_indices.read_data_sets()\n# import final_index\n# import numpy as np\n# saver = tf.train.import_meta_graph('/home/asdf/Documents/juyan/paper/paviac/dcpe/model/7_cnn/'\n# 'CNN05117.ckpt.meta')\n# batch_size = data.valid._num_examples\n# # prediction = np.zeros((1, 9), dtype=np.int32)\n# # true_label = np.zeros((1, 9), dtype=np.int32)\n# # cnnlogits = np.zeros((1, 9), dtype=np.float64)\n# with tf.Session() as sess:\n# saver.restore(sess, '/home/asdf/Documents/juyan/paper/paviac/dcpe/model/7_cnn/'\n# 'CNN05117.ckpt')\n# y = sess.graph.get_tensor_by_name('Softmax:0')\n# X = sess.graph.get_operation_by_name('X').outputs[0]\n# keep_prob = sess.graph.get_operation_by_name('keep_prob').outputs[0]\n# proba = sess.graph.get_tensor_by_name('Add_1:0')\n# batch, Y = data.valid.next_batch_test(batch_size)\n# cnn_logits, pre_pro = sess.run([proba, y], feed_dict={X: batch, keep_prob: 1.0})\n# # prediction = np.concatenate((prediction, pre_pro), axis=0)\n# # true_label = np.concatenate((true_label, Y), axis=0)\n# # cnnlogits = np.concatenate((cnnlogits, cnn_logits), axis=0)\n# # predict_label = np.argmax(prediction[1:], 1) + 1\n# true_label = np.argmax(Y, 1) + 1\n# # prediction = prediction[1:]\n# cnnlogits = cnn_logits\n# rnnlogtis = np.load(\"/home/asdf/Documents/juyan/paper/paviac/dcpe/testresult/logits.npy\")\n#\n# norm_rnn = np.zeros((cnnlogits.shape[0], cnnlogits.shape[1]), 
dtype=np.float32)\n# norm_cnn = np.zeros((cnnlogits.shape[0], cnnlogits.shape[1]), dtype=np.float32)\n# max_cnn = np.amax(cnnlogits, axis=1)\n# min_cnn = np.amin(cnnlogits, axis=1)\n# substract_cnn = [x-y for x, y in zip(max_cnn, min_cnn)]\n# max_rnn = np.amax(rnnlogtis, axis=1)\n# min_rnn = np.amin(rnnlogtis, axis=1)\n# substract_rnn = [x-y for x, y in zip(max_rnn, min_rnn)]\n# for i in range(cnnlogits.shape[0]):\n# for j in range(cnnlogits.shape[1]):\n# norm_cnn[i][j] = (cnnlogits[i][j] - min_cnn[i]) / substract_cnn[i]\n# norm_rnn[i][j] = (rnnlogtis[i][j] - min_rnn[i]) / substract_rnn[i]\n#\n#\n# alllogits = [x * y for x, y in zip(norm_cnn, norm_rnn)]\n#\n# predict_label = np.argmax(alllogits, 1) + 1\n#\n# every_class, confusion_mat = final_index.test_data_index(true_label, predict_label, 9)\n# np.savez('/home/asdf/Documents/juyan/paper/paviac/dcpe/testresult/zhibiao0517.npz',\n# every_class=every_class, confusion_mat=confusion_mat)\n# print(\"ok\")\n#\n# # zhibiao = np.load('/home/asdf/Documents/juyan/paper/data/salinas/0418_15each_class/zhibiao0421_cnnco.npz')\n# # every_class = zhibiao['every_class']\n# # confusion_mat = zhibiao['confusion_mat']\n#\n\n", "from __future__ import print_function\nimport numpy as np\nimport scipy.io as sio\n\n\ndef zeroPadding_3D(old_matrix, pad_length, pad_depth=0):\n new_matrix = np.lib.pad(old_matrix, ((pad_length, pad_length), (pad_length, pad_length), (pad_depth, pad_depth)), 'constant', constant_values=0)\n return new_matrix\n\n\nnb_classes = 9\nINPUT_DIMENSION = 3\nPATCH_LENGTH = 11\nbatch = 23\n\npca_data = sio.loadmat(\"/home/asdf/Documents/juyan/paper/paviac/cnn/data/pca3_pavia.mat\")\ndata_IN = pca_data['newdata']\nnormdata = np.zeros((data_IN.shape[0], data_IN.shape[1], data_IN.shape[2]), dtype=np.float32)\nfor dim in range(data_IN.shape[2]):\n normdata[:, :, dim] = (data_IN[:, :, dim] - np.amin(data_IN[:, :, dim])) / \\\n float((np.amax(data_IN[:, :, dim]) - np.amin(data_IN[:, :, dim])))\n\npadded_data = zeroPadding_3D(normdata, PATCH_LENGTH)\n\nmat_gt = sio.loadmat(\"/home/asdf/Documents/juyan/paper/paviac/cnn/data/Pavia_gt.mat\")\nGT = mat_gt['pavia_gt']\nGT = GT.reshape(np.prod(GT.shape[:2]),)\nlabeled_sets = np.load('/home/asdf/Documents/juyan/paper/paviac/cnn/data/labeled_index.npy')\n# test_sets = np.load('/home/asdf/Documents/juyan/paper/data/salinas/cnn/test_index.npy')\nvalid_sets = np.load('/home/asdf/Documents/juyan/paper/paviac/cnn/data/valid_index.npy')\n# unlabeled_sets = np.load('/home/asdf/Documents/juyan/paper/salinas/cnn/data/unlabeled_index.npy')\n\n\ndef dense_to_one_hot(labels_dense, num_classes=9):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()-1] = 1\n return labels_one_hot\n\nclass DataSet(object):\n def __init__(self, images):\n self._num_examples = len(images)\n self._images = images\n self._epochs_completed = 0\n self._index_in_epoch = 0\n @property\n def images(self):\n return self._images\n @property\n def num_examples(self):\n return self._num_examples\n @property\n def epochs_completed(self):\n return self._epochs_completed\n def next_batch(self, batch_size):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n 
np.random.shuffle(perm)\n self._images = self._images[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n hsi_batch_patch = np.zeros((batch_size, batch, batch, INPUT_DIMENSION), dtype=np.float32)\n col = data_IN.shape[1]\n for q1 in range(batch_size):\n hsi_batch_patch[q1] = padded_data[(self._images[start+q1] // col):\n ((self._images[start+q1] // col) + batch),\n (self._images[start+q1] % col):\n ((self._images[start+q1] % col) + batch), :]\n block = self._images[start:end]\n hsi_batch_label = GT[block]\n hsi_batch_label = dense_to_one_hot(hsi_batch_label, num_classes=9)\n return hsi_batch_patch, hsi_batch_label\n #\n # def next_batch_test(self, batch_size):\n # start = self._index_in_epoch\n # self._index_in_epoch += batch_size\n # if self._index_in_epoch > self._num_examples:\n # self._index_in_epoch = self._num_examples\n # end = self._index_in_epoch\n # hsi_batch_patch = np.zeros((end-start, 15, 15, INPUT_DIMENSION), dtype=np.float32)\n # col = data_IN.shape[1]\n # for q1 in range(end-start):\n # hsi_batch_patch[q1] = padded_data[(self._images[start + q1] // col):\n # ((self._images[start + q1] // col) + 15),\n # (self._images[start + q1] % col):\n # ((self._images[start + q1] % col) + 15), :]\n #\n # return hsi_batch_patch\n\n\ndef read_data_sets():\n class DataSets(object):\n pass\n data_sets = DataSets()\n data_sets.train = DataSet(labeled_sets)\n data_sets.valid = DataSet(valid_sets)\n # data_sets.unlabel = DataSet(unlabeled_sets)\n # data_sets.test = DataSet(test_sets)\n return data_sets\n\n", "import tensorflow as tf\n\nclass Predict(object):\n def __init__(self, path1, path2):\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.saver = tf.train.import_meta_graph(path1)\n self.sess = tf.Session(graph=self.graph)\n with self.sess.as_default():\n with self.graph.as_default():\n self.saver.restore(self.sess, path2)\n\n def predict1(self, batch):\n y = self.graph.get_tensor_by_name('Softmax:0')\n proba = self.graph.get_tensor_by_name('Add_1:0')\n X = self.graph.get_operation_by_name('X').outputs[0]\n keep_prob = self.graph.get_operation_by_name('keep_prob').outputs[0]\n cnn_logits, pre_pro = self.sess.run([proba, y], feed_dict={X: batch, keep_prob: 1.0})\n return cnn_logits, pre_pro\n\n def predict2(self, batch):\n # tf.reset_default_graph()\n y = self.graph.get_tensor_by_name('Softmax:0')\n proba = self.graph.get_tensor_by_name('add:0')\n X = self.graph.get_operation_by_name('X').outputs[0]\n # batch = batch.reshape((batch_size, timesteps, num_input))\n rnn_logits, pre_pro = self.sess.run([proba, y], feed_dict={X: batch})\n return rnn_logits, pre_pro\n\n\ndef predict_models():\n class DataSets(object):\n pass\n pre_label = DataSets()\n pre_label.cnn = Predict('/home/asdf/Documents/juyan/paper/salinas/mdcpe_result/newmdcpe/model/cnn/'\n 'CNN0507.ckpt.meta',\n '/home/asdf/Documents/juyan/paper/salinas/mdcpe_result/newmdcpe/model/cnn/'\n 'CNN0507.ckpt')\n pre_label.rnn = Predict('/home/asdf/Documents/juyan/paper/salinas/mdcpe_result/newmdcpe/model/rnn/'\n 'RNN0507.ckpt.meta',\n '/home/asdf/Documents/juyan/paper/salinas/mdcpe_result/newmdcpe/model/rnn/'\n 'RNN0507.ckpt')\n return pre_label", "import numpy as np\nimport os\nimport sys\nimport three_cnn\nimport recurrent_network\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\n\n# # #\ndef get_session(gpu_fraction=0.3):\n \"\"\"\n This function is to allocate GPU memory a specific fraction\n Assume that 
you have 6GB of GPU memory and want to allocate ~2GB\n \"\"\"\n\n num_threads = os.environ.get('OMP_NUM_THREADS')\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n\n if num_threads:\n return tf.Session(config=tf.ConfigProto(\n gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n else:\n return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n\nKTF.set_session(get_session(0.3)) # using 60% of total GPU Memory\nos.system(\"nvidia-smi\") # Execute the command (a string) in a subshell\n#\nbest_train_cnn = np.zeros((20,), dtype=np.float32)\nbest_train_rnn = np.zeros((20,), dtype=np.float32)\n\n\nfor name_num in range(20):\n print(\"update sample step:\", name_num + 1)\n execfile('/home/asdf/Documents/juyan/paper/salinas/dcpe_result/code/unlabel_test.py')\n print(\"training cnn step:\", name_num + 1)\n best_train_cnn[name_num] = three_cnn.train_cnn(name_index=name_num+1)\n print(\"training rnn step:\", name_num + 1)\n best_train_rnn[name_num] = recurrent_network.train_rnn(name_index=name_num+1)\nnp.save(\"/home/asdf/Documents/juyan/paper/salinas/dcpe_result/model/cnn20.npy\", best_train_cnn)\nnp.save(\"/home/asdf/Documents/juyan/paper/salinas/dcpe_result/model/rnn20.npy\", best_train_rnn)" ]
[ [ "tensorflow.Session", "numpy.savez", "numpy.argmax", "tensorflow.train.import_meta_graph" ], [ "sklearn.ensemble.RandomForestClassifier", "numpy.save", "numpy.concatenate", "numpy.load", "numpy.zeros" ], [ "numpy.amax", "numpy.amin", "tensorflow.train.import_meta_graph", "numpy.save", "numpy.concatenate", "numpy.argmax", "tensorflow.Session", "numpy.load", "numpy.zeros" ], [ "numpy.lib.pad", "numpy.amax", "numpy.amin", "numpy.arange", "scipy.io.loadmat", "numpy.random.shuffle", "numpy.prod", "numpy.load", "numpy.zeros" ], [ "tensorflow.train.import_meta_graph", "tensorflow.Graph", "tensorflow.Session" ], [ "tensorflow.ConfigProto", "numpy.zeros", "tensorflow.GPUOptions", "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
galipremsagar/dask
[ "134182e05009dbb20bd8e59ccf8bf771e5d4399a", "134182e05009dbb20bd8e59ccf8bf771e5d4399a" ]
[ "dask/dataframe/io/parquet/utils.py", "dask/array/tests/test_dispatch.py" ]
[ "import re\n\nimport pandas as pd\n\nfrom ....core import flatten\nfrom ....utils import natural_sort_key\n\n\nclass Engine:\n \"\"\" The API necessary to provide a new Parquet reader/writer \"\"\"\n\n @classmethod\n def read_metadata(\n cls,\n fs,\n paths,\n categories=None,\n index=None,\n gather_statistics=None,\n filters=None,\n **kwargs\n ):\n \"\"\"Gather metadata about a Parquet Dataset to prepare for a read\n\n This function is called once in the user's Python session to gather\n important metadata about the parquet dataset.\n\n Parameters\n ----------\n fs: FileSystem\n paths: List[str]\n A list of paths to files (or their equivalents)\n categories: list, dict or None\n Column(s) containing categorical data.\n index: str, List[str], or False\n The column name(s) to be used as the index.\n If set to ``None``, pandas metadata (if available) can be used\n to reset the value in this function\n gather_statistics: bool\n Whether or not to gather statistics data. If ``None``, we only\n gather statistics data if there is a _metadata file available to\n query (cheaply)\n filters: list\n List of filters to apply, like ``[('x', '>', 0), ...]``.\n **kwargs: dict (of dicts)\n User-specified arguments to pass on to backend.\n Top level key can be used by engine to select appropriate dict.\n\n Returns\n -------\n meta: pandas.DataFrame\n An empty DataFrame object to use for metadata.\n Should have appropriate column names and dtypes but need not have\n any actual data\n statistics: Optional[List[Dict]]\n Either None, if no statistics were found, or a list of dictionaries\n of statistics data, one dict for every partition (see the next\n return value). The statistics should look like the following:\n\n [\n {'num-rows': 1000, 'columns': [\n {'name': 'id', 'min': 0, 'max': 100},\n {'name': 'x', 'min': 0.0, 'max': 1.0},\n ]},\n ...\n ]\n parts: List[object]\n A list of objects to be passed to ``Engine.read_partition``.\n Each object should represent a piece of data (usually a row-group).\n The type of each object can be anything, as long as the\n engine's read_partition function knows how to interpret it.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def read_partition(cls, fs, piece, columns, index, **kwargs):\n \"\"\"Read a single piece of a Parquet dataset into a Pandas DataFrame\n\n This function is called many times in individual tasks\n\n Parameters\n ----------\n fs: FileSystem\n piece: object\n This is some token that is returned by Engine.read_metadata.\n Typically it represents a row group in a Parquet dataset\n columns: List[str]\n List of column names to pull out of that row group\n index: str, List[str], or False\n The index name(s).\n **kwargs:\n Includes `\"kwargs\"` values stored within the `parts` output\n of `engine.read_metadata`. May also include arguments to be\n passed to the backend (if stored under a top-level `\"read\"` key).\n\n Returns\n -------\n A Pandas DataFrame\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def initialize_write(\n cls,\n df,\n fs,\n path,\n append=False,\n partition_on=None,\n ignore_divisions=False,\n division_info=None,\n **kwargs\n ):\n \"\"\"Perform engine-specific initialization steps for this dataset\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n fs: FileSystem\n path: str\n Destination directory for data. 
Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n append: bool\n If True, may use existing metadata (if any) and perform checks\n against the new data being stored.\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n ignore_divisions: bool\n Whether or not to ignore old divisions when appending. Otherwise,\n overlapping divisions will lead to an error being raised.\n division_info: dict\n Dictionary containing the divisions and corresponding column name.\n **kwargs: dict\n Other keyword arguments (including `index_cols`)\n\n Returns\n -------\n tuple:\n engine-specific instance\n list of filenames, one per partition\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_partition(\n cls, df, path, fs, filename, partition_on, return_metadata, **kwargs\n ):\n \"\"\"\n Output a partition of a dask.DataFrame. This will correspond to\n one output file, unless partition_on is set, in which case, it will\n correspond to up to one file in each sub-directory.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n path: str\n Destination directory for data. Prepend with protocol like ``s3://``\n or ``hdfs://`` for remote data.\n fs: FileSystem\n filename: str\n partition_on: List(str)\n Column(s) to use for dataset partitioning in parquet.\n return_metadata : bool\n Whether to return list of instances from this write, one for each\n output file. These will be passed to write_metadata if an output\n metadata file is requested.\n **kwargs: dict\n Other keyword arguments (including `fmd` and `index_cols`)\n\n Returns\n -------\n List of metadata-containing instances (if `return_metadata` is `True`)\n or empty list\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):\n \"\"\"\n Write the shared metadata file for a parquet dataset.\n\n Parameters\n ----------\n parts: List\n Contains metadata objects to write, of the type undrestood by the\n specific implementation\n meta: non-chunk metadata\n Details that do not depend on the specifics of each chunk write,\n typically the schema and pandas metadata, in a format the writer\n can use.\n fs: FileSystem\n path: str\n Output file to write to, usually ``\"_metadata\"`` in the root of\n the output dataset\n append: boolean\n Whether or not to consolidate new metadata with existing (True)\n or start from scratch (False)\n **kwargs: dict\n Other keyword arguments (including `compression`)\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def collect_file_metadata(cls, path, fs, file_path):\n \"\"\"\n Collect parquet metadata from a file and set the file_path.\n\n Parameters\n ----------\n path: str\n Parquet-file path to extract metadata from.\n fs: FileSystem\n file_path: str\n Relative path to set as `file_path` in the metadata.\n\n Returns\n -------\n A metadata object. The specific type should be recognized\n by the aggregate_metadata method.\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def aggregate_metadata(cls, meta_list, fs, out_path):\n \"\"\"\n Aggregate a list of metadata objects and optionally\n write out the final result as a _metadata file.\n\n Parameters\n ----------\n meta_list: list\n List of metadata objects to be aggregated into a single\n metadata object, and optionally written to disk. The\n specific element type can be engine specific.\n fs: FileSystem\n out_path: str or None\n Directory to write the final _metadata file. 
If None\n is specified, the aggregated metadata will be returned,\n and nothing will be written to disk.\n\n Returns\n -------\n If out_path is None, an aggregate metadata object is returned.\n Otherwise, None is returned.\n \"\"\"\n raise NotImplementedError()\n\n\ndef _parse_pandas_metadata(pandas_metadata):\n \"\"\"Get the set of names from the pandas metadata section\n\n Parameters\n ----------\n pandas_metadata : dict\n Should conform to the pandas parquet metadata spec\n\n Returns\n -------\n index_names : list\n List of strings indicating the actual index names\n column_names : list\n List of strings indicating the actual column names\n storage_name_mapping : dict\n Pairs of storage names (e.g. the field names for\n PyArrow) and actual names. The storage and field names will\n differ for index names for certain writers (pyarrow > 0.8).\n column_indexes_names : list\n The names for ``df.columns.name`` or ``df.columns.names`` for\n a MultiIndex in the columns\n\n Notes\n -----\n This should support metadata written by at least\n\n * fastparquet>=0.1.3\n * pyarrow>=0.7.0\n \"\"\"\n index_storage_names = [\n n[\"name\"] if isinstance(n, dict) else n\n for n in pandas_metadata[\"index_columns\"]\n ]\n index_name_xpr = re.compile(r\"__index_level_\\d+__\")\n\n # older metadatas will not have a 'field_name' field so we fall back\n # to the 'name' field\n pairs = [\n (x.get(\"field_name\", x[\"name\"]), x[\"name\"]) for x in pandas_metadata[\"columns\"]\n ]\n\n # Need to reconcile storage and real names. These will differ for\n # pyarrow, which uses __index_leveL_d__ for the storage name of indexes.\n # The real name may be None (e.g. `df.index.name` is None).\n pairs2 = []\n for storage_name, real_name in pairs:\n if real_name and index_name_xpr.match(real_name):\n real_name = None\n pairs2.append((storage_name, real_name))\n index_names = [name for (storage_name, name) in pairs2 if name != storage_name]\n\n # column_indexes represents df.columns.name\n # It was added to the spec after pandas 0.21.0+, and implemented\n # in PyArrow 0.8. It was added to fastparquet in 0.3.1.\n column_index_names = pandas_metadata.get(\"column_indexes\", [{\"name\": None}])\n column_index_names = [x[\"name\"] for x in column_index_names]\n\n # Now we need to disambiguate between columns and index names. PyArrow\n # 0.8.0+ allows for duplicates between df.index.names and df.columns\n if not index_names:\n # For PyArrow < 0.8, Any fastparquet. This relies on the facts that\n # 1. Those versions used the real index name as the index storage name\n # 2. Those versions did not allow for duplicate index / column names\n # So we know that if a name is in index_storage_names, it must be an\n # index name\n if index_storage_names and isinstance(index_storage_names[0], dict):\n # Cannot handle dictionary case\n index_storage_names = []\n index_names = list(index_storage_names) # make a copy\n index_storage_names2 = set(index_storage_names)\n column_names = [\n name for (storage_name, name) in pairs if name not in index_storage_names2\n ]\n else:\n # For newer PyArrows the storage names differ from the index names\n # iff it's an index level. 
Though this is a fragile assumption for\n # other systems...\n column_names = [name for (storage_name, name) in pairs2 if name == storage_name]\n\n storage_name_mapping = dict(pairs2) # TODO: handle duplicates gracefully\n\n return index_names, column_names, storage_name_mapping, column_index_names\n\n\ndef _normalize_index_columns(user_columns, data_columns, user_index, data_index):\n \"\"\"Normalize user and file-provided column and index names\n\n Parameters\n ----------\n user_columns : None, str or list of str\n data_columns : list of str\n user_index : None, str, or list of str\n data_index : list of str\n\n Returns\n -------\n column_names : list of str\n index_names : list of str\n \"\"\"\n specified_columns = user_columns is not None\n specified_index = user_index is not None\n\n if user_columns is None:\n user_columns = list(data_columns)\n elif isinstance(user_columns, str):\n user_columns = [user_columns]\n else:\n user_columns = list(user_columns)\n\n if user_index is None:\n user_index = data_index\n elif user_index is False:\n # When index is False, use no index and all fields should be treated as\n # columns (unless `columns` provided).\n user_index = []\n data_columns = data_index + data_columns\n elif isinstance(user_index, str):\n user_index = [user_index]\n else:\n user_index = list(user_index)\n\n if specified_index and not specified_columns:\n # Only `index` provided. Use specified index, and all column fields\n # that weren't specified as indices\n index_names = user_index\n column_names = [x for x in data_columns if x not in index_names]\n elif specified_columns and not specified_index:\n # Only `columns` provided. Use specified columns, and all index fields\n # that weren't specified as columns\n column_names = user_columns\n index_names = [x for x in data_index if x not in column_names]\n elif specified_index and specified_columns:\n # Both `index` and `columns` provided. 
Use as specified, but error if\n # they intersect.\n column_names = user_columns\n index_names = user_index\n if set(column_names).intersection(index_names):\n raise ValueError(\"Specified index and column names must not intersect\")\n else:\n # Use default columns and index from the metadata\n column_names = data_columns\n index_names = data_index\n\n return column_names, index_names\n\n\ndef _sort_and_analyze_paths(file_list, fs, root=False):\n file_list = sorted(file_list, key=natural_sort_key)\n base, fns = _analyze_paths(file_list, fs, root=root)\n return file_list, base, fns\n\n\ndef _analyze_paths(file_list, fs, root=False):\n \"\"\"Consolidate list of file-paths into parquet relative paths\n\n Note: This function was mostly copied from dask/fastparquet to\n use in both `FastParquetEngine` and `ArrowEngine`.\"\"\"\n\n def _join_path(*path):\n def _scrub(i, p):\n # Convert path to standard form\n # this means windows path separators are converted to linux\n p = p.replace(fs.sep, \"/\")\n if p == \"\": # empty path is assumed to be a relative path\n return \".\"\n if p[-1] == \"/\": # trailing slashes are not allowed\n p = p[:-1]\n if i > 0 and p[0] == \"/\": # only the first path can start with /\n p = p[1:]\n return p\n\n abs_prefix = \"\"\n if path and path[0]:\n if path[0][0] == \"/\":\n abs_prefix = \"/\"\n path = list(path)\n path[0] = path[0][1:]\n elif fs.sep == \"\\\\\" and path[0][1:].startswith(\":/\"):\n # If windows, then look for the \"c:/\" prefix\n abs_prefix = path[0][0:3]\n path = list(path)\n path[0] = path[0][3:]\n\n _scrubbed = []\n for i, p in enumerate(path):\n _scrubbed.extend(_scrub(i, p).split(\"/\"))\n simpler = []\n for s in _scrubbed:\n if s == \".\":\n pass\n elif s == \"..\":\n if simpler:\n if simpler[-1] == \"..\":\n simpler.append(s)\n else:\n simpler.pop()\n elif abs_prefix:\n raise Exception(\"can not get parent of root\")\n else:\n simpler.append(s)\n else:\n simpler.append(s)\n\n if not simpler:\n if abs_prefix:\n joined = abs_prefix\n else:\n joined = \".\"\n else:\n joined = abs_prefix + (\"/\".join(simpler))\n return joined\n\n path_parts_list = [_join_path(fn).split(\"/\") for fn in file_list]\n if root is False:\n basepath = path_parts_list[0][:-1]\n for i, path_parts in enumerate(path_parts_list):\n j = len(path_parts) - 1\n for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):\n if base_part != path_part:\n j = k\n break\n basepath = basepath[:j]\n l = len(basepath)\n else:\n basepath = _join_path(root).split(\"/\")\n l = len(basepath)\n assert all(\n p[:l] == basepath for p in path_parts_list\n ), \"All paths must begin with the given root\"\n out_list = []\n for path_parts in path_parts_list:\n out_list.append(\n \"/\".join(path_parts[l:])\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n return (\n \"/\".join(basepath),\n out_list,\n ) # use '/'.join() instead of _join_path to be consistent with split('/')\n\n\ndef _flatten_filters(filters):\n \"\"\"Flatten DNF-formatted filters (list of tuples)\"\"\"\n return (\n set(flatten(tuple(flatten(filters, container=list)), container=tuple))\n if filters\n else []\n )\n\n\ndef _aggregate_stats(\n file_path,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n):\n \"\"\"Utility to aggregate the statistics for N row-groups\n into a single dictionary.\n\n Used by `Engine._construct_parts`\n \"\"\"\n if len(file_row_group_stats) < 1:\n # Empty statistics\n return {}\n elif len(file_row_group_column_stats) == 0:\n assert 
len(file_row_group_stats) == 1\n return file_row_group_stats[0]\n else:\n # Note: It would be better to avoid df_rgs and df_cols\n # construction altogether. It makes it fast to aggregate\n # the statistics for many row groups, but isn't\n # worthwhile for a small number of row groups.\n if len(file_row_group_stats) > 1:\n df_rgs = pd.DataFrame(file_row_group_stats)\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": df_rgs[\"num-rows\"].sum(),\n \"total_byte_size\": df_rgs[\"total_byte_size\"].sum(),\n \"columns\": [],\n }\n else:\n s = {\n \"file_path_0\": file_path,\n \"num-rows\": file_row_group_stats[0][\"num-rows\"],\n \"total_byte_size\": file_row_group_stats[0][\"total_byte_size\"],\n \"columns\": [],\n }\n\n df_cols = None\n if len(file_row_group_column_stats) > 1:\n df_cols = pd.DataFrame(file_row_group_column_stats)\n for ind, name in enumerate(stat_col_indices):\n i = ind * 2\n if df_cols is None:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": file_row_group_column_stats[0][i],\n \"max\": file_row_group_column_stats[0][i + 1],\n }\n )\n else:\n s[\"columns\"].append(\n {\n \"name\": name,\n \"min\": df_cols.iloc[:, i].min(),\n \"max\": df_cols.iloc[:, i + 1].max(),\n }\n )\n return s\n\n\ndef _row_groups_to_parts(\n gather_statistics,\n split_row_groups,\n file_row_groups,\n file_row_group_stats,\n file_row_group_column_stats,\n stat_col_indices,\n make_part_func,\n make_part_kwargs,\n):\n\n # Construct `parts` and `stats`\n parts = []\n stats = []\n if split_row_groups:\n # Create parts from each file,\n # limiting the number of row_groups in each piece\n split_row_groups = int(split_row_groups)\n for filename, row_groups in file_row_groups.items():\n row_group_count = len(row_groups)\n for i in range(0, row_group_count, split_row_groups):\n i_end = i + split_row_groups\n rg_list = row_groups[i:i_end]\n\n part = make_part_func(\n filename,\n rg_list,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename][i:i_end],\n file_row_group_column_stats[filename][i:i_end],\n stat_col_indices,\n )\n stats.append(stat)\n else:\n for filename, row_groups in file_row_groups.items():\n\n part = make_part_func(\n filename,\n row_groups,\n **make_part_kwargs,\n )\n if part is None:\n continue\n\n parts.append(part)\n if gather_statistics:\n stat = _aggregate_stats(\n filename,\n file_row_group_stats[filename],\n file_row_group_column_stats[filename],\n stat_col_indices,\n )\n stats.append(stat)\n\n return parts, stats\n", "import operator\n\nimport numpy as np\nimport pytest\n\nimport dask.array as da\nfrom dask.array import Array\nfrom dask.array.chunk_types import is_valid_array_chunk, is_valid_chunk_type\nfrom dask.array.utils import assert_eq\n\n\ndef wrap(func_name):\n \"\"\"\n Wrap a function.\n \"\"\"\n\n def wrapped(self, *a, **kw):\n a = getattr(self.arr, func_name)(*a, **kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n return wrapped\n\n\ndef dispatch_property(prop_name):\n \"\"\"\n Wrap a simple property.\n \"\"\"\n\n @property\n def wrapped(self, *a, **kw):\n return getattr(self.arr, prop_name)\n\n return wrapped\n\n\nclass EncapsulateNDArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n A class that \"mocks\" ndarray by encapsulating an ndarray and using\n protocols to \"look like\" an ndarray. 
Basically tests whether Dask\n works fine with something that is essentially an array but uses\n protocols instead of being an actual array. Must be manually\n registered as a valid chunk type to be considered a downcast type\n of Dask array in the type casting hierarchy.\n \"\"\"\n\n __array_priority__ = 20\n\n def __init__(self, arr):\n self.arr = arr\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)\n\n def __array_function__(self, f, t, arrs, kw):\n if not all(\n issubclass(ti, (type(self), np.ndarray) + np.ScalarType) for ti in t\n ):\n return NotImplemented\n arrs = tuple(\n arr if not isinstance(arr, type(self)) else arr.arr for arr in arrs\n )\n t = tuple(ti for ti in t if not issubclass(ti, type(self)))\n print(t)\n a = self.arr.__array_function__(f, t, arrs, kw)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n __getitem__ = wrap(\"__getitem__\")\n\n __setitem__ = wrap(\"__setitem__\")\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n if not all(\n isinstance(i, (type(self), np.ndarray) + np.ScalarType) for i in inputs\n ):\n return NotImplemented\n inputs = tuple(i if not isinstance(i, type(self)) else i.arr for i in inputs)\n a = getattr(ufunc, method)(*inputs, **kwargs)\n return a if not isinstance(a, np.ndarray) else type(self)(a)\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n astype = wrap(\"astype\")\n sum = wrap(\"sum\")\n prod = wrap(\"prod\")\n reshape = wrap(\"reshape\")\n\n\nda.register_chunk_type(EncapsulateNDArray)\n\n\nclass WrappedArray(np.lib.mixins.NDArrayOperatorsMixin):\n \"\"\"\n Another mock duck array class (like EncapsulateNDArray), but\n designed to be above Dask in the type casting hierarchy (that is,\n WrappedArray wraps Dask Array) and be even more minimal in API.\n Tests that Dask defers properly to upcast types.\n \"\"\"\n\n def __init__(self, arr, **attrs):\n self.arr = arr\n self.attrs = attrs\n\n def __array__(self, *args, **kwargs):\n return np.asarray(self.arr, *args, **kwargs)\n\n def _downcast_args(self, args):\n for arg in args:\n if isinstance(arg, type(self)):\n yield arg.arr\n elif isinstance(arg, (tuple, list)):\n yield type(arg)(self._downcast_args(arg))\n else:\n yield arg\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n inputs = tuple(self._downcast_args(inputs))\n return type(self)(getattr(ufunc, method)(*inputs, **kwargs), **self.attrs)\n\n def __array_function__(self, func, types, args, kwargs):\n args = tuple(self._downcast_args(args))\n return type(self)(func(*args, **kwargs), **self.attrs)\n\n def __dask_graph__(self):\n # Note: make sure that dask dusk arrays do not interfere with the\n # dispatch mechanism. 
The return value here, doesn't matter.\n return ...\n\n shape = dispatch_property(\"shape\")\n ndim = dispatch_property(\"ndim\")\n dtype = dispatch_property(\"dtype\")\n\n def __getitem__(self, key):\n return type(self)(self.arr[key], **self.attrs)\n\n def __setitem__(self, key, value):\n self.arr[key] = value\n\n\[email protected](\n \"op\",\n [\n operator.add,\n operator.eq,\n operator.gt,\n operator.ge,\n operator.lt,\n operator.le,\n operator.mod,\n operator.mul,\n operator.ne,\n operator.pow,\n operator.sub,\n operator.truediv,\n operator.floordiv,\n np.add,\n np.subtract,\n ],\n)\[email protected](\n \"arr_upcast, arr_downcast\",\n [\n (\n WrappedArray(np.random.random((10, 10))),\n da.random.random((10, 10), chunks=(5, 5)),\n ),\n (\n da.random.random((10, 10), chunks=(5, 5)),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n (\n WrappedArray(np.random.random((10, 10))),\n EncapsulateNDArray(np.random.random((10, 10))),\n ),\n ],\n)\ndef test_binary_operation_type_precedence(op, arr_upcast, arr_downcast):\n \"\"\" Test proper dispatch on binary operators and NumPy ufuncs\"\"\"\n assert (\n type(op(arr_upcast, arr_downcast))\n == type(op(arr_downcast, arr_upcast))\n == type(arr_upcast)\n )\n\n\[email protected](\n \"arr, result\",\n [\n (WrappedArray(np.arange(4)), False),\n (da.from_array(np.arange(4)), False),\n (EncapsulateNDArray(np.arange(4)), True),\n (np.ma.masked_array(np.arange(4), [True, False, True, False]), True),\n (np.arange(4), True),\n (None, True),\n # float/int/str scalars are not valid array chunks,\n # but ops on float/int/str etc scalars do get handled\n # by Dask\n (0.0, False),\n (0, False),\n (\"\", False),\n ],\n)\ndef test_is_valid_array_chunk(arr, result):\n \"\"\" Test is_valid_array_chunk for correctness\"\"\"\n assert is_valid_array_chunk(arr) is result\n\n\[email protected](\n \"arr_type, result\",\n [\n (WrappedArray, False),\n (da.Array, False),\n (EncapsulateNDArray, True),\n (np.ma.MaskedArray, True),\n (np.ndarray, True),\n (float, False),\n (int, False),\n ],\n)\ndef test_is_valid_chunk_type(arr_type, result):\n \"\"\" Test is_valid_chunk_type for correctness\"\"\"\n assert is_valid_chunk_type(arr_type) is result\n\n\ndef test_direct_deferral_wrapping_override():\n \"\"\" Directly test Dask defering to an upcast type and the ability to still wrap it.\"\"\"\n a = da.from_array(np.arange(4))\n b = WrappedArray(np.arange(4))\n assert a.__add__(b) is NotImplemented\n # Note: remove dask_graph to be able to wrap b in a dask array\n setattr(b, \"__dask_graph__\", None)\n res = a + da.from_array(b)\n assert isinstance(res, da.Array)\n assert_eq(res, 2 * np.arange(4))\n\n\nclass UnknownScalarThatUnderstandsArrayOps(np.lib.mixins.NDArrayOperatorsMixin):\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n outputs = kwargs.get(\"out\", ())\n for item in inputs + outputs:\n if hasattr(item, \"__array_ufunc__\") and not isinstance(\n item, (np.ndarray, Array, UnknownScalarThatUnderstandsArrayOps)\n ):\n return NotImplemented\n # This is a dummy scalar that just returns a new object for every op\n return UnknownScalarThatUnderstandsArrayOps()\n\n\[email protected](\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar_that_understands_arr_ops(arr):\n s = UnknownScalarThatUnderstandsArrayOps()\n assert type(arr * s) == UnknownScalarThatUnderstandsArrayOps\n assert type(s * arr) == UnknownScalarThatUnderstandsArrayOps\n # Explicit tests of numpy NEP-13 dispatching\n assert type(np.multiply(s, arr)) == 
UnknownScalarThatUnderstandsArrayOps\n assert type(np.multiply(arr, s)) == UnknownScalarThatUnderstandsArrayOps\n\n\nclass UnknownScalar:\n __array_ufunc__ = None\n\n def __mul__(self, other):\n return 42\n\n __rmul__ = __mul__\n\n\[email protected](\"arr\", [da.from_array([1, 2]), np.asarray([1, 2])])\ndef test_delegation_unknown_scalar(arr):\n s = UnknownScalar()\n assert arr * s == 42\n assert s * arr == 42\n with pytest.raises(\n TypeError, match=\"operand 'UnknownScalar' does not support ufuncs\"\n ):\n np.multiply(s, arr)\n\n\ndef test_delegation_specific_cases():\n a = da.from_array([\"a\", \"b\", \".\", \"d\"])\n # Fixes GH6631\n assert_eq(a == \".\", [False, False, True, False])\n assert_eq(\".\" == a, [False, False, True, False])\n # Fixes GH6611\n assert \"b\" in a\n" ]
[ [ "pandas.DataFrame" ], [ "numpy.asarray", "numpy.arange", "numpy.random.random", "numpy.multiply" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isaiahnixon/python-google-trends
[ "7d8535885bf4e39c0954172bfe0dae1451c8007a" ]
[ "plot-data.py" ]
[ "import matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom datetime import datetime as dt\nimport pandas as pd \nimport numpy as np\n\n# Load the csv\ndf1 = pd.read_csv('data/aggregate-daily-values.csv')\ndf2 = pd.read_csv('data/aggregate-daily-values-covid-19.csv')\ndf3 = pd.read_csv('data/aggregate-daily-values-cats.csv')\n\ntime_values1 = []\ninterest_values1 = []\n\ntime_values2 = []\ninterest_values2 = []\n\ntime_values3 = []\ninterest_values3 = []\n\nfor i, row in df1.iterrows():\n\ttime_values1.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values1.append(row['interest'])\n\nfor i, row in df2.iterrows():\n\ttime_values2.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values2.append(row['interest'])\n\nfor i, row in df3.iterrows():\n\ttime_values3.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d'))) #dates\n\t# time_values.append(mdates.date2num(dt.strptime(row['date'], '%Y-%m-%d %H:%M:%S'))) #datetime\n\tinterest_values3.append(row['interest'])\n\nfig, ax = plt.subplots()\n\nax.set_title('Interest in COVID-19 (Google Trends)')\nax.set_ylabel('Interest')\nax.plot_date(time_values1, interest_values1, linestyle='-', marker='o', color='blue', label='Toilet Paper')\nax.plot_date(time_values2, interest_values2, linestyle='-', marker='o', color='black', label='COVID-19')\nax.plot_date(time_values3, interest_values3, linestyle='-', marker='o', color='red', label='Cats')\n\n\n# For data within the same day.\n#hfmt = mdates.DateFormatter('%H:%M:%S')\n#ax.set_xlabel('Time')\n\n# For larger data sets.\nhfmt = mdates.DateFormatter('%b %d %Y')\nax.set_xlabel('Date')\n\nax.xaxis.set_major_formatter(hfmt)\nplt.gcf().autofmt_xdate()\nplt.legend()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.dates.DateFormatter", "pandas.read_csv", "matplotlib.pyplot.subplots", "matplotlib.pyplot.gcf", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
slettner/jina
[ "4140961c62359e3acd540a6d88931665c6313824", "4140961c62359e3acd540a6d88931665c6313824" ]
[ "tests/unit/drivers/test_kv_search_driver.py", "tests/unit/flow/test_flow_multimode.py" ]
[ "from typing import Optional, Iterable\n\nimport numpy as np\nimport pytest\n\nfrom jina import Document, DocumentArray\nfrom jina.drivers.search import KVSearchDriver\nfrom jina.executors.indexers import BaseKVIndexer\nfrom jina.types.ndarray.generic import NdArray\n\n\nclass MockIndexer(BaseKVIndexer):\n def add(\n self, keys: Iterable[str], values: Iterable[bytes], *args, **kwargs\n ) -> None:\n pass\n\n def query(self, keys: Iterable[str]) -> Optional[bytes]:\n values = []\n for k in keys:\n values.append(self.db.get(k, None))\n return values\n\n def get_query_handler(self):\n pass\n\n def get_add_handler(self):\n pass\n\n def get_create_handler(self):\n pass\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.db = {}\n doc_ids = ['1', '2', '3', '4']\n doc_ids = [item * 16 for item in doc_ids]\n for doc_id in doc_ids:\n with Document() as doc:\n doc.id = doc_id\n doc.embedding = np.array([doc.id])\n self.db[doc.id] = doc.SerializeToString()\n\n\nclass SimpleKVSearchDriver(KVSearchDriver):\n def __init__(self, docs=None, traversal_paths=['r'], *args, **kwargs):\n super().__init__(traversal_paths=traversal_paths, *args, **kwargs)\n self._docs = docs\n\n @property\n def docs(self):\n return self._docs\n\n @property\n def exec_fn(self):\n return self._exec_fn\n\n\[email protected](scope='function')\ndef document():\n # 1-D embedding\n # doc: 0\n # - chunk: 1\n # - chunk: 2\n # - chunk: 3\n # - chunk: 4\n # - chunk: 5 - will be missing from KV indexer\n doc = Document()\n doc.id = '0' * 16\n for c in range(5):\n with Document() as chunk:\n chunk.id = str(c + 1) * 16\n doc.chunks.add(chunk)\n return doc\n\n\[email protected](scope='function')\ndef document_with_matches_on_chunks():\n # 1-D embedding\n # doc: 0\n # - chunk: 1\n # - match: 2\n # - match: 3\n # - match: 4\n # - match: 5 - will be missing from KV indexer\n # - match: 6 - will be missing from KV indexer\n with Document() as doc:\n doc.id = '0' * 16\n with Document() as chunk:\n chunk.id = '1' * 16\n for m in range(5):\n with Document() as match:\n match.id = str(m + 2) * 16\n match.score.value = 1.0\n chunk.matches.append(match)\n doc.chunks.append(chunk)\n return doc\n\n\ndef test_vectorsearch_driver_mock_indexer_apply_all(document):\n driver = SimpleKVSearchDriver()\n\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n dcs = list(document.chunks)\n assert len(dcs) == 5\n for chunk in dcs:\n assert chunk.embedding is None\n\n driver._apply_all([DocumentArray(document.chunks)])\n\n dcs = list(document.chunks)\n\n # chunk idx: 5 had no matched and is removed as missing idx\n assert len(dcs) == 4\n for chunk in dcs:\n assert chunk.embedding is not None\n embedding_array = chunk.embedding\n np.testing.assert_equal(embedding_array, np.array([chunk.id]))\n\n\ndef test_vectorsearch_driver_mock_indexer(document):\n dcs = list(document.chunks)\n assert len(dcs) == 5\n for chunk in dcs:\n assert chunk.embedding is None\n\n driver = SimpleKVSearchDriver(\n docs=DocumentArray([document]), traversal_paths=('c',)\n )\n\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n driver()\n\n # chunk idx: 5 had no matched and is removed as missing idx\n dcs = list(document.chunks)\n assert len(dcs) == 4\n for chunk in dcs:\n assert chunk.embedding is not None\n embedding_array = chunk.embedding\n np.testing.assert_equal(embedding_array, np.array([chunk.id]))\n\n\ndef test_vectorsearch_driver_mock_indexer_with_matches_on_chunks(\n 
document_with_matches_on_chunks,\n):\n driver = SimpleKVSearchDriver(\n docs=DocumentArray([document_with_matches_on_chunks]), traversal_paths=('cm',)\n )\n executor = MockIndexer()\n driver.attach(executor=executor, runtime=None)\n\n driver()\n\n dcs = list(document_with_matches_on_chunks.chunks)\n assert len(dcs) == 1\n chunk = dcs[0]\n matches = list(chunk.matches)\n assert len(matches) == 3\n for match in matches:\n assert NdArray(match.embedding).value is not None\n embedding_array = NdArray(match.embedding).value\n np.testing.assert_equal(embedding_array, np.array([match.id]))\n", "import os\nfrom typing import List, Dict\n\nimport pytest\nimport numpy as np\n\nfrom jina.executors.segmenters import BaseSegmenter\nfrom jina.executors.encoders import BaseEncoder\nfrom jina.executors.indexers.keyvalue import BinaryPbIndexer\nfrom jina.executors.decorators import single\nfrom jina.flow import Flow\nfrom jina.proto import jina_pb2\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\n\n\nclass MockSegmenter(BaseSegmenter):\n @single\n def segment(self, text: str, *args, **kwargs) -> List[Dict]:\n split = text.split(',')\n chunks = [\n dict(text=split[0], offset=0, weight=1.0, modality='mode1'),\n dict(text=split[1], offset=1, weight=1.0, modality='mode2'),\n ]\n return chunks\n\n\nclass MockEncoder(BaseEncoder):\n def encode(self, content: 'np.ndarray', *args, **kwargs) -> 'np.ndarray':\n output = []\n for r in content:\n if \"mode1\" in r:\n output.append([0.0, 0.0, 0.0])\n elif \"mode2\" in r:\n output.append([1.0, 1.0, 1.0])\n\n return np.array(output)\n\n\[email protected]('restful', [False, True])\ndef test_flow_with_modalities(tmpdir, restful):\n os.environ['JINA_TEST_FLOW_MULTIMODE_WORKSPACE'] = str(tmpdir)\n\n def input_function():\n doc1 = jina_pb2.DocumentProto()\n doc1.text = 'title: this is mode1 from doc1, body: this is mode2 from doc1'\n doc1.id = '1'\n\n doc2 = jina_pb2.DocumentProto()\n doc2.text = 'title: this is mode1 from doc2, body: this is mode2 from doc2'\n doc2.id = '2'\n\n doc3 = jina_pb2.DocumentProto()\n doc3.text = 'title: this is mode1 from doc3, body: this is mode2 from doc3'\n doc3.id = '3'\n\n return [doc1, doc2, doc3]\n\n flow = (\n Flow(restful=restful)\n .add(name='segmenter', uses='!MockSegmenter')\n .add(name='encoder1', uses=os.path.join(cur_dir, 'yaml/mockencoder-mode1.yml'))\n .add(\n name='indexer1',\n uses=os.path.join(cur_dir, 'yaml/numpy-indexer-1.yml'),\n needs=['encoder1'],\n )\n .add(\n name='encoder2',\n uses=os.path.join(cur_dir, 'yaml/mockencoder-mode2.yml'),\n needs=['segmenter'],\n )\n .add(name='indexer2', uses=os.path.join(cur_dir, 'yaml/numpy-indexer-2.yml'))\n .join(['indexer1', 'indexer2'])\n )\n\n with flow:\n flow.index(inputs=input_function)\n\n with open(os.path.join(tmpdir, 'compound', 'vecidx1-0', 'vec1.gz'), 'rb') as fp:\n result = np.frombuffer(fp.read(), dtype='float').reshape([-1, 3])\n np.testing.assert_equal(\n result, np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])\n )\n\n with open(os.path.join(tmpdir, 'compound', 'vecidx2-0', 'vec2.gz'), 'rb') as fp:\n result = np.frombuffer(fp.read(), dtype='float').reshape([-1, 3])\n np.testing.assert_equal(\n result, np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\n )\n\n chunkIndexer1 = BinaryPbIndexer.load(\n os.path.join(tmpdir, 'compound', 'kvidx1-0', 'kvidx1.bin')\n )\n assert chunkIndexer1.size == 3\n d_id = list(chunkIndexer1.query_handler.header.keys())[0]\n\n query_doc = jina_pb2.DocumentProto()\n 
query_doc.ParseFromString(chunkIndexer1.query([d_id])[0])\n assert query_doc.text == 'title: this is mode1 from doc1'\n assert query_doc.modality == 'mode1'\n\n chunkIndexer2 = BinaryPbIndexer.load(\n os.path.join(tmpdir, 'compound', 'kvidx2-0', 'kvidx2.bin')\n )\n assert chunkIndexer2.size == 3\n d_id = list(chunkIndexer2.query_handler.header.keys())[0]\n\n query_doc = jina_pb2.DocumentProto()\n query_doc.ParseFromString(chunkIndexer2.query([d_id])[0])\n assert query_doc.text == ' body: this is mode2 from doc1'\n assert query_doc.modality == 'mode2'\n\n del os.environ['JINA_TEST_FLOW_MULTIMODE_WORKSPACE']\n" ]
[ [ "numpy.array" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shkolnick-kun/kalman_h_infinity_filters
[ "4e76c38d91d5cb44e5f43f675aced4b917a5dbfd" ]
[ "EKHFPost.py" ]
[ "# -*- coding: utf-8 -*-\n# pylint: disable=invalid-name,too-many-instance-attributes, too-many-arguments\n\"\"\"\nCopyright 2019 Paul A Beltyukov\nCopyright 2015 Roger R Labbe Jr.\n\nFilterPy library.\nhttp://github.com/rlabbe/filterpy\n\nDocumentation at:\nhttps://filterpy.readthedocs.org\n\nSupporting book at:\nhttps://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python\n\nThis is licensed under an MIT license. See the readme.MD file\nfor more information.\n\"\"\"\nfrom copy import deepcopy\nimport numpy as np\nfrom numpy import dot, eye, outer\nfrom scipy.stats import chi2\nimport scipy.linalg as linalg\nfrom filterpy.kalman import ExtendedKalmanFilter\n\nclass ExtendedKalmanHinfFilterPosterior(ExtendedKalmanFilter):\n def __init__(self, dim_x, dim_z, dim_u=0, alpha = 0.01, eps_mul=1.0):\n ExtendedKalmanFilter.__init__(self, dim_x, dim_z, dim_u)\n self.beta_n = chi2.ppf(1.0 - alpha, dim_z)\n self._eps_mul = eps_mul\n \n def update(self, z, HJacobian, Hx, R=None, args=(), hx_args=(),\n residual=np.subtract):\n \"\"\" Performs the update innovation of the extended Kalman/Hinfinity \n filter with posterior residuals used for Hinfinity correction.\n\n Parameters\n ----------\n\n z : np.array\n measurement for this step.\n If `None`, posterior is not computed\n\n HJacobian : function\n function which computes the Jacobian of the H matrix (measurement\n function). Takes state variable (self.x) as input, returns H.\n\n Hx : function\n function which takes as input the state variable (self.x) along\n with the optional arguments in hx_args, and returns the measurement\n that would correspond to that state.\n\n R : np.array, scalar, or None\n Optionally provide R to override the measurement noise for this\n one call, otherwise self.R will be used.\n\n args : tuple, optional, default (,)\n arguments to be passed into HJacobian after the required state\n variable. for robot localization you might need to pass in\n information about the map and time of day, so you might have\n `args=(map_data, time)`, where the signature of HCacobian will\n be `def HJacobian(x, map, t)`\n\n hx_args : tuple, optional, default (,)\n arguments to be passed into Hx function after the required state\n variable.\n\n residual : function (z, z2), optional\n Optional function that computes the residual (difference) between\n the two measurement vectors. If you do not provide this, then the\n built in minus operator will be used. 
You will normally want to use\n the built in unless your residual computation is nonlinear (for\n example, if they are angles)\n \"\"\"\n if z is None:\n self.z = np.array([[None]*self.dim_z]).T\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n return\n\n if not isinstance(args, tuple):\n args = (args,)\n\n if not isinstance(hx_args, tuple):\n hx_args = (hx_args,)\n\n if R is None:\n R = self.R\n elif np.isscalar(R):\n R = eye(self.dim_z) * R\n\n if np.isscalar(z) and self.dim_z == 1:\n z = np.asarray([z], float)\n \n H = HJacobian(self.x, *args)\n hx = Hx(self.x, *hx_args)\n self.y = residual(z, hx)\n\n PHT = self.P.dot(H.T)\n self.S = H.dot(PHT) + R\n self.SI = linalg.inv(self.S)\n \n #Now we may update self.K, self.P, self.y, self.x\n self.K = PHT.dot(self.SI)\n x = self.x + dot(self.K, self.y)\n \n # P = (I-KH)P(I-KH)' + KRK' is more numerically stable\n # and works for non-optimal K vs the equation\n # P = (I-KH)P usually seen in the literature.\n I_KH = self._I - dot(self.K, H)\n self.P = dot(I_KH, self.P).dot(I_KH.T) + dot(self.K, R).dot(self.K.T)\n \n #Will test for filter divergence\n H_hat = HJacobian(x, *args)\n hx_hat = Hx(x, *hx_args)\n eta = residual(z, hx_hat)\n \n PHT = self.P.dot(H_hat.T)\n S = H_hat.dot(PHT) + R\n SI = linalg.inv(S)\n \n thr = self.beta_n\n if dot(eta.T, dot(SI, eta)) > thr: \n #Divergence detected, H-infinity correction needed\n A = outer(eta, eta.T)/thr - S\n \n H_tilde = dot(H_hat, I_KH)\n PHT = dot(self.P, H_tilde.T)\n C = dot(H_hat, PHT)\n \n D = PHT.dot(linalg.pinv(C))\n newP = self.P + D.dot(A.dot(D.T))\n PHT = newP.dot(H.T)\n newS = H.dot(PHT) + R\n #Check H-infinity correction quality\n ev = np.linalg.eigvals(newS)\n eps = np.finfo(ev.dtype).eps * self._eps_mul\n if np.all(ev > eps * np.max(ev)):\n self.P = newP\n #Recompute self.S and self.SI for debug purposes\n self.S = newS\n self.SI = linalg.inv(self.S)\n #Need to recompute self.K and self.x\n self.K = dot(dot(self.P, H.T), linalg.inv(R))\n x = self.x + dot(self.K, self.y)\n\n self.x = x\n \n # set to None to force recompute\n self._log_likelihood = None\n self._likelihood = None\n self._mahalanobis = None\n\n # save measurement and posterior state\n self.z = deepcopy(z)\n self.x_post = self.x.copy()\n self.P_post = self.P.copy()\n \n def predict_update(self, z, HJacobian, Hx, args=(), hx_args=(), u=0):\n \"\"\" Performs the predict/update innovation of the extended Kalman\n filter.\n\n Parameters\n ----------\n\n z : np.array\n measurement for this step.\n If `None`, only predict step is perfomed.\n\n HJacobian : function\n function which computes the Jacobian of the H matrix (measurement\n function). Takes state variable (self.x) as input, along with the\n optional arguments in args, and returns H.\n\n Hx : function\n function which takes as input the state variable (self.x) along\n with the optional arguments in hx_args, and returns the measurement\n that would correspond to that state.\n\n args : tuple, optional, default (,)\n arguments to be passed into HJacobian after the required state\n variable.\n\n hx_args : tuple, optional, default (,)\n arguments to be passed into Hx after the required state\n variable.\n\n u : np.array or scalar\n optional control vector input to the filter.\n \"\"\"\n self.predict(u)\n self.update(z, HJacobian, Hx, self.R, args, hx_args, residual=np.subtract) \n" ]
[ [ "scipy.stats.chi2.ppf", "numpy.dot", "scipy.linalg.pinv", "numpy.linalg.eigvals", "numpy.asarray", "numpy.eye", "numpy.finfo", "numpy.max", "numpy.isscalar", "scipy.linalg.inv", "numpy.outer", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
Ronak1958/blog
[ "b477bda7641970ed1f1438994aa7a084c921b898" ]
[ "docs/downloads/code/digitize-graph/digitize-data.py" ]
[ "from pynput import mouse\n\nclass MyException(Exception):pass\n\nX = []\nY = []\nNumberOfMouseClicks = 0\nprint('Click Origin')\n\ndef on_click(x, y, button, pressed):\n button = str(button)\n global NumberOfMouseClicks\n\n NumberOfMouseClicks = NumberOfMouseClicks + 1\n if NumberOfMouseClicks==1:\n print('Click Top Right') \n if NumberOfMouseClicks==3:\n print('Click data points. Right-click to end.')\n \n X.append(x)\n Y.append(y)\n \n if button!='Button.left':\n raise MyException(button)\n\n\ndef plot_data(X, Y, Xmin, Xmax, Ymin, Ymax):\n import matplotlib.pyplot as plt\n\n plt.plot(X,Y,'b-')\n plt.xlim((Xmin, Xmax))\n plt.ylim((Ymin, Ymax))\n plt.show() \n\n\ndef main(X,Y):\n with mouse.Listener(on_click=on_click) as listener:\n try:\n listener.join()\n except MyException as e:\n pass\n\n # drop duplicates\n X = X[::2]\n Y = Y[::2]\n\n # input boundaries\n Xmin = float(input('Input X-min: '))\n Xmax = float(input('Input X-max: '))\n Ymin = float(input('Input Y-min: '))\n Ymax = float(input('Input Y-max: '))\n\n # define scales from data\n origin = [X[0],Y[0]]\n topRight = [X[1],Y[1]]\n XminScale = origin[0]\n XmaxScale = topRight[0]\n YminScale = origin[1]\n YmaxScale = topRight[1]\n\n # drop extras\n X = X[2:-1]\n Y = Y[2:-1]\n\n # scale\n ## (old_value - old_min) / (old_max - old_min) * (new_max - new_min) + new_min\n Xplot = [(i - XminScale) / (XmaxScale - XminScale) * (Xmax - Xmin) + Xmin for i in X]\n Yplot = [(i - YminScale) / (YmaxScale - YminScale) * (Ymax - Ymin) + Ymin for i in Y]\n\n # print outputs\n print('Origin: {}'.format([round(i, 2) for i in origin]))\n print('Top Right: {}'.format([round(i, 2) for i in topRight]))\n print('X: {}'.format([round(i, 2) for i in Xplot]))\n print('Y: {}'.format([round(i, 2) for i in Yplot]))\n\n # plot\n plot_data(Xplot, Yplot, Xmin, Xmax, Ymin, Ymax)\n\n\nif __name__ == '__main__':\n main(X,Y)" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.ylim", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Komutsou/StructPy
[ "6b25b369ff14b31dbff4eb2cb4d6c43963ec7b3b" ]
[ "Examples/In progress/CEE213 CP4.py" ]
[ "import numpy as np\nimport cross_sections as xs\n\nxs1 = xs.IBeam(1, 1, 0.1, 0.1)\n\nL = 10\np = 1\nE = 29000\n\ndef constant(x, **kwargs):\n\treturn 1\n\ndef linearup(x, **kwargs):\n\treturn x\n\nload = constant\n\ndef simpsons(f, a, b, n): #function, start, stop, intervals\n\tif n % 2 == 0:\n\t\th = (b-a)/n\n\t\tk = 0.0\n\t\tx = a + h\n\t\t\n\t\tfor i in range(1, int(n/2) + 1):\n\t\t\tk += 4*f(x)\n\t\t\tx += 2*h\n\t\t\n\t\tx = a + 2*h\n\t\tfor i in range(1, n//2):\n\t\t\tk += 2*f(x)\n\t\t\tx += 2*h\n\t\treturn (h/3)*(f(a) + f(b) + k)\n\telse:\n\t\tprint('n must be even')\n\nI0 = lambda x: p * load(x, L=L)\nI1 = lambda x: p * (L-x) * load(x, L=L)\nI2 = lambda x: p * (L-x)**2 * load(x, L=L)\nI3 = lambda x: p * (L-x)**3 * load(x, L=L)\n\nInt0 = simpsons(I0, 0, L, 100)\nInt1 = simpsons(I1, 0, L, 100)\nInt2 = simpsons(I2, 0, L, 100)\nInt3 = -simpsons(I3, 0, L, 100)\n\nz = np.array([Int0, Int1, Int2, Int3])\n\na = L\nb = L/(E * xs1.Ix)\nc = L**2/(2 * E * xs1.Ix)\nd = L**3/(6 * E * xs1.Ix)\n\nB = np.matrix([[1, 0, 0, 0, -1, 0, 0, 0], \n\t\t\t\t\t\t\t [a, 1, 0, 0, 0, -1, 0, 0],\n\t\t\t\t\t\t\t [c, b, 1, 0, 0, 0, -1, 0],\n\t\t\t\t\t\t\t [-d, -c, -a, 1, 0, 0, 0, -1]])\n\t\t\t\t\t\t\t \nfixed = [1, 1, 0, 0]\nfree = [0, 0, 1, 1]\n\nBC = np.array(fixed + free)\n\nC = B[:, BC==1]\n\ns = np.linalg.solve(C, z)\n" ]
[ [ "numpy.matrix", "numpy.array", "numpy.linalg.solve" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
kiminh/tequila
[ "464085265e125222c63e65446861e9c0a2428bab" ]
[ "src/tequila/quantumchemistry/qc_base.py" ]
[ "from dataclasses import dataclass\nfrom tequila import TequilaException, BitString, TequilaWarning\nfrom tequila.hamiltonian import QubitHamiltonian\n\nfrom tequila.circuit import QCircuit, gates\nfrom tequila.objective.objective import Variable, Variables, ExpectationValue\n\nfrom tequila.simulators.simulator_api import simulate\nfrom tequila.utils import to_float\n\nimport typing, numpy, numbers\nfrom itertools import product\n\nimport openfermion\nfrom openfermion.hamiltonians import MolecularData\n\nimport warnings\n\n\ndef prepare_product_state(state: BitString) -> QCircuit:\n \"\"\"Small convenience function\n\n Parameters\n ----------\n state :\n product state encoded into a bitstring\n state: BitString :\n \n\n Returns\n -------\n type\n unitary circuit which prepares the product state\n\n \"\"\"\n result = QCircuit()\n for i, v in enumerate(state.array):\n if v == 1:\n result += gates.X(target=i)\n return result\n\n\n@dataclass\nclass ParametersQC:\n \"\"\"Specialization of ParametersHamiltonian\"\"\"\n basis_set: str = '' # Quantum chemistry basis set\n geometry: str = '' # geometry of the underlying molecule (units: Angstrom!),\n # this can be a filename leading to an .xyz file or the geometry given as a string\n description: str = ''\n multiplicity: int = 1\n charge: int = 0\n closed_shell: bool = True\n name: str = \"molecule\"\n\n @property\n def filename(self):\n \"\"\" \"\"\"\n return \"{}_{}\".format(self.name, self.basis_set)\n\n @property\n def molecular_data_param(self) -> dict:\n \"\"\":return: Give back all parameters for the MolecularData format from openfermion as dictionary\"\"\"\n return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,\n 'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename\n }\n\n @staticmethod\n def format_element_name(string):\n \"\"\"OpenFermion uses case sensitive hash tables for chemical elements\n I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work\n this convenience function does the naming\n :return: first letter converted to upper rest to lower\n\n Parameters\n ----------\n string :\n \n\n Returns\n -------\n\n \"\"\"\n assert (len(string) > 0)\n assert (isinstance(string, str))\n fstring = string[0].upper() + string[1:].lower()\n return fstring\n\n @staticmethod\n def convert_to_list(geometry):\n \"\"\"Convert a molecular structure given as a string into a list suitable for openfermion\n\n Parameters\n ----------\n geometry :\n a string specifying a mol. structure. E.g. 
geometry=\"h 0.0 0.0 0.0\\n h 0.0 0.0 1.0\"\n\n Returns\n -------\n type\n A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]\n\n \"\"\"\n result = []\n for line in geometry.split('\\n'):\n words = line.split()\n if len(words) != 4: break\n try:\n tmp = (ParametersQC.format_element_name(words[0]),\n (float(words[1]), float(words[2]), float(words[3])))\n result.append(tmp)\n except ValueError:\n print(\"get_geometry list unknown line:\\n \", line, \"\\n proceed with caution!\")\n return result\n\n def get_geometry_string(self) -> str:\n \"\"\"returns the geometry as a string\n :return: geometry string\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if self.geometry.split('.')[-1] == 'xyz':\n geomstring, comment = self.read_xyz_from_file(self.geometry)\n if comment is not None:\n self.description = comment\n return geomstring\n else:\n return self.geometry\n\n def get_geometry(self):\n \"\"\"Returns the geometry\n If a xyz filename was given the file is read out\n otherwise it is assumed that the geometry was given as string\n which is then reformatted as a list usable as input for openfermion\n :return: geometry as list\n e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]\n Units: Angstrom!\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if self.geometry.split('.')[-1] == 'xyz':\n geomstring, comment = self.read_xyz_from_file(self.geometry)\n if self.description == '':\n self.description = comment\n if self.name == \"molecule\":\n self.name = self.geometry.split('.')[0]\n return self.convert_to_list(geomstring)\n elif self.geometry is not None:\n return self.convert_to_list(self.geometry)\n else:\n raise Exception(\"Parameters.qc.geometry is None\")\n\n @staticmethod\n def read_xyz_from_file(filename):\n \"\"\"Read XYZ filetype for molecular structures\n https://en.wikipedia.org/wiki/XYZ_file_format\n Units: Angstrom!\n\n Parameters\n ----------\n filename :\n return:\n\n Returns\n -------\n\n \"\"\"\n with open(filename, 'r') as file:\n content = file.readlines()\n natoms = int(content[0])\n comment = str(content[1]).strip('\\n')\n coord = ''\n for i in range(natoms):\n coord += content[2 + i]\n return coord, comment\n\n\n@dataclass\nclass ClosedShellAmplitudes:\n \"\"\" \"\"\"\n tIjAb: numpy.ndarray = None\n tIA: numpy.ndarray = None\n\n def make_parameter_dictionary(self, threshold=1.e-8):\n \"\"\"\n\n Parameters\n ----------\n threshold :\n (Default value = 1.e-8)\n\n Returns\n -------\n\n \"\"\"\n variables = {}\n if self.tIjAb is not None:\n nvirt = self.tIjAb.shape[2]\n nocc = self.tIjAb.shape[0]\n assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)\n for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(nocc + A, I, nocc + B, J)] = value\n if self.tIA is not None:\n nocc = self.tIA.shape[0]\n for (I, A), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(A + nocc, I)] = value\n\n return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))\n\n\n@dataclass\nclass Amplitudes:\n \"\"\"Coupled-Cluster Amplitudes\n We adopt the Psi4 notation for consistency\n I,A for alpha\n i,a for beta\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n @classmethod\n def from_closed_shell(cls, cs: ClosedShellAmplitudes):\n \"\"\"\n Initialize from closed-shell Amplitude structure\n\n Parameters\n ----------\n cs: ClosedShellAmplitudes :\n \n\n Returns\n -------\n\n 
\"\"\"\n tijab = cs.tIjAb - numpy.einsum(\"ijab -> ijba\", cs.tIjAb, optimize='greedy')\n return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)\n\n tIjAb: numpy.ndarray = None\n tIA: numpy.ndarray = None\n tiJaB: numpy.ndarray = None\n tijab: numpy.ndarray = None\n tIJAB: numpy.ndarray = None\n tia: numpy.ndarray = None\n\n def make_parameter_dictionary(self, threshold=1.e-8):\n \"\"\"\n\n Parameters\n ----------\n threshold :\n (Default value = 1.e-8)\n Neglect amplitudes below the threshold\n\n Returns\n -------\n Dictionary of tequila variables (hash is in the style of (a,i,b,j))\n\n \"\"\"\n variables = {}\n if self.tIjAb is not None:\n nvirt = self.tIjAb.shape[2]\n nocc = self.tIjAb.shape[0]\n assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)\n\n for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, j + 1)] = value\n for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), J)] = value\n for (i, j, a, b), value in numpy.ndenumerate(self.tijab):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, j + 1)] = value\n for (I, J, A, B), value in numpy.ndenumerate(self.tijab):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), J)] = value\n\n if self.tIA is not None:\n nocc = self.tIjAb.shape[0]\n assert (self.tia.shape[0] == nocc)\n for (I, A), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (A + nocc), 2 * I)] = value\n for (i, a), value, in numpy.ndenumerate(self.tIA):\n if not numpy.isclose(value, 0.0, atol=threshold):\n variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value\n\n return variables\n\n\nclass NBodyTensor:\n \"\"\" Convenience class for handling N-body tensors \"\"\"\n\n def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, scheme: str = None,\n size_full: int = None):\n\n # Set elements\n self.elems = elems\n # Active indices only as list of indices (e.g. 
spatial orbital indices), not as a dictionary of irreducible\n # representations\n if active_indices is not None:\n self.active_indices = active_indices\n self._passive_indices = None\n self._full_indices = None\n self._indices_set: bool = False\n\n # Determine order of tensor\n # Assume, that tensor is entered in desired shape, not as flat array.\n self.order = len(self.elems.shape)\n # Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well\n if size_full is None:\n self._size_full = self.elems.shape[0]\n else:\n self._size_full = size_full\n # 2-body tensors (<=> order 4) currently allow reordering\n if self.order == 4:\n if scheme is None:\n self.scheme = 'chem'\n else:\n self.scheme = scheme.lower()\n else:\n if scheme is not None:\n raise Exception(\"Ordering only implemented for tensors of order 4 / 2-body tensors.\")\n self.scheme = None\n\n def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:\n \"\"\"\n Get subspace of tensor by a set of index lists\n according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]\n\n This essentially is an implementation of a non-contiguous slicing using numpy.take\n\n Parameters\n ----------\n idx_lists :\n List of lists, each defining the desired subspace per axis\n Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N\n\n Returns\n -------\n out :\n Sliced tensor as numpy.ndarray\n \"\"\"\n # Check if index list has correct size\n if len(idx_lists) != self.order:\n raise Exception(\"Need to pass an index list for each dimension!\" +\n \" Length of idx_lists needs to match order of tensor.\")\n\n # Perform slicing via numpy.take\n out = self.elems\n for ax in range(self.order):\n if idx_lists[ax] is not None: # None means, we want the full space in this direction\n out = numpy.take(out, idx_lists[ax], axis=ax)\n\n return out\n\n def set_index_lists(self):\n \"\"\" Set passive and full index lists based on class inputs \"\"\"\n tmp_size = self._size_full\n if self._size_full is None:\n tmp_size = self.elems.shape[0]\n\n self._passive_indices = [i for i in range(tmp_size)\n if i not in self.active_indices]\n self._full_indices = [i for i in range(tmp_size)]\n\n def sub_str(self, name: str) -> numpy.ndarray:\n \"\"\"\n Get subspace of tensor by a string\n Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.\n Full space in this context may also be smaller than actual tensor dimension.\n\n The specification of active space in this context only allows to pick a set from a list of orbitals, and\n is not able to resolve an active space from irreducible representations.\n\n Example for one-body tensor:\n hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]\n\n Parameters\n ----------\n name :\n String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)\n\n Returns\n -------\n out :\n Sliced tensor as numpy.ndarray\n \"\"\"\n if not self._indices_set:\n self.set_index_lists()\n self._indices_set = True\n\n if name is None:\n raise Exception(\"No name specified.\")\n if len(name) != self.order:\n raise Exception(\"Name does not match order of the tensor.\")\n if self.active_indices is None:\n raise Exception(\"Need to set an active space in order to call this function.\")\n\n idx_lists = []\n # Parse name as string of space indices\n for char in name:\n if char.lower() == 'a':\n idx_lists.append(self.active_indices)\n elif char.lower() 
== 'p':\n idx_lists.append(self._passive_indices)\n elif char.lower() == 'f':\n if self._size_full is None:\n idx_lists.append(None)\n else:\n idx_lists.append(self._full_indices)\n else:\n raise Exception(\"Need to specify a valid letter (a,p,f).\")\n\n out = self.sub_lists(idx_lists)\n\n return out\n\n def is_openfermion(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'openfermion'\n \"\"\"\n if self.scheme == 'openfermion' or self.scheme == 'of':\n return True\n else:\n return False\n\n def is_chem(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'chem'\n \"\"\"\n if self.scheme == 'chem' or self.scheme == 'c':\n return True\n else:\n return False\n\n def is_phys(self) -> bool:\n \"\"\"\n Checks whether current ordering scheme is 'phys'\n \"\"\"\n if self.scheme == 'phys' or self.scheme == 'p':\n return True\n else:\n return False\n\n def reorder(self, to: str = 'of'):\n \"\"\"\n Function to reorder tensors according to some convention.\n\n Parameters\n ----------\n to :\n Ordering scheme of choice.\n 'openfermion', 'of' (default) :\n openfermion - ordering, corresponds to integrals of the type\n h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1) (O(1,2)\n with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)\n currently needed for dependencies on openfermion-library\n 'chem', 'c' :\n quantum chemistry ordering, collect particle terms,\n more convenient for real-space methods\n h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)\n This is output by psi4\n 'phys', 'p' :\n typical physics ordering, integrals of type\n h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)\n with operators a^pq_rs = a^p a^q a_s a_r\n\n Returns\n -------\n \"\"\"\n if self.order != 4:\n raise Exception('Reordering currently only implemented for two-body tensors.')\n to = to.lower()\n\n if self.is_chem():\n if to == 'chem' or to == 'c':\n pass\n elif to == 'openfermion' or to == 'of':\n self.elems = numpy.einsum(\"psqr -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'openfermion'\n elif to == 'phys' or to == 'p':\n self.elems = numpy.einsum(\"prqs -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'phys'\n elif self.is_openfermion():\n if to == 'chem' or to == 'c':\n self.elems = numpy.einsum(\"pqrs -> psqr\", self.elems, optimize='greedy')\n self.scheme = 'chem'\n elif to == 'openfermion' or to == 'of':\n pass\n elif to == 'phys' or to == 'p':\n self.elems = numpy.einsum(\"pqrs -> pqsr\", self.elems, optimize='greedy')\n self.scheme = 'phys'\n elif self.is_phys():\n if to == 'chem' or to == 'c':\n self.elems = numpy.einsum(\"pqrs -> prqs\", self.elems, optimize='greedy')\n self.scheme = 'chem'\n elif to == 'openfermion' or to == 'of':\n self.elems = numpy.einsum(\"pqsr -> pqrs\", self.elems, optimize='greedy')\n self.scheme = 'openfermion'\n elif to == 'phys' or to == 'p':\n pass\n\n\nclass QuantumChemistryBase:\n \"\"\" \"\"\"\n\n class _QubitEncoding:\n \"\"\"\n Small wrapper class for the Qubit Transformation\n Provides more controlled output and handles special cases\n \"\"\"\n\n def __init__(self, transformation: typing.Callable, **kwargs):\n self._trafo = transformation\n self._kwargs = kwargs\n\n def __call__(self, op):\n errlog = \"\"\n try:\n try:\n # return self._trafo(op, **self._kwargs)\n return self._trafo(op, **self._kwargs)\n except TypeError as E:\n print(\"converting to interaction operator\")\n errlog += \"\\n\" + str(E)\n return self._trafo(openfermion.get_interaction_operator(op), **self._kwargs)\n except Exception as E:\n errlog += \"\\n\" + str(E)\n 
raise TequilaException(\"Error in QubitEncoding \" + str(self) + errlog)\n\n def __repr__(self):\n if len(self._kwargs) > 0:\n return \"transformation=\" + str(self._trafo) + \"\\nadditional keys: \" + str(self._kwargs)\n else:\n return \"transformation=\" + str(self._trafo)\n\n def __str__(self):\n return self.__repr__()\n\n def __init__(self, parameters: ParametersQC,\n transformation: typing.Union[str, typing.Callable] = None,\n active_orbitals: list = None,\n reference: list = None,\n *args,\n **kwargs):\n\n self.parameters = parameters\n\n if \"molecule\" in kwargs:\n self.molecule = kwargs[\"molecule\"]\n else:\n self.molecule = self.make_molecule(*args, **kwargs)\n\n assert (parameters.basis_set.lower() == self.molecule.basis.lower())\n assert (parameters.multiplicity == self.molecule.multiplicity)\n assert (parameters.charge == self.molecule.charge)\n\n self.active_space = None\n if active_orbitals is not None:\n self.active_space = self._make_active_space_data(active_orbitals=active_orbitals, reference=reference)\n\n if reference is None:\n self.reference = [i for i in range(self.n_electrons // 2)]\n else:\n self.reference = reference\n\n self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)\n\n self._rdm1 = None\n self._rdm2 = None\n\n def _initialize_transformation(self, transformation, *args, **kwargs):\n # filter out arguments to the transformation\n trafo_args = {k.split(\"__\")[1]: v for k, v in kwargs.items() if\n (hasattr(k, \"lower\") and \"transformation__\" in k.lower())}\n\n if transformation is None:\n trafo = openfermion.jordan_wigner\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"jordan-wigner\", \"jw\", \"j-w\",\n \"jordanwigner\"]:\n trafo = openfermion.jordan_wigner\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"bravyi-kitaev\", \"bk\", \"b-k\",\n \"bravyikitaev\"]:\n trafo = openfermion.bravyi_kitaev\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"bravyi-kitaev-tree\", \"bkt\",\n \"bravykitaevtree\", \"b-k-t\"]:\n trafo = openfermion.bravyi_kitaev_tree\n elif hasattr(transformation, \"lower\") and transformation.lower() in [\"tapered_bravyi_kitaev\", \"tbk\", \"t-b-k\",\n \"symmetry_conserving_bravyi_kitaev\"]:\n if \"active_orbitals\" not in trafo_args:\n trafo_args[\"active_orbitals\"] = self.n_orbitals * 2\n if \"active_fermions\" not in trafo_args:\n trafo_args[\"active_fermions\"] = self.n_electrons\n print(\"trafo_args = \", trafo_args)\n # trafo = openfermion.symmetry_conserving_bravyi_kitaev\n # Current hotfix, to be changed once it works again straightforward with OpenFermion\n from ._openfermion_symmetry_conserving_bk_hotfix import symmetry_conserving_bravyi_kitaev_HOTFIX\n trafo = symmetry_conserving_bravyi_kitaev_HOTFIX\n elif hasattr(transformation, \"lower\"):\n trafo = getattr(openfermion, transformation.lower())\n else:\n assert (callable(transformation))\n trafo = transformation\n\n return self._QubitEncoding(transformation=trafo, **trafo_args)\n\n def _make_active_space_data(self, active_orbitals, reference=None):\n \"\"\"\n Small helper function\n Internal use only\n Parameters\n ----------\n active_orbitals: dictionary :\n list: Give a list of spatial orbital indices\n i.e. 
occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used\n reference: (Default value=None)\n List of orbitals which form the reference\n Can be given in the same format as active_orbitals\n If given as None then the first N_electron/2 orbitals are taken\n for closed-shell systems.\n\n Returns\n -------\n Dataclass with active indices and reference indices (in spatial notation)\n\n \"\"\"\n\n if active_orbitals is None:\n return None\n\n @dataclass\n class ActiveSpaceData:\n active_orbitals: list # active orbitals (spatial, c1)\n reference_orbitals: list # reference orbitals (spatial, c1)\n\n def __str__(self):\n result = \"Active Space Data:\\n\"\n result += \"{key:15} : {value:15} \\n\".format(key=\"active_orbitals\", value=str(self.active_orbitals))\n result += \"{key:15} : {value:15} \\n\".format(key=\"reference_orbitals\",\n value=str(self.reference_orbitals))\n result += \"{key:15} : {value:15} \\n\".format(key=\"frozen_docc\", value=str(self.frozen_docc))\n result += \"{key:15} : {value:15} \\n\".format(key=\"frozen_uocc\", value=str(self.frozen_uocc))\n return result\n\n @property\n def frozen_reference_orbitals(self):\n return [i for i in self.reference_orbitals if i not in self.active_orbitals]\n\n @property\n def active_reference_orbitals(self):\n return [i for i in self.reference_orbitals if i in self.active_orbitals]\n\n if reference is None:\n # auto assignment only for closed-shell\n assert (self.n_electrons % 2 == 0)\n reference = sorted([i for i in range(self.n_electrons // 2)])\n\n return ActiveSpaceData(active_orbitals=sorted(active_orbitals),\n reference_orbitals=sorted(reference))\n\n @classmethod\n def from_openfermion(cls, molecule: openfermion.MolecularData,\n transformation: typing.Union[str, typing.Callable] = None,\n *args,\n **kwargs):\n \"\"\"\n Initialize direclty from openfermion MolecularData object\n\n Parameters\n ----------\n molecule\n The openfermion molecule\n Returns\n -------\n The Tequila molecule\n \"\"\"\n parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,\n description=molecule.description, multiplicity=molecule.multiplicity,\n charge=molecule.charge)\n return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)\n\n def make_excitation_generator(self, indices: typing.Iterable[typing.Tuple[int, int]]) -> QubitHamiltonian:\n \"\"\"\n Notes\n ----------\n Creates the transformed hermitian generator of UCC type unitaries:\n M(a^\\dagger_{a_0} a_{i_0} a^\\dagger{a_1}a_{i_1} ... - h.c.)\n where the qubit map M depends is self.transformation\n\n Parameters\n ----------\n indices : typing.Iterable[typing.Tuple[int, int]] :\n List of tuples [(a_0, i_0), (a_1, i_1), ... 
] - recommended format, in spin-orbital notation (alpha odd numbers, beta even numbers)\n can also be given as one big list: [a_0, i_0, a_1, i_1 ...]\n Returns\n -------\n type\n 1j*Transformed qubit excitation operator, depends on self.transformation\n \"\"\"\n\n if self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet\")\n\n # check indices and convert to list of tuples if necessary\n if len(indices) == 0:\n raise TequilaException(\"make_excitation_operator: no indices given\")\n elif not isinstance(indices[0], typing.Iterable):\n if len(indices) % 2 != 0:\n raise TequilaException(\"make_excitation_generator: unexpected input format of indices\\n\"\n \"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\\n\"\n \"or list as [a_0, i_0, a_1, i_1, ... ]\\n\"\n \"you gave: {}\".format(indices))\n converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]\n else:\n converted = indices\n\n # convert to openfermion input format\n ofi = []\n dag = []\n for pair in converted:\n assert (len(pair) == 2)\n ofi += [(int(pair[0]), 1),\n (int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64\n dag += [(int(pair[0]), 0), (int(pair[1]), 1)]\n\n op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian\n op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)\n qop = QubitHamiltonian(qubit_hamiltonian=self.transformation(op))\n # check if the operator is hermitian and cast coefficients to floats\n # in order to avoid trouble with the simulation backends\n assert qop.is_hermitian()\n for k, v in qop.qubit_operator.terms.items():\n qop.qubit_operator.terms[k] = to_float(v)\n\n qop = qop.simplify()\n\n if len(qop) == 0:\n warnings.warn(\"Excitation generator is a unit operator.\\n\"\n \"Non-standard transformations might not work with general fermionic operators\\n\"\n \"indices = \" + str(indices), category=TequilaWarning)\n return qop\n\n def reference_state(self, reference_orbitals: list = None, n_qubits: int = None) -> BitString:\n \"\"\"Does a really lazy workaround ... 
but it works\n :return: Hartree-Fock Reference as binary-number\n\n Parameters\n ----------\n reference_orbitals: list:\n give list of doubly occupied orbitals\n default is None which leads to automatic list of the\n first n_electron/2 orbitals\n\n Returns\n -------\n\n \"\"\"\n\n if n_qubits is None:\n n_qubits = 2 * self.n_orbitals\n\n if self.transformation._trafo == openfermion.symmetry_conserving_bravyi_kitaev:\n def tapering(fop):\n fermion_hamiltonian_reorder = openfermion.utils.reorder(fop, openfermion.utils.up_then_down,\n num_modes=n_qubits)\n qubit_hamiltonian = openfermion.bravyi_kitaev_tree(fermion_hamiltonian_reorder, n_qubits=n_qubits)\n qubit_hamiltonian.compress()\n return qubit_hamiltonian\n\n transformation = tapering\n elif self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet\")\n\n else:\n transformation = self.transformation\n\n if reference_orbitals is None:\n reference_orbitals = self.reference\n\n spin_orbitals = sorted([2 * i for i in reference_orbitals] + [2 * i + 1 for i in reference_orbitals])\n\n string = \"1.0 [\"\n for i in spin_orbitals:\n string += str(i) + \"^ \"\n string += \"]\"\n\n fop = openfermion.FermionOperator(string, 1.0)\n op = QubitHamiltonian(qubit_hamiltonian=transformation(fop))\n from tequila.wavefunction.qubit_wavefunction import QubitWaveFunction\n wfn = QubitWaveFunction.from_int(0, n_qubits=n_qubits)\n wfn = wfn.apply_qubitoperator(operator=op)\n assert (len(wfn.keys()) == 1)\n key = list(wfn.keys())[0]\n if self.transformation._trafo == openfermion.symmetry_conserving_bravyi_kitaev:\n active_qubits = [i for i in range(n_qubits) if i not in [n_qubits - 1, n_qubits // 2 - 1]]\n array = [key.array[i] for i in active_qubits]\n key = BitString.from_array(array=array)\n return key\n\n def make_molecule(self, *args, **kwargs) -> MolecularData:\n \"\"\"Creates a molecule in openfermion format by running psi4 and extracting the data\n Will check for previous outputfiles before running\n Will not recompute if a file was found\n\n Parameters\n ----------\n parameters :\n An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4\n The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file\n\n Returns\n -------\n type\n the molecule in openfermion.MolecularData format\n\n \"\"\"\n molecule = MolecularData(**self.parameters.molecular_data_param)\n # try to load\n\n do_compute = True\n try:\n import os\n if os.path.exists(self.parameters.filename):\n molecule.load()\n do_compute = False\n except OSError:\n do_compute = True\n\n if do_compute:\n molecule = self.do_make_molecule(*args, **kwargs)\n\n molecule.save()\n return molecule\n\n def do_make_molecule(self, *args, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n # integrals need to be passed in base class\n assert (\"one_body_integrals\" in kwargs)\n assert (\"two_body_integrals\" in kwargs)\n one_body_integrals = kwargs[\"one_body_integrals\"]\n two_body_integrals = kwargs[\"two_body_integrals\"]\n if \"nuclear_repulsion\" in kwargs:\n nuclear_repulsion = kwargs[\"nuclear_repulsion\"]\n else:\n nuclear_repulsion = 0.0\n warnings.warn(\"No nuclear_repulsion given for custom molecule, setting to zero\", category=TequilaWarning)\n\n\n if (\"n_orbitals\" in kwargs):\n n_orbitals = kwargs[\"n_orbitals\"]\n else:\n 
n_orbitals = one_body_integrals.shape[0]\n for i in [0,1,2,3]:\n assert n_orbitals == two_body_integrals.shape[i]\n \n molecule = MolecularData(**self.parameters.molecular_data_param)\n\n molecule.one_body_integrals = one_body_integrals\n molecule.two_body_integrals = two_body_integrals\n molecule.nuclear_repulsion = nuclear_repulsion\n molecule.n_orbitals = n_orbitals\n molecule.save()\n return molecule\n\n @property\n def n_orbitals(self) -> int:\n \"\"\" \"\"\"\n if self.active_space is None:\n return self.molecule.n_orbitals\n else:\n return len(self.active_space.active_orbitals)\n\n @property\n def n_electrons(self) -> int:\n \"\"\" \"\"\"\n if self.active_space is None:\n return self.molecule.n_electrons\n else:\n return 2 * len(self.active_space.active_reference_orbitals)\n\n def make_hamiltonian(self, occupied_indices=None, active_indices=None) -> QubitHamiltonian:\n \"\"\" \"\"\"\n if occupied_indices is None and self.active_space is not None:\n occupied_indices = self.active_space.frozen_reference_orbitals\n if active_indices is None and self.active_space is not None:\n active_indices = self.active_space.active_orbitals\n\n fop = openfermion.transforms.get_fermion_operator(\n self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))\n try:\n qop = self.transformation(fop)\n except TypeError:\n qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))\n return QubitHamiltonian(qubit_hamiltonian=qop)\n\n def compute_one_body_integrals(self):\n \"\"\" \"\"\"\n if hasattr(self, \"molecule\"):\n return self.molecule.one_body_integrals\n\n def compute_two_body_integrals(self):\n \"\"\" \"\"\"\n if hasattr(self, \"molecule\"):\n return self.molecule.two_body_integrals\n\n def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:\n \"\"\" \"\"\"\n raise Exception(\"BaseClass Method\")\n\n def prepare_reference(self, *args, **kwargs):\n \"\"\"\n\n Returns\n -------\n A tequila circuit object which prepares the reference of this molecule in the chosen transformation\n \"\"\"\n\n return prepare_product_state(self.reference_state(*args, **kwargs))\n\n def make_upccgsd_ansatz(self,\n include_singles:bool=True,\n include_reference:bool=True,\n indices:list=None,\n label: str=None,\n order:int =1,\n *args, **kwargs):\n \"\"\"\n UpGCCSD Ansatz similar as described by Lee et. al.\n\n Parameters\n ----------\n include_singles\n include singles excitations\n include_reference\n include the HF reference state as initial state\n indices\n pass custom defined set of indices from which the ansatz will be created\n List of tuples of tuples spin-indices e.g. 
[((2*p,2*q),(2*p+1,2*q+1)), ...]\n label\n An additional label that is set with the variables\n default is None and no label will be set: variables names will be\n (x, (p,q)) for x in range(order)\n with a label the variables will be named\n (label, (x, (p,q))) \n order\n Order of the ansatz (default is 1)\n determines how often the ordering gets repeated\n parameters of repeating layers are independent\n Returns\n -------\n UpGCCSD ansatz\n \"\"\"\n\n # indices defining the UpCCD ansatz\n if indices is None:\n indices = []\n for i in range(self.n_orbitals):\n for a in range(i + 1, self.n_orbitals):\n indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))\n if include_singles:\n indices.append(((2 * i, 2 * a)))\n indices.append(((2 * i + 1, 2 * a + 1)))\n\n U = QCircuit()\n if include_reference:\n U = self.prepare_reference()\n\n generators = [self.make_excitation_generator(i, *args, **kwargs) for i in indices]\n\n for k in range(order):\n idx = [(k,i) for i in indices]\n prefix = order\n if label is not None:\n prefix = (label, order)\n names = [(prefix, i) for i in idx]\n U += gates.Trotterized(generators=generators, angles=names, steps=1)\n return U\n\n def make_uccsd_ansatz(self, trotter_steps: int,\n initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = \"mp2\",\n include_reference_ansatz=True,\n parametrized=True,\n threshold=1.e-8,\n trotter_parameters: gates.TrotterParameters = None) -> QCircuit:\n \"\"\"\n\n Parameters\n ----------\n initial_amplitudes :\n initial amplitudes given as ManyBodyAmplitudes structure or as string\n where 'mp2', 'cc2' or 'ccsd' are possible initializations\n include_reference_ansatz :\n Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)\n parametrized :\n Initialize with variables, otherwise with static numbers (Default value = True)\n trotter_steps: int :\n\n initial_amplitudes: typing.Union[str :\n\n Amplitudes :\n\n ClosedShellAmplitudes] :\n (Default value = \"mp2\")\n trotter_parameters: gates.TrotterParameters :\n (Default value = None)\n\n Returns\n -------\n type\n Parametrized QCircuit\n\n \"\"\"\n\n if self.n_electrons % 2 != 0:\n raise TequilaException(\"make_uccsd_ansatz currently only for closed shell systems\")\n\n nocc = self.n_electrons // 2\n nvirt = self.n_orbitals // 2 - nocc\n\n Uref = QCircuit()\n if include_reference_ansatz:\n Uref = self.prepare_reference()\n\n amplitudes = initial_amplitudes\n if hasattr(initial_amplitudes, \"lower\"):\n if initial_amplitudes.lower() == \"mp2\":\n amplitudes = self.compute_mp2_amplitudes()\n elif initial_amplitudes.lower() == \"ccsd\":\n amplitudes = self.compute_ccsd_amplitudes()\n else:\n try:\n amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())\n except Exception as exc:\n raise TequilaException(\n \"{}\\nDon't know how to initialize \\'{}\\' amplitudes\".format(exc, initial_amplitudes))\n\n if amplitudes is None:\n amplitudes = ClosedShellAmplitudes(\n tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),\n tIA=numpy.zeros(shape=[nocc, nvirt]))\n\n closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)\n generators = []\n variables = []\n\n if not isinstance(amplitudes, dict):\n amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)\n amplitudes = dict(sorted(amplitudes.items(), key=lambda x: x[1]))\n\n for key, t in amplitudes.items():\n assert (len(key) % 2 == 0)\n if not numpy.isclose(t, 0.0, atol=threshold):\n\n if closed_shell:\n spin_indices = []\n if len(key) == 2:\n spin_indices = [[2 
* key[0], 2 * key[1]], [2 * key[0] + 1, 2 * key[1] + 1]]\n partner = None\n else:\n spin_indices.append([2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3]])\n spin_indices.append([2 * key[0], 2 * key[1], 2 * key[2] + 1, 2 * key[3] + 1])\n if key[0] != key[2] and key[1] != key[3]:\n spin_indices.append([2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3]])\n spin_indices.append([2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2] + 1, 2 * key[3] + 1])\n partner = tuple([key[2], key[1], key[0], key[3]]) # taibj -> tbiaj\n for idx in spin_indices:\n idx = [(idx[2 * i], idx[2 * i + 1]) for i in range(len(idx) // 2)]\n generators.append(self.make_excitation_generator(indices=idx))\n\n if parametrized:\n variables.append(Variable(name=key)) # abab\n variables.append(Variable(name=key)) # baba\n if partner is not None and key[0] != key[1] and key[2] != key[3]:\n variables.append(Variable(name=key) - Variable(partner)) # aaaa\n variables.append(Variable(name=key) - Variable(partner)) # bbbb\n else:\n variables.append(t)\n variables.append(t)\n if partner is not None and key[0] != key[1] and key[2] != key[3]:\n variables.append(t - amplitudes[partner])\n variables.append(t - amplitudes[partner])\n else:\n generators.append(self.make_excitation_operator(indices=spin_indices))\n if parametrized:\n variables.append(Variable(name=key))\n else:\n variables.append(t)\n\n return Uref + gates.Trotterized(generators=generators, angles=variables, steps=trotter_steps,\n parameters=trotter_parameters)\n\n def compute_amplitudes(self, method: str, *args, **kwargs):\n \"\"\"\n Compute closed-shell CC amplitudes\n\n Parameters\n ----------\n method :\n coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)\n Success might depend on backend\n got an extra function for MP2\n *args :\n\n **kwargs :\n\n\n Returns\n -------\n\n \"\"\"\n raise TequilaException(\"compute amplitudes: Needs to be overwritten by backend\")\n\n def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:\n \"\"\"\n\n Compute closed-shell mp2 amplitudes\n\n .. 
math::\n t(a,i,b,j) = 0.25 * g(a,i,b,j)/(e(i) + e(j) -a(i) - b(j) )\n\n :return:\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n assert self.parameters.closed_shell\n g = self.molecule.two_body_integrals\n fij = self.molecule.orbital_energies\n nocc = self.molecule.n_electrons // 2 # this is never the active space\n ei = fij[:nocc]\n ai = fij[nocc:]\n abgij = g[nocc:, nocc:, :nocc, :nocc]\n amplitudes = abgij * 1.0 / (\n ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))\n E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,\n optimize='greedy')\n\n self.molecule.mp2_energy = E + self.molecule.hf_energy\n return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))\n\n def compute_cis_amplitudes(self):\n \"\"\"\n Compute the CIS amplitudes of the molecule\n \"\"\"\n\n @dataclass\n class ResultCIS:\n \"\"\" \"\"\"\n omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]\n amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]\n\n def __getitem__(self, item):\n return (self.omegas[item], self.amplitudes[item])\n\n def __len__(self):\n return len(self.omegas)\n\n g = self.molecule.two_body_integrals\n fij = self.molecule.orbital_energies\n\n nocc = self.n_alpha_electrons\n nvirt = self.n_orbitals - nocc\n\n pairs = []\n for i in range(nocc):\n for a in range(nocc, nocc + nvirt):\n pairs.append((a, i))\n M = numpy.ndarray(shape=[len(pairs), len(pairs)])\n\n for xx, x in enumerate(pairs):\n eia = fij[x[0]] - fij[x[1]]\n a, i = x\n for yy, y in enumerate(pairs):\n b, j = y\n delta = float(y == x)\n gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]\n M[xx, yy] = eia * delta + gpart\n\n omega, xvecs = numpy.linalg.eigh(M)\n\n # convert amplitudes to ndarray sorted by excitation energy\n nex = len(omega)\n amplitudes = []\n for ex in range(nex):\n t = numpy.ndarray(shape=[nvirt, nocc])\n exvec = xvecs[ex]\n for xx, x in enumerate(pairs):\n a, i = x\n t[a - nocc, i] = exvec[xx]\n amplitudes.append(ClosedShellAmplitudes(tIA=t))\n\n return ResultCIS(omegas=list(omega), amplitudes=amplitudes)\n\n @property\n def rdm1(self):\n \"\"\" \"\"\"\n if self._rdm1 is not None:\n return self._rdm1\n else:\n print(\"1-RDM has not been computed. Return None for 1-RDM.\")\n return None\n\n @property\n def rdm2(self):\n \"\"\" \"\"\"\n if self._rdm2 is not None:\n return self._rdm2\n else:\n print(\"2-RDM has not been computed. Return None for 2-RDM.\")\n return None\n\n def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,\n get_rdm1: bool = True, get_rdm2: bool = True):\n \"\"\"\n Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given\n a unitary U. This method uses the standard ordering in physics as denoted below.\n Note, that the representation of the density matrices depends on the qubit transformation\n used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density\n matrices in the occupation picture.\n\n We only consider real orbitals and thus real-valued RDMs.\n The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.\n\n .. 
math :\n \\\\text{rdm1: } \\\\gamma^p_q = \\\\langle \\\\psi | a^p a_q | \\\\psi \\\\rangle\n = \\\\langle U 0 | a^p a_q | U 0 \\\\rangle\n \\\\text{rdm2: } \\\\gamma^{pq}_{rs} = \\\\langle \\\\psi | a^p a^q a_s a_r | \\\\psi \\\\rangle\n = \\\\langle U 0 | a^p a^q a_s a_r | U 0 \\\\rangle\n\n Parameters\n ----------\n U :\n Quantum Circuit to achieve the desired state \\\\psi = U |0\\\\rangle, non-optional\n variables :\n If U is parametrized, then need to hand over a set of fixed variables\n spin_free :\n Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals\n get_rdm1, get_rdm2 :\n Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,\n it is recommended to compute them at once.\n\n Returns\n -------\n \"\"\"\n # Check whether unitary circuit is not 0\n if U is None:\n raise TequilaException('Need to specify a Quantum Circuit.')\n\n # Check whether transformation is BKSF.\n # Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct\n # transformation, because it computes the number of qubits incorrectly in this case.\n # A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now\n if self.transformation._trafo == openfermion.bravyi_kitaev_fast:\n raise TequilaException(\n \"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.\")\n\n # Set up number of spin-orbitals and molecular orbitals respectively\n n_SOs = 2 * self.n_orbitals\n n_MOs = self.n_orbitals\n\n # Check whether unitary circuit is not 0\n if U is None:\n raise TequilaException('Need to specify a Quantum Circuit.')\n\n def _get_of_op(operator_tuple):\n \"\"\" Returns operator given by a operator tuple as OpenFermion - Fermion operator \"\"\"\n op = openfermion.FermionOperator(operator_tuple)\n return op\n\n def _get_qop_hermitian(of_operator) -> QubitHamiltonian:\n \"\"\" Returns Hermitian part of Fermion operator as QubitHamiltonian \"\"\"\n qop = QubitHamiltonian(self.transformation(of_operator))\n real, imag = qop.split(hermitian=True)\n if real:\n return real\n elif not real:\n print(of_operator)\n raise TequilaException(\"Qubit Hamiltonian does not have a Hermitian part. 
Check this...\")\n\n def _build_1bdy_operators_spinful() -> list:\n \"\"\" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetry pq = qp\n ops = []\n for p in range(n_SOs):\n for q in range(p + 1):\n op_tuple = ((p, 1), (q, 0))\n op = _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_2bdy_operators_spinful() -> list:\n \"\"\" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetries pqrs = -pqsr = -qprs = qpsr\n # and = rspq\n ops = []\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n if p * n_SOs + q >= r * n_SOs + s:\n op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))\n op = _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_1bdy_operators_spinfree() -> list:\n \"\"\" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetry pq = qp (not changed by spin-summation)\n ops = []\n for p in range(n_MOs):\n for q in range(p + 1):\n # Spin aa\n op_tuple = ((2 * p, 1), (2 * q, 0))\n op = _get_of_op(op_tuple)\n # Spin bb\n op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))\n op += _get_of_op(op_tuple)\n ops += [op]\n\n return ops\n\n def _build_2bdy_operators_spinfree() -> list:\n \"\"\" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians \"\"\"\n # Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)\n # and = rspq\n ops = []\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):\n # Spin aaaa\n op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p!=q and r!=s) else '0.0 []'\n op = _get_of_op(op_tuple)\n # Spin abab\n op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (2*p!=2*q+1 and 2*r!=2*s+1) else '0.0 []'\n op += _get_of_op(op_tuple)\n # Spin baba\n op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (2*p+1!=2*q and 2*r+1!=2*s) else '0.0 []'\n op += _get_of_op(op_tuple)\n # Spin bbbb\n op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (p!=q and r!=s) else '0.0 []'\n op += _get_of_op(op_tuple)\n\n ops += [op]\n\n return ops\n\n def _assemble_rdm1(evals) -> numpy.ndarray:\n \"\"\"\n Returns spin-ful or spin-free one-particle RDM built by symmetry conditions\n Same symmetry with or without spin, so we can use the same function\n \"\"\"\n N = n_MOs if spin_free else n_SOs\n rdm1 = numpy.zeros([N, N])\n ctr: int = 0\n for p in range(N):\n for q in range(p + 1):\n rdm1[p, q] = evals[ctr]\n # Symmetry pq = qp\n rdm1[q, p] = rdm1[p, q]\n ctr += 1\n\n return rdm1\n\n def _assemble_rdm2_spinful(evals) -> numpy.ndarray:\n \"\"\" Returns spin-ful two-particle RDM built by symmetry conditions \"\"\"\n ctr: int = 0\n rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n if p * n_SOs + q >= r * n_SOs + s:\n rdm2[p, q, r, s] = evals[ctr]\n # Symmetry pqrs = rspq\n rdm2[r, s, p, q] = rdm2[p, q, r, s]\n ctr += 1\n\n # Further permutational symmetries due to anticommutation relations\n for p in range(n_SOs):\n for q in range(p):\n for r in range(n_SOs):\n for s in range(r):\n rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr\n rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs\n rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr\n\n return rdm2\n\n def _assemble_rdm2_spinfree(evals) -> 
numpy.ndarray:\n \"\"\" Returns spin-free two-particle RDM built by symmetry conditions \"\"\"\n ctr: int = 0\n rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):\n rdm2[p, q, r, s] = evals[ctr]\n # Symmetry pqrs = rspq\n rdm2[r, s, p, q] = rdm2[p, q, r, s]\n ctr += 1\n\n # Further permutational symmetry: pqrs = qpsr\n for p, q, r, s in product(range(n_MOs), repeat=4):\n if p >= q or r >= s:\n rdm2[q, p, s, r] = rdm2[p, q, r, s]\n\n return rdm2\n\n # Build operator lists\n qops = []\n if spin_free:\n qops += _build_1bdy_operators_spinfree() if get_rdm1 else []\n qops += _build_2bdy_operators_spinfree() if get_rdm2 else []\n else:\n qops += _build_1bdy_operators_spinful() if get_rdm1 else []\n qops += _build_2bdy_operators_spinful() if get_rdm2 else []\n\n # Transform operator lists to QubitHamiltonians\n qops = [_get_qop_hermitian(op) for op in qops]\n # Compute expected values\n evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)\n\n # Assemble density matrices\n # If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type\n def _reset_rdm(rdm):\n if rdm is not None:\n if spin_free and rdm.shape[0] != n_MOs:\n return None\n if not spin_free and rdm.shape[0] != n_SOs:\n return None\n return rdm\n\n self._rdm1 = _reset_rdm(self._rdm1)\n self._rdm2 = _reset_rdm(self._rdm2)\n # Split expectation values in 1- and 2-particle expectation values\n if get_rdm1:\n len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2\n else:\n len_1 = 0\n evals_1, evals_2 = evals[:len_1], evals[len_1:]\n # Build matrices using the expectation values\n self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1\n if spin_free:\n self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2\n else:\n self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2\n\n def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:\n \"\"\"\n Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.\n\n Parameters\n ----------\n sum_rdm1, sum_rdm2 :\n If set to true, perform spin summation on rdm1, rdm2\n\n Returns\n -------\n rdm1_spinsum, rdm2_spinsum :\n The desired spin-free matrices\n \"\"\"\n n_MOs = self.n_orbitals\n rdm1_spinsum = None\n rdm2_spinsum = None\n\n # Spin summation on rdm1\n if sum_rdm1:\n # Check whether spin-rdm2 exists\n if self._rdm1 is None:\n raise TequilaException(\"The spin-RDM for the 1-RDM does not exist!\")\n # Check whether existing rdm1 is in spin-orbital basis\n if self._rdm1.shape[0] != 2 * n_MOs:\n raise TequilaException(\"The existing RDM needs to be in spin-orbital basis, it is already spin-free!\")\n # Do summation\n rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])\n for p in range(n_MOs):\n for q in range(p + 1):\n rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]\n rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]\n for p in range(n_MOs):\n for q in range(p):\n rdm1_spinsum[q, p] = rdm1_spinsum[p, q]\n\n # Spin summation on rdm2\n if sum_rdm2:\n # Check whether spin-rdm2 exists\n if self._rdm2 is None:\n raise TequilaException(\"The spin-RDM for the 2-RDM does not exist!\")\n # Check whether existing rdm2 is in spin-orbital basis\n if self._rdm2.shape[0] != 2 * n_MOs:\n raise TequilaException(\"The existing RDM needs to be in spin-orbital basis, it is already spin-free!\")\n # Do summation\n rdm2_spinsum = 
numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])\n for p, q, r, s in product(range(n_MOs), repeat=4):\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]\n rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]\n\n return rdm1_spinsum, rdm2_spinsum\n\n def __str__(self) -> str:\n result = str(type(self)) + \"\\n\"\n result += \"Qubit Encoding\\n\"\n result += str(self.transformation) + \"\\n\"\n for k, v in self.parameters.__dict__.items():\n result += \"{key:15} : {value:15} \\n\".format(key=str(k), value=str(v))\n return result\n" ]
[ [ "numpy.take", "numpy.einsum", "numpy.abs", "numpy.ndarray", "numpy.linalg.eigh", "numpy.ndenumerate", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
justindujardin/allennlp
[ "c4559f3751775aa8bc018db417edc119d29d8051", "c4559f3751775aa8bc018db417edc119d29d8051", "c4559f3751775aa8bc018db417edc119d29d8051", "c4559f3751775aa8bc018db417edc119d29d8051" ]
[ "allennlp/modules/elmo_lstm.py", "allennlp/models/constituency_parser.py", "allennlp/training/optimizers.py", "allennlp/modules/attention/bilinear_attention.py" ]
[ "\"\"\"\nA stacked bidirectional LSTM with skip connections between layers.\n\"\"\"\nfrom typing import Optional, Tuple, List\nimport warnings\n\nimport torch\nfrom torch.nn.utils.rnn import PackedSequence, pad_packed_sequence\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n import h5py\nimport numpy\n\nfrom allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection\nfrom allennlp.common.checks import ConfigurationError\nfrom allennlp.modules.encoder_base import _EncoderBase\nfrom allennlp.common.file_utils import cached_path\n\n\nclass ElmoLstm(_EncoderBase):\n \"\"\"\n A stacked, bidirectional LSTM which uses\n [`LstmCellWithProjection`'s](./lstm_cell_with_projection.md)\n with highway layers between the inputs to layers.\n The inputs to the forward and backward directions are independent - forward and backward\n states are not concatenated between layers.\n\n Additionally, this LSTM maintains its `own` state, which is updated every time\n `forward` is called. It is dynamically resized for different batch sizes and is\n designed for use with non-continuous inputs (i.e inputs which aren't formatted as a stream,\n such as text used for a language modeling task, which is how stateful RNNs are typically used).\n This is non-standard, but can be thought of as having an \"end of sentence\" state, which is\n carried across different sentences.\n\n # Parameters\n\n input_size : `int`, required\n The dimension of the inputs to the LSTM.\n hidden_size : `int`, required\n The dimension of the outputs of the LSTM.\n cell_size : `int`, required.\n The dimension of the memory cell of the `LstmCellWithProjection`.\n num_layers : `int`, required\n The number of bidirectional LSTMs to use.\n requires_grad : `bool`, optional\n If True, compute gradient of ELMo parameters for fine tuning.\n recurrent_dropout_probability : `float`, optional (default = 0.0)\n The dropout probability to be used in a dropout scheme as stated in\n [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]\n (https://arxiv.org/abs/1512.05287).\n state_projection_clip_value : `float`, optional, (default = None)\n The magnitude with which to clip the hidden_state after projecting it.\n memory_cell_clip_value : `float`, optional, (default = None)\n The magnitude with which to clip the memory cell.\n \"\"\"\n\n def __init__(\n self,\n input_size: int,\n hidden_size: int,\n cell_size: int,\n num_layers: int,\n requires_grad: bool = False,\n recurrent_dropout_probability: float = 0.0,\n memory_cell_clip_value: Optional[float] = None,\n state_projection_clip_value: Optional[float] = None,\n ) -> None:\n super().__init__(stateful=True)\n\n # Required to be wrapped with a `PytorchSeq2SeqWrapper`.\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.cell_size = cell_size\n self.requires_grad = requires_grad\n\n forward_layers = []\n backward_layers = []\n\n lstm_input_size = input_size\n go_forward = True\n for layer_index in range(num_layers):\n forward_layer = LstmCellWithProjection(\n lstm_input_size,\n hidden_size,\n cell_size,\n go_forward,\n recurrent_dropout_probability,\n memory_cell_clip_value,\n state_projection_clip_value,\n )\n backward_layer = LstmCellWithProjection(\n lstm_input_size,\n hidden_size,\n cell_size,\n not go_forward,\n recurrent_dropout_probability,\n memory_cell_clip_value,\n state_projection_clip_value,\n )\n lstm_input_size = hidden_size\n\n 
self.add_module(\"forward_layer_{}\".format(layer_index), forward_layer)\n self.add_module(\"backward_layer_{}\".format(layer_index), backward_layer)\n forward_layers.append(forward_layer)\n backward_layers.append(backward_layer)\n self.forward_layers = forward_layers\n self.backward_layers = backward_layers\n\n def forward(self, inputs: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n # Parameters\n\n inputs : `torch.Tensor`, required.\n A Tensor of shape `(batch_size, sequence_length, hidden_size)`.\n mask : `torch.LongTensor`, required.\n A binary mask of shape `(batch_size, sequence_length)` representing the\n non-padded elements in each sequence in the batch.\n\n # Returns\n\n A `torch.Tensor` of shape (num_layers, batch_size, sequence_length, hidden_size),\n where the num_layers dimension represents the LSTM output from that layer.\n \"\"\"\n batch_size, total_sequence_length = mask.size()\n stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(\n self._lstm_forward, inputs, mask\n )\n\n num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()\n # Add back invalid rows which were removed in the call to sort_and_run_forward.\n if num_valid < batch_size:\n zeros = stacked_sequence_output.new_zeros(\n num_layers, batch_size - num_valid, returned_timesteps, encoder_dim\n )\n stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)\n\n # The states also need to have invalid rows added back.\n new_states = []\n for state in final_states:\n state_dim = state.size(-1)\n zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)\n new_states.append(torch.cat([state, zeros], 1))\n final_states = new_states\n\n # It's possible to need to pass sequences which are padded to longer than the\n # max length of the sequence to a Seq2StackEncoder. However, packing and unpacking\n # the sequences mean that the returned tensor won't include these dimensions, because\n # the RNN did not need to process them. 
We add them back on in the form of zeros here.\n sequence_length_difference = total_sequence_length - returned_timesteps\n if sequence_length_difference > 0:\n zeros = stacked_sequence_output.new_zeros(\n num_layers,\n batch_size,\n sequence_length_difference,\n stacked_sequence_output[0].size(-1),\n )\n stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)\n\n self._update_states(final_states, restoration_indices)\n\n # Restore the original indices and return the sequence.\n # Has shape (num_layers, batch_size, sequence_length, hidden_size)\n return stacked_sequence_output.index_select(1, restoration_indices)\n\n def _lstm_forward(\n self,\n inputs: PackedSequence,\n initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"\n # Parameters\n\n inputs : `PackedSequence`, required.\n A batch first `PackedSequence` to run the stacked LSTM over.\n initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = None)\n A tuple (state, memory) representing the initial hidden state and memory\n of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and\n (num_layers, batch_size, 2 * cell_size) respectively.\n\n # Returns\n\n output_sequence : `torch.FloatTensor`\n The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)\n final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]`\n The per-layer final (state, memory) states of the LSTM, with shape\n (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)\n respectively. The last dimension is duplicated because it contains the state/memory\n for both the forward and backward layers.\n \"\"\"\n if initial_state is None:\n hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(\n self.forward_layers\n )\n elif initial_state[0].size()[0] != len(self.forward_layers):\n raise ConfigurationError(\n \"Initial states were passed to forward() but the number of \"\n \"initial states does not match the number of layers.\"\n )\n else:\n hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))\n\n inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)\n forward_output_sequence = inputs\n backward_output_sequence = inputs\n\n final_states = []\n sequence_outputs = []\n for layer_index, state in enumerate(hidden_states):\n forward_layer = getattr(self, \"forward_layer_{}\".format(layer_index))\n backward_layer = getattr(self, \"backward_layer_{}\".format(layer_index))\n\n forward_cache = forward_output_sequence\n backward_cache = backward_output_sequence\n\n if state is not None:\n forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)\n forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)\n forward_state = (forward_hidden_state, forward_memory_state)\n backward_state = (backward_hidden_state, backward_memory_state)\n else:\n forward_state = None\n backward_state = None\n\n forward_output_sequence, forward_state = forward_layer(\n forward_output_sequence, batch_lengths, forward_state\n )\n backward_output_sequence, backward_state = backward_layer(\n backward_output_sequence, batch_lengths, backward_state\n )\n # Skip connections, just adding the input to the output.\n if layer_index != 0:\n forward_output_sequence += forward_cache\n backward_output_sequence += backward_cache\n\n sequence_outputs.append(\n torch.cat([forward_output_sequence, 
backward_output_sequence], -1)\n )\n # Append the state tuples in a list, so that we can return\n # the final states for all the layers.\n final_states.append(\n (\n torch.cat([forward_state[0], backward_state[0]], -1),\n torch.cat([forward_state[1], backward_state[1]], -1),\n )\n )\n\n stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)\n # Stack the hidden state and memory for each layer into 2 tensors of shape\n # (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)\n # respectively.\n final_hidden_states, final_memory_states = zip(*final_states)\n final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (\n torch.cat(final_hidden_states, 0),\n torch.cat(final_memory_states, 0),\n )\n return stacked_sequence_outputs, final_state_tuple\n\n def load_weights(self, weight_file: str) -> None:\n \"\"\"\n Load the pre-trained weights from the file.\n \"\"\"\n requires_grad = self.requires_grad\n\n with h5py.File(cached_path(weight_file), \"r\") as fin:\n for i_layer, lstms in enumerate(zip(self.forward_layers, self.backward_layers)):\n for j_direction, lstm in enumerate(lstms):\n # lstm is an instance of LSTMCellWithProjection\n cell_size = lstm.cell_size\n\n dataset = fin[\"RNN_%s\" % j_direction][\"RNN\"][\"MultiRNNCell\"][\n \"Cell%s\" % i_layer\n ][\"LSTMCell\"]\n\n # tensorflow packs together both W and U matrices into one matrix,\n # but pytorch maintains individual matrices. In addition, tensorflow\n # packs the gates as input, memory, forget, output but pytorch\n # uses input, forget, memory, output. So we need to modify the weights.\n tf_weights = numpy.transpose(dataset[\"W_0\"][...])\n torch_weights = tf_weights.copy()\n\n # split the W from U matrices\n input_size = lstm.input_size\n input_weights = torch_weights[:, :input_size]\n recurrent_weights = torch_weights[:, input_size:]\n tf_input_weights = tf_weights[:, :input_size]\n tf_recurrent_weights = tf_weights[:, input_size:]\n\n # handle the different gate order convention\n for torch_w, tf_w in [\n [input_weights, tf_input_weights],\n [recurrent_weights, tf_recurrent_weights],\n ]:\n torch_w[(1 * cell_size) : (2 * cell_size), :] = tf_w[\n (2 * cell_size) : (3 * cell_size), :\n ]\n torch_w[(2 * cell_size) : (3 * cell_size), :] = tf_w[\n (1 * cell_size) : (2 * cell_size), :\n ]\n\n lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))\n lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))\n lstm.input_linearity.weight.requires_grad = requires_grad\n lstm.state_linearity.weight.requires_grad = requires_grad\n\n # the bias weights\n tf_bias = dataset[\"B\"][...]\n # tensorflow adds 1.0 to forget gate bias instead of modifying the\n # parameters...\n tf_bias[(2 * cell_size) : (3 * cell_size)] += 1\n torch_bias = tf_bias.copy()\n torch_bias[(1 * cell_size) : (2 * cell_size)] = tf_bias[\n (2 * cell_size) : (3 * cell_size)\n ]\n torch_bias[(2 * cell_size) : (3 * cell_size)] = tf_bias[\n (1 * cell_size) : (2 * cell_size)\n ]\n lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))\n lstm.state_linearity.bias.requires_grad = requires_grad\n\n # the projection weights\n proj_weights = numpy.transpose(dataset[\"W_P_0\"][...])\n lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))\n lstm.state_projection.weight.requires_grad = requires_grad\n", "from typing import Dict, Tuple, List, NamedTuple, Any\nfrom overrides import overrides\n\nimport torch\nfrom torch.nn.modules.linear import Linear\nfrom nltk import 
Tree\n\nfrom allennlp.common.checks import check_dimensions_match\nfrom allennlp.data import TextFieldTensors, Vocabulary\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, FeedForward\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules.span_extractors.span_extractor import SpanExtractor\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\nfrom allennlp.nn.util import masked_softmax, get_lengths_from_binary_sequence_mask\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom allennlp.training.metrics import EvalbBracketingScorer, DEFAULT_EVALB_DIR\nfrom allennlp.common.checks import ConfigurationError\n\n\nclass SpanInformation(NamedTuple):\n \"\"\"\n A helper namedtuple for handling decoding information.\n\n # Parameters\n\n start : `int`\n The start index of the span.\n end : `int`\n The exclusive end index of the span.\n no_label_prob : `float`\n The probability of this span being assigned the `NO-LABEL` label.\n label_prob : `float`\n The probability of the most likely label.\n \"\"\"\n\n start: int\n end: int\n label_prob: float\n no_label_prob: float\n label_index: int\n\n\[email protected](\"constituency_parser\")\nclass SpanConstituencyParser(Model):\n \"\"\"\n This `SpanConstituencyParser` simply encodes a sequence of text\n with a stacked `Seq2SeqEncoder`, extracts span representations using a\n `SpanExtractor`, and then predicts a label for each span in the sequence.\n These labels are non-terminal nodes in a constituency parse tree, which we then\n greedily reconstruct.\n\n # Parameters\n\n vocab : `Vocabulary`, required\n A Vocabulary, required in order to compute sizes for input/output projections.\n text_field_embedder : `TextFieldEmbedder`, required\n Used to embed the `tokens` `TextField` we get as input to the model.\n span_extractor : `SpanExtractor`, required.\n The method used to extract the spans from the encoded sequence.\n encoder : `Seq2SeqEncoder`, required.\n The encoder that we will use in between embedding tokens and\n generating span representations.\n feedforward : `FeedForward`, required.\n The FeedForward layer that we will use in between the encoder and the linear\n projection to a distribution over span labels.\n pos_tag_embedding : `Embedding`, optional.\n Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.\n initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)\n Used to initialize the model parameters.\n evalb_directory_path : `str`, optional (default=`DEFAULT_EVALB_DIR`)\n The path to the directory containing the EVALB executable used to score\n bracketed parses. By default, will use the EVALB included with allennlp,\n which is located at allennlp/tools/EVALB . 
If `None`, EVALB scoring\n is not used.\n \"\"\"\n\n def __init__(\n self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n span_extractor: SpanExtractor,\n encoder: Seq2SeqEncoder,\n feedforward: FeedForward = None,\n pos_tag_embedding: Embedding = None,\n initializer: InitializerApplicator = InitializerApplicator(),\n evalb_directory_path: str = DEFAULT_EVALB_DIR,\n **kwargs,\n ) -> None:\n super().__init__(vocab, **kwargs)\n\n self.text_field_embedder = text_field_embedder\n self.span_extractor = span_extractor\n self.num_classes = self.vocab.get_vocab_size(\"labels\")\n self.encoder = encoder\n self.feedforward_layer = TimeDistributed(feedforward) if feedforward else None\n self.pos_tag_embedding = pos_tag_embedding or None\n if feedforward is not None:\n output_dim = feedforward.get_output_dim()\n else:\n output_dim = span_extractor.get_output_dim()\n\n self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_classes))\n\n representation_dim = text_field_embedder.get_output_dim()\n if pos_tag_embedding is not None:\n representation_dim += pos_tag_embedding.get_output_dim()\n check_dimensions_match(\n representation_dim,\n encoder.get_input_dim(),\n \"representation dim (tokens + optional POS tags)\",\n \"encoder input dim\",\n )\n check_dimensions_match(\n encoder.get_output_dim(),\n span_extractor.get_input_dim(),\n \"encoder input dim\",\n \"span extractor input dim\",\n )\n if feedforward is not None:\n check_dimensions_match(\n span_extractor.get_output_dim(),\n feedforward.get_input_dim(),\n \"span extractor output dim\",\n \"feedforward input dim\",\n )\n\n self.tag_accuracy = CategoricalAccuracy()\n\n if evalb_directory_path is not None:\n self._evalb_score = EvalbBracketingScorer(evalb_directory_path)\n else:\n self._evalb_score = None\n initializer(self)\n\n @overrides\n def forward(\n self, # type: ignore\n tokens: TextFieldTensors,\n spans: torch.LongTensor,\n metadata: List[Dict[str, Any]],\n pos_tags: TextFieldTensors = None,\n span_labels: torch.LongTensor = None,\n ) -> Dict[str, torch.Tensor]:\n\n \"\"\"\n # Parameters\n\n tokens : TextFieldTensors, required\n The output of `TextField.as_array()`, which should typically be passed directly to a\n `TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`\n tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{\"tokens\":\n Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used\n for the `TokenIndexers` when you created the `TextField` representing your\n sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,\n which knows how to combine different word representations into a single vector per\n token in your input.\n spans : `torch.LongTensor`, required.\n A tensor of shape `(batch_size, num_spans, 2)` representing the\n inclusive start and end indices of all possible spans in the sentence.\n metadata : List[Dict[str, Any]], required.\n A dictionary of metadata for each batch element which has keys:\n tokens : `List[str]`, required.\n The original string tokens in the sentence.\n gold_tree : `nltk.Tree`, optional (default = None)\n Gold NLTK trees for use in evaluation.\n pos_tags : `List[str]`, optional.\n The POS tags for the sentence. 
These can be used in the\n model as embedded features, but they are passed here\n in addition for use in constructing the tree.\n pos_tags : `torch.LongTensor`, optional (default = None)\n The output of a `SequenceLabelField` containing POS tags.\n span_labels : `torch.LongTensor`, optional (default = None)\n A torch tensor representing the integer gold class labels for all possible\n spans, of shape `(batch_size, num_spans)`.\n\n # Returns\n\n An output dictionary consisting of:\n class_probabilities : `torch.FloatTensor`\n A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`\n representing a distribution over the label classes per span.\n spans : `torch.LongTensor`\n The original spans tensor.\n tokens : `List[List[str]]`, required.\n A list of tokens in the sentence for each element in the batch.\n pos_tags : `List[List[str]]`, required.\n A list of POS tags in the sentence for each element in the batch.\n num_spans : `torch.LongTensor`, required.\n A tensor of shape (batch_size), representing the lengths of non-padded spans\n in `enumerated_spans`.\n loss : `torch.FloatTensor`, optional\n A scalar loss to be optimised.\n \"\"\"\n embedded_text_input = self.text_field_embedder(tokens)\n if pos_tags is not None and self.pos_tag_embedding is not None:\n embedded_pos_tags = self.pos_tag_embedding(pos_tags)\n embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)\n elif self.pos_tag_embedding is not None:\n raise ConfigurationError(\"Model uses a POS embedding, but no POS tags were passed.\")\n\n mask = get_text_field_mask(tokens)\n # Looking at the span start index is enough to know if\n # this is padding or not. Shape: (batch_size, num_spans)\n span_mask = (spans[:, :, 0] >= 0).squeeze(-1).long()\n if span_mask.dim() == 1:\n # This happens if you use batch_size 1 and encounter\n # a length 1 sentence in PTB, which do exist. 
-.-\n span_mask = span_mask.unsqueeze(-1)\n if span_labels is not None and span_labels.dim() == 1:\n span_labels = span_labels.unsqueeze(-1)\n\n num_spans = get_lengths_from_binary_sequence_mask(span_mask)\n\n encoded_text = self.encoder(embedded_text_input, mask)\n\n span_representations = self.span_extractor(encoded_text, spans, mask, span_mask)\n\n if self.feedforward_layer is not None:\n span_representations = self.feedforward_layer(span_representations)\n\n logits = self.tag_projection_layer(span_representations)\n class_probabilities = masked_softmax(logits, span_mask.unsqueeze(-1))\n\n output_dict = {\n \"class_probabilities\": class_probabilities,\n \"spans\": spans,\n \"tokens\": [meta[\"tokens\"] for meta in metadata],\n \"pos_tags\": [meta.get(\"pos_tags\") for meta in metadata],\n \"num_spans\": num_spans,\n }\n if span_labels is not None:\n loss = sequence_cross_entropy_with_logits(logits, span_labels, span_mask)\n self.tag_accuracy(class_probabilities, span_labels, span_mask)\n output_dict[\"loss\"] = loss\n\n # The evalb score is expensive to compute, so we only compute\n # it for the validation and test sets.\n batch_gold_trees = [meta.get(\"gold_tree\") for meta in metadata]\n if all(batch_gold_trees) and self._evalb_score is not None and not self.training:\n gold_pos_tags: List[List[str]] = [\n list(zip(*tree.pos()))[1] for tree in batch_gold_trees\n ]\n predicted_trees = self.construct_trees(\n class_probabilities.cpu().data,\n spans.cpu().data,\n num_spans.data,\n output_dict[\"tokens\"],\n gold_pos_tags,\n )\n self._evalb_score(predicted_trees, batch_gold_trees)\n\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Constructs an NLTK `Tree` given the scored spans. 
We also switch to exclusive\n span ends when constructing the tree representation, because it makes indexing\n into lists cleaner for ranges of text, rather than individual indices.\n\n Finally, for batch prediction, we will have padded spans and class probabilities.\n In order to make this less confusing, we remove all the padded spans and\n distributions from `spans` and `class_probabilities` respectively.\n \"\"\"\n all_predictions = output_dict[\"class_probabilities\"].cpu().data\n all_spans = output_dict[\"spans\"].cpu().data\n\n all_sentences = output_dict[\"tokens\"]\n all_pos_tags = output_dict[\"pos_tags\"] if all(output_dict[\"pos_tags\"]) else None\n num_spans = output_dict[\"num_spans\"].data\n trees = self.construct_trees(\n all_predictions, all_spans, num_spans, all_sentences, all_pos_tags\n )\n\n batch_size = all_predictions.size(0)\n output_dict[\"spans\"] = [all_spans[i, : num_spans[i]] for i in range(batch_size)]\n output_dict[\"class_probabilities\"] = [\n all_predictions[i, : num_spans[i], :] for i in range(batch_size)\n ]\n\n output_dict[\"trees\"] = trees\n return output_dict\n\n def construct_trees(\n self,\n predictions: torch.FloatTensor,\n all_spans: torch.LongTensor,\n num_spans: torch.LongTensor,\n sentences: List[List[str]],\n pos_tags: List[List[str]] = None,\n ) -> List[Tree]:\n \"\"\"\n Construct `nltk.Tree`'s for each batch element by greedily nesting spans.\n The trees use exclusive end indices, which contrasts with how spans are\n represented in the rest of the model.\n\n # Parameters\n\n predictions : `torch.FloatTensor`, required.\n A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`\n representing a distribution over the label classes per span.\n all_spans : `torch.LongTensor`, required.\n A tensor of shape (batch_size, num_spans, 2), representing the span\n indices we scored.\n num_spans : `torch.LongTensor`, required.\n A tensor of shape (batch_size), representing the lengths of non-padded spans\n in `enumerated_spans`.\n sentences : `List[List[str]]`, required.\n A list of tokens in the sentence for each element in the batch.\n pos_tags : `List[List[str]]`, optional (default = None).\n A list of POS tags for each word in the sentence for each element\n in the batch.\n\n # Returns\n\n A `List[Tree]` containing the decoded trees for each element in the batch.\n \"\"\"\n # Switch to using exclusive end spans.\n exclusive_end_spans = all_spans.clone()\n exclusive_end_spans[:, :, -1] += 1\n no_label_id = self.vocab.get_token_index(\"NO-LABEL\", \"labels\")\n\n trees: List[Tree] = []\n for batch_index, (scored_spans, spans, sentence) in enumerate(\n zip(predictions, exclusive_end_spans, sentences)\n ):\n selected_spans = []\n for prediction, span in zip(\n scored_spans[: num_spans[batch_index]], spans[: num_spans[batch_index]]\n ):\n start, end = span\n no_label_prob = prediction[no_label_id]\n label_prob, label_index = torch.max(prediction, -1)\n\n # Does the span have a label != NO-LABEL or is it the root node?\n # If so, include it in the spans that we consider.\n if int(label_index) != no_label_id or (start == 0 and end == len(sentence)):\n selected_spans.append(\n SpanInformation(\n start=int(start),\n end=int(end),\n label_prob=float(label_prob),\n no_label_prob=float(no_label_prob),\n label_index=int(label_index),\n )\n )\n\n # The spans we've selected might overlap, which causes problems when we try\n # to construct the tree as they won't nest properly.\n consistent_spans = self.resolve_overlap_conflicts_greedily(selected_spans)\n\n 
spans_to_labels = {\n (span.start, span.end): self.vocab.get_token_from_index(span.label_index, \"labels\")\n for span in consistent_spans\n }\n sentence_pos = pos_tags[batch_index] if pos_tags is not None else None\n trees.append(self.construct_tree_from_spans(spans_to_labels, sentence, sentence_pos))\n\n return trees\n\n @staticmethod\n def resolve_overlap_conflicts_greedily(spans: List[SpanInformation]) -> List[SpanInformation]:\n \"\"\"\n Given a set of spans, removes spans which overlap by evaluating the difference\n in probability between one being labeled and the other explicitly having no label\n and vice-versa. The worst case time complexity of this method is `O(k * n^4)` where `n`\n is the length of the sentence that the spans were enumerated from (and therefore\n `k * m^2` complexity with respect to the number of spans `m`) and `k` is the\n number of conflicts. However, in practice, there are very few conflicts. Hopefully.\n\n This function modifies `spans` to remove overlapping spans.\n\n # Parameters\n\n spans : `List[SpanInformation]`, required.\n A list of spans, where each span is a `namedtuple` containing the\n following attributes:\n\n start : `int`\n The start index of the span.\n end : `int`\n The exclusive end index of the span.\n no_label_prob : `float`\n The probability of this span being assigned the `NO-LABEL` label.\n label_prob : `float`\n The probability of the most likely label.\n\n # Returns\n\n A modified list of `spans`, with the conflicts resolved by considering local\n differences between pairs of spans and removing one of the two spans.\n \"\"\"\n conflicts_exist = True\n while conflicts_exist:\n conflicts_exist = False\n for span1_index, span1 in enumerate(spans):\n for span2_index, span2 in list(enumerate(spans))[span1_index + 1 :]:\n if (\n span1.start < span2.start < span1.end < span2.end\n or span2.start < span1.start < span2.end < span1.end\n ):\n # The spans overlap.\n conflicts_exist = True\n # What's the more likely situation: that span2 was labeled\n # and span1 was unlabled, or that span1 was labeled and span2\n # was unlabled? 
In the first case, we delete span2 from the\n # set of spans to form the tree - in the second case, we delete\n # span1.\n if (\n span1.no_label_prob + span2.label_prob\n < span2.no_label_prob + span1.label_prob\n ):\n spans.pop(span2_index)\n else:\n spans.pop(span1_index)\n break\n return spans\n\n @staticmethod\n def construct_tree_from_spans(\n spans_to_labels: Dict[Tuple[int, int], str], sentence: List[str], pos_tags: List[str] = None\n ) -> Tree:\n \"\"\"\n # Parameters\n\n spans_to_labels : `Dict[Tuple[int, int], str]`, required.\n A mapping from spans to constituency labels.\n sentence : `List[str]`, required.\n A list of tokens forming the sentence to be parsed.\n pos_tags : `List[str]`, optional (default = None)\n A list of the pos tags for the words in the sentence, if they\n were either predicted or taken as input to the model.\n\n # Returns\n\n An `nltk.Tree` constructed from the labelled spans.\n \"\"\"\n\n def assemble_subtree(start: int, end: int):\n if (start, end) in spans_to_labels:\n # Some labels contain nested spans, e.g S-VP.\n # We actually want to create (S (VP ...)) nodes\n # for these labels, so we split them up here.\n labels: List[str] = spans_to_labels[(start, end)].split(\"-\")\n else:\n labels = None\n\n # This node is a leaf.\n if end - start == 1:\n word = sentence[start]\n pos_tag = pos_tags[start] if pos_tags is not None else \"XX\"\n tree = Tree(pos_tag, [word])\n if labels is not None and pos_tags is not None:\n # If POS tags were passed explicitly,\n # they are added as pre-terminal nodes.\n while labels:\n tree = Tree(labels.pop(), [tree])\n elif labels is not None:\n # Otherwise, we didn't want POS tags\n # at all.\n tree = Tree(labels.pop(), [word])\n while labels:\n tree = Tree(labels.pop(), [tree])\n return [tree]\n\n argmax_split = start + 1\n # Find the next largest subspan such that\n # the left hand side is a constituent.\n for split in range(end - 1, start, -1):\n if (start, split) in spans_to_labels:\n argmax_split = split\n break\n\n left_trees = assemble_subtree(start, argmax_split)\n right_trees = assemble_subtree(argmax_split, end)\n children = left_trees + right_trees\n if labels is not None:\n while labels:\n children = [Tree(labels.pop(), children)]\n return children\n\n tree = assemble_subtree(0, len(sentence))\n return tree[0]\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n all_metrics = {}\n all_metrics[\"tag_accuracy\"] = self.tag_accuracy.get_metric(reset=reset)\n if self._evalb_score is not None:\n evalb_metrics = self._evalb_score.get_metric(reset=reset)\n all_metrics.update(evalb_metrics)\n return all_metrics\n", "\"\"\"\nAllenNLP just uses\n`PyTorch optimizers <https://pytorch.org/docs/master/optim.html>`_ ,\nwith a thin wrapper to allow registering them and instantiating them `from_params`.\n\nThe available optimizers are\n\n* `\"adadelta\" <https://pytorch.org/docs/master/optim.html#torch.optim.Adadelta>`_\n* `\"adagrad\" <https://pytorch.org/docs/master/optim.html#torch.optim.Adagrad>`_\n* `\"adam\" <https://pytorch.org/docs/master/optim.html#torch.optim.Adam>`_\n* `\"adamw\" <https://pytorch.org/docs/master/optim.html#torch.optim.AdamW>`_\n* `\"huggingface_adamw\"\n <https://huggingface.co/transformers/main_classes/optimizer_schedules.html#transformers.AdamW>`_\n* `\"sparse_adam\" <https://pytorch.org/docs/master/optim.html#torch.optim.SparseAdam>`_\n* `\"sgd\" <https://pytorch.org/docs/master/optim.html#torch.optim.SGD>`_\n* `\"rmsprop 
<https://pytorch.org/docs/master/optim.html#torch.optim.RMSprop>`_\n* `\"adamax <https://pytorch.org/docs/master/optim.html#torch.optim.Adamax>`_\n* `\"averaged_sgd <https://pytorch.org/docs/master/optim.html#torch.optim.ASGD>`_\n\"\"\"\n\nimport logging\nimport re\nimport math\nfrom typing import Any, Dict, List, Tuple, Union\n\nimport torch\nimport transformers\n\nfrom allennlp.common import Params, Registrable\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_parameter_groups(\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n) -> Union[List[Dict[str, Any]], List[torch.nn.Parameter]]:\n \"\"\"\n Takes a list of model parameters with associated names (typically coming from something like\n `model.parameters`), along with a grouping (as specified below), and prepares them to be passed\n to the `__init__` function of a `torch.Optimizer`. This means separating the parameters into\n groups with the given regexes, and prepping whatever keyword arguments are given for those\n regexes in `groups`.\n\n `groups` contains something like:\n\n ```\n [\n ([\"regex1\", \"regex2\"], {\"lr\": 1e-3}),\n ([\"regex3\"], {\"lr\": 1e-4})\n ]\n ```\n\n The return value in the right format to be passed directly as the `params` argument to a pytorch\n `Optimizer`. If there are multiple groups specified, this is list of dictionaries, where each\n dict contains a \"parameter group\" and groups specific options, e.g., {'params': [list of\n parameters], 'lr': 1e-3, ...}. Any config option not specified in the additional options (e.g.\n for the default group) is inherited from the top level arguments given in the constructor. See:\n https://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options. See also our\n `test_optimizer_parameter_groups` test for an example of how this works in this code.\n\n The dictionary's return type is labeled as `Any`, because it can be a `List[torch.nn.Parameter]`\n (for the \"params\" key), or anything else (typically a float) for the other keys.\n \"\"\"\n if groups:\n # In addition to any parameters that match group specific regex,\n # we also need a group for the remaining \"default\" group.\n # Those will be included in the last entry of parameter_groups.\n parameter_groups: Union[List[Dict[str, Any]], List[torch.nn.Parameter]] = [\n {\"params\": []} for _ in range(len(groups) + 1)\n ]\n # add the group specific kwargs\n for k in range(len(groups)):\n parameter_groups[k].update(groups[k][1])\n\n regex_use_counts: Dict[str, int] = {}\n parameter_group_names: List[set] = [set() for _ in range(len(groups) + 1)]\n for name, param in model_parameters:\n # Determine the group for this parameter.\n group_index = None\n for k, group_regexes in enumerate(groups):\n for regex in group_regexes[0]:\n if regex not in regex_use_counts:\n regex_use_counts[regex] = 0\n if re.search(regex, name):\n if group_index is not None and group_index != k:\n raise ValueError(\n \"{} was specified in two separate parameter groups\".format(name)\n )\n group_index = k\n regex_use_counts[regex] += 1\n\n if group_index is not None:\n parameter_groups[group_index][\"params\"].append(param)\n parameter_group_names[group_index].add(name)\n else:\n # the default group\n parameter_groups[-1][\"params\"].append(param)\n parameter_group_names[-1].add(name)\n\n # log the parameter groups\n logger.info(\"Done constructing parameter groups.\")\n for k in range(len(groups) + 1):\n group_options = {\n key: val for key, val in parameter_groups[k].items() 
if key != \"params\"\n }\n logger.info(\"Group %s: %s, %s\", k, list(parameter_group_names[k]), group_options)\n # check for unused regex\n for regex, count in regex_use_counts.items():\n if count == 0:\n logger.warning(\n \"When constructing parameter groups, %s does not match any parameter name\",\n regex,\n )\n\n else:\n parameter_groups = [param for name, param in model_parameters]\n\n # Log the number of parameters to optimize\n num_parameters = 0\n for parameter_group in parameter_groups:\n if isinstance(parameter_group, dict):\n num_parameters += sum(parameter.numel() for parameter in parameter_group[\"params\"])\n else:\n num_parameters += parameter_group.numel() # type: ignore\n logger.info(\"Number of trainable parameters: %s\", num_parameters)\n return parameter_groups\n\n\nclass Optimizer(Registrable):\n \"\"\"\n This class just allows us to implement `Registrable` for Pytorch Optimizers. We do something a\n little bit different with `Optimizers`, because they are implemented as classes in PyTorch, and\n we want to use those classes. To make things easy, we just inherit from those classes, using\n multiple inheritance to also inherit from `Optimizer`. The only reason we do this is to make\n type inference on parameters possible, so we can construct these objects using our configuration\n framework. If you are writing your own script, you can safely ignore these classes and just use\n the `torch.optim` classes directly.\n\n If you are implementing one of these classes, the `model_parameters` and `parameter_groups`\n arguments to `__init__` are important, and should always be present. The trainer will pass\n the trainable parameters in the model to the optimizer using the name `model_parameters`, so if\n you use a different name, your code will crash. 
Nothing will technically crash if you use a\n name other than `parameter_groups` for your second argument, it will just be annoyingly\n inconsistent.\n \"\"\"\n\n default_implementation = \"adam\"\n\n @staticmethod\n def default(model_parameters: List) -> \"Optimizer\":\n return Optimizer.from_params(model_parameters=model_parameters, params=Params({}))\n\n\[email protected](\"adam\")\nclass AdamOptimizer(Optimizer, torch.optim.Adam):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.001,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps: float = 1e-08,\n weight_decay: float = 0.0,\n amsgrad: bool = False,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n amsgrad=amsgrad,\n )\n\n\[email protected](\"sparse_adam\")\nclass SparseAdamOptimizer(Optimizer, torch.optim.SparseAdam):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.001,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps: float = 1e-08,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n betas=betas,\n eps=eps,\n )\n\n\[email protected](\"adamax\")\nclass AdamaxOptimizer(Optimizer, torch.optim.Adamax):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.002,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps: float = 1e-08,\n weight_decay: float = 0.0,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n )\n\n\[email protected](\"adamw\")\nclass AdamWOptimizer(Optimizer, torch.optim.AdamW):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.001,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps: float = 1e-08,\n weight_decay: float = 0.01,\n amsgrad: bool = False,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n amsgrad=amsgrad,\n )\n\n\[email protected](\"huggingface_adamw\")\nclass HuggingfaceAdamWOptimizer(Optimizer, transformers.AdamW):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.001,\n betas: Tuple[float, float] = (0.9, 0.999),\n eps: float = 1e-06,\n weight_decay: float = 0.0,\n correct_bias: bool = False,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n correct_bias=correct_bias,\n )\n\n\[email protected](\"adagrad\")\nclass AdagradOptimizer(Optimizer, torch.optim.Adagrad):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.01,\n lr_decay: float = 0.0,\n weight_decay: float = 0.0,\n initial_accumulator_value: float = 0.0,\n eps: float = 1e-10,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n lr_decay=lr_decay,\n 
weight_decay=weight_decay,\n initial_accumulator_value=initial_accumulator_value,\n eps=eps,\n )\n\n\[email protected](\"adadelta\")\nclass AdadeltaOptimizer(Optimizer, torch.optim.Adadelta):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 1.0,\n rho: float = 0.9,\n eps: float = 1e-06,\n weight_decay: float = 0.0,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n rho=rho,\n eps=eps,\n weight_decay=weight_decay,\n )\n\n\[email protected](\"sgd\")\nclass SgdOptimizer(Optimizer, torch.optim.SGD):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n lr: float,\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n momentum: float = 0.0,\n dampening: float = 0,\n weight_decay: float = 0.0,\n nesterov: bool = False,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n )\n\n\[email protected](\"rmsprop\")\nclass RmsPropOptimizer(Optimizer, torch.optim.RMSprop):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.01,\n alpha: float = 0.99,\n eps: float = 1e-08,\n weight_decay: float = 0.0,\n momentum: float = 0.0,\n centered: bool = False,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n alpha=alpha,\n eps=eps,\n weight_decay=weight_decay,\n momentum=momentum,\n centered=centered,\n )\n\n\[email protected](\"averaged_sgd\")\nclass AveragedSgdOptimizer(Optimizer, torch.optim.ASGD):\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr: float = 0.01,\n lambd: float = 0.0001,\n alpha: float = 0.75,\n t0: float = 1000000.0,\n weight_decay: float = 0.0,\n ):\n super().__init__(\n params=make_parameter_groups(model_parameters, parameter_groups),\n lr=lr,\n lambd=lambd,\n alpha=alpha,\n t0=t0,\n weight_decay=weight_decay,\n )\n\n\[email protected](\"dense_sparse_adam\")\nclass DenseSparseAdam(Optimizer, torch.optim.Optimizer):\n \"\"\"\n NOTE: This class has been copied verbatim from the separate Dense and\n Sparse versions of Adam in Pytorch.\n\n Implements Adam algorithm with dense & sparse gradients.\n It has been proposed in Adam: A Method for Stochastic Optimization.\n\n # Parameters\n\n params : `iterable`\n iterable of parameters to optimize or dicts defining parameter groups\n lr : `float`, optional (default: 1e-3)\n The learning rate.\n betas : `Tuple[float, float]`, optional (default: (0.9, 0.999))\n coefficients used for computing running averages of gradient\n and its square.\n eps : `float`, optional, (default: 1e-8)\n A term added to the denominator to improve numerical stability.\n \"\"\"\n\n def __init__(\n self,\n model_parameters: List[Tuple[str, torch.nn.Parameter]],\n parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n ):\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0.0 <= eps:\n raise ValueError(\"Invalid epsilon value: {}\".format(eps))\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 0: {}\".format(betas[0]))\n if not 0.0 <= 
betas[1] < 1.0:\n raise ValueError(\"Invalid beta parameter at index 1: {}\".format(betas[1]))\n defaults = dict(lr=lr, betas=betas, eps=eps)\n super().__init__(make_parameter_groups(model_parameters, parameter_groups), defaults)\n\n def step(self, closure=None):\n \"\"\"\n Performs a single optimization step.\n\n # Parameters\n\n closure : `callable`, optional.\n A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p.data)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p.data)\n\n state[\"step\"] += 1\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n if grad.is_sparse:\n grad = grad.coalesce() # the update is non-linear so indices must be unique\n grad_indices = grad._indices()\n grad_values = grad._values()\n size = grad.size()\n\n def make_sparse(values):\n constructor = grad.new\n if grad_indices.dim() == 0 or values.dim() == 0:\n return constructor().resize_as_(grad)\n return constructor(grad_indices, values, size)\n\n # Decay the first and second moment running average coefficient\n # old <- b * old + (1 - b) * new\n # <==> old += (1 - b) * (new - old)\n old_exp_avg_values = exp_avg.sparse_mask(grad)._values()\n exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)\n exp_avg.add_(make_sparse(exp_avg_update_values))\n old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()\n exp_avg_sq_update_values = (\n grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)\n )\n exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))\n\n # Dense addition again is intended, avoiding another sparse_mask\n numer = exp_avg_update_values.add_(old_exp_avg_values)\n exp_avg_sq_update_values.add_(old_exp_avg_sq_values)\n denom = exp_avg_sq_update_values.sqrt_().add_(group[\"eps\"])\n del exp_avg_update_values, exp_avg_sq_update_values\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n step_size = group[\"lr\"] * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.add_(make_sparse(-step_size * numer.div_(denom)))\n\n else:\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(1 - beta1, grad)\n exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n step_size = group[\"lr\"] * math.sqrt(bias_correction2) / bias_correction1\n\n p.data.addcdiv_(-step_size, exp_avg, denom)\n\n return loss\n", "from overrides import overrides\nimport torch\nfrom torch.nn.parameter import Parameter\n\nfrom allennlp.modules.attention.attention import Attention\nfrom allennlp.nn import Activation\n\n\[email protected](\"bilinear\")\nclass BilinearAttention(Attention):\n \"\"\"\n Computes attention between a vector and a matrix using a bilinear attention function. 
This\n function has a matrix of weights `W` and a bias `b`, and the similarity between the vector\n `x` and the matrix `y` is computed as `x^T W y + b`.\n\n # Parameters\n\n vector_dim : `int`, required\n The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length\n of the vector that will go into the similarity computation. We need this so we can build\n the weight matrix correctly.\n matrix_dim : `int`, required\n The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length\n of the vector that will go into the similarity computation. We need this so we can build\n the weight matrix correctly.\n activation : `Activation`, optional (default=linear (i.e. no activation))\n An activation function applied after the `x^T W y + b` calculation. Default is no\n activation.\n normalize : `bool`, optional (default : `True`)\n If true, we normalize the computed similarities with a softmax, to return a probability\n distribution for your attention. If false, this is just computing a similarity score.\n \"\"\"\n\n def __init__(\n self,\n vector_dim: int,\n matrix_dim: int,\n activation: Activation = None,\n normalize: bool = True,\n ) -> None:\n super().__init__(normalize)\n self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim))\n self._bias = Parameter(torch.Tensor(1))\n self._activation = activation or Activation.by_name(\"linear\")()\n self.reset_parameters()\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self._weight_matrix)\n self._bias.data.fill_(0)\n\n @overrides\n def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:\n intermediate = vector.mm(self._weight_matrix).unsqueeze(1)\n return self._activation(intermediate.bmm(matrix.transpose(1, 2)).squeeze(1) + self._bias)\n" ]
[ [ "torch.cat", "torch.nn.utils.rnn.pad_packed_sequence", "torch.FloatTensor", "numpy.transpose", "torch.stack" ], [ "torch.nn.modules.linear.Linear", "torch.max", "torch.cat" ], [ "torch.zeros_like" ], [ "torch.Tensor", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chukren/seisflows
[ "c4a5a8a9411b365c9bba818f6ed3ba03f24e681b" ]
[ "seisflows/postprocess/total_variation.py" ]
[ "\nimport numpy as np\n\nfrom seisflows.tools import unix\nfrom seisflows.tools.array import loadnpy, savenpy\nfrom seisflows.tools.array import grid2mesh, mesh2grid, stack\nfrom seisflows.tools.code import exists\nfrom seisflows.tools.config import SeisflowsParameters, SeisflowsPaths, \\\n ParameterError, custom_import\nfrom seisflows.tools.math import nabla, tv\n\n\nPAR = SeisflowsParameters()\nPATH = SeisflowsPaths()\n\nimport system\nimport solver\n\n\nclass total_variation(custom_import('postprocess', 'regularize')):\n \"\"\" Adds regularization options to base class\n\n So far, can only be used for 2D inversion, because the required spatial\n derivative operator \"nabla\" is not yet available for 3D grids.\n \"\"\"\n\n def check(self):\n \"\"\" Checks parameters and paths\n \"\"\"\n super(total_variation, self).check()\n\n if not PAR.LAMBDA:\n raise ValueError\n\n if not hasattr(PAR, 'EPSILON'):\n setattr(PAR, 'EPSILON', 0.)\n\n\n def nabla(self, mesh, m, g):\n M, grid = mesh2grid(g, mesh)\n DM = tv(M, epsilon=PAR.EPSILON)\n dm = grid2mesh(DM, grid, mesh)\n return dm/np.mean(m)\n\n" ]
[ [ "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhenlohuang/tvm
[ "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b", "fd2e6d17120a79533852c6bb705429d9c7bc286b" ]
[ "vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py", "tests/python/frontend/pytorch/test_forward.py", "tests/python/contrib/test_vitis_ai/test_vitis_ai_runtime_cpu_part.py", "tests/python/unittest/test_auto_scheduler_task_scheduler.py", "rust/tvm/examples/resnet/src/build_resnet.py", "tutorials/frontend/deploy_model_on_android.py", "tutorials/auto_scheduler/tune_matmul_x86.py", "tests/micro/qemu/test_zephyr.py", "tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Testing topi conv2d_transpose operator for VTA\"\"\"\n\nimport json\nimport os\n\nimport pytest\nimport numpy as np\nfrom collections import namedtuple\n\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm import autotvm\nfrom tvm.contrib import utils\nfrom tvm.contrib.pickle_memoize import memoize\nfrom tvm import topi\nimport tvm.topi.testing\nimport vta\nfrom vta import program_fpga, reconfig_runtime\nimport vta.testing\nfrom vta.testing import simulator\n\n\nWorkload = namedtuple(\n \"Conv2DTransposeWorkload\",\n [\n \"batch\",\n \"height\",\n \"width\",\n \"in_filter\",\n \"out_filter\",\n \"hkernel\",\n \"wkernel\",\n \"hpad\",\n \"wpad\",\n \"hstride\",\n \"wstride\",\n \"o_hpad\",\n \"o_wpad\",\n ],\n)\n\n# Get batch info from env\nenv = vta.get_env()\n\n# DCGAN workloads\ndcgan_wklds = [\n # dcgan\n (\"DCGAN.CT1\", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),\n (\"DCGAN.CT2\", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),\n (\"DCGAN.CT3\", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),\n]\n\n# FIXME: we need a custom clip operator to circumvent a pattern detection limitation\[email protected]_scope(tag=topi.tag.ELEMWISE)\ndef my_clip(x, a_min, a_max):\n \"\"\"Unlike topi's current clip, put min and max into two stages.\"\"\"\n const_min = tvm.tir.const(a_min, x.dtype)\n const_max = tvm.tir.const(a_max, x.dtype)\n x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name=\"clipA\")\n x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name=\"clipB\")\n return x\n\n\n# Helper function to get factors\ndef _find_factors(n):\n factors = []\n for f in range(1, n + 1):\n if n % f == 0:\n factors.append(f)\n return factors\n\n\ndef run_conv2d_transpose(\n env, remote, wl, target, check_correctness=True, print_ir=False, samples=4\n):\n\n # Workload assertions\n assert wl.hpad == wl.wpad\n\n # Perform packing only if we are targeting the accelerator\n if \"arm_cpu\" in target.keys:\n data_pack = False\n layout = \"NCHW\"\n fcompute = topi.arm_cpu.conv2d_transpose_nchw\n fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw\n elif \"vta\" in target.keys:\n data_pack = True\n layout = \"NCHW%dn%dc\" % (env.BATCH, env.BLOCK_IN)\n fcompute = vta.top.conv2d_transpose_packed\n fschedule = vta.top.schedule_conv2d_transpose_packed\n\n # Derive shapes depending upon packing\n\n a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)\n w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)\n if data_pack:\n data_shape = (\n wl.batch // env.BATCH,\n wl.in_filter // env.BLOCK_IN,\n wl.height,\n wl.width,\n env.BATCH,\n env.BLOCK_IN,\n )\n kernel_shape = (\n wl.out_filter // env.BLOCK_OUT,\n wl.in_filter // 
env.BLOCK_IN,\n wl.hkernel,\n wl.wkernel,\n env.BLOCK_OUT,\n env.BLOCK_IN,\n )\n else:\n data_shape = a_shape\n kernel_shape = w_shape\n data = te.placeholder(data_shape, name=\"data\", dtype=env.inp_dtype)\n kernel = te.placeholder(kernel_shape, name=\"kernel\", dtype=env.wgt_dtype)\n padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))\n\n # Define base computation schedule\n with target:\n\n res = fcompute(\n data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)\n )\n res = topi.right_shift(res, env.WGT_WIDTH)\n res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)\n res = topi.cast(res, env.out_dtype)\n # Derive base schedule\n s = fschedule([res])\n if print_ir:\n print(vta.lower(s, [data, kernel, res], simple_mode=True))\n\n # Derive number of ops\n fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad\n fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad\n num_ops = (\n 2\n * wl.batch\n * fout_height\n * fout_width\n * wl.hkernel\n * wl.wkernel\n * wl.out_filter\n * wl.in_filter\n )\n\n # @memoize(\"vta.tests.test_benchmark_topi.conv2d.verify_nhwc\")\n def get_ref_data():\n # derive min max for act and wgt types (max non inclusive)\n a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))\n w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))\n a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)\n w_np = np.random.randint(\n w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)\n ).astype(kernel.dtype)\n r_np = tvm.topi.testing.conv2d_transpose_nchw_python(\n a_np.astype(env.acc_dtype),\n w_np.astype(env.acc_dtype),\n (wl.hstride, wl.wstride),\n wl.hpad,\n (wl.o_hpad, wl.o_wpad),\n ).astype(env.acc_dtype)\n return a_np, w_np, r_np\n\n # Data in original format\n data_np, kernel_np, res_ref = get_ref_data()\n if data_pack:\n data_np = data_np.reshape(\n wl.batch // env.BATCH,\n env.BATCH,\n wl.in_filter // env.BLOCK_IN,\n env.BLOCK_IN,\n wl.height,\n wl.width,\n ).transpose((0, 2, 4, 5, 1, 3))\n kernel_np = kernel_np.reshape(\n wl.in_filter // env.BLOCK_IN,\n env.BLOCK_IN,\n wl.out_filter // env.BLOCK_OUT,\n env.BLOCK_OUT,\n wl.hkernel,\n wl.wkernel,\n ).transpose((2, 0, 4, 5, 3, 1))\n kernel_np = np.flip(kernel_np, 2)\n kernel_np = np.flip(kernel_np, 3)\n\n # Build\n if \"vta\" in target.keys:\n mod = vta.build(\n s,\n [data, kernel, res],\n target=target,\n target_host=env.target_host,\n name=\"conv2d_transpose\",\n )\n else:\n mod = tvm.build(\n s,\n [data, kernel, res],\n target=target,\n target_host=env.target_host,\n name=\"conv2d_transpose\",\n )\n temp = utils.tempdir()\n mod.save(temp.relpath(\"conv2d_transpose.o\"))\n remote.upload(temp.relpath(\"conv2d_transpose.o\"))\n f = remote.load_module(\"conv2d_transpose.o\")\n ctx = remote.context(str(target))\n\n res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)\n data_arr = tvm.nd.array(data_np, ctx)\n kernel_arr = tvm.nd.array(kernel_np, ctx)\n res_arr = tvm.nd.array(res_np, ctx)\n time_f = f.time_evaluator(\"conv2d_transpose\", ctx, number=samples)\n\n # In vta sim mode, collect simulator runtime statistics\n stats = {}\n cost = None\n if env.TARGET in [\"sim\", \"tsim\"]:\n # Check if we're in local RPC mode (allows us to rebuild the\n # runtime on the fly when varying the VTA designs)\n local_rpc = int(os.environ.get(\"VTA_LOCAL_SIM_RPC\", \"0\"))\n if local_rpc:\n if env.TARGET == \"sim\":\n 
remote.get_function(\"vta.simulator.profiler_clear\")()\n else:\n remote.get_function(\"vta.tsim.profiler_clear\")()\n cost = time_f(data_arr, kernel_arr, res_arr)\n if env.TARGET == \"sim\":\n stats = json.loads(remote.get_function(\"vta.simulator.profiler_status\")())\n else:\n stats = json.loads(remote.get_function(\"vta.tsim.profiler_status\")())\n else:\n simulator.clear_stats()\n cost = time_f(data_arr, kernel_arr, res_arr)\n stats = simulator.stats()\n else:\n cost = time_f(data_arr, kernel_arr, res_arr)\n\n # Check correctness\n correct = False\n if check_correctness:\n res_orig = res_arr.asnumpy()\n if data_pack:\n res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(\n wl.batch, wl.out_filter, fout_height, fout_width\n )\n res_ref = res_ref >> env.WGT_WIDTH\n res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)\n res_ref = res_ref.astype(env.out_dtype)\n correct = np.allclose(res_orig, res_ref)\n\n gops = (num_ops / cost.mean) / float(10 ** 9)\n status = \"PASSED\" if correct else \"FAILED\"\n if \"arm_cpu\" in target.keys:\n device = \"CPU\"\n elif \"vta\" in target.keys:\n device = \"VTA\"\n print(\"%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS\" % (device, status, cost.mean, gops))\n\n return correct, cost, stats\n\n\[email protected](\"device\", [\"vta\", \"arm_cpu\"])\ndef test_conv2d_transpose(device):\n def _run(env, remote):\n if device == \"vta\":\n target = env.target\n if env.TARGET not in [\"sim\", \"tsim\"]:\n assert tvm.runtime.enabled(\"rpc\")\n program_fpga(remote, bitstream=None)\n reconfig_runtime(remote)\n elif device == \"arm_cpu\":\n target = env.target_vta_cpu\n with autotvm.tophub.context(target): # load pre-tuned schedule parameters\n for _, wl in dcgan_wklds:\n print(wl)\n run_conv2d_transpose(env, remote, wl, target)\n\n vta.testing.run(_run)\n\n\nif __name__ == \"__main__\":\n test_conv2d_transpose(device=\"arm_cpu\")\n test_conv2d_transpose(device=\"vta\")\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=import-self, invalid-name, unused-argument\n\"\"\"Unit tests for various models and operators\"\"\"\nfrom time import time\nimport os\nimport sys\nfrom scipy.stats import t as tdistr\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch.nn import Module\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib import graph_runtime\nfrom tvm.contrib.nvcc import have_fp16\nimport tvm.testing\nfrom packaging import version as package_version\n\nsys.setrecursionlimit(10000)\n\n\ndef list_ops(expr):\n class OpLister(tvm.relay.ExprVisitor):\n def visit_op(self, expr):\n if expr not in self.node_set:\n self.node_list.append(expr)\n return super().visit_op(expr)\n\n def list_nodes(self, expr):\n self.node_set = {}\n self.node_list = []\n self.visit(expr)\n return self.node_list\n\n return OpLister().list_nodes(expr)\n\n\ndef assert_shapes_match(tru, est):\n if tru.shape != est.shape:\n msg = \"Output shapes {} and {} don't match\"\n raise AssertionError(msg.format(tru.shape, est.shape))\n\n\ndef load_torchvision(model_name):\n \"\"\"Given a model name, returns a Torchvision model in eval mode as well\n as an example input.\"\"\"\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n\n\ndef load_pretrainedmodels(model_name):\n \"\"\"Given a model name, returns a pretrainedmodels.pytorch model in eval\n mode as well as an example input.\"\"\"\n import pretrainedmodels # https://github.com/Cadene/pretrained-models.pytorch\n\n model = getattr(pretrainedmodels, model_name)().float().eval()\n input_shape = [1, *model.input_size]\n input_data = torch.rand(input_shape).float() * 256\n for channel in range(3):\n input_data[:, channel] -= model.mean[channel]\n input_data[:, channel] /= model.std[channel]\n return model, [input_data]\n\n\ndef load_model(model_name):\n \"\"\"Given a model name, returns a model as well as an example input.\"\"\"\n if hasattr(torchvision.models, model_name):\n return load_torchvision(model_name)\n try:\n import pretrainedmodels\n\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")\n\n\ndef confidence_interval(mean, stdev, count, alpha=0.01):\n \"\"\"Returns the lower and upper bounds of the confidence interval of a random\n variable. 
Confidence is 1 - alpha (default confidence is 99%).\"\"\"\n stdval = tdistr.ppf(1 - alpha / 2, count - 1)\n lower, upper = mean + np.array([-1, 1]) * stdval * stdev / np.sqrt(count)\n return lower, upper\n\n\ndef measure_latency(model, input_shapes, output_shapes, thresh, dryruns=40):\n \"\"\"Compute the latency of the given model\"\"\"\n latencies = []\n count = 0\n while True:\n if isinstance(model, Module):\n input_data = [torch.rand(shape).float() for shape in input_shapes]\n if torch.cuda.is_available():\n input_data = list(map(lambda x: x.cuda(), input_data))\n model = model.cuda()\n t_start = time()\n with torch.no_grad():\n model(*input_data)\n t_end = time()\n latencies.append(t_end - t_start)\n else:\n input_data = {}\n for i, shape in enumerate(input_shapes):\n name = \"input\" + str(i)\n arr = np.random.random(shape).astype(\"float32\")\n input_data[name] = tvm.nd.array(arr)\n t_start = time()\n model.set_input(**input_data)\n model.run()\n for i, shape in enumerate(output_shapes):\n arr = np.zeros(shape).astype(\"float32\")\n model.get_output(i, tvm.nd.array(arr))\n t_end = time()\n count += 1\n if count < dryruns:\n continue\n latencies.append(t_end - t_start)\n mean = np.mean(latencies)\n stdev = np.std(latencies)\n sample_size = len(latencies)\n if sample_size > dryruns:\n lower, upper = confidence_interval(mean, stdev, sample_size)\n est = (upper + lower) / 2\n err = (upper - lower) / 2\n if err < thresh:\n return est\n\n\ndef verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, atol=1e-5):\n \"\"\"Assert that the output of a compiled model matches with that of its\n baseline.\"\"\"\n if isinstance(model_name, str):\n baseline_model, baseline_input = load_model(model_name)\n elif isinstance(input_data, list):\n baseline_model = model_name\n baseline_input = input_data\n elif isinstance(input_data, torch.Tensor) or len(input_data.shape) == 0:\n baseline_model = model_name\n baseline_input = [input_data]\n else:\n assert False, \"Unexpected input format\"\n\n if torch.cuda.is_available():\n if isinstance(baseline_model, torch.nn.Module):\n baseline_model = baseline_model.cuda()\n baseline_input = [inp.cuda() for inp in baseline_input]\n\n with torch.no_grad():\n baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])\n\n if isinstance(baseline_outputs, tuple):\n baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)\n else:\n baseline_outputs = (baseline_outputs.cpu().numpy(),)\n\n trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])\n if isinstance(baseline_model, torch.nn.Module):\n trace = trace.float().eval()\n\n if torch.cuda.is_available():\n trace = trace.cuda()\n else:\n trace = trace.cpu()\n\n input_names = [\"input{}\".format(idx) for idx, inp in enumerate(baseline_input)]\n input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))\n mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)\n compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))\n\n with tvm.transform.PassContext(opt_level=3):\n for target, ctx in tvm.testing.enabled_targets():\n relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)\n relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)\n relay_model.set_input(**relay_params)\n for name, inp in compiled_input.items():\n relay_model.set_input(name, inp)\n relay_model.run()\n\n for i, baseline_output in 
enumerate(baseline_outputs):\n compiled_output = relay_model.get_output(i).asnumpy()\n\n assert_shapes_match(baseline_output, compiled_output)\n tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)\n\n del model_name\n del baseline_model\n torch.cuda.empty_cache()\n\n\n# Single operator tests\[email protected]_gpu\ndef test_forward_pixel_shuffle():\n torch.set_grad_enabled(False)\n input_shape = [1, 144, 16, 16]\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.PixelShuffle(2).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(3).float().eval(), input_data=input_data)\n verify_model(torch.nn.PixelShuffle(4).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_add():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Add1(Module):\n def forward(self, *args):\n return args[0] + args[0]\n\n class Add2(Module):\n def forward(self, *args):\n return args[0] + 1\n\n class Add3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape, dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n class Add4(Module):\n def forward(self, *args):\n ones = torch.ones([], dtype=torch.float)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] + ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Add1().float().eval(), input_data=input_data)\n verify_model(Add2().float().eval(), input_data=input_data)\n verify_model(Add3().float().eval(), input_data=input_data)\n verify_model(Add4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_subtract():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Subtract1(Module):\n def forward(self, *args):\n return args[0] - args[0]\n\n class Subtract2(Module):\n def forward(self, *args):\n return args[0] - 1\n\n class Subtract3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n class Subtract4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] - ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Subtract1().float().eval(), input_data=input_data)\n verify_model(Subtract2().float().eval(), input_data=input_data)\n verify_model(Subtract3().float().eval(), input_data=input_data)\n verify_model(Subtract4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_multiply():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Multiply1(Module):\n def forward(self, *args):\n return args[0] * args[0]\n\n class Multiply2(Module):\n def forward(self, *args):\n return args[0] * 1.0\n\n class Multiply3(Module):\n def forward(self, *args):\n ones = torch.ones(input_shape)\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n class Multiply4(Module):\n def forward(self, *args):\n ones = torch.ones([])\n if torch.cuda.is_available():\n ones = ones.cuda()\n return args[0] * ones\n\n input_data = torch.rand(input_shape).float()\n verify_model(Multiply1().float().eval(), input_data=input_data)\n verify_model(Multiply2().float().eval(), input_data=input_data)\n verify_model(Multiply3().float().eval(), input_data=input_data)\n verify_model(Multiply4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_min_max():\n class Max(Module):\n def forward(self, inp):\n 
return torch.max(inp)\n\n class Min(Module):\n def forward(self, inp):\n return torch.min(inp)\n\n class Max2(Module):\n def forward(self, inp):\n out, _ = torch.max(inp, 1, keepdim=True)\n return out\n\n class Min2(Module):\n def forward(self, inp):\n out, _ = torch.min(inp, 0, keepdim=False)\n return out\n\n class Max3(Module):\n def forward(self, lhs, rhs):\n return torch.max(lhs, rhs)\n\n class Min3(Module):\n def forward(self, lhs, rhs):\n return torch.min(lhs, rhs)\n\n input_data = [torch.rand((10, 10)), torch.rand((10, 10))]\n\n verify_model(Max(), input_data=input_data[0])\n verify_model(Min(), input_data=input_data[0])\n verify_model(Max2(), input_data=input_data[0])\n verify_model(Min2(), input_data=input_data[0])\n verify_model(Max3(), input_data=input_data)\n verify_model(Min3(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reciprocal():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Reciprocal1(Module):\n def forward(self, *args):\n return args[0].reciprocal()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Reciprocal1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n\n class Repeat1(Module):\n def forward(self, *args):\n return args[0].repeat(1, 1)\n\n class Repeat2(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2)\n\n class Repeat3(Module):\n def forward(self, *args):\n return args[0].repeat(4, 2, 1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Repeat1().float().eval(), input_data=input_data)\n verify_model(Repeat2().float().eval(), input_data=input_data)\n verify_model(Repeat3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_repeat_interleave():\n torch.set_grad_enabled(False)\n input_shape = [2, 2, 3]\n\n class RepeatInterleave1(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2)\n\n class RepeatInterleave2(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(3, dim=0)\n\n class RepeatInterleave3(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(2, dim=1)\n\n class RepeatInterleave4(Module):\n def forward(self, *args):\n return args[0].repeat_interleave(4, dim=2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(RepeatInterleave1().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave2().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave3().float().eval(), input_data=input_data)\n verify_model(RepeatInterleave4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_unsqueeze():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n\n class Unsqueeze1(Module):\n def forward(self, *args):\n return args[0].unsqueeze(2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Unsqueeze1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_squeeze():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Squeeze1(Module):\n def forward(self, *args):\n return args[0].squeeze()\n\n class Squeeze2(Module):\n def forward(self, *args):\n return args[0].squeeze(1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Squeeze1().float().eval(), input_data=input_data)\n verify_model(Squeeze2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_arange():\n torch.set_grad_enabled(False)\n\n class 
Arange1(Module):\n def forward(self, *args):\n return torch.arange(5)\n\n class Arange2(Module):\n def forward(self, *args):\n return torch.arange(2.5)\n\n class Arange3(Module):\n def forward(self, *args):\n return torch.arange(1, 4)\n\n class Arange4(Module):\n def forward(self, *args):\n return torch.arange(1, 2.5, 0.5)\n\n class Arange5(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int32)\n\n class Arange6(Module):\n def forward(self, *args):\n return torch.arange(start=1, end=6, step=2)\n\n class Arange7(Module):\n def forward(self, *args):\n return torch.arange(1, 4, dtype=torch.float32)\n\n class Arange8(Module):\n def forward(self, *args):\n return torch.arange(1, 2, 1, dtype=torch.int16)\n\n class Arange9(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4), 1)\n return torch.arange(end) + torch.ones((5,), dtype=torch.int64)\n\n class Arange10(Module):\n def forward(self, *args):\n end = torch.add(torch.tensor(4.0), torch.tensor(1.0))\n return torch.arange(end) + torch.ones((5,), dtype=torch.float)\n\n class Arange11(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2), 1)\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.int64)\n\n class Arange12(Module):\n def forward(self, *args):\n start = torch.add(torch.tensor(1), 1)\n end = torch.add(torch.tensor(4), 1)\n step = torch.add(torch.tensor(2.5), torch.tensor(4.1))\n out = torch.arange(start, end, step)\n return out + torch.ones((3,), dtype=torch.float)\n\n verify_model(Arange1().float().eval())\n verify_model(Arange2().float().eval())\n verify_model(Arange3().float().eval())\n verify_model(Arange4().float().eval())\n verify_model(Arange5().float().eval())\n verify_model(Arange6().float().eval())\n verify_model(Arange7().float().eval())\n verify_model(Arange8().float().eval())\n verify_model(Arange9().float().eval())\n verify_model(Arange10().float().eval())\n verify_model(Arange11().float().eval())\n verify_model(Arange12().float().eval())\n\n\[email protected]_gpu\ndef test_forward_mesh_grid():\n torch.set_grad_enabled(False)\n\n class MeshGrid1(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3])\n y = torch.tensor([4, 5, 6])\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n class MeshGrid2(Module):\n def forward(self, *args):\n x = torch.tensor([1, 2, 3], dtype=torch.float32)\n y = torch.add(torch.tensor(5, dtype=torch.float32), 1)\n grid_x, grid_y = torch.meshgrid([x, y])\n return grid_x, grid_y\n\n verify_model(MeshGrid1().float().eval())\n verify_model(MeshGrid2().float().eval())\n\n\[email protected]_gpu\ndef test_forward_abs():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n\n class Abs1(Module):\n def forward(self, *args):\n return args[0].abs()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Abs1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_concatenate():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Concatenate1(Module):\n def forward(self, *args):\n return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)\n\n class Concatenate2(Module):\n def forward(self, *args):\n a = (args[0][:, :, 0] + 2) * 7\n b = (args[0][:, :, 1] + 3) * 11\n c = (args[0][:, :, 2] + 5) * 13\n return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)\n\n input_data = torch.rand(input_shape).float()\n 
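    # With input shape [1, 3, 10, 10], Concatenate1 re-joins two (1, 1, 10, 10)
    # channel slices along dim 1 into (1, 2, 10, 10), and Concatenate2 stacks three
    # scaled (1, 3, 1, 10) slices along dim 2 into (1, 3, 3, 10), covering the
    # aten::cat conversion for both channel and inner-dim concatenation.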
verify_model(Concatenate1().float().eval(), input_data=input_data)\n verify_model(Concatenate2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_relu():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.ReLU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_prelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_leakyrelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.LeakyReLU().eval(), input_data=input_data)\n verify_model(torch.nn.LeakyReLU(negative_slope=0.05).eval(), input_data=input_data)\n verify_model(torch.nn.LeakyReLU(negative_slope=1.0, inplace=True).eval(), input_data=input_data)\n verify_model(\n torch.nn.LeakyReLU(negative_slope=1.25, inplace=True).eval(), input_data=input_data\n )\n\n\[email protected]_gpu\ndef test_forward_elu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.ELU().eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=0.3).eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=1.0).eval(), input_data=input_data)\n verify_model(torch.nn.ELU(alpha=1.3).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_celu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.CELU().eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=0.3).eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=1.0).eval(), input_data=input_data)\n verify_model(torch.nn.CELU(alpha=1.3).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_gelu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.GELU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_selu():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.SELU().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_softplus():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Softplus().eval(), input_data=input_data)\n verify_model(torch.nn.Softplus(beta=1.5, threshold=20).eval(), input_data=input_data)\n verify_model(torch.nn.Softplus(beta=5, threshold=10).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_softsign():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Softsign().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_log_sigmoid():\n torch.set_grad_enabled(False)\n input_shape = [10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.LogSigmoid().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_adaptiveavgpool():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), 
input_data=input_data)\n verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool2d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool2d(kernel_size=[1, 1]).eval(), input_data)\n verify_model(torch.nn.MaxPool2d(kernel_size=[10, 10]).eval(), input_data)\n verify_model(torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool2D(Module):\n def forward(self, *args):\n return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])\n\n verify_model(MaxPool2D(), input_data=input_data)\n\n class MaxPool2DWithIndices(Module):\n def __init__(self):\n super(MaxPool2DWithIndices, self).__init__()\n self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)\n\n def forward(self, *args):\n output, indices = self.pool(args[0])\n return output\n\n verify_model(MaxPool2DWithIndices().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool1d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool1d(kernel_size=1).eval(), input_data)\n verify_model(torch.nn.MaxPool1d(kernel_size=10).eval(), input_data)\n verify_model(torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool1D(Module):\n def forward(self, *args):\n return torch.nn.functional.max_pool1d(args[0], kernel_size=10)\n\n verify_model(MaxPool1D(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_maxpool3d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10, 10]\n input_data = torch.rand(input_shape).float()\n\n verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), input_data)\n verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), input_data)\n verify_model(torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), input_data)\n\n # A functional variant (default strides = None case)\n class MaxPool3D(Module):\n def forward(self, *args):\n return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])\n\n verify_model(MaxPool3D(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_split():\n torch.set_grad_enabled(False)\n input_shape = [4, 10]\n\n class Split(Module):\n def __init__(self, split_size_or_sections, dim):\n super(Split, self).__init__()\n self.split_size_or_sections = split_size_or_sections\n self.dim = dim\n\n def forward(self, *args):\n return torch.split(args[0], self.split_size_or_sections, self.dim)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Split(2, 0).float().eval(), input_data=input_data)\n verify_model(Split(3, 1).float().eval(), input_data=input_data)\n verify_model(Split(4, 1).float().eval(), input_data=input_data)\n verify_model(Split([2, 3, 5], 1).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_avgpool():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class AvgPool2D2(Module):\n def forward(self, *args):\n return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)\n verify_model(AvgPool2D2().float().eval(), 
input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_avgpool3d():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10, 10]\n\n class AvgPool3D1(Module):\n def forward(self, *args):\n return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])\n\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)\n verify_model(AvgPool3D1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_hardtanh():\n torch.set_grad_enabled(False)\n input_shape = [10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Hardtanh().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_conv():\n torch.set_grad_enabled(False)\n conv1d_input_shape = [1, 3, 10]\n conv2d_input_shape = [1, 3, 10, 10]\n\n class Conv2D1(Module):\n def __init__(self):\n super(Conv2D1, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv2D2(Module):\n def __init__(self):\n super(Conv2D2, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv2D3(Module):\n def __init__(self):\n super(Conv2D3, self).__init__()\n self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D1(Module):\n def __init__(self):\n super(Conv1D1, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D2(Module):\n def __init__(self):\n super(Conv1D2, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n class Conv1D3(Module):\n def __init__(self):\n super(Conv1D3, self).__init__()\n self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, *args):\n return self.softmax(self.conv(args[0]))\n\n conv2d_input_data = torch.rand(conv2d_input_shape).float()\n verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)\n verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)\n # depth wise conv with channel mult 2\n verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)\n # group conv\n verify_model(\n torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),\n input_data=torch.randn((1, 8, 16, 16)),\n )\n\n conv1d_input_data = torch.rand(conv1d_input_shape).float()\n verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)\n verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)\n verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)\n\n\[email protected]_gpu\ndef test_forward_conv_transpose():\n torch.set_grad_enabled(False)\n conv2d_input_shape = [1, 3, 10, 10]\n conv2d_input_data = torch.rand(conv2d_input_shape).float()\n verify_model(torch.nn.ConvTranspose2d(3, 6, 7, bias=True), input_data=conv2d_input_data)\n verify_model(torch.nn.ConvTranspose2d(3, 12, 3, bias=False), input_data=conv2d_input_data)\n\n conv1d_input_shape = [1, 3, 10]\n conv1d_input_data = torch.rand(conv1d_input_shape).float()\n 
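    # With the default stride=1, padding=0, dilation=1 and output_padding=0, a
    # transposed conv grows each spatial dim by kernel_size - 1, i.e.
    # L_out = (L_in - 1) * stride - 2 * padding + (kernel_size - 1) + 1.
    # The 2d cases above map 10x10 -> 16x16 (kernel 7) and 12x12 (kernel 3);
    # the 1d cases below map length 10 -> 16 and 12 the same way.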
verify_model(torch.nn.ConvTranspose1d(3, 6, 7, bias=True), input_data=conv1d_input_data)\n verify_model(torch.nn.ConvTranspose1d(3, 12, 3, bias=False), input_data=conv1d_input_data)\n\n\[email protected]_gpu\ndef test_forward_threshold():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_contiguous():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Contiguous1(Module):\n def forward(self, *args):\n return args[0].contiguous()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Contiguous1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_batchnorm():\n def init_weight(m):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.normal_(m.bias)\n\n inp_2d = torch.rand((1, 16, 10, 10))\n inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:\n init_weight(bn.eval())\n verify_model(bn.eval(), input_data=inp)\n\n\[email protected]_gpu\ndef test_forward_instancenorm():\n inp_2d = torch.rand((1, 16, 10, 10))\n inp_3d = torch.rand((1, 16, 10, 10, 10))\n\n for ins_norm, inp in [\n (torch.nn.InstanceNorm2d(16), inp_2d),\n (torch.nn.InstanceNorm3d(16), inp_3d),\n ]:\n verify_model(ins_norm.eval(), input_data=inp)\n\n\[email protected]_gpu\ndef test_forward_layernorm():\n def init_weight(m):\n torch.nn.init.normal_(m.weight, 0, 0.01)\n torch.nn.init.normal_(m.bias, 0.02)\n\n inp_2d = torch.rand((1, 16, 10, 10))\n inp_3d = torch.rand((1, 16, 10, 10, 10))\n for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:\n init_weight(ln.eval())\n verify_model(ln.eval(), input_data=inp)\n\n\[email protected]_gpu\ndef test_forward_groupnorm():\n input_shape = [10, 6, 5, 5]\n input_data = torch.rand(input_shape).float()\n\n # Separate 6 channels into 3 groups\n verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)\n\n # Put all 6 channels into a single group (equivalent with LayerNorm)\n verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)\n\n # Separate 6 channels into 6 groups (equivalent with InstanceNorm)\n verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)\n\n input_shape = [1, 10, 4, 7]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)\n verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)\n verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)\n verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reshape():\n torch.set_grad_enabled(False)\n input_shape = [2, 1, 10, 1, 10]\n new_shape = [2, 1, 10, 10]\n\n class Reshape1(Module):\n def forward(self, *args):\n return args[0].reshape(new_shape)\n\n class Reshape2(Module):\n def forward(self, *args):\n return args[0].reshape([-1])\n\n class Reshape3(torch.nn.Module):\n def forward(self, x):\n x_shape = x.shape\n return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))\n\n input_data = torch.rand(input_shape).float()\n verify_model(Reshape1(), input_data=input_data)\n verify_model(Reshape2(), input_data=input_data)\n verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))\n\n\[email protected]_gpu\ndef test_flatten():\n class Flatten(Module):\n def forward(self, x):\n return torch.flatten(x)\n\n class 
BatchFlatten(Module):\n def forward(self, x):\n return torch.flatten(x, start_dim=1)\n\n inp = torch.rand((5, 2, 2))\n verify_model(Flatten(), input_data=inp)\n verify_model(BatchFlatten(), input_data=inp)\n\n\[email protected]_gpu\ndef test_forward_transpose():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Transpose1(Module):\n def forward(self, *args):\n return args[0].transpose(2, 3)\n\n class Transpose2(Module):\n def forward(self, *args):\n return args[0].transpose(-2, -1)\n\n class Transpose3(Module):\n def forward(self, *args):\n return args[0].permute(0, 2, 3, 1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Transpose1().float().eval(), input_data=input_data)\n verify_model(Transpose2().float().eval(), input_data=input_data)\n verify_model(Transpose3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_size():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n\n class Size1(Module):\n def forward(self, *args):\n return float(args[0].size(0)) * args[0]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Size1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_type_as():\n torch.set_grad_enabled(False)\n input_shape = [1, 3]\n\n def _create_module(dtype):\n class TypeAs(Module):\n def forward(self, *args):\n expected_type_tensor = torch.zeros(1, 3, dtype=dtype)\n return args[0].type_as(expected_type_tensor)\n\n return TypeAs()\n\n input_data = torch.randn(input_shape).float()\n verify_model(_create_module(torch.float64), input_data=input_data)\n verify_model(_create_module(torch.float32), input_data=input_data)\n verify_model(_create_module(torch.int64), input_data=input_data)\n verify_model(_create_module(torch.int32), input_data=input_data)\n verify_model(_create_module(torch.int16), input_data=input_data)\n verify_model(_create_module(torch.int8), input_data=input_data)\n\n if torch.cuda.is_available():\n check_fp16 = False\n try:\n # Only check half precision on supported hardwares.\n if have_fp16(tvm.gpu(0).compute_version):\n check_fp16 = True\n except Exception as e:\n # If GPU is not enabled in TVM, skip the fp16 test.\n pass\n\n # Temporary disable fp16 test\n check_fp16 = False\n\n if check_fp16:\n verify_model(_create_module(torch.float16), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_view():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class View1(Module):\n def forward(self, *args):\n return args[0].view((1, 3 * 10 * 10))\n\n class View2(Module):\n def forward(self, *args):\n return args[0].view(args[0].shape[0], -1)\n\n class View3(Module):\n def forward(self, *args):\n d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)\n return args[0].view(args[0].shape[0], d1)\n\n input_data = torch.rand(input_shape).float()\n verify_model(View1().float().eval(), input_data=input_data)\n verify_model(View2().float().eval(), input_data=input_data)\n verify_model(View3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_select():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Select1(Module):\n def forward(self, *args):\n return args[0].select(1, 1)\n\n class IndexedSelect(Module):\n def __init__(self, inp, dim):\n super().__init__()\n self.inp = inp\n self.dim = dim\n if torch.cuda.is_available():\n self.inp = self.inp.cuda()\n\n def forward(self, index):\n return torch.index_select(self.inp, self.dim, index)\n\n input_data = 
torch.rand(input_shape).float()\n verify_model(Select1().float().eval(), input_data=input_data)\n\n x = torch.randn(3, 4)\n indices = torch.tensor([0, 2])\n verify_model(IndexedSelect(x, 0).eval(), input_data=indices)\n verify_model(IndexedSelect(x, 1).eval(), input_data=indices)\n\n\[email protected]_gpu\ndef test_forward_clone():\n torch.set_grad_enabled(False)\n input_shape = [10]\n\n class Clone1(Module):\n def forward(self, *args):\n return args[0].clone()\n\n input_data = torch.rand(input_shape).float()\n verify_model(Clone1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_gather():\n torch.set_grad_enabled(False)\n\n class Gather1(Module):\n def forward(self, *args):\n return torch.gather(args[0], 0, args[1])\n\n class Gather2(Module):\n def forward(self, *args):\n return torch.gather(args[0], 1, args[1])\n\n class Gather3(Module):\n def forward(self, *args):\n return torch.gather(args[0], 2, args[1])\n\n input_data = torch.rand((4,)).float()\n index = torch.tensor([1])\n verify_model(Gather1().float().eval(), input_data=[input_data, index])\n\n input_data = torch.rand((2, 2)).float()\n index = torch.tensor([[1, 0], [0, 1]])\n verify_model(Gather1().float().eval(), input_data=[input_data, index])\n\n input_data = torch.tensor([[1, 2], [3, 4]])\n index = torch.tensor([[0, 0], [1, 0]])\n verify_model(Gather2().float().eval(), input_data=[input_data, index])\n\n input_data = torch.rand((2, 2)).float()\n index = torch.tensor([[1, 0], [0, 1]])\n verify_model(Gather2().float().eval(), input_data=[input_data, index])\n\n input_data = torch.rand((3, 3, 3)).float()\n index = torch.tensor(\n [\n [[1, 0, 0], [1, 0, 1], [0, 1, 1]],\n [[1, 1, 1], [1, 2, 1], [1, 0, 1]],\n [[1, 2, 1], [1, 2, 1], [1, 2, 1]],\n ]\n )\n verify_model(Gather3().float().eval(), input_data=[input_data, index])\n\n\[email protected]_gpu\ndef test_forward_logsoftmax():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class LogSoftmax1(Module):\n def forward(self, *args):\n return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])\n\n input_data = torch.rand(input_shape).float()\n verify_model(LogSoftmax1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_norm():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Norm1(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(\"inf\"), dim=None, keepdim=False)\n\n class Norm2(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(\"-inf\"), dim=None, keepdim=False)\n\n class Norm3(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(\"-inf\"), dim=None, keepdim=True)\n\n class Norm4(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(\"inf\"), dim=(1, 2), keepdim=False)\n\n class Norm5(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(\"inf\"), dim=(1), keepdim=True)\n\n class Norm6(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)\n\n class Norm7(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(1), dim=None, keepdim=False)\n\n class Norm8(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)\n\n class Norm9(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)\n\n class Norm10(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)\n\n input_data = 
torch.rand(input_shape).float()\n verify_model(Norm1().float().eval(), input_data=input_data)\n verify_model(Norm2().float().eval(), input_data=input_data)\n verify_model(Norm3().float().eval(), input_data=input_data)\n verify_model(Norm4().float().eval(), input_data=input_data)\n verify_model(Norm5().float().eval(), input_data=input_data)\n verify_model(Norm6().float().eval(), input_data=input_data)\n verify_model(Norm7().float().eval(), input_data=input_data)\n verify_model(Norm8().float().eval(), input_data=input_data)\n verify_model(Norm9().float().eval(), input_data=input_data)\n verify_model(Norm10().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_frobenius_norm():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class FroNorm1(Module):\n def forward(self, *args):\n return torch.norm(args[0])\n\n class FroNorm2(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=\"fro\", dim=None, keepdim=True)\n\n class FroNorm3(Module):\n def forward(self, *args):\n return torch.norm(args[0], p=\"fro\", dim=(1), keepdim=True)\n\n class FroNorm4(Module):\n def forward(self, *args):\n return torch.norm(args[0], dim=None, keepdim=False)\n\n input_data = torch.rand(input_shape).float()\n verify_model(FroNorm1().float().eval(), input_data=input_data)\n verify_model(FroNorm2().float().eval(), input_data=input_data)\n verify_model(FroNorm3().float().eval(), input_data=input_data)\n verify_model(FroNorm4().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_sigmoid():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Sigmoid().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_dense():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Dense1(Module):\n def __init__(self):\n super(Dense1, self).__init__()\n self.linear = torch.nn.Linear(10, 7, bias=True)\n\n def forward(self, *args):\n return self.linear(args[0][0, 0])\n\n class Dense2(Module):\n def __init__(self):\n super(Dense2, self).__init__()\n self.linear = torch.nn.Linear(10, 7, bias=False)\n\n def forward(self, *args):\n return self.linear(args[0][0, 0])\n\n input_data = torch.rand(input_shape).float()\n verify_model(Dense1().float().eval(), input_data=input_data)\n verify_model(Dense2().float().eval(), input_data=input_data)\n\n trace = torch.jit.trace(Dense1(), [input_data])\n mod, params = relay.frontend.from_pytorch(\n trace,\n [(\"input\", input_shape)],\n )\n assert not any([op.name == \"multiply\" for op in list_ops(mod[\"main\"])])\n\n\[email protected]_gpu\ndef test_forward_dropout():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=input_data[0, 0])\n verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=input_data[0])\n verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=input_data)\n verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=input_data[0, 0])\n\n\[email protected]_gpu\ndef test_forward_slice():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Slice1(Module):\n def forward(self, *args):\n return args[0][:, :, :, :3]\n\n class Slice2(Module):\n def forward(self, *args):\n return args[0][0, :, :-3, :]\n\n class Slice3(Module):\n def forward(self, *args):\n x0 = torch.tensor(2) - torch.tensor(1)\n x1 = torch.tensor(3) + torch.tensor(1)\n return 
args[0][:, x0:, 1:x1, :]\n\n class SliceWithStride(torch.nn.Module):\n def forward(self, x):\n return x[..., 0::2] + x[..., 1::2]\n\n class SliceWithStride2(torch.nn.Module):\n def forward(self, x):\n return x[0::2, 0::2] + x[1::2, 1::2]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Slice1(), input_data=input_data)\n verify_model(Slice2(), input_data=input_data)\n verify_model(Slice3(), input_data=input_data)\n verify_model(SliceWithStride(), input_data=torch.randn(1, 4))\n verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))\n\n\[email protected]_gpu\ndef test_forward_mean():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Mean1(Module):\n def forward(self, *args):\n return args[0].mean(2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Mean1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_expand():\n torch.set_grad_enabled(False)\n\n class Expand1(Module):\n def forward(self, *args):\n return args[0].expand((3, -1, -1, -1))\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Expand1().float().eval(), input_data=input_data)\n\n class Expand2(Module):\n def forward(self, *args):\n return args[0].expand((3, 3, 3, 1))\n\n input_shape = [3, 1]\n input_data = torch.rand(input_shape).float()\n verify_model(Expand2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_pow():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Pow1(Module):\n def forward(self, *args):\n return args[0] ** 2\n\n input_data = torch.rand(input_shape).float()\n verify_model(Pow1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_chunk():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 14, 14]\n\n class Chunk1(Module):\n def forward(self, *args):\n chunks = args[0].chunk(7, 2)\n return torch.cat(chunks, 2)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Chunk1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_upsample():\n class Upsample(Module):\n def __init__(self, size=None, scale=None, mode=\"nearest\", align_corners=None):\n super().__init__()\n self.size = size\n self.scale = scale\n self.mode = mode\n self.align_corners = align_corners\n\n def forward(self, x):\n return torch.nn.functional.interpolate(\n x,\n size=self.size,\n scale_factor=self.scale,\n mode=self.mode,\n align_corners=self.align_corners,\n )\n\n inp = torch.rand((1, 3, 32, 32))\n verify_model(Upsample(size=(64, 64), mode=\"nearest\"), inp)\n verify_model(Upsample(scale=2, mode=\"nearest\"), inp)\n verify_model(Upsample(size=(50, 50), mode=\"nearest\"), inp)\n verify_model(Upsample(size=(64, 64), mode=\"bilinear\", align_corners=True), inp)\n verify_model(Upsample(scale=2, mode=\"bilinear\", align_corners=True), inp)\n verify_model(Upsample(size=(50, 50), mode=\"bilinear\", align_corners=True), inp)\n\n\[email protected]_gpu\ndef test_to():\n \"\"\" test for aten::to(...) 
\"\"\"\n\n class ToCPU(Module):\n def forward(self, x):\n return x.to(\"cpu\")\n\n class ToFloat(Module):\n def forward(self, x):\n return x.float()\n\n class ToInt(Module):\n def forward(self, x):\n return x.int()\n\n class ToLong(Module):\n def forward(self, x):\n return x.long()\n\n class ToDouble(Module):\n def forward(self, x):\n return x.double()\n\n class ToFloat16(Module):\n def forward(self, x):\n return x.to(torch.float16)\n\n verify_model(ToCPU().eval(), torch.rand((1, 3, 32, 32)))\n verify_model(ToFloat().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))\n verify_model(ToFloat().eval(), torch.tensor(2, dtype=torch.int))\n verify_model(ToInt().eval(), torch.zeros((1, 3, 32, 32)))\n verify_model(ToInt().eval(), torch.tensor(0.8))\n verify_model(ToLong().eval(), torch.tensor(0.8))\n verify_model(ToDouble().eval(), torch.tensor(0.8))\n verify_model(ToFloat16().eval(), torch.tensor(2, dtype=torch.float32))\n verify_model(ToFloat16().eval(), torch.zeros((1, 3, 32, 32), dtype=torch.int))\n\n\[email protected]_gpu\ndef test_adaptive_pool3d():\n for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:\n inp = torch.rand(ishape)\n verify_model(torch.nn.AdaptiveMaxPool3d((1, 1, 1)).eval(), inp)\n verify_model(torch.nn.AdaptiveMaxPool3d((2, 2, 2)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((1, 1, 1)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((2, 2, 2)).eval(), inp)\n verify_model(torch.nn.AdaptiveAvgPool3d((4, 8, 8)).eval(), inp)\n verify_model(torch.nn.AdaptiveMaxPool3d((7, 8, 9)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_functional_pad():\n torch.set_grad_enabled(False)\n pad = (0, 0)\n\n class Pad1(Module):\n def forward(self, *args):\n return torch.nn.functional.pad(args[0], pad, \"constant\", 0)\n\n input_data = torch.rand((3, 3, 4, 2))\n pad = (1, 1)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n pad = (1, 1, 2, 2)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n pad = (0, 1, 2, 1, 3, 3)\n verify_model(Pad1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_zero_pad2d():\n inp = torch.rand((1, 1, 3, 3))\n verify_model(torch.nn.ZeroPad2d(2).eval(), inp)\n verify_model(torch.nn.ZeroPad2d((1, 1, 2, 0)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad1d():\n inp = torch.rand((1, 2, 4))\n verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)\n\n inp = torch.rand((1, 2, 3))\n verify_model(torch.nn.ConstantPad2d((3, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad2d():\n inp = torch.rand((1, 2, 2, 2))\n verify_model(torch.nn.ConstantPad2d(2, 3.5).eval(), inp)\n verify_model(torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_constant_pad3d():\n inp = torch.rand((1, 3, 2, 2, 2))\n verify_model(torch.nn.ConstantPad3d(3, 3.5).eval(), inp)\n verify_model(torch.nn.ConstantPad3d((3, 4, 5, 6, 0, 1), 3.5).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_reflection_pad1d():\n inp = torch.rand((1, 2, 4))\n verify_model(torch.nn.ReflectionPad1d(2).eval(), inp)\n verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), inp)\n\n inp = torch.rand((2, 4, 5))\n verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_reflection_pad2d():\n inp = torch.rand((1, 1, 3, 3))\n verify_model(torch.nn.ReflectionPad2d(2).eval(), inp)\n verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), inp)\n\n inp = torch.rand((2, 4, 5, 6))\n 
verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_replication_pad1d():\n inp = torch.rand((1, 2, 4))\n verify_model(torch.nn.ReplicationPad1d(2).eval(), inp)\n verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), inp)\n\n inp = torch.rand((2, 4, 5))\n verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_replication_pad2d():\n inp = torch.rand((1, 1, 3, 3))\n verify_model(torch.nn.ReplicationPad2d(2).eval(), inp)\n verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), inp)\n\n inp = torch.rand((2, 4, 5, 6))\n verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_replication_pad3d():\n inp = torch.rand((1, 1, 3, 3, 3))\n verify_model(torch.nn.ReplicationPad3d(3).eval(), inp)\n verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), inp)\n\n inp = torch.rand((7, 5, 4, 5, 6))\n verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), inp)\n\n\[email protected]_gpu\ndef test_forward_upsample3d():\n inp = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)\n verify_model(torch.nn.Upsample(scale_factor=2, mode=\"nearest\").eval(), inp)\n verify_model(torch.nn.Upsample(scale_factor=2, mode=\"trilinear\").eval(), inp)\n verify_model(\n torch.nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True).eval(), inp\n )\n\n\ndef test_forward_nms():\n \"\"\"dynamic Non-Maximum Suppression\"\"\"\n torch.set_grad_enabled(False)\n\n class NonMaxSupression(Module):\n def __init__(self, iou_thres):\n super().__init__()\n self.iou_threshold = iou_thres\n\n def forward(self, *args):\n return torchvision.ops.nms(args[0], args[1], self.iou_threshold)\n\n # Generate random input data\n def _gen_rand_inputs(num_boxes):\n box_len = 4\n boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5\n boxes[:, 2] += boxes[:, 0]\n boxes[:, 3] += boxes[:, 1]\n scores = torch.from_numpy(np.random.uniform(-1, 1, size=(num_boxes,)).astype(np.float32))\n return boxes, scores\n\n targets = [\"llvm\", \"cuda\"]\n\n for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:\n in_boxes, in_scores = _gen_rand_inputs(num_boxes)\n verify_trace_model(NonMaxSupression(iou_thres), [in_boxes, in_scores], targets)\n\n\ndef test_forward_roi_align():\n \"\"\"ROI align\"\"\"\n torch.set_grad_enabled(False)\n\n class ROIAlgin(Module):\n def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):\n super().__init__()\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n self.output_sizes = output_sizes\n\n def forward(self, *args):\n return torchvision.ops.roi_align(\n args[0],\n args[1],\n self.output_sizes,\n self.spatial_scale,\n self.sampling_ratio,\n )\n\n in_data = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))\n in_boxes = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))\n in_batch = torch.zeros((35, 1), dtype=torch.float)\n in_boxes = torch.cat([in_batch, in_boxes], dim=1)\n\n verify_model(ROIAlgin(7), [in_data, in_boxes])\n verify_model(ROIAlgin((10, 10), 0.7, 5), [in_data, in_boxes])\n verify_model(ROIAlgin(15, 0.9, 3), [in_data, in_boxes])\n\n\[email protected]_gpu\ndef test_conv3d():\n for ishape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:\n inp = torch.rand(ishape)\n verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), inp),\n verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), inp),\n 
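        # The 3x3x3/padding=1 and 5x5x5/padding=2 cases above use "same"-style
        # padding, so only the channel count changes (32 -> 16) while D/H/W are
        # preserved. The trailing commas after those verify_model(...) calls wrap
        # the (unused) return value in a 1-tuple, which is harmless here.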
verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), inp)\n # downsample\n verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), inp)\n\n\[email protected]_gpu\ndef test_conv3d_transpose():\n for ishape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:\n inp = torch.rand(ishape)\n verify_model(\n torch.nn.ConvTranspose3d(\n in_channels=8, out_channels=33, kernel_size=3, stride=2\n ).eval(),\n inp,\n ),\n verify_model(\n torch.nn.ConvTranspose3d(\n in_channels=8,\n out_channels=20,\n kernel_size=(3, 5, 2),\n stride=(2, 1, 1),\n padding=(0, 4, 2),\n ).eval(),\n inp,\n ),\n verify_model(\n torch.nn.ConvTranspose3d(in_channels=8, out_channels=20, kernel_size=1).eval(), inp\n )\n verify_model(\n torch.nn.ConvTranspose3d(in_channels=8, out_channels=5, kernel_size=1, stride=2).eval(),\n inp,\n )\n\n\n# Model tests\[email protected]_gpu\ndef test_resnet18():\n torch.set_grad_enabled(False)\n verify_model(\"resnet18\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_squeezenet1_0():\n torch.set_grad_enabled(False)\n verify_model(\"squeezenet1_0\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_squeezenet1_1():\n torch.set_grad_enabled(False)\n verify_model(\"squeezenet1_1\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_densenet121():\n torch.set_grad_enabled(False)\n verify_model(\"densenet121\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_inception_v3():\n torch.set_grad_enabled(False)\n verify_model(\"inception_v3\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_googlenet():\n torch.set_grad_enabled(False)\n verify_model(\"googlenet\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_mnasnet0_5():\n torch.set_grad_enabled(False)\n verify_model(\"mnasnet0_5\", atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_mobilenet_v2():\n torch.set_grad_enabled(False)\n verify_model(\"mobilenet_v2\", atol=1e-4, rtol=1e-4)\n\n\n\"\"\"\n#TODO: Fix VGG and AlexNet issues (probably due to pooling)\[email protected]_gpu\ndef test_alexnet():\n torch.set_grad_enabled(False)\n verify_model(\"alexnet\")\n\[email protected]_gpu\ndef test_vgg11():\n torch.set_grad_enabled(False)\n verify_model(\"vgg11\")\n\[email protected]_gpu\ndef test_vgg11_bn():\n torch.set_grad_enabled(False)\n verify_model(\"vgg11_bn\")\n\"\"\"\n\n\[email protected]_gpu\ndef test_custom_conversion_map():\n def get_roi_align():\n pool_size = 5\n n_channels = 2 * (pool_size ** 2)\n x = torch.rand(2, n_channels, 10, 10)\n rois = torch.tensor(\n [\n [0, 0, 0, 9, 9], # format is (xyxy)\n [0, 0, 5, 4, 9],\n [0, 5, 5, 9, 9],\n [1, 0, 0, 9, 9],\n ],\n dtype=torch.float,\n )\n roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)\n return roi_align.eval(), [x, rois]\n\n def convert_roi_align():\n def _impl(inputs, input_types):\n spatial_scale = inputs[2]\n pooled_size = (inputs[3], inputs[4])\n sampling_ratio = inputs[5]\n return relay.op.vision.roi_align(\n inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio\n )\n\n return _impl\n\n custom_map = {\"torchvision::roi_align\": convert_roi_align()}\n model, inputs = get_roi_align()\n\n verify_model(model, inputs, custom_map)\n\n\[email protected]_gpu\ndef test_segmentaton_models():\n class SegmentationModelWrapper(Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, inp):\n out = self.model(inp)\n return out[\"out\"]\n\n fcn = torchvision.models.segmentation.fcn_resnet101(pretrained=True)\n deeplab = 
torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)\n\n inp = [torch.rand((1, 3, 300, 300), dtype=torch.float)]\n\n verify_model(SegmentationModelWrapper(fcn.eval()), inp, atol=1e-4, rtol=1e-4)\n verify_model(SegmentationModelWrapper(deeplab.eval()), inp, atol=1e-4, rtol=1e-4)\n\n\[email protected]_gpu\ndef test_3d_models():\n input_shape = (1, 3, 4, 56, 56)\n resnet3d = torchvision.models.video.r3d_18(pretrained=True).eval()\n verify_model(resnet3d, [torch.rand(input_shape)], atol=1e-4, rtol=1e-4)\n\n\ndef _get_default_vm_targets():\n return [tgt for (tgt, _) in tvm.testing.enabled_targets()]\n\n\ndef verify_script_model(pt_model, ishapes, targets, idtype=None):\n script_module = torch.jit.script(pt_model)\n\n verify_model_vm(script_module, ishapes, idtype=idtype, targets=targets)\n\n\ndef verify_trace_model(pt_model, idata, targets):\n traced_model = torch.jit.trace(pt_model, idata)\n ishapes = [data.shape for data in idata]\n verify_model_vm(traced_model, ishapes, idata=idata, targets=targets)\n\n\ndef convert_pt_to_tvm_type(idtype):\n \"\"\" Accepts a pytorch dtype and returns string TVM dtype.\"\"\"\n # TVM does not support PyTorch complex dtypes\n if idtype == torch.float64:\n curr_dtype = \"float64\"\n elif idtype == torch.float32:\n curr_dtype = \"float32\"\n elif idtype == torch.float16:\n curr_dtype = \"float16\"\n elif idtype == torch.bfloat16:\n curr_dtype = \"bfloat16\"\n elif idtype == torch.int64:\n curr_dtype = \"int64\"\n elif idtype == torch.int32:\n curr_dtype = \"int32\"\n elif idtype == torch.int16:\n curr_dtype = \"int16\"\n elif idtype == torch.int8:\n curr_dtype = \"int8\"\n elif idtype == torch.uint8:\n curr_dtype = \"uint8\"\n elif idtype == torch.bool:\n curr_dtype = \"bool\"\n else:\n raise NotImplementedError(\"Unsupported dtype: {}\".format(idtype))\n return curr_dtype\n\n\ndef verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=[\"llvm\"]):\n if not idtype:\n idtype = torch.float\n\n input_names = [\"i{}\".format(idx) for idx, ish in enumerate(ishapes)]\n tvm_dtype = convert_pt_to_tvm_type(idtype)\n input_dtypes = [tvm_dtype] * len(input_names)\n input_shapes = list(zip(input_names, list(zip(ishapes, input_dtypes))))\n\n if idata:\n input_data = idata\n # If no input_data provided, generate random data of specified dtype\n else:\n if idtype == torch.bool:\n input_data = [\n torch.Tensor.bool(torch.randint(low=0, high=2, size=shape)) for shape in ishapes\n ]\n # Torch dtype can be float, complex, int, or Bool. 
Complex not supported, so if not float or Bool,\n # dtype must be int!\n elif not idtype.is_floating_point:\n input_data = [\n torch.randint(low=0, high=10, size=shape, dtype=idtype) for shape in ishapes\n ]\n else:\n input_data = [torch.randn(shape, dtype=idtype) for shape in ishapes]\n\n # Compile via VM\n mod, params = relay.frontend.from_pytorch(input_model, input_shapes)\n\n for tgt in targets:\n print(\"Running on target\", tgt)\n ctx = tvm.context(tgt, 0)\n\n executor = relay.create_executor(\"vm\", mod=mod, ctx=ctx, target=tgt)\n evaluator = executor.evaluate()\n\n # Inference\n for name, inp in zip(input_names, input_data):\n params[name] = inp.numpy()\n vm_res = evaluator(**params)\n\n # Baseline result\n with torch.no_grad():\n pt_result = input_model(*input_data)\n\n # Verify the accuracy\n if not isinstance(pt_result, torch.Tensor):\n tvm_res = vm_res.asnumpy().item()\n assert pt_result == tvm_res\n else:\n tvm.testing.assert_allclose(vm_res.asnumpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)\n\n\[email protected]_gpu\ndef test_control_flow():\n class SimpleIf(torch.nn.Module):\n def __init__(self, N, M):\n super().__init__()\n self.weight = torch.nn.Parameter(torch.rand(N, M))\n\n def forward(self, inp):\n if inp.sum() > 0.0:\n output = self.weight + inp\n else:\n output = self.weight - inp\n return output\n\n class NestedIf(torch.nn.Module):\n def __init__(self, N, M):\n super().__init__()\n self.weight = torch.nn.Parameter(torch.rand(N, M))\n\n def forward(self, inp):\n if inp.sum() > 0.0:\n if inp.mean() > 0.0:\n output = self.weight + inp\n else:\n output = self.weight - inp\n else:\n if inp.mean() >= 0.0:\n output = self.weight * inp\n else:\n output = self.weight / inp\n\n return output\n\n class ScalarLoop(torch.nn.Module):\n def forward(self, inp):\n a = 0\n for i in range(inp.size(0)):\n b = i * i\n b = b + 1\n a += b\n if a != 0:\n a += 1\n else:\n a += 2\n return a\n\n class SimpleLoop(torch.nn.Module):\n def forward(self, inp):\n a = inp\n for i in range(inp.size(0)):\n b = a * 2.0\n c = a + b\n a += c\n return a\n\n class LoopWithIf(torch.nn.Module):\n def forward(self, inp):\n a = inp\n for i in range(inp.size(0)):\n b = a * 2.0\n b = a + b\n if b.sum() > 0.0:\n a += b\n else:\n a -= b\n return a\n\n class NestedLoop(torch.nn.Module):\n def forward(self, inp):\n a = inp\n for i in range(inp.size(0)):\n b = a * float(i)\n for j in range(inp.size(1)):\n a += b * float(j)\n return a\n\n class SimpleScalarWhileLoop(torch.nn.Module):\n def forward(self, inp):\n a = 1\n i = 0\n while i <= inp.size(0):\n a += i\n i += 2\n i = 0\n # also test constant init cond\n while i < 10:\n a += i\n i += 3\n return a\n\n class SimpleWhileLoop(torch.nn.Module):\n def forward(self, inp):\n a = inp\n i = 0\n while i < inp.size(0):\n a += a * float(i) * 2.0\n i += 1\n return a\n\n models = [\n SimpleIf(10, 20),\n NestedIf(10, 20),\n ScalarLoop(),\n SimpleLoop(),\n LoopWithIf(),\n SimpleScalarWhileLoop(),\n SimpleWhileLoop(),\n NestedLoop(),\n ]\n\n for pt_model in models:\n verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())\n\n\[email protected]_gpu\ndef test_simple_rnn():\n # The mixed tracing and scripting example from\n # https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing\n class DecisionGate(torch.nn.Module):\n def forward(self, x):\n if x.sum() > 0:\n return x\n else:\n return -x\n\n class Cell(torch.nn.Module):\n def __init__(self, dg):\n super(Cell, self).__init__()\n self.dg = dg\n self.linear = 
torch.nn.Linear(4, 4)\n\n def forward(self, x, h):\n new_h = torch.tanh(self.dg(self.linear(x)) + h)\n return new_h, new_h\n\n class RNNLoop(torch.nn.Module):\n def __init__(self):\n super().__init__()\n x = torch.rand(10, 4, dtype=torch.float)\n h = torch.rand(10, 4, dtype=torch.float)\n self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))\n\n def forward(self, xs):\n h = torch.zeros(10, 4, dtype=torch.float)\n y = torch.zeros(10, 4, dtype=torch.float)\n for i in range(xs.size(0)):\n y, h = self.cell(xs[i], h)\n return y\n\n verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())\n\n\[email protected]_gpu\ndef test_forward_reduce_sum():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ReduceSum1(Module):\n def forward(self, *args):\n return args[0].sum(1)\n\n class ReduceSum2(Module):\n def forward(self, *args):\n return args[0].sum(dim=1, keepdim=False)\n\n class ReduceSum3(Module):\n def forward(self, *args):\n return args[0].sum(dim=2, keepdim=True)\n\n class ReduceSum4(Module):\n def forward(self, *args):\n return args[0].sum(dim=(2, 3), keepdim=True)\n\n class ReduceSum5(Module):\n def forward(self, *args):\n return args[0].sum(dim=(2, 3), keepdim=False)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ReduceSum1().float().eval(), input_data=input_data)\n verify_model(ReduceSum2().float().eval(), input_data=input_data)\n verify_model(ReduceSum3().float().eval(), input_data=input_data)\n verify_model(ReduceSum4().float().eval(), input_data=input_data)\n verify_model(ReduceSum5().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_reduce_prod():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ReduceProd1(Module):\n def forward(self, *args):\n return args[0].prod(1)\n\n class ReduceProd2(Module):\n def forward(self, *args):\n return args[0].prod(dim=1, keepdim=False)\n\n class ReduceProd3(Module):\n def forward(self, *args):\n return args[0].prod(dim=2, keepdim=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ReduceProd1().float().eval(), input_data=input_data)\n verify_model(ReduceProd2().float().eval(), input_data=input_data)\n verify_model(ReduceProd3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_argmin():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ArgMin1(Module):\n def forward(self, *args):\n return args[0].argmin(1)\n\n class ArgMin2(Module):\n def forward(self, *args):\n return args[0].argmin(dim=1, keepdim=False)\n\n class ArgMin3(Module):\n def forward(self, *args):\n return args[0].argmin(dim=2, keepdim=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ArgMin1().float().eval(), input_data=input_data)\n verify_model(ArgMin2().float().eval(), input_data=input_data)\n verify_model(ArgMin3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_argmax():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ArgMax1(Module):\n def forward(self, *args):\n return args[0].argmax(1)\n\n class ArgMax2(Module):\n def forward(self, *args):\n return args[0].argmax(dim=1, keepdim=False)\n\n class ArgMax3(Module):\n def forward(self, *args):\n return args[0].argmax(dim=2, keepdim=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ArgMax1().float().eval(), input_data=input_data)\n verify_model(ArgMax2().float().eval(), input_data=input_data)\n verify_model(ArgMax3().float().eval(), 
input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_std():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Std1(Module):\n def forward(self, *args):\n return args[0].std(1, unbiased=False)\n\n class Std2(Module):\n def forward(self, *args):\n return args[0].std(dim=1, keepdim=False, unbiased=False)\n\n class Std3(Module):\n def forward(self, *args):\n return args[0].std(dim=2, keepdim=True, unbiased=False)\n\n class Std4(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)\n\n class Std5(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)\n\n class Std6(Module):\n def forward(self, *args):\n return args[0].std(unbiased=False)\n\n class Std7(Module):\n def forward(self, *args):\n return args[0].std(dim=1, keepdim=False, unbiased=True)\n\n class Std8(Module):\n def forward(self, *args):\n return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)\n\n class Std9(Module):\n def forward(self, *args):\n return args[0].std(unbiased=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Std1().float().eval(), input_data=input_data)\n verify_model(Std2().float().eval(), input_data=input_data)\n verify_model(Std3().float().eval(), input_data=input_data)\n verify_model(Std4().float().eval(), input_data=input_data)\n verify_model(Std5().float().eval(), input_data=input_data)\n verify_model(Std6().float().eval(), input_data=input_data)\n verify_model(Std7().float().eval(), input_data=input_data)\n verify_model(Std8().float().eval(), input_data=input_data)\n verify_model(Std9().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_variance():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Variance1(Module):\n def forward(self, *args):\n return args[0].var(1, unbiased=False)\n\n class Variance2(Module):\n def forward(self, *args):\n return args[0].var(dim=1, keepdim=False, unbiased=False)\n\n class Variance3(Module):\n def forward(self, *args):\n return args[0].var(dim=2, keepdim=True, unbiased=False)\n\n class Variance4(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)\n\n class Variance5(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)\n\n class Variance6(Module):\n def forward(self, *args):\n return args[0].var(unbiased=False)\n\n class Variance7(Module):\n def forward(self, *args):\n return args[0].var(dim=1, keepdim=False, unbiased=True)\n\n class Variance8(Module):\n def forward(self, *args):\n return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)\n\n class Variance9(Module):\n def forward(self, *args):\n return args[0].var(unbiased=True)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Variance1().float().eval(), input_data=input_data)\n verify_model(Variance2().float().eval(), input_data=input_data)\n verify_model(Variance3().float().eval(), input_data=input_data)\n verify_model(Variance4().float().eval(), input_data=input_data)\n verify_model(Variance5().float().eval(), input_data=input_data)\n verify_model(Variance6().float().eval(), input_data=input_data)\n verify_model(Variance7().float().eval(), input_data=input_data)\n verify_model(Variance8().float().eval(), input_data=input_data)\n verify_model(Variance9().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_rsub():\n torch.set_grad_enabled(False)\n\n class Rsub1(Module):\n def 
forward(self, *args):\n return torch.rsub(args[0], args[1])\n\n class Rsub2(Module):\n def forward(self, *args):\n return torch.rsub(args[0], args[1], alpha=0.5)\n\n d1 = torch.rand([1, 3]).float()\n d2 = torch.rand([1, 3]).float()\n d3 = torch.rand([1, 3]).int()\n verify_model(Rsub1().float().eval(), input_data=[d1, d2])\n verify_model(Rsub1().float().eval(), input_data=[d1, d3])\n verify_model(Rsub2().float().eval(), input_data=[d1, d2])\n verify_model(Rsub2().float().eval(), input_data=[d1, d3])\n\n\[email protected]_gpu\ndef test_forward_embedding():\n torch.set_grad_enabled(False)\n\n input_data = torch.randint(0, 10, [2, 4]).long()\n verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)\n\n input_data = torch.randint(0, 4, [2, 3, 4]).long()\n verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)\n\n input_data = torch.randint(0, 4, [2, 3, 4]).long()\n verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_onehot():\n torch.set_grad_enabled(False)\n\n class OneHot1(Module):\n def forward(self, *args):\n return torch.nn.functional.one_hot(args[0], num_classes=3)\n\n class OneHot2(Module):\n def forward(self, *args):\n return torch.nn.functional.one_hot(args[0], num_classes=5)\n\n input_data = torch.arange(0, 5) % 3\n verify_model(OneHot1().float().eval(), input_data=input_data)\n\n input_data = torch.arange(0, 5) % 4\n verify_model(OneHot2().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isfinite():\n torch.set_grad_enabled(False)\n\n class IsFinite1(Module):\n def forward(self, *args):\n return torch.isfinite(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsFinite1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isnan():\n torch.set_grad_enabled(False)\n\n class IsNan1(Module):\n def forward(self, *args):\n return torch.isnan(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsNan1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_isinf():\n torch.set_grad_enabled(False)\n\n class IsInf1(Module):\n def forward(self, *args):\n return torch.isinf(args[0])\n\n input_data = torch.tensor([1, float(\"inf\"), 2, float(\"-inf\"), float(\"nan\")]).float()\n verify_model(IsInf1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_clamp():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class Clamp1(Module):\n def forward(self, *args):\n return torch.clamp(args[0], min=-0.5, max=0.5)\n\n class Clamp2(Module):\n def forward(self, *args):\n return torch.clamp(args[0], min=-0.3)\n\n class Clamp3(Module):\n def forward(self, *args):\n return torch.clamp(args[0], max=1.0)\n\n input_data = torch.rand(input_shape).float()\n verify_model(Clamp1().float().eval(), input_data=input_data)\n verify_model(Clamp2().float().eval(), input_data=input_data)\n verify_model(Clamp3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_clamp_():\n torch.set_grad_enabled(False)\n\n class ClampInPlace(Module):\n def __init__(self, min, max):\n super(ClampInPlace, self).__init__()\n self.min = min\n self.max = max\n\n def forward(self, *args):\n return torch.clamp_(args[0], self.min, self.max)\n\n for ishape, min, max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 
0.5)):\n input_data = torch.rand(ishape).float()\n verify_model(ClampInPlace(min, max).float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_ones():\n torch.set_grad_enabled(False)\n\n class Ones1(Module):\n def forward(self, *args):\n return torch.ones(2, 3)\n\n verify_model(Ones1().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_ones_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class OnesLike1(Module):\n def forward(self, *args):\n return torch.ones_like(args[0])\n\n class OnesLike2(Module):\n def forward(self, *args):\n return torch.ones_like(args[0], dtype=torch.int8)\n\n class OnesLike3(Module):\n def forward(self, *args):\n return torch.ones_like(args[0], dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(OnesLike1().float().eval(), input_data=input_data)\n verify_model(OnesLike2().float().eval(), input_data=input_data)\n verify_model(OnesLike3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_zeros():\n torch.set_grad_enabled(False)\n\n class Zeros1(Module):\n def forward(self, *args):\n return torch.zeros(2, 3)\n\n verify_model(Zeros1().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_zeros_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class ZerosLike1(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0])\n\n class ZerosLike2(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0], dtype=torch.int32)\n\n class ZerosLike3(Module):\n def forward(self, *args):\n return torch.zeros_like(args[0], dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(ZerosLike1().float().eval(), input_data=input_data)\n verify_model(ZerosLike2().float().eval(), input_data=input_data)\n verify_model(ZerosLike3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_full():\n torch.set_grad_enabled(False)\n\n class Full1(Module):\n def forward(self, *args):\n return torch.full((2, 3), 3.14)\n\n class Full2(Module):\n def forward(self, *args):\n return torch.full((1, 2, 3), 1.0, dtype=torch.int32)\n\n verify_model(Full1().float().eval(), input_data=[])\n verify_model(Full2().float().eval(), input_data=[])\n\n\[email protected]_gpu\ndef test_forward_full_like():\n torch.set_grad_enabled(False)\n input_shape = [1, 3, 10, 10]\n\n class FullLike1(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 3.14)\n\n class FullLike2(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 22.22, dtype=torch.int32)\n\n class FullLike3(Module):\n def forward(self, *args):\n return torch.full_like(args[0], 1.4, dtype=torch.float)\n\n input_data = torch.rand(input_shape).float()\n verify_model(FullLike1().float().eval(), input_data=input_data)\n verify_model(FullLike2().float().eval(), input_data=input_data)\n verify_model(FullLike3().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_linspace():\n torch.set_grad_enabled(False)\n\n class Linspace1(Module):\n def forward(self, *args):\n return torch.linspace(5, 10, steps=100)\n\n class Linspace2(Module):\n def forward(self, *args):\n return torch.linspace(-10, 10, steps=5)\n\n class Linspace3(Module):\n def forward(self, *args):\n return torch.linspace(start=-10, end=10, steps=5)\n\n class Linspace4(Module):\n def forward(self, *args):\n return torch.linspace(start=-10, end=10, steps=1)\n\n class Linspace5(Module):\n 
def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int32)\n\n class Linspace6(Module):\n def forward(self, *args):\n return torch.linspace(start=1, end=6, steps=2)\n\n class Linspace7(Module):\n def forward(self, *args):\n return torch.linspace(1, 4, steps=100, dtype=torch.float32)\n\n class Linspace8(Module):\n def forward(self, *args):\n return torch.linspace(1, 2, 1, dtype=torch.int16)\n\n verify_model(Linspace1().float().eval())\n verify_model(Linspace2().float().eval())\n verify_model(Linspace3().float().eval())\n verify_model(Linspace4().float().eval())\n verify_model(Linspace5().float().eval())\n verify_model(Linspace6().float().eval())\n verify_model(Linspace7().float().eval())\n verify_model(Linspace8().float().eval())\n\n\[email protected]_gpu\ndef test_forward_take():\n torch.set_grad_enabled(False)\n\n class Take1(Module):\n def forward(self, *args):\n indices = torch.tensor([[0, 0], [1, 0]])\n if torch.cuda.is_available():\n indices = indices.cuda()\n return torch.take(args[0], indices)\n\n class Take2(Module):\n def forward(self, *args):\n return torch.take(args[0], args[1])\n\n input_data = torch.tensor([[1, 2], [3, 4]])\n verify_model(Take1().float().eval(), input_data=input_data)\n indices = torch.tensor([[0, 0], [1, 0]])\n verify_model(Take2().float().eval(), input_data=[input_data, indices])\n\n\[email protected]_gpu\ndef test_forward_topk():\n torch.set_grad_enabled(False)\n\n class Topk1(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3)\n\n class Topk2(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=-2)\n\n class Topk3(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, dim=3)\n\n class Topk4(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=True)\n\n class Topk5(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, largest=False)\n\n class Topk6(Module):\n def forward(self, *args):\n return torch.topk(args[0], k=3, sorted=True)\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Topk1().float().eval(), input_data=input_data)\n verify_model(Topk2().float().eval(), input_data=input_data)\n verify_model(Topk3().float().eval(), input_data=input_data)\n verify_model(Topk4().float().eval(), input_data=input_data)\n verify_model(Topk5().float().eval(), input_data=input_data)\n verify_model(Topk6().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_logical_not():\n torch.set_grad_enabled(False)\n\n class LogicalNot1(Module):\n def forward(self, *args):\n return torch.logical_not(args[0])\n\n input_data = torch.tensor([True, False])\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n verify_model(LogicalNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_not():\n torch.set_grad_enabled(False)\n\n class BitwiseNot1(Module):\n def forward(self, *args):\n return torch.bitwise_not(args[0])\n\n input_data = torch.tensor([0, 1, -10], dtype=torch.int8)\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)\n 
verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n input_data = torch.tensor([True, False])\n verify_model(BitwiseNot1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_bitwise_xor():\n torch.set_grad_enabled(False)\n\n class BitwiseXor1(Module):\n def forward(self, *args):\n return torch.bitwise_xor(args[0], args[1])\n\n class BitwiseXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.bitwise_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(BitwiseXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_logical_xor():\n torch.set_grad_enabled(False)\n\n class LogicalXor1(Module):\n def forward(self, *args):\n return torch.logical_xor(args[0], args[1])\n\n class LogicalXor2(Module):\n def forward(self, *args):\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n if torch.cuda.is_available():\n rhs = rhs.cuda()\n return torch.logical_xor(args[0], rhs)\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n rhs = torch.tensor([1, 0, 3], dtype=torch.int8)\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([True, True, False])\n rhs = torch.tensor([False, True, False])\n verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])\n\n lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)\n verify_model(LogicalXor2().float().eval(), input_data=[lhs])\n\n\[email protected]_gpu\ndef test_forward_unary():\n torch.set_grad_enabled(False)\n\n class Sqrt1(Module):\n def forward(self, *args):\n return torch.sqrt(args[0])\n\n class RSqrt1(Module):\n def forward(self, *args):\n return torch.rsqrt(args[0])\n\n class Ceil1(Module):\n def forward(self, *args):\n return torch.ceil(args[0])\n\n class Floor1(Module):\n def forward(self, *args):\n return torch.floor(args[0])\n\n class Round1(Module):\n def forward(self, *args):\n return torch.round(args[0])\n\n class Cos1(Module):\n def forward(self, *args):\n return torch.cos(args[0])\n\n class Sin1(Module):\n def forward(self, *args):\n return torch.sin(args[0])\n\n class Tan1(Module):\n def forward(self, *args):\n return torch.tan(args[0])\n\n class Tanh1(Module):\n def forward(self, *args):\n return torch.tanh(args[0])\n\n class Acos1(Module):\n def forward(self, *args):\n return torch.acos(args[0])\n\n class Asin1(Module):\n def forward(self, *args):\n return torch.asin(args[0])\n\n class Atan1(Module):\n def forward(self, *args):\n return torch.atan(args[0])\n\n class Log1(Module):\n def forward(self, *args):\n return torch.log(args[0])\n\n class Exp1(Module):\n def forward(self, *args):\n return torch.exp(args[0])\n\n class Erf1(Module):\n def forward(self, *args):\n return torch.erf(args[0])\n\n class Trunc1(Module):\n def forward(self, *args):\n return torch.trunc(args[0])\n\n class Sign1(Module):\n def forward(self, *args):\n return torch.sign(args[0])\n\n class Neg1(Module):\n def forward(self, *args):\n return torch.neg(args[0])\n\n class Sinh1(Module):\n def forward(self, *args):\n return torch.sinh(args[0])\n\n class Cosh1(Module):\n def forward(self, *args):\n 
return torch.cosh(args[0])\n\n class Log2_1(Module):\n def forward(self, *args):\n return torch.log2(args[0])\n\n class Log10_1(Module):\n def forward(self, *args):\n return torch.log10(args[0])\n\n class Log1p_1(Module):\n def forward(self, *args):\n return torch.log1p(args[0])\n\n input_shape = [1, 3, 10, 10]\n input_data = torch.rand(input_shape).float()\n verify_model(Sqrt1().float().eval(), input_data=input_data)\n verify_model(RSqrt1().float().eval(), input_data=input_data)\n verify_model(Ceil1().float().eval(), input_data=input_data)\n verify_model(Floor1().float().eval(), input_data=input_data)\n verify_model(Round1().float().eval(), input_data=input_data)\n verify_model(Cos1().float().eval(), input_data=input_data)\n verify_model(Cosh1().float().eval(), input_data=input_data)\n verify_model(Sin1().float().eval(), input_data=input_data)\n verify_model(Sinh1().float().eval(), input_data=input_data)\n verify_model(Tan1().float().eval(), input_data=input_data)\n verify_model(Tanh1().float().eval(), input_data=input_data)\n verify_model(Acos1().float().eval(), input_data=input_data)\n verify_model(Asin1().float().eval(), input_data=input_data)\n verify_model(Atan1().float().eval(), input_data=input_data)\n verify_model(Log1().float().eval(), input_data=input_data)\n verify_model(Log2_1().float().eval(), input_data=input_data)\n verify_model(Log10_1().float().eval(), input_data=input_data)\n verify_model(Log1p_1().float().eval(), input_data=input_data)\n verify_model(Exp1().float().eval(), input_data=input_data)\n verify_model(Erf1().float().eval(), input_data=input_data)\n verify_model(Trunc1().float().eval(), input_data=input_data)\n verify_model(Sign1().float().eval(), input_data=input_data)\n verify_model(Neg1().float().eval(), input_data=input_data)\n\n\[email protected]_gpu\ndef test_forward_where():\n torch.set_grad_enabled(False)\n\n class Where1(Module):\n def forward(self, *args):\n y = torch.ones([3, 2])\n if torch.cuda.is_available():\n y = y.cuda()\n return torch.where(args[0] > 0, args[0], y)\n\n class Where2(Module):\n def forward(self, *args):\n return torch.where(args[0] > 0, args[0], args[1])\n\n class Where3(Module):\n def forward(self, *args):\n return torch.where(args[0])[0]\n\n x = torch.rand([3, 2]).float()\n verify_model(Where1(), input_data=[x])\n y = torch.rand([3, 2])\n verify_model(Where2(), input_data=[x, y])\n\n # a single argument variant, equivalent to torch.nonzero(..., as_tuple=True)\n inp = torch.rand([10])\n inp[3:8] = 0\n verify_trace_model(Where3(), [inp], [\"llvm\"])\n\n\[email protected]_gpu\ndef test_forward_addcdiv():\n torch.set_grad_enabled(False)\n\n class Addcdiv1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcdiv(args[0], 0.1, t1, t2)\n\n class Addcdiv2(Module):\n def forward(self, *args):\n return torch.addcdiv(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcdiv1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_addcmul():\n torch.set_grad_enabled(False)\n\n class Addcmul1(Module):\n def forward(self, *args):\n t1 = torch.ones([3, 1])\n t2 = torch.ones([1, 3])\n if torch.cuda.is_available():\n t1 = t1.cuda()\n t2 = t2.cuda()\n return torch.addcmul(args[0], 0.1, t1, t2)\n\n class 
Addcmul2(Module):\n def forward(self, *args):\n return torch.addcmul(args[0], 0.5, args[1], args[2])\n\n input_data = torch.rand([1, 3]).float()\n verify_model(Addcmul1().float().eval(), input_data=input_data)\n t1 = torch.rand([3, 1]).float()\n t2 = torch.rand([1, 3]).float()\n verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])\n\n\[email protected]_gpu\ndef test_forward_true_divide():\n if package_version.parse(torch.__version__) < package_version.parse(\"1.5.0\"):\n return\n torch.set_grad_enabled(False)\n\n class TrueDivide(Module):\n def forward(self, *args):\n return torch.true_divide(args[0], args[1])\n\n dividend = torch.rand([5, 3]).float()\n # divisor could be either tensor or scalar\n divisor_tensor = torch.rand([5, 3]).float() + 0.5\n divisor_scalar = torch.tensor(1.0, dtype=torch.float32)\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4\n )\n verify_model(\n TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4\n )\n\n\[email protected]_gpu\ndef test_forward_is_floating_point():\n torch.set_grad_enabled(False)\n\n class IsFloatingPoint(Module):\n def forward(self, arg):\n # `torch.jit.trace` cannot accept something that outputs\n # a Bool, so `torch.jit.script` will be used instead\n return torch.is_floating_point(arg)\n\n targets = _get_default_vm_targets()\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float16)\n # todo(dvisnty): Run the test for bfloat16 when full bfloat16 support is implemented\n # verify_script_model(IsFloatingPoint(), [(1,1)], targets, idtype=torch.bfloat16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int64)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int32)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int16)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int8)\n verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.uint8)\n\n\[email protected]_gpu\ndef test_forward_traced_function():\n def fn(t1, t2):\n return t1 + t2\n\n tensor1 = torch.randn(3, 4)\n tensor2 = torch.randn(3, 4)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n\[email protected]_gpu\ndef test_forward_dtypes():\n def fn(t1, t2):\n return 2.5 * t1 + t2\n\n for dt in [torch.int32, torch.int64, torch.double]:\n tensor1 = torch.randn(3, 4).to(dtype=dt)\n tensor2 = torch.randn(3, 4).to(dtype=dt)\n verify_model(fn, input_data=[tensor1, tensor2])\n\n class ModuleWithIntParameters(Module):\n def __init__(self, arr):\n super().__init__()\n self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)\n\n def forward(self, x):\n return x.long() + self.param\n\n shape = (10, 10)\n param = torch.ones(shape, dtype=torch.long)\n inp = torch.ones(shape, dtype=torch.int)\n verify_model(ModuleWithIntParameters(param), input_data=inp)\n\n\[email protected]_gpu\ndef test_weight_names():\n tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])\n mod, params = relay.frontend.from_pytorch(tm, [(\"input\", (2, 3))])\n assert set(params.keys()) == set(n for n, p in tm.named_parameters())\n\n\[email protected]_gpu\ndef test_duplicate_weight_use():\n # The test cases doesn't make any sense as a neural network,\n # the issue popped up in shared input/output 
embeddings of bert,\n # but this is quicker\n class Test(Module):\n def __init__(self):\n super().__init__()\n self.lin = torch.nn.Linear(5, 3)\n\n def forward(self, x):\n x = self.lin(x)\n x = x @ self.lin.weight\n return x\n\n verify_model(Test(), input_data=[torch.randn(5, 5)])\n\n\[email protected]_gpu\ndef test_forward_matmul():\n torch.set_grad_enabled(False)\n\n class MatMul1(Module):\n def forward(self, *args):\n return torch.matmul(args[0], args[1])\n\n # matrix x vector\n tensor1 = torch.randn(3, 4)\n tensor2 = torch.randn(4)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # matrix x matrix\n tensor1 = torch.randn(10, 4)\n tensor2 = torch.randn(4, 10)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x batched matrix\n tensor1 = torch.randn(10, 3, 4)\n tensor2 = torch.randn(10, 4, 5)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x broadcasted matrix\n tensor1 = torch.randn(10, 3, 4)\n tensor2 = torch.randn(4, 5)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n # batched matrix x batched matrix\n tensor1 = torch.randn(1, 12, 14, 64)\n tensor2 = torch.randn(1, 12, 64, 14)\n verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])\n\n\ndef test_forward_index():\n torch.set_grad_enabled(False)\n input_shape = [3, 4, 5, 6]\n\n class Index0(Module):\n def forward(self, x):\n return x[[0, 1], [0, 2], :2, 4]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Index0().eval(), input_data=input_data)\n\n class Index1(Module):\n def forward(self, x):\n return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]\n\n input_data = torch.rand(input_shape).float()\n verify_model(Index1().eval(), input_data=input_data)\n\n\ndef test_logsumexp():\n class Logsumexp(Module):\n def __init__(self, dim, keepdim=False):\n super().__init__()\n self.dim = dim\n self.keepdim = keepdim\n\n def forward(self, x):\n return torch.logsumexp(x, self.dim, self.keepdim)\n\n input_shape = (100, 100)\n input_data = torch.rand(input_shape)\n\n verify_model(Logsumexp(0), input_data=input_data)\n verify_model(Logsumexp(0, keepdim=True), input_data=input_data)\n # Also test on double\n verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())\n\n\ndef test_stack():\n class Stack(torch.nn.Module):\n def __init__(self, axis=0):\n super().__init__()\n self.axis = axis\n\n def forward(self, x):\n return torch.stack((x, x), dim=self.axis)\n\n inp = torch.randn(8, 8, 8)\n verify_model(Stack(), input_data=inp)\n verify_model(Stack(axis=-1), input_data=inp)\n verify_model(Stack(axis=3), input_data=inp)\n verify_model(Stack(axis=-4), input_data=inp)\n\n\ndef test_stack_dynamic():\n class Stack(torch.nn.Module):\n def forward(self, x):\n tensor_list = []\n for i in range(x.size(0)):\n # this is a workaround to avoid generating impure aten::append op\n tensor_list += [x[i]]\n # relay tensor array only supports stacking on the first axis\n return torch.stack(tensor_list, dim=0)\n\n verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())\n\n\ndef test_forward_unbind():\n class Unbind(torch.nn.Module):\n def __init__(self, axis=0):\n super().__init__()\n self.axis = axis\n\n def forward(self, x):\n return torch.unbind(x, self.axis)\n\n inp = torch.randn(8, 8, 8)\n verify_model(Unbind(0), input_data=inp)\n verify_model(Unbind(1), input_data=inp)\n verify_model(Unbind(2), input_data=inp)\n\n\ndef test_forward_nonzero():\n class Nonzero(Module):\n 
def __init__(self, as_tuple=False):\n super().__init__()\n self.as_tuple = as_tuple\n\n def forward(self, data):\n return torch.nonzero(data, as_tuple=self.as_tuple)\n\n inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype(\"float32\"))\n verify_trace_model(Nonzero(), [inp], [\"llvm\"])\n\n\ndef test_forward_scatter():\n # integer cannot be traced\n def test_fn_scatter(dim):\n return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)\n\n def test_fn_scatter_add(dim):\n return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)\n\n in_data = torch.zeros(3, 5)\n in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])\n in_src = torch.rand(2, 5)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn_scatter(0), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(0), [in_data, in_index, in_src], targets)\n\n in_data = torch.zeros(2, 4)\n in_index = torch.tensor([[2], [3]])\n in_src = torch.rand(2, 1)\n\n verify_trace_model(test_fn_scatter(1), [in_data, in_index, in_src], targets)\n verify_trace_model(test_fn_scatter_add(1), [in_data, in_index, in_src], targets)\n\n\ndef test_numel():\n class Numel(Module):\n def forward(self, data):\n return torch.tensor(torch.numel(data))\n\n targets = _get_default_vm_targets()\n verify_script_model(Numel(), [(1,)], targets)\n verify_script_model(Numel(), [(3, 5)], targets)\n verify_script_model(Numel(), [(3, 5, 8)], targets)\n\n\ndef test_forward_pretrained_bert_base_uncased():\n ######################################################################\n # This is an example how to run BERT models using TVM\n # ---------------------------------------------------\n \"\"\"\n Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert\n\n # To get started, pretrained bert package needs to be installed as prerequisite.\n\n .. code-block:: bash\n\n # install bert package\n pip install pytorch_pretrained_bert==0.6.2 --user\n \"\"\"\n\n try:\n from pytorch_pretrained_bert import BertTokenizer, BertForMaskedLM\n except:\n print(\"Torch pretrained bert package must be installed to run this script.\")\n return\n\n ######################################################################\n # Load the tokenizer and tokenize the input\n # -----------------------------------------\n\n # Load pre-trained model tokenizer (vocabulary)\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n # Tokenized input\n text = \"[CLS] Who was Jim Henson ? 
[SEP] Jim Henson was a puppeteer [SEP]\"\n tokenized_text = tokenizer.tokenize(text)\n\n # Mask a token that we will try to predict back with `BertForMaskedLM`\n masked_index = 8\n tokenized_text[masked_index] = \"[MASK]\"\n assert tokenized_text == [\n \"[CLS]\",\n \"who\",\n \"was\",\n \"jim\",\n \"henson\",\n \"?\",\n \"[SEP]\",\n \"jim\",\n \"[MASK]\",\n \"was\",\n \"a\",\n \"puppet\",\n \"##eer\",\n \"[SEP]\",\n ]\n\n # Convert token to vocabulary indices\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)\n segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n\n ######################################################################\n # Load a pretrained PyTorch model bert-base-uncased\n # -------------------------------------------------\n\n # Bert Model with a language modeling\n model = BertForMaskedLM.from_pretrained(\"bert-base-uncased\")\n model.eval()\n\n ######################################################################\n # Predict all tokens with pytorch\n # -------------------------------\n\n with torch.no_grad():\n torch_preds = model(tokens_tensor, segments_tensors)\n\n ######################################################################\n # Make TorchScripted model via jit trace\n # --------------------------------------\n\n scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()\n\n ######################################################################\n # Import the graph to Relay\n # -------------------------\n # Convert PyTorch graph to Relay graph. The input name can be arbitrary.\n\n input_1 = \"input_ids\"\n input_2 = \"input.2\"\n shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]\n\n mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)\n\n ######################################################################\n # Compile the model with relay\n # ----------------------------\n\n target = \"llvm\"\n with tvm.transform.PassContext(opt_level=3):\n relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)\n\n ######################################################################\n # Execute on TVM\n # --------------\n\n ctx = tvm.context(target, 0)\n relay_model = graph_runtime.create(relay_graph, relay_lib, ctx)\n relay_model.set_input(**relay_params)\n relay_model.set_input(input_1, tokens_tensor)\n relay_model.set_input(input_2, segments_tensors)\n relay_model.run()\n compiled_output = relay_model.get_output(0).asnumpy()\n\n ######################################################################\n # Validate the outputs\n # --------------------\n # Compare the torch and tvm outputs\n\n tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)\n\n ######################################################################\n # Process the output\n # ------------------\n # Process the model output to token.\n\n # Torch output to token\n torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()\n torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]\n\n # TVM output to token\n tvm_pred_idx = compiled_output[0, masked_index].argmax()\n tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]\n\n assert torch_pred_idx == tvm_pred_idx\n assert torch_pred_token == 
tvm_pred_token\n\n # Print the outputs\n print(\"Torch top-1 id: {}, token: {}\".format(torch_pred_idx, torch_pred_token))\n print(\"TVM top-1 id: {}, token: {}\".format(tvm_pred_idx, tvm_pred_token))\n\n\ndef test_convert_torch_script_with_input_types():\n def model_fn(x, y):\n x = x.to(dtype=torch.int32)\n y = x + y\n return y\n\n ishape = (4, 5)\n input_x = torch.rand(ishape, dtype=torch.float32)\n input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)\n inputs = [input_x, input_y]\n script_module = torch.jit.trace(model_fn, inputs)\n\n fname = \"tmp.pt\"\n torch.jit.save(script_module, fname)\n loaded = torch.jit.load(fname)\n os.remove(fname)\n\n verify_model(loaded.eval(), input_data=inputs)\n\n def expected(x_shape, y_shape):\n # use a fixed order of args so alpha equal check can pass\n x = relay.var(\"x\", shape=x_shape, dtype=\"float32\")\n y = relay.var(\"y\", shape=y_shape, dtype=\"int32\")\n args = [x, y]\n x1 = relay.cast(x, \"int32\")\n y1 = relay.add(x1, y)\n mod = tvm.IRModule.from_expr(relay.Function(args, y1))\n return mod[\"main\"]\n\n input_infos = [(\"input0\", (ishape, \"float\")), (\"input1\", (ishape, \"int\"))]\n mod, params = relay.frontend.from_pytorch(loaded, input_infos)\n\n expected_mod = expected(ishape, ishape)\n\n assert tvm.ir.structural_equal(expected_mod, mod[\"main\"], map_free_vars=True)\n\n\ndef test_bincount():\n def test_fn(x, weights=None):\n return torch.bincount(x, weights=weights)\n\n inp = torch.randint(0, 100, (10000,), dtype=torch.int64)\n weights = torch.linspace(0, 100, steps=10000)\n\n targets = [\"llvm\", \"cuda\"]\n verify_trace_model(test_fn, [inp], targets)\n verify_trace_model(test_fn, [inp, weights], targets)\n\n\ndef test_hard_swish():\n examples = [torch.rand(8).float(), torch.rand(8, 10).float(), torch.rand(1, 1, 10).float()]\n for input in examples:\n verify_model(torch.nn.Hardswish().eval(), input_data=input)\n verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=input)\n\n\nif __name__ == \"__main__\":\n # some structural tests\n test_forward_traced_function()\n test_forward_dtypes()\n test_weight_names()\n test_duplicate_weight_use()\n\n # Single operator tests\n test_forward_pixel_shuffle()\n test_forward_add()\n test_forward_subtract()\n test_forward_multiply()\n test_forward_matmul()\n test_forward_rsub()\n test_forward_onehot()\n test_forward_embedding()\n test_forward_reshape()\n test_forward_reciprocal()\n test_forward_repeat()\n test_forward_repeat_interleave()\n test_forward_squeeze()\n test_forward_unsqueeze()\n test_forward_concatenate()\n test_forward_reduce_sum()\n test_forward_reduce_prod()\n test_forward_argmin()\n test_forward_argmax()\n test_forward_norm()\n test_forward_frobenius_norm()\n test_forward_std()\n test_forward_variance()\n test_forward_relu()\n test_forward_prelu()\n test_forward_leakyrelu()\n test_forward_elu()\n test_forward_celu()\n test_forward_gelu()\n test_forward_selu()\n test_forward_log_sigmoid()\n test_forward_adaptiveavgpool()\n test_forward_maxpool2d()\n test_forward_maxpool1d()\n test_forward_maxpool3d()\n test_forward_hardtanh()\n test_forward_conv()\n test_forward_conv_transpose()\n test_forward_threshold()\n test_forward_contiguous()\n test_forward_batchnorm()\n test_forward_instancenorm()\n test_forward_layernorm()\n test_forward_groupnorm()\n test_forward_transpose()\n test_forward_size()\n test_forward_view()\n test_forward_select()\n test_forward_take()\n test_forward_topk()\n test_forward_where()\n test_forward_addcdiv()\n test_forward_addcmul()\n 
test_forward_true_divide()\n test_forward_is_floating_point()\n test_forward_clone()\n test_forward_softplus()\n test_forward_softsign()\n test_forward_logsoftmax()\n test_forward_sigmoid()\n test_forward_dense()\n test_forward_avgpool()\n test_forward_avgpool3d()\n test_forward_dropout()\n test_forward_slice()\n test_forward_mean()\n test_forward_expand()\n test_forward_pow()\n test_forward_unary()\n test_forward_clamp()\n test_forward_clamp_()\n test_forward_logical_not()\n test_forward_bitwise_not()\n test_forward_bitwise_xor()\n test_forward_logical_xor()\n test_forward_isfinite()\n test_forward_isnan()\n test_forward_isinf()\n test_forward_ones()\n test_forward_ones_like()\n test_forward_zeros()\n test_forward_zeros_like()\n test_forward_full()\n test_forward_full_like()\n test_forward_linspace()\n test_forward_arange()\n test_forward_mesh_grid()\n test_forward_chunk()\n test_forward_split()\n test_forward_gather()\n test_upsample()\n test_forward_upsample3d()\n test_forward_nms()\n test_forward_roi_align()\n test_to()\n test_flatten()\n test_type_as()\n test_forward_functional_pad()\n test_forward_zero_pad2d()\n test_forward_constant_pad1d()\n test_forward_constant_pad2d()\n test_forward_constant_pad3d()\n test_forward_reflection_pad1d()\n test_forward_reflection_pad2d()\n test_forward_replication_pad1d()\n test_forward_replication_pad2d()\n test_forward_replication_pad3d()\n test_adaptive_pool3d()\n test_conv3d()\n test_conv3d_transpose()\n test_forward_index()\n test_min_max()\n test_logsumexp()\n test_stack()\n test_stack_dynamic()\n test_forward_unbind()\n test_forward_nonzero()\n test_forward_scatter()\n test_numel()\n test_bincount()\n\n # Model tests\n test_resnet18()\n test_squeezenet1_0()\n test_squeezenet1_1()\n test_densenet121()\n # disable inception test for now, since loading it takes ~5min on torchvision-0.5 due to scipy bug\n # See https://discuss.pytorch.org/t/torchvisions-inception-v3-takes-much-longer-to-load-than-other-models/68756\n # test_inception_v3()\n test_googlenet()\n test_mnasnet0_5()\n test_mobilenet_v2()\n\n test_custom_conversion_map()\n\n test_segmentaton_models()\n test_3d_models()\n\n # Quantization test\n from qnn_test import test_quantized_imagenet, test_quantized_modules\n\n test_quantized_modules()\n test_quantized_imagenet()\n\n # Test simple conditionals and loop\n test_control_flow()\n test_simple_rnn()\n\n # More complex recurrent models\n from test_lstm import test_custom_lstm\n\n test_custom_lstm()\n\n # Test bert model\n test_forward_pretrained_bert_base_uncased()\n\n # Test convert torch script(jit) with specific inputs' types\n test_convert_torch_script_with_input_types()\n test_hard_swish()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413\n\n\"\"\"Vitis-AI runtime test for CPU only part\n\nThis test verifies as much as possible whether the a model can be correctly offloaded\nand executed for Vitis-AI acceleration. This entails:\n - Annotating and partitioning model for Vitis-AI acceleration\n - Building a Vitis-AI PyXIR runtime module with on-the-fly quantization enabled\n - Run first iteration of on-the-fly quantization flow. This will always be run\n on CPU as the first N (parameter) will be used for collecting calibration data\n for quantization.\n\nNOTE This is not a full end-to-end test as we need the full Vitis-AI docker environment\nand access to an FPGA instance for that. This test verifies the Vitis-AI flow as much as\npossible without requiring access to dedicated docker environment and/or hardware setup.\nNOTE Quantization is not being tested (we need to be inside Vitis-AI docker environment\nfor that) buth the internal representation used for quantization is being generated and\nfunctionally tested (CPU).\n\"\"\"\n\nimport sys\nimport numpy as np\n\nimport pytest\n\npytest.importorskip(\"pyxir\")\nimport pyxir.contrib.target.DPUCADX8G\n\nimport tvm\nimport tvm.relay.testing\nfrom tvm import relay\n\nfrom .infrastructure import skip_test, verify_result\n\n\ndef test_extern_vitis_ai_resnet18():\n \"\"\"Test first part of Vitis-AI on-the-fly quantization runtime with ResNet 18 model\"\"\"\n if skip_test():\n return\n\n dtype = \"float32\"\n ishape = (1, 3, 224, 224)\n mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)\n ref_mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)\n\n ref_ex = relay.create_executor(\"graph\", mod=ref_mod, ctx=tvm.cpu(0))\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n\n ref_res = ref_ex.evaluate()(i_data, **params)\n verify_result(\n mod,\n {\"data\": i_data},\n (1, 1000),\n ref_res.asnumpy(),\n tol=1e-5,\n params=params,\n dpu_target=\"DPUCADX8G\",\n tvm_ops=4,\n )\n\n\nif __name__ == \"__main__\":\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n sys.exit(0)\n test_extern_vitis_ai_resnet18()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\" Test task scheduler \"\"\"\n\nimport tempfile\n\nimport multiprocessing\nimport numpy as np\n\nimport tvm\nimport tvm.testing\nfrom tvm import auto_scheduler\n\nfrom test_auto_scheduler_common import matmul_auto_scheduler_test\n\n\[email protected]_llvm\ndef test_task_scheduler_round_robin():\n tasks = []\n for n in [2, 4, 8]:\n tasks.append(\n auto_scheduler.SearchTask(\n func=matmul_auto_scheduler_test, args=(n, n, n), target=\"llvm\"\n )\n )\n\n with tempfile.NamedTemporaryFile() as fp:\n log_file = fp.name\n num_trials_per_task = 2\n\n # Tune all tasks\n measure_ctx = auto_scheduler.LocalRPCMeasureContext()\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=num_trials_per_task * len(tasks),\n runner=measure_ctx.runner,\n num_measures_per_round=1,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n task_scheduler = auto_scheduler.TaskScheduler(tasks, strategy=\"round-robin\")\n task_scheduler.tune(tune_option, search_policy=\"sketch.random\")\n\n # Check the result of round robin\n counters = {}\n for task in tasks:\n counters[task.workload_key] = 0\n\n for inp, _ in auto_scheduler.load_records(log_file):\n counters[inp.task.workload_key] += 1\n\n for task in tasks:\n assert counters[task.workload_key] == num_trials_per_task\n\n # test continuous tuning (restoring the status)\n task_scheduler = auto_scheduler.TaskScheduler(\n tasks, strategy=\"round-robin\", load_log_file=log_file\n )\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=len(tasks),\n num_measures_per_round=1,\n )\n task_scheduler.tune(tune_option, search_policy=\"sketch.random\")\n del measure_ctx\n\n\[email protected]_llvm\ndef task_scheduler_round_robin_spawn():\n assert multiprocessing.get_start_method(False) == \"spawn\"\n test_task_scheduler_round_robin()\n\n\[email protected]_llvm\ndef test_task_scheduler_round_robin_spawn():\n ctx = multiprocessing.get_context(\"spawn\")\n p = ctx.Process(target=task_scheduler_round_robin_spawn)\n p.start()\n p.join()\n\n\[email protected]_llvm\ndef test_task_scheduler_gradient():\n tasks = []\n for n in [2, 4]:\n tasks.append(\n auto_scheduler.SearchTask(\n func=matmul_auto_scheduler_test, args=(n, n, n), target=\"llvm\"\n )\n )\n\n def objective_func(costs):\n return costs[0]\n\n with tempfile.NamedTemporaryFile() as fp:\n log_file = fp.name\n\n n_trials = 5\n\n # Tune all tasks\n measure_ctx = auto_scheduler.LocalRPCMeasureContext()\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=n_trials,\n runner=measure_ctx.runner,\n num_measures_per_round=1,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n )\n task_scheduler = auto_scheduler.TaskScheduler(tasks, objective_func=objective_func)\n\n # Forcely rewrite the initial values.\n # This can make this test more stable on the slow CI machines\n task_scheduler.best_costs = np.array([1e2, 1e-8])\n\n task_scheduler.tune(tune_option, search_policy=\"sketch.random\")\n\n # Check the allocation results\n counters = {}\n for task in tasks:\n counters[task.workload_key] = 0\n\n for inp, _ in auto_scheduler.load_records(log_file):\n counters[inp.task.workload_key] += 1\n\n assert counters[tasks[0].workload_key] == n_trials - 1\n assert counters[tasks[1].workload_key] == 1\n del measure_ctx\n\n\nif __name__ == \"__main__\":\n test_task_scheduler_round_robin()\n test_task_scheduler_round_robin_spawn()\n test_task_scheduler_gradient()\n", "#!/usr/bin/env 
python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport argparse\nimport csv\nimport logging\nfrom os import path as osp\nimport sys\nimport shutil\n\nimport numpy as np\n\nimport tvm\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.relay import testing\nfrom tvm.contrib import graph_runtime, cc\nfrom PIL import Image\nfrom tvm.contrib.download import download_testdata\nfrom mxnet.gluon.model_zoo.vision import get_model\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser(description=\"Resnet build example\")\naa = parser.add_argument\naa(\"--build-dir\", type=str, required=True, help=\"directory to put the build artifacts\")\naa(\"--batch-size\", type=int, default=1, help=\"input image batch size\")\naa(\n \"--opt-level\",\n type=int,\n default=3,\n help=\"level of optimization. 0 is unoptimized and 3 is the highest level\",\n)\naa(\"--target\", type=str, default=\"llvm\", help=\"target context for compilation\")\naa(\"--image-shape\", type=str, default=\"3,224,224\", help=\"input image dimensions\")\naa(\"--image-name\", type=str, default=\"cat.png\", help=\"name of input image to download\")\nargs = parser.parse_args()\n\nbuild_dir = args.build_dir\nbatch_size = args.batch_size\nopt_level = args.opt_level\ntarget = tvm.target.create(args.target)\nimage_shape = tuple(map(int, args.image_shape.split(\",\")))\ndata_shape = (batch_size,) + image_shape\n\n\ndef build(target_dir):\n \"\"\" Compiles resnet18 with TVM\"\"\"\n # Download the pretrained model in MxNet's format.\n block = get_model(\"resnet18_v1\", pretrained=True)\n\n shape_dict = {\"data\": (1, 3, 224, 224)}\n mod, params = relay.frontend.from_mxnet(block, shape_dict)\n # Add softmax to do classification in last layer.\n func = mod[\"main\"]\n func = relay.Function(\n func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs\n )\n\n target = \"llvm\"\n with tvm.transform.PassContext(opt_level=3):\n graph, lib, params = relay.build(func, target, params=params)\n\n # save the model artifacts\n deploy_lib = osp.join(target_dir, \"deploy_lib.o\")\n lib.save(deploy_lib)\n cc.create_shared(osp.join(target_dir, \"deploy_lib.so\"), [osp.join(target_dir, \"deploy_lib.o\")])\n\n with open(osp.join(target_dir, \"deploy_graph.json\"), \"w\") as fo:\n fo.write(graph)\n\n with open(osp.join(target_dir, \"deploy_param.params\"), \"wb\") as fo:\n fo.write(relay.save_param_dict(params))\n\n\ndef download_img_labels():\n \"\"\" Download an image and imagenet1k class labels for test\"\"\"\n from mxnet.gluon.utils import download\n\n synset_url = \"\".join(\n [\n \"https://gist.githubusercontent.com/zhreshold/\",\n 
\"4d0b62f3d01426887599d4f7ede23ee5/raw/\",\n \"596b27d23537e5a1b5751d2b0481ef172f58b539/\",\n \"imagenet1000_clsid_to_human.txt\",\n ]\n )\n synset_name = \"synset.txt\"\n synset_path = download_testdata(synset_url, synset_name + \".raw\", module=\"data\", overwrite=True)\n\n with open(synset_path) as fin:\n data = fin.read()\n synset = eval(data)\n\n with open(synset_name, \"w\") as f:\n for key in synset:\n f.write(synset[key])\n f.write(\"\\n\")\n\n return synset\n\n\ndef transform_image(image):\n image = np.array(image) - np.array([123.0, 117.0, 104.0])\n image /= np.array([58.395, 57.12, 57.375])\n image = image.transpose((2, 0, 1))\n image = image[np.newaxis, :]\n return image\n\n\ndef get_cat_image():\n img_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\n img_path = download_testdata(img_url, \"cat.png\", module=\"data\")\n shutil.copyfile(img_path, \"cat.png\")\n img = Image.open(img_path).resize((224, 224))\n return transform_image(img)\n\n\ndef test_build(build_dir):\n \"\"\" Sanity check with the cat image we download.\"\"\"\n graph = open(osp.join(build_dir, \"deploy_graph.json\")).read()\n lib = tvm.runtime.load_module(osp.join(build_dir, \"deploy_lib.so\"))\n params = bytearray(open(osp.join(build_dir, \"deploy_param.params\"), \"rb\").read())\n input_data = get_cat_image()\n ctx = tvm.cpu()\n module = graph_runtime.create(graph, lib, ctx)\n module.load_params(params)\n module.run(data=input_data)\n out = module.get_output(0).asnumpy()\n top1 = np.argmax(out[0])\n synset = download_img_labels()\n print(\"TVM prediction top-1:\", top1, synset[top1])\n\n\nif __name__ == \"__main__\":\n logger.info(\"Compiling the model to graph runtime.\")\n build(build_dir)\n logger.info(\"Testing the model's predication on test data.\")\n test_build(build_dir)\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\n.. _tutorial-deploy-model-on-android:\n\nDeploy the Pretrained Model on Android\n=======================================\n**Author**: `Tomohiro Kato <https://tkat0.github.io/>`_\n\nThis is an example of using Relay to compile a keras model and deploy it on Android device.\n\"\"\"\n\nimport os\nimport numpy as np\nfrom PIL import Image\nimport keras\nfrom keras.applications.mobilenet_v2 import MobileNetV2\nimport tvm\nfrom tvm import te\nimport tvm.relay as relay\nfrom tvm import rpc\nfrom tvm.contrib import utils, ndk, graph_runtime as runtime\nfrom tvm.contrib.download import download_testdata\n\n\n######################################################################\n# Setup Environment\n# -----------------\n# Since there are many required packages for Android, it is recommended to use the official Docker Image.\n#\n# First, to build and run Docker Image, we can run the following command.\n#\n# .. 
code-block:: bash\n#\n# git clone --recursive https://github.com/apache/tvm tvm\n# cd tvm\n# docker build -t tvm.demo_android -f docker/Dockerfile.demo_android ./docker\n# docker run --pid=host -h tvm -v $PWD:/workspace \\\n# -w /workspace -p 9190:9190 --name tvm -it tvm.demo_android bash\n#\n# You are now inside the container. The cloned TVM directory is mounted on /workspace.\n# At this time, mount the 9190 port used by RPC described later.\n#\n# .. note::\n#\n# Please execute the following steps in the container.\n# We can execute :code:`docker exec -it tvm bash` to open a new terminal in the container.\n#\n# Next we build the TVM.\n#\n# .. code-block:: bash\n#\n# mkdir build\n# cd build\n# cmake -DUSE_LLVM=llvm-config-8 \\\n# -DUSE_RPC=ON \\\n# -DUSE_SORT=ON \\\n# -DUSE_VULKAN=ON \\\n# -DUSE_GRAPH_RUNTIME=ON \\\n# ..\n# make -j10\n#\n# After building TVM successfully, Please set PYTHONPATH.\n#\n# .. code-block:: bash\n#\n# echo 'export PYTHONPATH=/workspace/python:/workspace/vta/python:${PYTHONPATH}' >> ~/.bashrc\n# source ~/.bashrc\n\n#################################################################\n# Start RPC Tracker\n# -----------------\n# TVM uses RPC session to communicate with Android device.\n#\n# To start an RPC tracker, run this command in the container. The tracker is\n# required during the whole tuning process, so we need to open a new terminal for\n# this command:\n#\n# .. code-block:: bash\n#\n# python3 -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190\n#\n# The expected output is\n#\n# .. code-block:: bash\n#\n# INFO:RPCTracker:bind to 0.0.0.0:9190\n\n#################################################################\n# Register Android device to RPC Tracker\n# --------------------------------------\n# Now we can register our Android device to the tracker.\n#\n# Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to\n# install TVM RPC APK on the android device.\n#\n# Here is an example of config.mk. I enabled OpenCL and Vulkan.\n#\n#\n# .. code-block:: bash\n#\n# APP_ABI = arm64-v8a\n#\n# APP_PLATFORM = android-24\n#\n# # whether enable OpenCL during compile\n# USE_OPENCL = 1\n#\n# # whether to enable Vulkan during compile\n# USE_VULKAN = 1\n#\n# ifeq ($(USE_VULKAN), 1)\n# # Statically linking vulkan requires API Level 24 or higher\n# APP_PLATFORM = android-24\n# endif\n#\n# # the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc\n# ADD_C_INCLUDES += /work/adrenosdk-linux-5_0/Development/Inc\n# # downloaded from https://github.com/KhronosGroup/OpenCL-Headers\n# ADD_C_INCLUDES += /usr/local/OpenCL-Headers/\n#\n# # the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so\n# ADD_LDLIBS = /workspace/pull-from-android-device/libOpenCL.so\n#\n# .. note::\n#\n# At this time, don't forget to `create a standalone toolchain <https://github.com/apache/tvm/tree/main/apps/android_rpc#architecture-and-android-standalone-toolchain>`_ .\n#\n# for example\n#\n# .. code-block:: bash\n#\n# $ANDROID_NDK_HOME/build/tools/make-standalone-toolchain.sh \\\n# --platform=android-24 --use-llvm --arch=arm64 --install-dir=/opt/android-toolchain-arm64\n# export TVM_NDK_CC=/opt/android-toolchain-arm64/bin/aarch64-linux-android-g++\n#\n# Next, start the Android application and enter the IP address and port of RPC Tracker.\n# Then you have already registered your device.\n#\n# After registering devices, we can confirm it by querying rpc_tracker\n#\n# .. 
code-block:: bash\n#\n# python3 -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190\n#\n# For example, if we have 1 Android device.\n# the output can be\n#\n# .. code-block:: bash\n#\n# Queue Status\n# ----------------------------------\n# key total free pending\n# ----------------------------------\n# android 1 1 0\n# ----------------------------------\n#\n# To confirm that you can communicate with Android, we can run following test script.\n# If you use OpenCL and Vulkan, please set :code:`test_opencl` and :code:`test_vulkan` in the script.\n#\n# .. code-block:: bash\n#\n# export TVM_TRACKER_HOST=0.0.0.0\n# export TVM_TRACKER_PORT=9190\n#\n# .. code-block:: bash\n#\n# cd /workspace/apps/android_rpc\n# python3 tests/android_rpc_test.py\n#\n\n######################################################################\n# Load pretrained keras model\n# ---------------------------\n# We load a pretrained MobileNetV2(alpha=0.5) classification model provided by keras.\nkeras.backend.clear_session() # Destroys the current TF graph and creates a new one.\nweights_url = \"\".join(\n [\n \"https://github.com/JonathanCMitchell/\",\n \"mobilenet_v2_keras/releases/download/v1.1/\",\n \"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5\",\n ]\n)\nweights_file = \"mobilenet_v2_weights.h5\"\nweights_path = download_testdata(weights_url, weights_file, module=\"keras\")\nkeras_mobilenet_v2 = MobileNetV2(\n alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000\n)\nkeras_mobilenet_v2.load_weights(weights_path)\n\n######################################################################\n# In order to test our model, here we download an image of cat and\n# transform its format.\nimg_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\nimg_name = \"cat.png\"\nimg_path = download_testdata(img_url, img_name, module=\"data\")\nimage = Image.open(img_path).resize((224, 224))\ndtype = \"float32\"\n\n\ndef transform_image(image):\n image = np.array(image) - np.array([123.0, 117.0, 104.0])\n image /= np.array([58.395, 57.12, 57.375])\n image = image.transpose((2, 0, 1))\n image = image[np.newaxis, :]\n return image\n\n\nx = transform_image(image)\n\n######################################################################\n# synset is used to transform the label from number of ImageNet class to\n# the word human can understand.\nsynset_url = \"\".join(\n [\n \"https://gist.githubusercontent.com/zhreshold/\",\n \"4d0b62f3d01426887599d4f7ede23ee5/raw/\",\n \"596b27d23537e5a1b5751d2b0481ef172f58b539/\",\n \"imagenet1000_clsid_to_human.txt\",\n ]\n)\nsynset_name = \"imagenet1000_clsid_to_human.txt\"\nsynset_path = download_testdata(synset_url, synset_name, module=\"data\")\nwith open(synset_path) as f:\n synset = eval(f.read())\n\n\n######################################################################\n# Compile the model with relay\n# ----------------------------\n# If we run the example on our x86 server for demonstration, we can simply\n# set it as :code:`llvm`. If running it on the Android device, we need to\n# specify its instruction set. 
Set :code:`local_demo` to False if you want\n# to run this tutorial with a real device.\n\nlocal_demo = True\n\n# by default on CPU target will execute.\n# select 'cpu', 'opencl' and 'vulkan'\ntest_target = \"cpu\"\n\n# Change target configuration.\n# Run `adb shell cat /proc/cpuinfo` to find the arch.\narch = \"arm64\"\ntarget = \"llvm -mtriple=%s-linux-android\" % arch\ntarget_host = None\n\nif local_demo:\n target_host = None\n target = \"llvm\"\nelif test_target == \"opencl\":\n target_host = target\n target = \"opencl\"\nelif test_target == \"vulkan\":\n target_host = target\n target = \"vulkan\"\n\ninput_name = \"input_1\"\nshape_dict = {input_name: x.shape}\nmod, params = relay.frontend.from_keras(keras_mobilenet_v2, shape_dict)\n\nwith tvm.transform.PassContext(opt_level=3):\n lib = relay.build(mod, target=target, target_host=target_host, params=params)\n\n# After `relay.build`, you will get three return values: graph,\n# library and the new parameter, since we do some optimization that will\n# change the parameters but keep the result of model as the same.\n\n# Save the library at local temporary directory.\ntmp = utils.tempdir()\nlib_fname = tmp.relpath(\"net.so\")\nfcompile = ndk.create_shared if not local_demo else None\nlib.export_library(lib_fname, fcompile)\n\n######################################################################\n# Deploy the Model Remotely by RPC\n# --------------------------------\n# With RPC, you can deploy the model remotely from your host machine\n# to the remote android device.\n\ntracker_host = os.environ.get(\"TVM_TRACKER_HOST\", \"0.0.0.0\")\ntracker_port = int(os.environ.get(\"TVM_TRACKER_PORT\", 9190))\nkey = \"android\"\n\nif local_demo:\n remote = rpc.LocalSession()\nelse:\n tracker = rpc.connect_tracker(tracker_host, tracker_port)\n # When running a heavy model, we should increase the `session_timeout`\n remote = tracker.request(key, priority=0, session_timeout=60)\n\nif local_demo:\n ctx = remote.cpu(0)\nelif test_target == \"opencl\":\n ctx = remote.cl(0)\nelif test_target == \"vulkan\":\n ctx = remote.vulkan(0)\nelse:\n ctx = remote.cpu(0)\n\n# upload the library to remote device and load it\nremote.upload(lib_fname)\nrlib = remote.load_module(\"net.so\")\n\n# create the remote runtime module\nmodule = runtime.GraphModule(rlib[\"default\"](ctx))\n\n######################################################################\n# Execute on TVM\n# --------------\n\n# set input data\nmodule.set_input(input_name, tvm.nd.array(x.astype(dtype)))\n# run\nmodule.run()\n# get output\nout = module.get_output(0)\n\n# get top1 result\ntop1 = np.argmax(out.asnumpy())\nprint(\"TVM prediction top-1: {}\".format(synset[top1]))\n\nprint(\"Evaluate inference time cost...\")\nftimer = module.module.time_evaluator(\"run\", ctx, number=1, repeat=10)\nprof_res = np.array(ftimer().results) * 1000 # convert to millisecond\nprint(\"Mean inference time (std dev): %.2f ms (%.2f ms)\" % (np.mean(prof_res), np.std(prof_res)))\n\n######################################################################\n# Sample Output\n# -------------\n# The following is the result of 'cpu', 'opencl' and 'vulkan' using Adreno 530 on Snapdragon 820\n#\n# Although we can run on a GPU, it is slower than CPU.\n# To speed up, we need to write and optimize the schedule according to the GPU architecture.\n#\n# .. 
code-block:: bash\n#\n# # cpu\n# TVM prediction top-1: tiger cat\n# Evaluate inference time cost...\n# Mean inference time (std dev): 37.92 ms (19.67 ms)\n#\n# # opencl\n# TVM prediction top-1: tiger cat\n# Evaluate inference time cost...\n# Mean inference time (std dev): 419.83 ms (7.49 ms)\n#\n# # vulkan\n# TVM prediction top-1: tiger cat\n# Evaluate inference time cost...\n# Mean inference time (std dev): 465.80 ms (4.52 ms)\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nAuto-scheduling Matrix Multiplication for CPU\n=============================================\n**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \\\n `Chengfan Jia <https://github.com/jcf94/>`_\n\nThis is a tutorial on how to use the auto-scheduler for CPUs.\n\nDifferent from the template-based :ref:`autotvm <tutorials-autotvm-sec>` which relies on\nmanual templates to define the search space, the auto-scheduler does not require any templates.\nUsers only need to write the computation declaration without any schedule commands or templates.\nThe auto-scheduler can automatically generate a large search space and\nfind a good schedule in the space.\n\nWe use matrix multiplication as an example in this tutorial.\n\nNote that this tutorial will not run on Windows or recent versions of macOS. 
To\nget it to run, you will need to wrap the body of this tutorial in a :code:`if\n__name__ == \"__main__\":` block.\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport tvm\nfrom tvm import te, auto_scheduler\n\n######################################################################\n# Define the computation\n# ^^^^^^^^^^^^^^^^^^^^^^\n# To begin with, let us define the computation of a matmul with bias add.\n# The function should return the list of input/output tensors.\n# From these tensors, the auto-scheduler can get the whole computational graph.\n\n\n@auto_scheduler.register_workload\ndef matmul_add(N, L, M, dtype):\n A = te.placeholder((N, L), name=\"A\", dtype=dtype)\n B = te.placeholder((L, M), name=\"B\", dtype=dtype)\n C = te.placeholder((N, M), name=\"C\", dtype=dtype)\n\n k = te.reduce_axis((0, L), name=\"k\")\n matmul = te.compute(\n (N, M),\n lambda i, j: te.sum(A[i, k] * B[k, j], axis=k),\n name=\"matmul\",\n attrs={\"layout_free_placeholders\": [B]}, # enable automatic layout transform for tensor B\n )\n out = te.compute((N, M), lambda i, j: matmul[i, j] + C[i, j], name=\"out\")\n\n return [A, B, C, out]\n\n\n######################################################################\n# Create the search task\n# ^^^^^^^^^^^^^^^^^^^^^^\n# We then create a search task with N=L=M=1024 and dtype=\"float32\"\n# If your machine supports avx instructions, you can\n#\n# - replace \"llvm\" below with \"llvm -mcpu=core-avx2\" to enable AVX2\n# - replace \"llvm\" below with \"llvm -mcpu=skylake-avx512\" to enable AVX-512\n\ntarget = tvm.target.Target(\"llvm\")\nN = L = M = 1024\ntask = tvm.auto_scheduler.SearchTask(func=matmul_add, args=(N, L, M, \"float32\"), target=target)\n\n# Inspect the computational graph\nprint(\"Computational DAG:\")\nprint(task.compute_dag)\n\n######################################################################\n# Next, we set parameters for the auto-scheduler.\n#\n# * :code:`num_measure_trials` is the number of measurement trials we can use during the search.\n# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a\n# good value for the search to converge. You can do more trials according to your time budget.\n# * In addition, we use :code:`RecordToFile` to dump measurement records into a file `matmul.json`.\n# The measurement records can be used to query the history best, resume the search,\n# and do more analyses later.\n# * see :any:`auto_scheduler.TuningOptions` for more parameters\n\nlog_file = \"matmul.json\"\ntune_option = auto_scheduler.TuningOptions(\n num_measure_trials=10,\n measure_callbacks=[auto_scheduler.RecordToFile(log_file)],\n verbose=2,\n)\n\n######################################################################\n# Run the search\n# ^^^^^^^^^^^^^^\n# Now we get all inputs ready. 
Pretty simple, isn't it?\n# We can kick off the search and let the auto-scheduler do its magic.\n# After some measurement trials, we can load the best schedule from the log\n# file and apply it.\n\n# Run auto-tuning (search)\ntask.tune(tune_option)\n# Apply the best schedule\nsch, args = task.apply_best(log_file)\n\n######################################################################\n# We can lower the schedule to see the IR after auto-scheduling.\n# The auto-scheduler correctly performs optimizations including multi-level tiling,\n# layout transformation, parallelization, vectorization, unrolling, and operator fusion.\n\nprint(\"Lowered TIR:\")\nprint(tvm.lower(sch, args, simple_mode=True))\n\n######################################################################\n# Check correctness and evaluate performance\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# We build the binary and check its correctness and performance.\n\nfunc = tvm.build(sch, args, target)\na_np = np.random.uniform(size=(N, L)).astype(np.float32)\nb_np = np.random.uniform(size=(L, M)).astype(np.float32)\nc_np = np.random.uniform(size=(N, M)).astype(np.float32)\nout_np = a_np.dot(b_np) + c_np\n\nctx = tvm.cpu()\na_tvm = tvm.nd.array(a_np, ctx=ctx)\nb_tvm = tvm.nd.array(b_np, ctx=ctx)\nc_tvm = tvm.nd.array(c_np, ctx=ctx)\nout_tvm = tvm.nd.empty(out_np.shape, ctx=ctx)\nfunc(a_tvm, b_tvm, c_tvm, out_tvm)\n\n# Check results\nnp.testing.assert_allclose(out_np, out_tvm.asnumpy(), rtol=1e-3)\n\n# Evaluate execution time.\nevaluator = func.time_evaluator(func.entry_name, ctx, min_repeat_ms=500)\nprint(\n \"Execution time of this operator: %.3f ms\"\n % (np.median(evaluator(a_tvm, b_tvm, c_tvm, out_tvm).results) * 1000)\n)\n\n\n######################################################################\n# Using the record file\n# ^^^^^^^^^^^^^^^^^^^^^\n# During the search, all measurement records are dumped into the record\n# file \"matmul.json\". The measurement records can be used to re-apply search results,\n# resume the search, and perform other analyses.\n\n######################################################################\n# Here is an example where we load the best schedule from a file,\n# and print the equivalent python schedule API. This can be used for\n# debugging and learning the behavior of the auto-scheduler.\n\nprint(\"Equivalent python schedule:\")\nprint(task.print_best(log_file))\n\n######################################################################\n# A more complicated example is to resume the search.\n# In this case, we need to create the search policy and cost model by ourselves\n# and resume the status of search policy and cost model with the log file.\n# In the example below we resume the status and do more 5 trials.\n\n\ndef resume_search(task, log_file):\n print(\"Resume search:\")\n cost_model = auto_scheduler.XGBModel()\n cost_model.update_from_file(log_file)\n search_policy = auto_scheduler.SketchPolicy(\n task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]\n )\n tune_option = auto_scheduler.TuningOptions(\n num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]\n )\n task.tune(tune_option, search_policy=search_policy)\n\n\nresume_search(task, log_file)\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\nimport copy\nimport datetime\nimport glob\nimport os\nimport subprocess\nimport sys\n\nimport pytest\nimport numpy as np\n\nimport tvm\nimport tvm.rpc\nimport tvm.micro\nimport tvm.relay as relay\n\nfrom tvm.micro.contrib import zephyr\nfrom tvm.contrib import utils\n\nBUILD = True\nDEBUG = False\n\n\nTARGET = None\n\n\ndef _make_sess_from_op(model, zephyr_board, op_name, sched, arg_bufs):\n target = tvm.target.target.micro(model)\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n mod = tvm.build(sched, arg_bufs, target, target_host=target, name=op_name)\n\n return _make_session(model, target, zephyr_board, mod)\n\n\ndef _make_session(model, target, zephyr_board, mod):\n test_name = f\"{os.path.splitext(os.path.abspath(__file__))[0]}-{model}\"\n prev_build = f\"{test_name}-last-build.micro-binary\"\n workspace_root = (\n f'{test_name}-workspace/{datetime.datetime.now().strftime(\"%Y-%m-%dT%H-%M-%S\")}'\n )\n workspace_parent = os.path.dirname(workspace_root)\n if not os.path.exists(workspace_parent):\n os.makedirs(workspace_parent)\n workspace = tvm.micro.Workspace(debug=True, root=workspace_root)\n\n project_dir = os.path.join(os.path.dirname(__file__) or \".\", \"zephyr-runtime\")\n compiler = zephyr.ZephyrCompiler(\n project_dir=project_dir,\n board=\"nucleo_f746zg\" if \"stm32f746\" in str(target) else \"qemu_x86\",\n zephyr_toolchain_variant=\"zephyr\",\n )\n\n opts = tvm.micro.default_options(f\"{project_dir}/crt\")\n # TODO(weberlo) verify this is necessary\n opts[\"bin_opts\"][\"ccflags\"] = [\"-std=gnu++14\"]\n opts[\"lib_opts\"][\"ccflags\"] = [\"-std=gnu++14\"]\n\n flasher_kw = {}\n if DEBUG:\n flasher_kw[\"debug_rpc_session\"] = tvm.rpc.connect(\"127.0.0.1\", 9090)\n\n session_kw = {\n \"flasher\": compiler.flasher(**flasher_kw),\n }\n\n if BUILD:\n session_kw[\"binary\"] = tvm.micro.build_static_runtime(\n # the x86 compiler *expects* you to give the exact same dictionary for both\n # lib_opts and bin_opts. 
so the library compiler is mutating lib_opts and\n # the binary compiler is expecting those mutations to be in bin_opts.\n # TODO(weberlo) fix this very bizarre behavior\n workspace,\n compiler,\n mod,\n lib_opts=opts[\"lib_opts\"],\n bin_opts=opts[\"bin_opts\"],\n )\n if os.path.exists(prev_build):\n os.unlink(prev_build)\n session_kw[\"binary\"].archive(prev_build, metadata_only=True)\n else:\n unarchive_dir = utils.tempdir()\n session_kw[\"binary\"] = tvm.micro.MicroBinary.unarchive(\n prev_build, unarchive_dir.relpath(\"binary\")\n )\n\n return tvm.micro.Session(**session_kw)\n\n\ndef _make_add_sess(model, zephyr_board):\n A = tvm.te.placeholder((2,), dtype=\"int8\")\n B = tvm.te.placeholder((1,), dtype=\"int8\")\n C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name=\"C\")\n sched = tvm.te.create_schedule(C.op)\n return _make_sess_from_op(model, zephyr_board, \"add\", sched, [A, B, C])\n\n\n# The models that should pass this configuration. Maps a short, identifying platform string to\n# (model, zephyr_board).\nPLATFORMS = {\n \"host\": (\"host\", \"qemu_x86\"),\n \"stm32f746xx\": (\"stm32f746xx\", \"nucleo_f746zg\"),\n}\n\n\n# The same test code can be executed on both the QEMU simulation and on real hardware.\ndef test_compile_runtime(platform):\n \"\"\"Test compiling the on-device runtime.\"\"\"\n\n model, zephyr_board = PLATFORMS[platform]\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_basic_add(sess):\n A_data = tvm.nd.array(np.array([2, 3], dtype=\"int8\"), ctx=sess.context)\n assert (A_data.asnumpy() == np.array([2, 3])).all()\n B_data = tvm.nd.array(np.array([4], dtype=\"int8\"), ctx=sess.context)\n assert (B_data.asnumpy() == np.array([4])).all()\n C_data = tvm.nd.array(np.array([0, 0], dtype=\"int8\"), ctx=sess.context)\n assert (C_data.asnumpy() == np.array([0, 0])).all()\n\n system_lib = sess.get_system_lib()\n system_lib.get_function(\"add\")(A_data, B_data, C_data)\n assert (C_data.asnumpy() == np.array([6, 7])).all()\n\n with _make_add_sess(model, zephyr_board) as sess:\n test_basic_add(sess)\n\n\ndef test_platform_timer(platform):\n \"\"\"Test compiling the on-device runtime.\"\"\"\n\n model, zephyr_board = PLATFORMS[platform]\n\n # NOTE: run test in a nested function so cPython will delete arrays before closing the session.\n def test_basic_add(sess):\n A_data = tvm.nd.array(np.array([2, 3], dtype=\"int8\"), ctx=sess.context)\n assert (A_data.asnumpy() == np.array([2, 3])).all()\n B_data = tvm.nd.array(np.array([4], dtype=\"int8\"), ctx=sess.context)\n assert (B_data.asnumpy() == np.array([4])).all()\n C_data = tvm.nd.array(np.array([0, 0], dtype=\"int8\"), ctx=sess.context)\n assert (C_data.asnumpy() == np.array([0, 0])).all()\n\n system_lib = sess.get_system_lib()\n time_eval_f = system_lib.time_evaluator(\n \"add\", sess.context, number=20, repeat=3, min_repeat_ms=40\n )\n result = time_eval_f(A_data, B_data, C_data)\n assert (C_data.asnumpy() == np.array([6, 7])).all()\n assert result.mean > 0\n assert len(result.results) == 3\n\n with _make_add_sess(model, zephyr_board) as sess:\n test_basic_add(sess)\n\n\ndef test_relay(platform):\n \"\"\"Testing a simple relay graph\"\"\"\n model, zephyr_board = PLATFORMS[platform]\n shape = (10,)\n dtype = \"int8\"\n\n # Construct Relay program.\n x = relay.var(\"x\", relay.TensorType(shape=shape, dtype=dtype))\n xx = relay.multiply(x, x)\n z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))\n func = relay.Function([x], z)\n\n target = 
tvm.target.target.micro(model)\n with tvm.transform.PassContext(opt_level=3, config={\"tir.disable_vectorize\": True}):\n graph, mod, params = tvm.relay.build(func, target=target)\n\n with _make_session(model, target, zephyr_board, mod) as session:\n graph_mod = tvm.micro.create_local_graph_runtime(\n graph, session.get_system_lib(), session.context\n )\n graph_mod.set_input(**params)\n x_in = np.random.randint(10, size=shape[0], dtype=dtype)\n graph_mod.run(x=x_in)\n result = graph_mod.get_output(0).asnumpy()\n tvm.testing.assert_allclose(graph_mod.get_input(0).asnumpy(), x_in)\n tvm.testing.assert_allclose(result, x_in * x_in + 1)\n\n\nif __name__ == \"__main__\":\n sys.exit(pytest.main([os.path.dirname(__file__)] + sys.argv[1:]))\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport os\nimport re\nimport numpy as np\nimport tvm\nfrom tvm import te\nfrom tvm import topi\nimport tvm.topi.testing\nfrom tvm.topi.utils import get_const_tuple\n\n\ndef generate_quantized_np(shape, bits, out_dtype):\n np.random.seed(0)\n min_val = 0\n max_val = 1 << bits\n return np.random.randint(min_val, max_val, size=shape).astype(out_dtype)\n\n\n# Verify that certain special instructions from the tensorize pass exist\ndef verify_bitserial_conv2d_nhwc(\n batch,\n in_size,\n in_channel,\n num_filter,\n kernel,\n stride,\n padding,\n activation_bits,\n weight_bits,\n unipolar,\n):\n in_height = in_width = in_size\n input_type = \"uint32\"\n out_dtype = \"int16\"\n\n device = \"llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon\"\n with tvm.target.Target(device):\n A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name=\"A\")\n W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name=\"W\")\n B = topi.arm_cpu.bitserial_conv2d_nhwc(\n A, W, stride, padding, activation_bits, weight_bits, \"uint8\", out_dtype, unipolar\n )\n s = topi.arm_cpu.schedule_bitserial_conv2d_nhwc([B])\n\n func = tvm.build(s, [A, W, B], device)\n\n assembly = func.get_source(\"asm\")\n matches = re.findall(\"vpadal\", assembly)\n assert len(matches) > 0\n matches = re.findall(\"vcnt\", assembly)\n assert len(matches) > 0\n matches = re.findall(\"vpadd\", assembly)\n assert len(matches) > 0\n\n ctx = tvm.context(device, 0)\n if \"arm\" not in os.uname()[4]:\n print(\"Skipped running code, not an arm device\")\n return\n\n print(\"Running on target: %s\" % device)\n\n def get_ref_data():\n a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)\n w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)\n if unipolar:\n w_ = np.copy(w_np).astype(out_dtype)\n for x in np.nditer(w_, op_flags=[\"readwrite\"]):\n x[...] 
= 1 if x == 1 else -1\n b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)\n else:\n b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(\n out_dtype\n )\n return a_np, w_np, b_np\n\n a_np, w_np, b_np = get_ref_data()\n a = tvm.nd.array(a_np, ctx)\n w = tvm.nd.array(w_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)\n func = tvm.build(s, [A, W, B], device)\n\n func(a, w, b)\n np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n\ndef test_bitserial_conv2d():\n in_size = 56\n ic, oc = 64, 64\n k = 3\n stride = 1\n pad = 1\n\n verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, False)\n verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, False)\n\n verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 1, 1, True)\n verify_bitserial_conv2d_nhwc(1, in_size, ic, oc, k, stride, pad, 2, 1, True)\n\n\nif __name__ == \"__main__\":\n test_bitserial_conv2d()\n" ]
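The code field of this record closes here; it bundles TVM's Android-RPC deployment tutorial for a Keras MobileNetV2, the auto-scheduler matmul tutorial, the Zephyr microTVM tests, and an ARM bitserial conv2d test. As a quick, non-authoritative illustration of the preprocessing the Android tutorial describes, the following NumPy-only sketch re-states its transform_image helper and feeds it a random array in place of the downloaded cat.png, so it runs without network access, PIL, or a TVM installation:

import numpy as np

def transform_image(image):
    # Same mean/std constants as in the tutorial code recorded above.
    image = np.array(image, dtype="float32") - np.array([123.0, 117.0, 104.0])
    image /= np.array([58.395, 57.12, 57.375])
    image = image.transpose((2, 0, 1))   # HWC -> CHW
    return image[np.newaxis, :]          # add batch dimension

fake_image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
x = transform_image(fake_image)
assert x.shape == (1, 3, 224, 224)
print(x.shape, x.dtype)

The assertion mirrors the (1, 3, 224, 224) layout that the tutorial's shape_dict for input_1 is built from.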
[ [ "numpy.random.randint", "numpy.flip", "numpy.allclose", "numpy.clip" ], [ "torch.jit.load", "torch.randint", "numpy.sqrt", "torch.max", "torch.zeros", "torch.sin", "torch.neg", "torch.bitwise_xor", "torch.numel", "torch.rsqrt", "torch.acos", "torch.where", "torch.topk", "torch.log10", "torch.sqrt", "torch.randn", "torch.nn.Softsign", "torch.logical_xor", "torch.scatter", "torch.nn.SELU", "torch.rsub", "torch.nn.GroupNorm", "numpy.zeros", "torch.ones_like", "torch.full", "torch.nn.ReplicationPad1d", "torch.min", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.exp", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.init.normal_", "torch.nn.InstanceNorm2d", "torch.log1p", "torch.nn.BatchNorm2d", "numpy.array", "torch.nn.InstanceNorm3d", "torch.take", "torch.nn.ReflectionPad2d", "torch.tan", "torch.nn.ConvTranspose3d", "scipy.stats.t.ppf", "torch.unbind", "torch.gather", "numpy.random.uniform", "torch.bincount", "torch.bitwise_not", "torch.sign", "torch.nn.Hardswish", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.split", "torch.nn.ReplicationPad2d", "torch.norm", "torch.nn.functional.max_pool3d", "torch.nn.ConstantPad2d", "torch.tensor", "torch.nn.Sigmoid", "numpy.std", "torch.nn.LogSigmoid", "torch.rand", "torch.nonzero", "torch.atan", "torch.LongTensor", "torch.nn.LogSoftmax", "torch.isinf", "torch.floor", "torch.trunc", "torch.zeros_like", "torch.stack", "torch.nn.ReflectionPad1d", "torch.nn.Dropout3d", "torch.matmul", "torch.nn.Upsample", "torch.nn.ReLU", "torch.meshgrid", "torch.nn.BatchNorm3d", "torch.nn.Dropout2d", "torch.cat", "torch.nn.ELU", "torch.addcdiv", "torch.nn.Embedding", "numpy.mean", "torch.nn.functional.interpolate", "torch.full_like", "torch.scatter_add", "torch.nn.AvgPool3d", "torch.jit.trace", "torch.ones", "torch.addcmul", "torch.nn.Softplus", "torch.round", "torch.is_floating_point", "torch.nn.MaxPool1d", "torch.isfinite", "torch.nn.CELU", "torch.arange", "torch.index_select", "torch.nn.functional.pad", "torch.cos", "torch.nn.ConvTranspose2d", "torch.nn.AlphaDropout", "torch.nn.PixelShuffle", "torch.cuda.empty_cache", "torch.nn.Conv3d", "torch.log", "torch.nn.LeakyReLU", "torch.cosh", "torch.jit.save", "torch.ceil", "torch.nn.GELU", "torch.true_divide", "torch.nn.AdaptiveMaxPool3d", "torch.nn.AdaptiveAvgPool3d", "torch.sinh", "torch.nn.functional.max_pool1d", "torch.nn.Softmax", "torch.clamp_", "torch.nn.ReplicationPad3d", "torch.tanh", "torch.no_grad", "torch.nn.functional.avg_pool3d", "torch.flatten", "torch.logsumexp", "torch.logical_not", "torch.jit.script", "torch.nn.Dropout", "torch.log2", "torch.asin", "torch.nn.ZeroPad2d", "torch.nn.functional.max_pool2d", "torch.nn.Threshold", "torch.nn.ConstantPad3d", "torch.linspace", "torch.nn.PReLU", "torch.nn.Conv1d", "torch.nn.Hardtanh", "numpy.random.random", "torch.isnan", "torch.nn.LayerNorm", "torch.nn.MaxPool2d", "torch.nn.MaxPool3d", "torch.nn.AdaptiveAvgPool2d", "torch.erf", "torch.nn.functional.one_hot", "torch.nn.ConvTranspose1d", "torch.clamp", "torch.argmax" ], [ "numpy.random.uniform" ], [ "numpy.array" ], [ "numpy.array", "numpy.argmax" ], [ "numpy.std", "numpy.array", "numpy.mean" ], [ "numpy.random.uniform" ], [ "numpy.ones", "numpy.array", "numpy.random.randint" ], [ "numpy.copy", "numpy.nditer", "numpy.random.seed", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jaidevd/scikit-image
[ "62d6a3d7e95a228c729c9ff99b4f45336a210885", "62d6a3d7e95a228c729c9ff99b4f45336a210885", "62d6a3d7e95a228c729c9ff99b4f45336a210885" ]
[ "skimage/morphology/selem.py", "skimage/novice/_novice.py", "skimage/measure/_find_contours.py" ]
[ "\"\"\"\n:author: Damian Eads, 2009\n:license: modified BSD\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage\nfrom skimage import draw\n\ndef square(width, dtype=np.uint8):\n \"\"\"Generates a flat, square-shaped structuring element.\n\n Every pixel along the perimeter has a chessboard distance\n no greater than radius (radius=floor(width/2)) pixels.\n\n Parameters\n ----------\n width : int\n The width and height of the square.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, width), dtype=dtype)\n\n\ndef rectangle(width, height, dtype=np.uint8):\n \"\"\"Generates a flat, rectangular-shaped structuring element.\n\n Every pixel in the rectangle generated for a given width and given height\n belongs to the neighboorhood.\n\n Parameters\n ----------\n width : int\n The width of the rectangle.\n height : int\n The height of the rectangle.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, height), dtype=dtype)\n\n\ndef diamond(radius, dtype=np.uint8):\n \"\"\"Generates a flat, diamond-shaped structuring element.\n\n A pixel is part of the neighborhood (i.e. labeled 1) if\n the city block/manhattan distance between it and the center of\n the neighborhood is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the diamond-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n L = np.arange(0, radius * 2 + 1)\n I, J = np.meshgrid(L, L)\n return np.array(np.abs(I - radius) + np.abs(J - radius) <= radius,\n dtype=dtype)\n\n\ndef disk(radius, dtype=np.uint8):\n \"\"\"Generates a flat, disk-shaped structuring element.\n\n A pixel is within the neighborhood if the euclidean distance between\n it and the origin is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the disk-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n L = np.arange(-radius, radius + 1)\n X, Y = np.meshgrid(L, L)\n return np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)\n\n\ndef ellipse(width, height, dtype=np.uint8):\n \"\"\"Generates a flat, ellipse-shaped structuring element.\n\n Every pixel along the perimeter of ellipse satisfies\n the equation ``(x/width+1)**2 + (y/height+1)**2 = 1``.\n\n Parameters\n ----------\n width : int\n The width of the ellipse-shaped structuring element.\n height : int\n The height of the ellipse-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n Examples\n --------\n >>> from skimage.morphology import selem\n >>> selem.ellipse(5, 3)\n 
array([[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=uint8)\n\n \"\"\"\n selem = np.zeros((2 * height + 1, 2 * width + 1), dtype=dtype)\n rows, cols = draw.ellipse(height, width, height + 1, width + 1)\n selem[rows, cols] = 1\n return selem\n\n\ndef cube(width, dtype=np.uint8):\n \"\"\" Generates a cube-shaped structuring element.\n\n This is the 3D equivalent of a square.\n Every pixel along the perimeter has a chessboard distance\n no greater than radius (radius=floor(width/2)) pixels.\n\n Parameters\n ----------\n width : int\n The width, height and depth of the cube.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n A structuring element consisting only of ones, i.e. every\n pixel belongs to the neighborhood.\n\n \"\"\"\n return np.ones((width, width, width), dtype=dtype)\n\n\ndef octahedron(radius, dtype=np.uint8):\n \"\"\"Generates a octahedron-shaped structuring element.\n\n This is the 3D equivalent of a diamond.\n A pixel is part of the neighborhood (i.e. labeled 1) if\n the city block/manhattan distance between it and the center of\n the neighborhood is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the octahedron-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n # note that in contrast to diamond(), this method allows non-integer radii\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[-radius:radius:n * 1j,\n -radius:radius:n * 1j,\n -radius:radius:n * 1j]\n s = np.abs(X) + np.abs(Y) + np.abs(Z)\n return np.array(s <= radius, dtype=dtype)\n\n\ndef ball(radius, dtype=np.uint8):\n \"\"\"Generates a ball-shaped structuring element.\n\n This is the 3D equivalent of a disk.\n A pixel is within the neighborhood if the euclidean distance between\n it and the origin is no greater than radius.\n\n Parameters\n ----------\n radius : int\n The radius of the ball-shaped structuring element.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n \"\"\"\n n = 2 * radius + 1\n Z, Y, X = np.mgrid[-radius:radius:n * 1j,\n -radius:radius:n * 1j,\n -radius:radius:n * 1j]\n s = X ** 2 + Y ** 2 + Z ** 2\n return np.array(s <= radius * radius, dtype=dtype)\n\n\ndef octagon(m, n, dtype=np.uint8):\n \"\"\"Generates an octagon shaped structuring element.\n\n For a given size of (m) horizontal and vertical sides\n and a given (n) height or width of slanted sides octagon is generated.\n The slanted sides are 45 or 135 degrees to the horizontal axis\n and hence the widths and heights are equal.\n\n Parameters\n ----------\n m : int\n The size of the horizontal and vertical sides.\n n : int\n The height or width of the slanted sides.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n from . 
import convex_hull_image\n selem = np.zeros((m + 2 * n, m + 2 * n))\n selem[0, n] = 1\n selem[n, 0] = 1\n selem[0, m + n - 1] = 1\n selem[m + n - 1, 0] = 1\n selem[-1, n] = 1\n selem[n, -1] = 1\n selem[-1, m + n - 1] = 1\n selem[m + n - 1, -1] = 1\n selem = convex_hull_image(selem).astype(dtype)\n return selem\n\n\ndef star(a, dtype=np.uint8):\n \"\"\"Generates a star shaped structuring element.\n\n Start has 8 vertices and is an overlap of square of size `2*a + 1`\n with its 45 degree rotated version.\n The slanted sides are 45 or 135 degrees to the horizontal axis.\n\n Parameters\n ----------\n a : int\n Parameter deciding the size of the star structural element. The side\n of the square array returned is `2*a + 1 + 2*floor(a / 2)`.\n\n Other Parameters\n ----------------\n dtype : data-type\n The data type of the structuring element.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n from . import convex_hull_image\n\n if a == 1:\n bfilter = np.zeros((3, 3), dtype)\n bfilter[:] = 1\n return bfilter\n\n m = 2 * a + 1\n n = a // 2\n selem_square = np.zeros((m + 2 * n, m + 2 * n))\n selem_square[n: m + n, n: m + n] = 1\n\n c = (m + 2 * n - 1) // 2\n selem_rotated = np.zeros((m + 2 * n, m + 2 * n))\n selem_rotated[0, c] = selem_rotated[-1, c] = 1\n selem_rotated[c, 0] = selem_rotated[c, -1] = 1\n selem_rotated = convex_hull_image(selem_rotated).astype(int)\n\n selem = selem_square + selem_rotated\n selem[selem > 0] = 1\n\n return selem.astype(dtype)\n\n\ndef _default_selem(ndim):\n \"\"\"Generates a cross-shaped structuring element (connectivity=1).\n\n This is the default structuring element (selem) if no selem was specified.\n\n Parameters\n ----------\n ndim : int\n Number of dimensions of the image.\n\n Returns\n -------\n selem : ndarray\n The structuring element where elements of the neighborhood\n are 1 and 0 otherwise.\n\n \"\"\"\n return ndimage.morphology.generate_binary_structure(ndim, 1)\n", "import os\nimport imghdr\nfrom collections import namedtuple\nfrom io import BytesIO\n\nimport numpy as np\nfrom skimage import io\nfrom skimage import img_as_ubyte\nfrom skimage.transform import resize\nfrom skimage.color import color_dict\nfrom skimage.io.util import file_or_url_context, is_url\n\nimport six\nfrom six.moves.urllib_parse import urlparse\nfrom six.moves.urllib import request\nurlopen = request.urlopen\n\n# Convert colors from `skimage.color` to uint8 and allow access through\n# dict or a named tuple.\ncolor_dict = dict((name, tuple(int(255 * c + 0.5) for c in rgb))\n for name, rgb in six.iteritems(color_dict))\ncolors = namedtuple('colors', color_dict.keys())(**color_dict)\n\n\ndef open(path):\n \"\"\"Return Picture object from the given image path.\"\"\"\n return Picture(path)\n\n\ndef _verify_picture_index(index):\n \"\"\"Raise error if picture index is not a 2D index/slice.\"\"\"\n if not (isinstance(index, tuple) and len(index) == 2):\n raise IndexError(\"Expected 2D index but got {0!r}\".format(index))\n\n if all(isinstance(i, int) for i in index):\n return index\n\n # In case we need to fix the array index, convert tuple to list.\n index = list(index)\n\n for i, dim_slice in enumerate(index):\n # If either index is a slice, ensure index object returns 2D array.\n if isinstance(dim_slice, int):\n index[i] = dim_slice = slice(dim_slice, dim_slice + 1)\n\n return tuple(index)\n\n\ndef rgb_transpose(array):\n \"\"\"Return RGB array with first 2 axes transposed.\"\"\"\n return 
np.transpose(array, (1, 0, 2))\n\n\ndef array_to_xy_origin(image):\n \"\"\"Return view of image transformed from array to Cartesian origin.\"\"\"\n return rgb_transpose(image[::-1])\n\n\ndef xy_to_array_origin(image):\n \"\"\"Return view of image transformed from Cartesian to array origin.\"\"\"\n return rgb_transpose(image[:, ::-1])\n\n\nclass Pixel(object):\n \"\"\"A single pixel in a Picture.\n\n Attributes\n ----------\n pic : Picture\n The Picture object that this pixel references.\n array : array_like\n Byte array with raw image data (RGB).\n x : int\n Horizontal coordinate of this pixel (left = 0).\n y : int\n Vertical coordinate of this pixel (bottom = 0).\n rgb : tuple\n RGB tuple with red, green, and blue components (0-255)\n alpha : int\n Transparency component (0-255), 255 (opaque) by default\n\n \"\"\"\n\n def __init__(self, pic, array, x, y, rgb, alpha=255):\n self._picture = pic\n self._x = x\n self._y = y\n self._red = self._validate(rgb[0])\n self._green = self._validate(rgb[1])\n self._blue = self._validate(rgb[2])\n self._alpha = self._validate(alpha)\n\n @property\n def x(self):\n \"\"\"Horizontal location of this pixel in the parent image(left = 0).\"\"\"\n return self._x\n\n @property\n def y(self):\n \"\"\"Vertical location of this pixel in the parent image (bottom = 0).\"\"\"\n return self._y\n\n @property\n def red(self):\n \"\"\"The red component of the pixel (0-255).\"\"\"\n return self._red\n\n @red.setter\n def red(self, value):\n self._red = self._validate(value)\n self._setpixel()\n\n @property\n def green(self):\n \"\"\"The green component of the pixel (0-255).\"\"\"\n return self._green\n\n @green.setter\n def green(self, value):\n self._green = self._validate(value)\n self._setpixel()\n\n @property\n def blue(self):\n \"\"\"The blue component of the pixel (0-255).\"\"\"\n return self._blue\n\n @blue.setter\n def blue(self, value):\n self._blue = self._validate(value)\n self._setpixel()\n\n @property\n def alpha(self):\n \"\"\"The transparency component of the pixel (0-255).\"\"\"\n return self._alpha\n\n @alpha.setter\n def alpha(self, value):\n self._alpha = self._validate(value)\n self._setpixel()\n\n @property\n def rgb(self):\n \"\"\"The RGB color components of the pixel (3 values 0-255).\"\"\"\n return (self.red, self.green, self.blue)\n\n @rgb.setter\n def rgb(self, value):\n if len(value) == 4:\n self.rgba = value\n else:\n self._red, self._green, self._blue \\\n = (self._validate(v) for v in value)\n self._alpha = 255\n self._setpixel()\n\n @property\n def rgba(self):\n \"\"\"The RGB color and transparency components of the pixel\n (4 values 0-255).\n \"\"\"\n return (self.red, self.green, self.blue, self.alpha)\n\n @rgba.setter\n def rgba(self, value):\n self._red, self._green, self._blue, self._alpha \\\n = (self._validate(v) for v in value)\n self._setpixel()\n\n def _validate(self, value):\n \"\"\"Verifies that the pixel value is in [0, 255].\"\"\"\n try:\n value = int(value)\n if (value < 0) or (value > 255):\n raise ValueError()\n except ValueError:\n msg = \"Expected an integer between 0 and 255, but got {0} instead!\"\n raise ValueError(msg.format(value))\n\n return value\n\n def _setpixel(self):\n # RGB + alpha\n self._picture.xy_array[self._x, self._y] = self.rgba\n self._picture._array_modified()\n\n def __eq__(self, other):\n if isinstance(other, Pixel):\n return self.rgba == other.rgba\n\n def __repr__(self):\n args = self.red, self.green, self.blue, self.alpha\n return \"Pixel(red={0}, green={1}, blue={2}, 
alpha={3})\".format(*args)\n\n\nclass Picture(object):\n \"\"\"A 2-D picture made up of pixels.\n\n Attributes\n ----------\n path : str\n Path to an image file to load / URL of an image\n array : array\n Raw RGB or RGBA image data [0-255], with origin at top-left.\n xy_array : array\n Raw RGB or RGBA image data [0-255], with origin at bottom-left.\n\n Examples\n --------\n Load an image from a file\n >>> from skimage import novice\n >>> from skimage import data\n >>> picture = novice.open(data.data_dir + '/chelsea.png')\n\n Load an image from a URL. URL must start with http(s):// or ftp(s)://\n >>> picture = novice.open('http://scikit-image.org/_static/img/logo.png')\n\n Create a blank 100 pixel wide, 200 pixel tall white image\n >>> pic = Picture.from_size((100, 200), color=(255, 255, 255))\n\n Use numpy to make an RGB byte array (shape is height x width x 3)\n >>> import numpy as np\n >>> data = np.zeros(shape=(200, 100, 3), dtype=np.uint8)\n >>> data[:, :, 0] = 255 # Set red component to maximum\n >>> pic = Picture(array=data)\n\n Get the bottom-left pixel\n >>> pic[0, 0]\n Pixel(red=255, green=0, blue=0, alpha=255)\n\n Get the top row of the picture\n >>> pic[:, pic.height-1]\n Picture(100 x 1)\n\n Set the bottom-left pixel to black\n >>> pic[0, 0] = (0, 0, 0)\n\n Set the top row to red\n >>> pic[:, pic.height-1] = (255, 0, 0)\n\n \"\"\"\n\n def __init__(self, path=None, array=None, xy_array=None):\n self._modified = False\n self.scale = 1\n self._path = None\n self._format = None\n\n n_args = len([a for a in [path, array, xy_array] if a is not None])\n if n_args != 1:\n msg = \"Must provide a single keyword arg (path, array, xy_array).\"\n ValueError(msg)\n elif path is not None:\n if not is_url(path):\n path = os.path.abspath(path)\n self._path = path\n with file_or_url_context(path) as context:\n self.array = img_as_ubyte(io.imread(context))\n self._format = imghdr.what(context)\n elif array is not None:\n self.array = array\n elif xy_array is not None:\n self.xy_array = xy_array\n\n # Force RGBA internally (use max alpha)\n if self.array.shape[-1] == 3:\n self.array = np.insert(self.array, 3, values=255, axis=2)\n\n @staticmethod\n def from_size(size, color='black'):\n \"\"\"Return a Picture of the specified size and a uniform color.\n\n Parameters\n ----------\n size : tuple\n Width and height of the picture in pixels.\n color : tuple or str\n RGB or RGBA tuple with the fill color for the picture [0-255] or\n a valid key in `color_dict`.\n \"\"\"\n if isinstance(color, six.string_types):\n color = color_dict[color]\n rgb_size = tuple(size) + (len(color),)\n array = np.ones(rgb_size, dtype=np.uint8) * color\n\n # Force RGBA internally (use max alpha)\n if array.shape[-1] == 3:\n array = np.insert(array, 3, values=255, axis=2)\n\n return Picture(array=array)\n\n @property\n def array(self):\n \"\"\"Image data stored as numpy array.\"\"\"\n return self._array\n\n @array.setter\n def array(self, array):\n self._array = array\n self._xy_array = array_to_xy_origin(array)\n\n @property\n def xy_array(self):\n \"\"\"Image data stored as numpy array with origin at the bottom-left.\"\"\"\n return self._xy_array\n\n @xy_array.setter\n def xy_array(self, array):\n self._xy_array = array\n self._array = xy_to_array_origin(array)\n\n def save(self, path):\n \"\"\"Saves the picture to the given path.\n\n Parameters\n ----------\n path : str\n Path (with file extension) where the picture is saved.\n \"\"\"\n io.imsave(path, self._rescale(self.array))\n self._modified = False\n self._path = 
os.path.abspath(path)\n self._format = imghdr.what(path)\n\n @property\n def path(self):\n \"\"\"The path to the picture.\"\"\"\n return self._path\n\n @property\n def modified(self):\n \"\"\"True if the picture has changed.\"\"\"\n return self._modified\n\n def _array_modified(self):\n self._modified = True\n self._path = None\n\n @property\n def format(self):\n \"\"\"The image format of the picture.\"\"\"\n return self._format\n\n @property\n def size(self):\n \"\"\"The size (width, height) of the picture.\"\"\"\n return self.xy_array.shape[:2]\n\n @size.setter\n def size(self, value):\n # Don't resize if no change in size\n if (value[0] != self.width) or (value[1] != self.height):\n # skimage dimensions are flipped: y, x\n new_size = (int(value[1]), int(value[0]))\n new_array = resize(self.array, new_size, order=0,\n preserve_range=True)\n self.array = new_array.astype(np.uint8)\n\n self._array_modified()\n\n @property\n def width(self):\n \"\"\"The width of the picture.\"\"\"\n return self.size[0]\n\n @width.setter\n def width(self, value):\n self.size = (value, self.height)\n\n @property\n def height(self):\n \"\"\"The height of the picture.\"\"\"\n return self.size[1]\n\n @height.setter\n def height(self, value):\n self.size = (self.width, value)\n\n def _repr_png_(self):\n return io.Image(self._rescale(self.array))._repr_png_()\n\n def show(self):\n \"\"\"Display the image.\"\"\"\n io.imshow(self._rescale(self.array))\n io.show()\n\n def _makepixel(self, x, y):\n \"\"\"Create a Pixel object for a given x, y location.\"\"\"\n rgb = self.xy_array[x, y]\n return Pixel(self, self.array, x, y, rgb)\n\n def _rescale(self, array):\n \"\"\"Rescale image according to scale factor.\"\"\"\n if self.scale == 1:\n return array\n new_size = (self.height * self.scale, self.width * self.scale)\n return img_as_ubyte(resize(array, new_size, order=0))\n\n def _get_channel(self, channel):\n \"\"\"Return a specific dimension out of the raw image data slice.\"\"\"\n return self._array[:, :, channel]\n\n def _set_channel(self, channel, value):\n \"\"\"Set a specific dimension in the raw image data slice.\"\"\"\n self._array[:, :, channel] = value\n\n @property\n def red(self):\n \"\"\"The red component of the pixel (0-255).\"\"\"\n return self._get_channel(0).ravel()\n\n @red.setter\n def red(self, value):\n self._set_channel(0, value)\n\n @property\n def green(self):\n \"\"\"The green component of the pixel (0-255).\"\"\"\n return self._get_channel(1).ravel()\n\n @green.setter\n def green(self, value):\n self._set_channel(1, value)\n\n @property\n def blue(self):\n \"\"\"The blue component of the pixel (0-255).\"\"\"\n return self._get_channel(2).ravel()\n\n @blue.setter\n def blue(self, value):\n self._set_channel(2, value)\n\n @property\n def alpha(self):\n \"\"\"The transparency component of the pixel (0-255).\"\"\"\n return self._get_channel(3).ravel()\n\n @alpha.setter\n def alpha(self, value):\n self._set_channel(3, value)\n\n @property\n def rgb(self):\n \"\"\"The RGB color components of the pixel (3 values 0-255).\"\"\"\n return self.xy_array[:, :, :3]\n\n @rgb.setter\n def rgb(self, value):\n self.xy_array[:, :, :3] = value\n\n @property\n def rgba(self):\n \"\"\"The RGBA color components of the pixel (4 values 0-255).\"\"\"\n return self.xy_array\n\n @rgba.setter\n def rgba(self, value):\n self.xy_array[:] = value\n\n def __iter__(self):\n \"\"\"Iterates over all pixels in the image.\"\"\"\n for x in range(self.width):\n for y in range(self.height):\n yield self._makepixel(x, y)\n\n def 
__getitem__(self, xy_index):\n \"\"\"Return `Picture`s for slices and `Pixel`s for indexes.\"\"\"\n xy_index = _verify_picture_index(xy_index)\n if all(isinstance(index, int) for index in xy_index):\n return self._makepixel(*xy_index)\n else:\n return Picture(xy_array=self.xy_array[xy_index])\n\n def __setitem__(self, xy_index, value):\n xy_index = _verify_picture_index(xy_index)\n if isinstance(value, tuple):\n self[xy_index].rgb = value\n elif isinstance(value, Picture):\n self.xy_array[xy_index] = value.xy_array\n else:\n raise TypeError(\"Invalid value type\")\n self._array_modified()\n\n def __eq__(self, other):\n if not isinstance(other, Picture):\n raise NotImplementedError()\n return np.all(self.array == other.array)\n\n def __repr__(self):\n return \"Picture({0} x {1})\".format(*self.size)\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n", "import numpy as np\nfrom . import _find_contours_cy\n\nfrom collections import deque\n\n_param_options = ('high', 'low')\n\n\ndef find_contours(array, level,\n fully_connected='low', positive_orientation='low'):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n\n Uses the \"marching squares\" method to compute a the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n\n Parameters\n ----------\n array : 2D ndarray of double\n Input data in which to find contours.\n level : float\n Value along which to find contours in the array.\n fully_connected : str, {'low', 'high'}\n Indicates whether array elements below the given level value are to be\n considered fully-connected (and hence elements above the value will\n only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : either 'low' or 'high'\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n contours will wind counter- clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n\n Returns\n -------\n contours : list of (n,2)-ndarrays\n Each contour is an ndarray of shape ``(n, 2)``,\n consisting of n ``(row, column)`` coordinates along the contour.\n\n Notes\n -----\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here::\n\n http://www.essi.fr/~lingrand/MarchingCubes/algo.html\n\n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n elements, each pair diagonally adjacent. (Where high- and low-valued is\n with respect to the contour value sought.) In this case, either the\n high-valued elements can be 'connected together' via a thin isthmus that\n separates the low-valued elements, or vice-versa. When elements are\n connected together across a diagonal, they are considered 'fully\n connected' (also known as 'face+vertex-connected' or '8-connected'). Only\n high-valued or low-valued elements can be fully-connected, the other set\n will be considered as 'face-connected' or '4-connected'. By default,\n low-valued elements are considered fully-connected; this can be altered\n with the 'fully_connected' parameter.\n\n Output contours are not guaranteed to be closed: contours which intersect\n the array edge will be left open. 
All other contours will be closed. (The\n closed-ness of a contours can be tested by checking whether the beginning\n point is the same as the end point.)\n\n Contours are oriented. By default, array values lower than the contour\n value are to the left of the contour and values greater than the contour\n value are to the right. This means that contours will wind\n counter-clockwise (i.e. in 'positive orientation') around islands of\n low-valued pixels. This behavior can be altered with the\n 'positive_orientation' parameter.\n\n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n contour. This is a side-effect of how the input array is traversed, but\n can be relied upon.\n\n .. warning::\n\n Array coordinates/values are assumed to refer to the *center* of the\n array element. Take a simple example input: ``[0, 1]``. The interpolated\n position of 0.5 in this array is midway between the 0-element (at\n ``x=0``) and the 1-element (at ``x=1``), and thus would fall at\n ``x=0.5``.\n\n This means that to find reasonable contours, it is best to find contours\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n especially around structures that are a single array element wide. Instead\n choose a middle value, as above.\n\n References\n ----------\n .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n\n Examples\n --------\n >>> a = np.zeros((3, 3))\n >>> a[0, 0] = 1\n >>> a\n array([[ 1., 0., 0.],\n [ 0., 0., 0.],\n [ 0., 0., 0.]])\n >>> find_contours(a, 0.5)\n [array([[ 0. , 0.5],\n [ 0.5, 0. 
]])]\n \"\"\"\n array = np.asarray(array, dtype=np.double)\n if array.ndim != 2:\n raise ValueError('Only 2D arrays are supported.')\n level = float(level)\n if (fully_connected not in _param_options or\n positive_orientation not in _param_options):\n raise ValueError('Parameters \"fully_connected\" and'\n ' \"positive_orientation\" must be either \"high\" or \"low\".')\n point_list = _find_contours_cy.iterate_and_store(array, level,\n fully_connected == 'high')\n contours = _assemble_contours(_take_2(point_list))\n if positive_orientation == 'high':\n contours = [c[::-1] for c in contours]\n return contours\n\n\ndef _take_2(seq):\n iterator = iter(seq)\n while(True):\n n1 = next(iterator)\n n2 = next(iterator)\n yield (n1, n2)\n\n\ndef _assemble_contours(points_iterator):\n current_index = 0\n contours = {}\n starts = {}\n ends = {}\n for from_point, to_point in points_iterator:\n # Ignore degenerate segments.\n # This happens when (and only when) one vertex of the square is\n # exactly the contour level, and the rest are above or below.\n # This degnerate vertex will be picked up later by neighboring squares.\n if from_point == to_point:\n continue\n\n tail_data = starts.get(to_point)\n head_data = ends.get(from_point)\n\n if tail_data is not None and head_data is not None:\n tail, tail_num = tail_data\n head, head_num = head_data\n # We need to connect these two contours.\n if tail is head:\n # We need to closed a contour.\n # Add the end point, and remove the contour from the\n # 'starts' and 'ends' dicts.\n head.append(to_point)\n del starts[to_point]\n del ends[from_point]\n else: # tail is not head\n # We need to join two distinct contours.\n # We want to keep the first contour segment created, so that\n # the final contours are ordered left->right, top->bottom.\n if tail_num > head_num:\n # tail was created second. Append tail to head.\n head.extend(tail)\n # remove all traces of tail:\n del starts[to_point]\n del ends[tail[-1]]\n del contours[tail_num]\n # remove the old end of head and add the new end.\n del ends[from_point]\n ends[head[-1]] = (head, head_num)\n else: # tail_num <= head_num\n # head was created second. Prepend head to tail.\n tail.extendleft(reversed(head))\n # remove all traces of head:\n del starts[head[0]]\n del ends[from_point]\n del contours[head_num]\n # remove the old start of tail and add the new start.\n del starts[to_point]\n starts[tail[0]] = (tail, tail_num)\n elif tail_data is None and head_data is None:\n # we need to add a new contour\n current_index += 1\n new_num = current_index\n new_contour = deque((from_point, to_point))\n contours[new_num] = new_contour\n starts[from_point] = (new_contour, new_num)\n ends[to_point] = (new_contour, new_num)\n elif tail_data is not None and head_data is None:\n tail, tail_num = tail_data\n # We've found a single contour to which the new segment should be\n # prepended.\n tail.appendleft(from_point)\n del starts[to_point]\n starts[from_point] = (tail, tail_num)\n elif tail_data is None and head_data is not None:\n head, head_num = head_data\n # We've found a single contour to which the new segment should be\n # appended\n head.append(to_point)\n del ends[from_point]\n ends[to_point] = (head, head_num)\n # end iteration over from_ and to_ points\n\n return [np.array(contour) for (num, contour) in sorted(contours.items())]\n" ]
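The code field of this record closes here; it contains skimage's structuring-element generators, the novice Picture wrapper, and the marching-squares find_contours implementation. As a small sanity sketch (plain NumPy, no skimage import), the diamond and disk constructions from selem.py can be re-derived from their distance definitions:

import numpy as np

radius = 2
L = np.arange(-radius, radius + 1)
X, Y = np.meshgrid(L, L)

# Manhattan distance <= radius gives the diamond, Euclidean distance gives the disk.
diamond = (np.abs(X) + np.abs(Y) <= radius).astype(np.uint8)
disk = (X ** 2 + Y ** 2 <= radius ** 2).astype(np.uint8)

print(diamond)
print(disk)

selem.diamond itself indexes from 0 and compares |I - radius| + |J - radius|, which is the same test written around a shifted origin.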
[ [ "numpy.abs", "numpy.meshgrid", "numpy.arange", "numpy.ones", "scipy.ndimage.morphology.generate_binary_structure", "numpy.array", "numpy.zeros" ], [ "numpy.all", "numpy.ones", "numpy.insert", "numpy.transpose" ], [ "numpy.asarray", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shan18/taxi
[ "286e2c9a97c1e0b52d63bbb3508045001f449714" ]
[ "jnt/freq.py" ]
[ "from pandas import read_csv\nimport _pickle as pickle\nfrom traceback import format_exc\n\nfrom .common import exists, preprocess_pandas_csv\nfrom .common import try_remove\n\n\nDEFAULT_FREQ = 1\n\n\ndef load_freq(freq_fpath, min_freq=1, preprocess=True, sep='\\t', strip_pos=True, use_pickle=True):\n f = FreqDictionary(freq_fpath, min_freq=min_freq, preprocess=preprocess, sep=sep, strip_pos=strip_pos, use_pickle=use_pickle)\n return f.data\n\n\nclass FreqDictionary(object):\n def __init__(self, freq_fpath, min_freq=1, preprocess=True, sep='\\t', strip_pos=True, use_pickle=True):\n \"\"\" Reads a word frequency list in CSV format \"word<TAB>freq\" \"\"\"\n\n if not exists(freq_fpath):\n self._freq = {}\n return\n\n pkl_fpath = freq_fpath + \".pkl\"\n if use_pickle and exists(pkl_fpath):\n voc = pickle.load(open(pkl_fpath, \"rb\"))\n else:\n # load words to datafame\n if preprocess:\n freq_cln_fpath = freq_fpath + \"-cln\"\n preprocess_pandas_csv(freq_fpath, freq_cln_fpath)\n word_df = read_csv(freq_cln_fpath, sep, encoding='utf-8', error_bad_lines=False)\n try_remove(freq_cln_fpath)\n else:\n word_df = read_csv(freq_fpath, sep, encoding='utf-8', error_bad_lines=False)\n\n # load from dataframe to dictionary\n word_df = word_df.drop(word_df[word_df[\"freq\"] < min_freq].index)\n if strip_pos:\n voc = {}\n for i, row in word_df.iterrows():\n try:\n word = str(row[\"word\"]).split(\"#\")[0]\n freq = int(row[\"freq\"])\n if word not in voc or voc[word] < freq: voc[word] = freq\n except:\n print(\"Bad row:\", row)\n print(format_exc())\n else:\n voc = { row[\"word\"]: row[\"freq\"] for i, row in word_df.iterrows() }\n\n print(\"dictionary is loaded:\", len(voc))\n\n if use_pickle:\n pickle.dump(voc, open(pkl_fpath, \"wb\"))\n print(\"Pickled voc:\", pkl_fpath)\n\n print(\"Loaded %d words from: %s\" % (len(voc), pkl_fpath if pkl_fpath else freq_fpath))\n\n self._freq = voc\n\n\n @property\n def data(self):\n return self._freq\n\n def freq(self, word):\n \"\"\" Returns frequency of the word or 1 \"\"\"\n\n if word in self._freq: return self._freq[word]\n else: return DEFAULT_FREQ\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hmthanh/LaTeX_OCR
[ "bf5cf4642aff9cbbd5c4f8f232cd993a38ee6d81" ]
[ "models/layers/norm_act.py" ]
[ "from typing import Union, List\n\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\nfrom models.layers.create_act import get_act_layer\nfrom .trace_utils import _assert\n\n\nclass BatchNormAct2d(nn.BatchNorm2d):\n \"\"\"BatchNorm + Activation\n This module performs BatchNorm + Activation in a manner that will remain backwards\n compatible with weights trained with separate bn, act. This is why we inherit from BN\n instead of composing it as a .bn member.\n \"\"\"\n\n def __init__(\n self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(BatchNormAct2d, self).__init__(\n num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing\n _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)')\n\n # exponential_average_factor is set to self.momentum\n # (when it is available) only so that it gets updated\n # in ONNX graph when this node is exported to ONNX.\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None: # type: ignore[has-type]\n self.num_batches_tracked = self.num_batches_tracked + \\\n 1 # type: ignore[has-type]\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / \\\n float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n r\"\"\"\n Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n \"\"\"\n if self.training:\n bn_training = True\n else:\n bn_training = (self.running_mean is None) and (\n self.running_var is None)\n\n r\"\"\"\n Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be\n passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are\n used for normalization (i.e. 
in eval mode when buffers are not None).\n \"\"\"\n x = F.batch_norm(\n x,\n # If buffers are not to be tracked, ensure that they won't be updated\n self.running_mean if not self.training or self.track_running_stats else None,\n self.running_var if not self.training or self.track_running_stats else None,\n self.weight,\n self.bias,\n bn_training,\n exponential_average_factor,\n self.eps,\n )\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\ndef _num_groups(num_channels, num_groups, group_size):\n if group_size:\n assert num_channels % group_size == 0\n return num_channels // group_size\n return num_groups\n\n\nclass GroupNormAct(nn.GroupNorm):\n # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args\n def __init__(\n self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(GroupNormAct, self).__init__(\n _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\nclass LayerNormAct(nn.LayerNorm):\n def __init__(\n self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(LayerNormAct, self).__init__(\n normalization_shape, eps=eps, elementwise_affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.layer_norm(x, self.normalized_shape,\n self.weight, self.bias, self.eps)\n x = self.drop(x)\n x = self.act(x)\n return x\n\n\nclass LayerNormAct2d(nn.LayerNorm):\n def __init__(\n self, num_channels, eps=1e-5, affine=True,\n apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):\n super(LayerNormAct2d, self).__init__(\n num_channels, eps=eps, elementwise_affine=affine)\n self.drop = drop_layer() if drop_layer is not None else nn.Identity()\n act_layer = get_act_layer(act_layer) # string -> nn.Module\n if act_layer is not None and apply_act:\n act_args = dict(inplace=True) if inplace else {}\n self.act = act_layer(**act_args)\n else:\n self.act = nn.Identity()\n\n def forward(self, x):\n x = F.layer_norm(\n x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)\n x = self.drop(x)\n x = self.act(x)\n return x\n" ]
[ [ "torch.nn.functional.layer_norm", "torch.nn.functional.batch_norm", "torch.nn.Identity", "torch.nn.functional.group_norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joycenerd/bird-images-classification
[ "9430f65ba22523809d62b3d84c3e40d8bc47111f" ]
[ "dataset.py" ]
[ "from torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\nimport torch.nn as nn\nimport numpy as np\nimport torch\n\nfrom pathlib import Path\nimport collections\nimport numbers\nimport random\nimport os\n\n\nclass BirdDataset(Dataset):\n def __init__(self, root_dir, mode, transform=None):\n self.root_dir = root_dir\n self.x = []\n self.y = []\n self.transform = transform\n\n if mode == \"train\":\n labels = open(os.path.join(self.root_dir, 'new_train_label.txt'))\n\n elif mode == 'eval':\n labels = open(os.path.join(self.root_dir, 'new_eval_label.txt'))\n\n for label in labels:\n label_list = label.split(',')\n self.x.append(label_list[0])\n self.y.append(int(label_list[1]))\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, index):\n image_path = self.x[index]\n image = Image.open(image_path).convert('RGB')\n image = image.copy()\n\n if self.transform:\n image = self.transform(image)\n\n return image, self.y[index]\n\n\ndef Dataloader(dataset, batch_size, shuffle, num_workers):\n data_loader = DataLoader(\n dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n return data_loader\n\n\ndef _random_colour_space(x):\n output = x.convert(\"HSV\")\n return output\n\n\nclass RandomShift(object):\n def __init__(self, shift):\n self.shift = shift\n\n @staticmethod\n def get_params(shift):\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.\n \"\"\"\n hshift, vshift = np.random.uniform(-shift, shift, size=2)\n\n return hshift, vshift\n\n def __call__(self, img):\n hshift, vshift = self.get_params(self.shift)\n\n return img.transform(img.size, Image.AFFINE, (1, 0, hshift, 0, 1, vshift), resample=Image.BICUBIC, fill=1)\n\n\ndef make_dataset(mode, data_root, img_size):\n colour_transform = transforms.Lambda(lambda x: _random_colour_space(x))\n\n transform = [\n transforms.RandomAffine(degrees=30, shear=50, fillcolor=0),\n transforms.RandomGrayscale(p=0.5),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomPerspective(\n distortion_scale=0.5, p=0.5, fill=0),\n transforms.RandomVerticalFlip(p=0.5),\n transforms.ColorJitter(\n brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),\n RandomShift(3),\n transforms.RandomApply([colour_transform]),\n ]\n\n data_transform_train = transforms.Compose([\n transforms.RandomResizedCrop(img_size),\n transforms.RandomApply(transform, p=0.5),\n transforms.RandomApply([transforms.RandomRotation(\n (-90, 90), expand=False, center=None)], p=0.5),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[\n 0.229, 0.224, 0.225])\n ])\n\n data_transform_dev = transforms.Compose([\n transforms.Resize((img_size, img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[\n 0.229, 0.224, 0.225])\n ])\n\n data_transform_test = transforms.Compose([\n transforms.Resize((img_size, img_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n if (mode == \"train\"):\n data_set = BirdDataset(data_root, mode, data_transform_train)\n elif (mode == \"eval\"):\n data_set = BirdDataset(data_root, mode, data_transform_dev)\n elif (mode == \"test\"):\n data_set = BirdDataset(data_root, mode, data_transform_test)\n\n return data_set\n" ]
[ [ "numpy.random.uniform", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BB88Lee/mmdetection3d
[ "62aeeadf70ac1229c595e3a4fe09d8a49df808f1", "62aeeadf70ac1229c595e3a4fe09d8a49df808f1", "62aeeadf70ac1229c595e3a4fe09d8a49df808f1", "62aeeadf70ac1229c595e3a4fe09d8a49df808f1" ]
[ "tests/test_voxel_encoders.py", "mmdet3d/core/evaluation/kitti_utils/eval.py", "mmdet3d/models/backbones/pointnet2_sa_msg.py", "tests/test_backbones.py" ]
[ "import torch\n\nfrom mmdet3d.models.builder import build_voxel_encoder\n\n\ndef test_pillar_feature_net():\n pillar_feature_net_cfg = dict(\n type='PillarFeatureNet',\n in_channels=5,\n feat_channels=[64],\n with_distance=False,\n voxel_size=(0.2, 0.2, 8),\n point_cloud_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0),\n norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01))\n\n pillar_feature_net = build_voxel_encoder(pillar_feature_net_cfg)\n\n features = torch.rand([97297, 20, 5])\n num_voxels = torch.randint(1, 100, [97297])\n coors = torch.randint(0, 100, [97297, 4])\n\n features = pillar_feature_net(features, num_voxels, coors)\n assert features.shape == torch.Size([97297, 64])\n\n\ndef test_hard_simple_VFE():\n hard_simple_VFE_cfg = dict(type='HardSimpleVFE', num_features=5)\n hard_simple_VFE = build_voxel_encoder(hard_simple_VFE_cfg)\n features = torch.rand([240000, 10, 5])\n num_voxels = torch.randint(1, 10, [240000])\n\n outputs = hard_simple_VFE(features, num_voxels, None)\n assert outputs.shape == torch.Size([240000, 5])\n", "import gc\nimport io as sysio\nimport numba\nimport numpy as np\n\n\[email protected]\ndef get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):\n scores.sort()\n scores = scores[::-1]\n current_recall = 0\n thresholds = []\n for i, score in enumerate(scores):\n l_recall = (i + 1) / num_gt\n if i < (len(scores) - 1):\n r_recall = (i + 2) / num_gt\n else:\n r_recall = l_recall\n if (((r_recall - current_recall) < (current_recall - l_recall))\n and (i < (len(scores) - 1))):\n continue\n # recall = l_recall\n thresholds.append(score)\n current_recall += 1 / (num_sample_pts - 1.0)\n return thresholds\n\n\ndef clean_data(gt_anno, dt_anno, current_class, difficulty):\n CLASS_NAMES = ['car', 'pedestrian', 'cyclist']\n MIN_HEIGHT = [40, 25, 25]\n MAX_OCCLUSION = [0, 1, 2]\n MAX_TRUNCATION = [0.15, 0.3, 0.5]\n dc_bboxes, ignored_gt, ignored_dt = [], [], []\n current_cls_name = CLASS_NAMES[current_class].lower()\n num_gt = len(gt_anno['name'])\n num_dt = len(dt_anno['name'])\n num_valid_gt = 0\n for i in range(num_gt):\n bbox = gt_anno['bbox'][i]\n gt_name = gt_anno['name'][i].lower()\n height = bbox[3] - bbox[1]\n valid_class = -1\n if (gt_name == current_cls_name):\n valid_class = 1\n elif (current_cls_name == 'Pedestrian'.lower()\n and 'Person_sitting'.lower() == gt_name):\n valid_class = 0\n elif (current_cls_name == 'Car'.lower() and 'Van'.lower() == gt_name):\n valid_class = 0\n else:\n valid_class = -1\n ignore = False\n if ((gt_anno['occluded'][i] > MAX_OCCLUSION[difficulty])\n or (gt_anno['truncated'][i] > MAX_TRUNCATION[difficulty])\n or (height <= MIN_HEIGHT[difficulty])):\n ignore = True\n if valid_class == 1 and not ignore:\n ignored_gt.append(0)\n num_valid_gt += 1\n elif (valid_class == 0 or (ignore and (valid_class == 1))):\n ignored_gt.append(1)\n else:\n ignored_gt.append(-1)\n # for i in range(num_gt):\n if gt_anno['name'][i] == 'DontCare':\n dc_bboxes.append(gt_anno['bbox'][i])\n for i in range(num_dt):\n if (dt_anno['name'][i].lower() == current_cls_name):\n valid_class = 1\n else:\n valid_class = -1\n height = abs(dt_anno['bbox'][i, 3] - dt_anno['bbox'][i, 1])\n if height < MIN_HEIGHT[difficulty]:\n ignored_dt.append(1)\n elif valid_class == 1:\n ignored_dt.append(0)\n else:\n ignored_dt.append(-1)\n\n return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes\n\n\[email protected](nopython=True)\ndef image_box_overlap(boxes, query_boxes, criterion=-1):\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), 
dtype=boxes.dtype)\n for k in range(K):\n qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *\n (query_boxes[k, 3] - query_boxes[k, 1]))\n for n in range(N):\n iw = (\n min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]))\n if iw > 0:\n ih = (\n min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]))\n if ih > 0:\n if criterion == -1:\n ua = ((boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]) + qbox_area -\n iw * ih)\n elif criterion == 0:\n ua = ((boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]))\n elif criterion == 1:\n ua = qbox_area\n else:\n ua = 1.0\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\ndef bev_box_overlap(boxes, qboxes, criterion=-1):\n from .rotate_iou import rotate_iou_gpu_eval\n riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)\n return riou\n\n\[email protected](nopython=True, parallel=True)\ndef d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):\n # ONLY support overlap in CAMERA, not lidar.\n # TODO: change to use prange for parallel mode, should check the difference\n N, K = boxes.shape[0], qboxes.shape[0]\n for i in numba.prange(N):\n for j in numba.prange(K):\n if rinc[i, j] > 0:\n # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +\n # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))\n iw = (\n min(boxes[i, 1], qboxes[j, 1]) -\n max(boxes[i, 1] - boxes[i, 4],\n qboxes[j, 1] - qboxes[j, 4]))\n\n if iw > 0:\n area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]\n area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]\n inc = iw * rinc[i, j]\n if criterion == -1:\n ua = (area1 + area2 - inc)\n elif criterion == 0:\n ua = area1\n elif criterion == 1:\n ua = area2\n else:\n ua = inc\n rinc[i, j] = inc / ua\n else:\n rinc[i, j] = 0.0\n\n\ndef d3_box_overlap(boxes, qboxes, criterion=-1):\n from .rotate_iou import rotate_iou_gpu_eval\n rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],\n qboxes[:, [0, 2, 3, 5, 6]], 2)\n d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)\n return rinc\n\n\[email protected](nopython=True)\ndef compute_statistics_jit(overlaps,\n gt_datas,\n dt_datas,\n ignored_gt,\n ignored_det,\n dc_bboxes,\n metric,\n min_overlap,\n thresh=0,\n compute_fp=False,\n compute_aos=False):\n\n det_size = dt_datas.shape[0]\n gt_size = gt_datas.shape[0]\n dt_scores = dt_datas[:, -1]\n dt_alphas = dt_datas[:, 4]\n gt_alphas = gt_datas[:, 4]\n dt_bboxes = dt_datas[:, :4]\n # gt_bboxes = gt_datas[:, :4]\n\n assigned_detection = [False] * det_size\n ignored_threshold = [False] * det_size\n if compute_fp:\n for i in range(det_size):\n if (dt_scores[i] < thresh):\n ignored_threshold[i] = True\n NO_DETECTION = -10000000\n tp, fp, fn, similarity = 0, 0, 0, 0\n # thresholds = [0.0]\n # delta = [0.0]\n thresholds = np.zeros((gt_size, ))\n thresh_idx = 0\n delta = np.zeros((gt_size, ))\n delta_idx = 0\n for i in range(gt_size):\n if ignored_gt[i] == -1:\n continue\n det_idx = -1\n valid_detection = NO_DETECTION\n max_overlap = 0\n assigned_ignored_det = False\n\n for j in range(det_size):\n if (ignored_det[j] == -1):\n continue\n if (assigned_detection[j]):\n continue\n if (ignored_threshold[j]):\n continue\n overlap = overlaps[j, i]\n dt_score = dt_scores[j]\n if (not compute_fp and (overlap > min_overlap)\n and dt_score > valid_detection):\n det_idx = j\n valid_detection = dt_score\n elif (compute_fp and (overlap > min_overlap)\n and (overlap > max_overlap or assigned_ignored_det)\n and ignored_det[j] == 0):\n max_overlap = overlap\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = 
False\n elif (compute_fp and (overlap > min_overlap)\n and (valid_detection == NO_DETECTION)\n and ignored_det[j] == 1):\n det_idx = j\n valid_detection = 1\n assigned_ignored_det = True\n\n if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:\n fn += 1\n elif ((valid_detection != NO_DETECTION)\n and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):\n assigned_detection[det_idx] = True\n elif valid_detection != NO_DETECTION:\n tp += 1\n # thresholds.append(dt_scores[det_idx])\n thresholds[thresh_idx] = dt_scores[det_idx]\n thresh_idx += 1\n if compute_aos:\n # delta.append(gt_alphas[i] - dt_alphas[det_idx])\n delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]\n delta_idx += 1\n\n assigned_detection[det_idx] = True\n if compute_fp:\n for i in range(det_size):\n if (not (assigned_detection[i] or ignored_det[i] == -1\n or ignored_det[i] == 1 or ignored_threshold[i])):\n fp += 1\n nstuff = 0\n if metric == 0:\n overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)\n for i in range(dc_bboxes.shape[0]):\n for j in range(det_size):\n if (assigned_detection[j]):\n continue\n if (ignored_det[j] == -1 or ignored_det[j] == 1):\n continue\n if (ignored_threshold[j]):\n continue\n if overlaps_dt_dc[j, i] > min_overlap:\n assigned_detection[j] = True\n nstuff += 1\n fp -= nstuff\n if compute_aos:\n tmp = np.zeros((fp + delta_idx, ))\n # tmp = [0] * fp\n for i in range(delta_idx):\n tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0\n # tmp.append((1.0 + np.cos(delta[i])) / 2.0)\n # assert len(tmp) == fp + tp\n # assert len(delta) == tp\n if tp > 0 or fp > 0:\n similarity = np.sum(tmp)\n else:\n similarity = -1\n return tp, fp, fn, similarity, thresholds[:thresh_idx]\n\n\ndef get_split_parts(num, num_part):\n same_part = num // num_part\n remain_num = num % num_part\n if remain_num == 0:\n return [same_part] * num_part\n else:\n return [same_part] * num_part + [remain_num]\n\n\[email protected](nopython=True)\ndef fused_compute_statistics(overlaps,\n pr,\n gt_nums,\n dt_nums,\n dc_nums,\n gt_datas,\n dt_datas,\n dontcares,\n ignored_gts,\n ignored_dets,\n metric,\n min_overlap,\n thresholds,\n compute_aos=False):\n gt_num = 0\n dt_num = 0\n dc_num = 0\n for i in range(gt_nums.shape[0]):\n for t, thresh in enumerate(thresholds):\n overlap = overlaps[dt_num:dt_num + dt_nums[i],\n gt_num:gt_num + gt_nums[i]]\n\n gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]\n dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]\n ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]\n ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]\n dontcare = dontcares[dc_num:dc_num + dc_nums[i]]\n tp, fp, fn, similarity, _ = compute_statistics_jit(\n overlap,\n gt_data,\n dt_data,\n ignored_gt,\n ignored_det,\n dontcare,\n metric,\n min_overlap=min_overlap,\n thresh=thresh,\n compute_fp=True,\n compute_aos=compute_aos)\n pr[t, 0] += tp\n pr[t, 1] += fp\n pr[t, 2] += fn\n if similarity != -1:\n pr[t, 3] += similarity\n gt_num += gt_nums[i]\n dt_num += dt_nums[i]\n dc_num += dc_nums[i]\n\n\ndef calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):\n \"\"\"Fast iou algorithm. this function can be used independently to do result\n analysis. Must be used in CAMERA coordinate system.\n\n Args:\n gt_annos (dict): Must from get_label_annos() in kitti_common.py.\n dt_annos (dict): Must from get_label_annos() in kitti_common.py.\n metric (int): Eval type. 
0: bbox, 1: bev, 2: 3d.\n num_parts (int): A parameter for fast calculate algorithm.\n \"\"\"\n assert len(gt_annos) == len(dt_annos)\n total_dt_num = np.stack([len(a['name']) for a in dt_annos], 0)\n total_gt_num = np.stack([len(a['name']) for a in gt_annos], 0)\n num_examples = len(gt_annos)\n split_parts = get_split_parts(num_examples, num_parts)\n parted_overlaps = []\n example_idx = 0\n\n for num_part in split_parts:\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\n if metric == 0:\n gt_boxes = np.concatenate([a['bbox'] for a in gt_annos_part], 0)\n dt_boxes = np.concatenate([a['bbox'] for a in dt_annos_part], 0)\n overlap_part = image_box_overlap(gt_boxes, dt_boxes)\n elif metric == 1:\n loc = np.concatenate(\n [a['location'][:, [0, 2]] for a in gt_annos_part], 0)\n dims = np.concatenate(\n [a['dimensions'][:, [0, 2]] for a in gt_annos_part], 0)\n rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0)\n gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],\n axis=1)\n loc = np.concatenate(\n [a['location'][:, [0, 2]] for a in dt_annos_part], 0)\n dims = np.concatenate(\n [a['dimensions'][:, [0, 2]] for a in dt_annos_part], 0)\n rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],\n axis=1)\n overlap_part = bev_box_overlap(gt_boxes,\n dt_boxes).astype(np.float64)\n elif metric == 2:\n loc = np.concatenate([a['location'] for a in gt_annos_part], 0)\n dims = np.concatenate([a['dimensions'] for a in gt_annos_part], 0)\n rots = np.concatenate([a['rotation_y'] for a in gt_annos_part], 0)\n gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],\n axis=1)\n loc = np.concatenate([a['location'] for a in dt_annos_part], 0)\n dims = np.concatenate([a['dimensions'] for a in dt_annos_part], 0)\n rots = np.concatenate([a['rotation_y'] for a in dt_annos_part], 0)\n dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],\n axis=1)\n overlap_part = d3_box_overlap(gt_boxes,\n dt_boxes).astype(np.float64)\n else:\n raise ValueError('unknown metric')\n parted_overlaps.append(overlap_part)\n example_idx += num_part\n overlaps = []\n example_idx = 0\n for j, num_part in enumerate(split_parts):\n gt_annos_part = gt_annos[example_idx:example_idx + num_part]\n dt_annos_part = dt_annos[example_idx:example_idx + num_part]\n gt_num_idx, dt_num_idx = 0, 0\n for i in range(num_part):\n gt_box_num = total_gt_num[example_idx + i]\n dt_box_num = total_dt_num[example_idx + i]\n overlaps.append(\n parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,\n dt_num_idx:dt_num_idx + dt_box_num])\n gt_num_idx += gt_box_num\n dt_num_idx += dt_box_num\n example_idx += num_part\n\n return overlaps, parted_overlaps, total_gt_num, total_dt_num\n\n\ndef _prepare_data(gt_annos, dt_annos, current_class, difficulty):\n gt_datas_list = []\n dt_datas_list = []\n total_dc_num = []\n ignored_gts, ignored_dets, dontcares = [], [], []\n total_num_valid_gt = 0\n for i in range(len(gt_annos)):\n rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)\n num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets\n ignored_gts.append(np.array(ignored_gt, dtype=np.int64))\n ignored_dets.append(np.array(ignored_det, dtype=np.int64))\n if len(dc_bboxes) == 0:\n dc_bboxes = np.zeros((0, 4)).astype(np.float64)\n else:\n dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)\n total_dc_num.append(dc_bboxes.shape[0])\n dontcares.append(dc_bboxes)\n 
total_num_valid_gt += num_valid_gt\n gt_datas = np.concatenate(\n [gt_annos[i]['bbox'], gt_annos[i]['alpha'][..., np.newaxis]], 1)\n dt_datas = np.concatenate([\n dt_annos[i]['bbox'], dt_annos[i]['alpha'][..., np.newaxis],\n dt_annos[i]['score'][..., np.newaxis]\n ], 1)\n gt_datas_list.append(gt_datas)\n dt_datas_list.append(dt_datas)\n total_dc_num = np.stack(total_dc_num, axis=0)\n return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,\n total_dc_num, total_num_valid_gt)\n\n\ndef eval_class(gt_annos,\n dt_annos,\n current_classes,\n difficultys,\n metric,\n min_overlaps,\n compute_aos=False,\n num_parts=200):\n \"\"\"Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.\n\n Args:\n gt_annos (dict): Must from get_label_annos() in kitti_common.py.\n dt_annos (dict): Must from get_label_annos() in kitti_common.py.\n current_classes (list[int]): 0: car, 1: pedestrian, 2: cyclist.\n difficultys (list[int]): Eval difficulty, 0: easy, 1: normal, 2: hard\n metric (int): Eval type. 0: bbox, 1: bev, 2: 3d\n min_overlaps (float): Min overlap. format:\n [num_overlap, metric, class].\n num_parts (int): A parameter for fast calculate algorithm\n\n Returns:\n dict[str, np.ndarray]: recall, precision and aos\n \"\"\"\n assert len(gt_annos) == len(dt_annos)\n num_examples = len(gt_annos)\n if num_examples < num_parts:\n num_parts = num_examples\n split_parts = get_split_parts(num_examples, num_parts)\n\n rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)\n overlaps, parted_overlaps, total_dt_num, total_gt_num = rets\n N_SAMPLE_PTS = 41\n num_minoverlap = len(min_overlaps)\n num_class = len(current_classes)\n num_difficulty = len(difficultys)\n precision = np.zeros(\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n recall = np.zeros(\n [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])\n for m, current_class in enumerate(current_classes):\n for idx_l, difficulty in enumerate(difficultys):\n rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)\n (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,\n dontcares, total_dc_num, total_num_valid_gt) = rets\n for k, min_overlap in enumerate(min_overlaps[:, metric, m]):\n thresholdss = []\n for i in range(len(gt_annos)):\n rets = compute_statistics_jit(\n overlaps[i],\n gt_datas_list[i],\n dt_datas_list[i],\n ignored_gts[i],\n ignored_dets[i],\n dontcares[i],\n metric,\n min_overlap=min_overlap,\n thresh=0.0,\n compute_fp=False)\n tp, fp, fn, similarity, thresholds = rets\n thresholdss += thresholds.tolist()\n thresholdss = np.array(thresholdss)\n thresholds = get_thresholds(thresholdss, total_num_valid_gt)\n thresholds = np.array(thresholds)\n pr = np.zeros([len(thresholds), 4])\n idx = 0\n for j, num_part in enumerate(split_parts):\n gt_datas_part = np.concatenate(\n gt_datas_list[idx:idx + num_part], 0)\n dt_datas_part = np.concatenate(\n dt_datas_list[idx:idx + num_part], 0)\n dc_datas_part = np.concatenate(\n dontcares[idx:idx + num_part], 0)\n ignored_dets_part = np.concatenate(\n ignored_dets[idx:idx + num_part], 0)\n ignored_gts_part = np.concatenate(\n ignored_gts[idx:idx + num_part], 0)\n fused_compute_statistics(\n parted_overlaps[j],\n pr,\n total_gt_num[idx:idx + num_part],\n total_dt_num[idx:idx + num_part],\n total_dc_num[idx:idx + num_part],\n gt_datas_part,\n dt_datas_part,\n dc_datas_part,\n ignored_gts_part,\n ignored_dets_part,\n metric,\n min_overlap=min_overlap,\n 
thresholds=thresholds,\n compute_aos=compute_aos)\n idx += num_part\n for i in range(len(thresholds)):\n recall[m, idx_l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])\n precision[m, idx_l, k, i] = pr[i, 0] / (\n pr[i, 0] + pr[i, 1])\n if compute_aos:\n aos[m, idx_l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])\n for i in range(len(thresholds)):\n precision[m, idx_l, k, i] = np.max(\n precision[m, idx_l, k, i:], axis=-1)\n recall[m, idx_l, k, i] = np.max(\n recall[m, idx_l, k, i:], axis=-1)\n if compute_aos:\n aos[m, idx_l, k, i] = np.max(\n aos[m, idx_l, k, i:], axis=-1)\n ret_dict = {\n 'recall': recall,\n 'precision': precision,\n 'orientation': aos,\n }\n\n # clean temp variables\n del overlaps\n del parted_overlaps\n\n gc.collect()\n return ret_dict\n\n\ndef get_mAP(prec):\n sums = 0\n for i in range(0, prec.shape[-1], 4):\n sums = sums + prec[..., i]\n return sums / 11 * 100\n\n\ndef print_str(value, *arg, sstream=None):\n if sstream is None:\n sstream = sysio.StringIO()\n sstream.truncate(0)\n sstream.seek(0)\n print(value, *arg, file=sstream)\n return sstream.getvalue()\n\n\ndef do_eval(gt_annos,\n dt_annos,\n current_classes,\n min_overlaps,\n eval_types=['bbox', 'bev', '3d']):\n # min_overlaps: [num_minoverlap, metric, num_class]\n difficultys = [0, 1, 2]\n mAP_bbox = None\n mAP_aos = None\n if 'bbox' in eval_types:\n ret = eval_class(\n gt_annos,\n dt_annos,\n current_classes,\n difficultys,\n 0,\n min_overlaps,\n compute_aos=('aos' in eval_types))\n # ret: [num_class, num_diff, num_minoverlap, num_sample_points]\n mAP_bbox = get_mAP(ret['precision'])\n if 'aos' in eval_types:\n mAP_aos = get_mAP(ret['orientation'])\n\n mAP_bev = None\n if 'bev' in eval_types:\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,\n min_overlaps)\n mAP_bev = get_mAP(ret['precision'])\n\n mAP_3d = None\n if '3d' in eval_types:\n ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,\n min_overlaps)\n mAP_3d = get_mAP(ret['precision'])\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos\n\n\ndef do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,\n compute_aos):\n # overlap_ranges: [range, metric, num_class]\n min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])\n for i in range(overlap_ranges.shape[1]):\n for j in range(overlap_ranges.shape[2]):\n min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])\n mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval(gt_annos, dt_annos,\n current_classes, min_overlaps,\n compute_aos)\n # ret: [num_class, num_diff, num_minoverlap]\n mAP_bbox = mAP_bbox.mean(-1)\n mAP_bev = mAP_bev.mean(-1)\n mAP_3d = mAP_3d.mean(-1)\n if mAP_aos is not None:\n mAP_aos = mAP_aos.mean(-1)\n return mAP_bbox, mAP_bev, mAP_3d, mAP_aos\n\n\ndef kitti_eval(gt_annos,\n dt_annos,\n current_classes,\n eval_types=['bbox', 'bev', '3d']):\n \"\"\"KITTI evaluation.\n\n Args:\n gt_annos (list[dict]): Contain gt information of each sample.\n dt_annos (list[dict]): Contain detected information of each sample.\n current_classes (list[str]): Classes to evaluation.\n eval_types (list[str], optional): Types to eval.\n Defaults to ['bbox', 'bev', '3d'].\n\n Returns:\n tuple: String and dict of evaluation results.\n \"\"\"\n assert len(eval_types) > 0, 'must contain at least one evaluation type'\n if 'aos' in eval_types:\n assert 'bbox' in eval_types, 'must evaluate bbox when evaluating aos'\n overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,\n 0.5], [0.7, 0.5, 0.5, 0.7, 0.5],\n [0.7, 0.5, 0.5, 0.7, 0.5]])\n overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7, 0.5],\n [0.5, 
0.25, 0.25, 0.5, 0.25],\n [0.5, 0.25, 0.25, 0.5, 0.25]])\n min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0) # [2, 3, 5]\n class_to_name = {\n 0: 'Car',\n 1: 'Pedestrian',\n 2: 'Cyclist',\n 3: 'Van',\n 4: 'Person_sitting',\n }\n name_to_class = {v: n for n, v in class_to_name.items()}\n if not isinstance(current_classes, (list, tuple)):\n current_classes = [current_classes]\n current_classes_int = []\n for curcls in current_classes:\n if isinstance(curcls, str):\n current_classes_int.append(name_to_class[curcls])\n else:\n current_classes_int.append(curcls)\n current_classes = current_classes_int\n min_overlaps = min_overlaps[:, :, current_classes]\n result = ''\n # check whether alpha is valid\n compute_aos = False\n pred_alpha = False\n valid_alpha_gt = False\n for anno in dt_annos:\n if anno['alpha'].shape[0] != 0:\n pred_alpha = True\n break\n for anno in gt_annos:\n if anno['alpha'][0] != -10:\n valid_alpha_gt = True\n break\n compute_aos = (pred_alpha and valid_alpha_gt)\n if compute_aos:\n eval_types.append('aos')\n\n mAPbbox, mAPbev, mAP3d, mAPaos = do_eval(gt_annos, dt_annos,\n current_classes, min_overlaps,\n eval_types)\n\n ret_dict = {}\n difficulty = ['easy', 'moderate', 'hard']\n for j, curcls in enumerate(current_classes):\n # mAP threshold array: [num_minoverlap, metric, class]\n # mAP result: [num_class, num_diff, num_minoverlap]\n curcls_name = class_to_name[curcls]\n for i in range(min_overlaps.shape[0]):\n # prepare results for print\n result += ('{} AP@{:.2f}, {:.2f}, {:.2f}:\\n'.format(\n curcls_name, *min_overlaps[i, :, j]))\n if mAPbbox is not None:\n result += 'bbox AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(\n *mAPbbox[j, :, i])\n if mAPbev is not None:\n result += 'bev AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(\n *mAPbev[j, :, i])\n if mAP3d is not None:\n result += '3d AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(\n *mAP3d[j, :, i])\n\n if compute_aos:\n result += 'aos AP:{:.2f}, {:.2f}, {:.2f}\\n'.format(\n *mAPaos[j, :, i])\n\n # prepare results for logger\n for idx in range(3):\n if i == 0:\n postfix = f'{difficulty[idx]}_strict'\n else:\n postfix = f'{difficulty[idx]}_loose'\n prefix = f'KITTI/{curcls_name}'\n if mAP3d is not None:\n ret_dict[f'{prefix}_3D_{postfix}'] = mAP3d[j, idx, i]\n if mAPbev is not None:\n ret_dict[f'{prefix}_BEV_{postfix}'] = mAPbev[j, idx, i]\n if mAPbbox is not None:\n ret_dict[f'{prefix}_2D_{postfix}'] = mAPbbox[j, idx, i]\n\n # calculate mAP over all classes if there are multiple classes\n if len(current_classes) > 1:\n # prepare results for print\n result += ('\\nOverall AP@{}, {}, {}:\\n'.format(*difficulty))\n if mAPbbox is not None:\n mAPbbox = mAPbbox.mean(axis=0)\n result += 'bbox AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(*mAPbbox[:, 0])\n if mAPbev is not None:\n mAPbev = mAPbev.mean(axis=0)\n result += 'bev AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(*mAPbev[:, 0])\n if mAP3d is not None:\n mAP3d = mAP3d.mean(axis=0)\n result += '3d AP:{:.4f}, {:.4f}, {:.4f}\\n'.format(*mAP3d[:, 0])\n if compute_aos:\n mAPaos = mAPaos.mean(axis=0)\n result += 'aos AP:{:.2f}, {:.2f}, {:.2f}\\n'.format(*mAPaos[:, 0])\n\n # prepare results for logger\n for idx in range(3):\n postfix = f'{difficulty[idx]}'\n if mAP3d is not None:\n ret_dict[f'KITTI/Overall_3D_{postfix}'] = mAP3d[idx, 0]\n if mAPbev is not None:\n ret_dict[f'KITTI/Overall_BEV_{postfix}'] = mAPbev[idx, 0]\n if mAPbbox is not None:\n ret_dict[f'KITTI/Overall_2D_{postfix}'] = mAPbbox[idx, 0]\n\n return result, ret_dict\n\n\ndef kitti_eval_coco_style(gt_annos, dt_annos, current_classes):\n \"\"\"coco 
style evaluation of kitti.\n\n Args:\n gt_annos (list[dict]): Contain gt information of each sample.\n dt_annos (list[dict]): Contain detected information of each sample.\n current_classes (list[str]): Classes to evaluation.\n\n Returns:\n string: Evaluation results.\n \"\"\"\n class_to_name = {\n 0: 'Car',\n 1: 'Pedestrian',\n 2: 'Cyclist',\n 3: 'Van',\n 4: 'Person_sitting',\n }\n class_to_range = {\n 0: [0.5, 0.95, 10],\n 1: [0.25, 0.7, 10],\n 2: [0.25, 0.7, 10],\n 3: [0.5, 0.95, 10],\n 4: [0.25, 0.7, 10],\n }\n name_to_class = {v: n for n, v in class_to_name.items()}\n if not isinstance(current_classes, (list, tuple)):\n current_classes = [current_classes]\n current_classes_int = []\n for curcls in current_classes:\n if isinstance(curcls, str):\n current_classes_int.append(name_to_class[curcls])\n else:\n current_classes_int.append(curcls)\n current_classes = current_classes_int\n overlap_ranges = np.zeros([3, 3, len(current_classes)])\n for i, curcls in enumerate(current_classes):\n overlap_ranges[:, :, i] = np.array(class_to_range[curcls])[:,\n np.newaxis]\n result = ''\n # check whether alpha is valid\n compute_aos = False\n for anno in dt_annos:\n if anno['alpha'].shape[0] != 0:\n if anno['alpha'][0] != -10:\n compute_aos = True\n break\n mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(\n gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)\n for j, curcls in enumerate(current_classes):\n # mAP threshold array: [num_minoverlap, metric, class]\n # mAP result: [num_class, num_diff, num_minoverlap]\n o_range = np.array(class_to_range[curcls])[[0, 2, 1]]\n o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)\n result += print_str((f'{class_to_name[curcls]} '\n 'coco AP@{:.2f}:{:.2f}:{:.2f}:'.format(*o_range)))\n result += print_str((f'bbox AP:{mAPbbox[j, 0]:.2f}, '\n f'{mAPbbox[j, 1]:.2f}, '\n f'{mAPbbox[j, 2]:.2f}'))\n result += print_str((f'bev AP:{mAPbev[j, 0]:.2f}, '\n f'{mAPbev[j, 1]:.2f}, '\n f'{mAPbev[j, 2]:.2f}'))\n result += print_str((f'3d AP:{mAP3d[j, 0]:.2f}, '\n f'{mAP3d[j, 1]:.2f}, '\n f'{mAP3d[j, 2]:.2f}'))\n if compute_aos:\n result += print_str((f'aos AP:{mAPaos[j, 0]:.2f}, '\n f'{mAPaos[j, 1]:.2f}, '\n f'{mAPaos[j, 2]:.2f}'))\n return result\n", "import torch\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import auto_fp16\nfrom torch import nn as nn\n\nfrom mmdet3d.ops import build_sa_module\nfrom mmdet.models import BACKBONES\nfrom .base_pointnet import BasePointNet\n\n\[email protected]_module()\nclass PointNet2SAMSG(BasePointNet):\n \"\"\"PointNet2 with Multi-scale grouping.\n\n Args:\n in_channels (int): Input channels of point cloud.\n num_points (tuple[int]): The number of points which each SA\n module samples.\n radii (tuple[float]): Sampling radii of each SA module.\n num_samples (tuple[int]): The number of samples for ball\n query in each SA module.\n sa_channels (tuple[tuple[int]]): Out channels of each mlp in SA module.\n aggregation_channels (tuple[int]): Out channels of aggregation\n multi-scale grouping features.\n fps_mods (tuple[int]): Mod of FPS for each SA module.\n fps_sample_range_lists (tuple[tuple[int]]): The number of sampling\n points which each SA module samples.\n dilated_group (tuple[bool]): Whether to use dilated ball query for\n out_indices (Sequence[int]): Output from which stages.\n norm_cfg (dict): Config of normalization layer.\n sa_cfg (dict): Config of set abstraction module, which may contain\n the following keys and values:\n\n - pool_mod (str): Pool method ('max' or 'avg') for SA modules.\n - use_xyz 
(bool): Whether to use xyz as a part of features.\n - normalize_xyz (bool): Whether to normalize xyz with radii in\n each SA module.\n \"\"\"\n\n def __init__(self,\n in_channels,\n num_points=(2048, 1024, 512, 256),\n radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),\n num_samples=((32, 32, 64), (32, 32, 64), (32, 32, 32)),\n sa_channels=(((16, 16, 32), (16, 16, 32), (32, 32, 64)),\n ((64, 64, 128), (64, 64, 128), (64, 96, 128)),\n ((128, 128, 256), (128, 192, 256), (128, 256,\n 256))),\n aggregation_channels=(64, 128, 256),\n fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')),\n fps_sample_range_lists=((-1), (-1), (512, -1)),\n dilated_group=(True, True, True),\n out_indices=(2, ),\n norm_cfg=dict(type='BN2d'),\n sa_cfg=dict(\n type='PointSAModuleMSG',\n pool_mod='max',\n use_xyz=True,\n normalize_xyz=False)):\n super().__init__()\n self.num_sa = len(sa_channels)\n self.out_indices = out_indices\n assert max(out_indices) < self.num_sa\n assert len(num_points) == len(radii) == len(num_samples) == len(\n sa_channels) == len(aggregation_channels)\n\n self.SA_modules = nn.ModuleList()\n self.aggregation_mlps = nn.ModuleList()\n sa_in_channel = in_channels - 3 # number of channels without xyz\n skip_channel_list = [sa_in_channel]\n\n for sa_index in range(self.num_sa):\n cur_sa_mlps = list(sa_channels[sa_index])\n sa_out_channel = 0\n for radius_index in range(len(radii[sa_index])):\n cur_sa_mlps[radius_index] = [sa_in_channel] + list(\n cur_sa_mlps[radius_index])\n sa_out_channel += cur_sa_mlps[radius_index][-1]\n\n if isinstance(fps_mods[sa_index], tuple):\n cur_fps_mod = list(fps_mods[sa_index])\n else:\n cur_fps_mod = list([fps_mods[sa_index]])\n\n if isinstance(fps_sample_range_lists[sa_index], tuple):\n cur_fps_sample_range_list = list(\n fps_sample_range_lists[sa_index])\n else:\n cur_fps_sample_range_list = list(\n [fps_sample_range_lists[sa_index]])\n\n self.SA_modules.append(\n build_sa_module(\n num_point=num_points[sa_index],\n radii=radii[sa_index],\n sample_nums=num_samples[sa_index],\n mlp_channels=cur_sa_mlps,\n fps_mod=cur_fps_mod,\n fps_sample_range_list=cur_fps_sample_range_list,\n dilated_group=dilated_group[sa_index],\n norm_cfg=norm_cfg,\n cfg=sa_cfg,\n bias=True))\n skip_channel_list.append(sa_out_channel)\n self.aggregation_mlps.append(\n ConvModule(\n sa_out_channel,\n aggregation_channels[sa_index],\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n kernel_size=1,\n bias=True))\n sa_in_channel = aggregation_channels[sa_index]\n\n @auto_fp16(apply_to=('points', ))\n def forward(self, points):\n \"\"\"Forward pass.\n\n Args:\n points (torch.Tensor): point coordinates with features,\n with shape (B, N, 3 + input_feature_dim).\n\n Returns:\n dict[str, torch.Tensor]: Outputs of the last SA module.\n\n - sa_xyz (torch.Tensor): The coordinates of sa features.\n - sa_features (torch.Tensor): The features from the\n last Set Aggregation Layers.\n - sa_indices (torch.Tensor): Indices of the \\\n input points.\n \"\"\"\n xyz, features = self._split_point_feats(points)\n\n batch, num_points = xyz.shape[:2]\n indices = xyz.new_tensor(range(num_points)).unsqueeze(0).repeat(\n batch, 1).long()\n\n sa_xyz = [xyz]\n sa_features = [features]\n sa_indices = [indices]\n\n out_sa_xyz = []\n out_sa_features = []\n out_sa_indices = []\n\n for i in range(self.num_sa):\n cur_xyz, cur_features, cur_indices = self.SA_modules[i](\n sa_xyz[i], sa_features[i])\n cur_features = self.aggregation_mlps[i](cur_features)\n sa_xyz.append(cur_xyz)\n sa_features.append(cur_features)\n 
sa_indices.append(\n torch.gather(sa_indices[-1], 1, cur_indices.long()))\n if i in self.out_indices:\n out_sa_xyz.append(sa_xyz[-1])\n out_sa_features.append(sa_features[-1])\n out_sa_indices.append(sa_indices[-1])\n\n return dict(\n sa_xyz=out_sa_xyz,\n sa_features=out_sa_features,\n sa_indices=out_sa_indices)\n", "import numpy as np\nimport pytest\nimport torch\n\nfrom mmdet3d.models import build_backbone\n\n\ndef test_pointnet2_sa_ssg():\n if not torch.cuda.is_available():\n pytest.skip()\n\n cfg = dict(\n type='PointNet2SASSG',\n in_channels=6,\n num_points=(32, 16),\n radius=(0.8, 1.2),\n num_samples=(16, 8),\n sa_channels=((8, 16), (16, 16)),\n fp_channels=((16, 16), (16, 16)))\n self = build_backbone(cfg)\n self.cuda()\n assert self.SA_modules[0].mlps[0].layer0.conv.in_channels == 6\n assert self.SA_modules[0].mlps[0].layer0.conv.out_channels == 8\n assert self.SA_modules[0].mlps[0].layer1.conv.out_channels == 16\n assert self.SA_modules[1].mlps[0].layer1.conv.out_channels == 16\n assert self.FP_modules[0].mlps.layer0.conv.in_channels == 32\n assert self.FP_modules[0].mlps.layer0.conv.out_channels == 16\n assert self.FP_modules[1].mlps.layer0.conv.in_channels == 19\n\n xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32)\n xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6)\n # test forward\n ret_dict = self(xyz)\n fp_xyz = ret_dict['fp_xyz']\n fp_features = ret_dict['fp_features']\n fp_indices = ret_dict['fp_indices']\n assert len(fp_xyz) == len(fp_features) == len(fp_indices) == 3\n assert fp_xyz[0].shape == torch.Size([1, 16, 3])\n assert fp_xyz[1].shape == torch.Size([1, 32, 3])\n assert fp_xyz[2].shape == torch.Size([1, 100, 3])\n assert fp_features[2].shape == torch.Size([1, 16, 100])\n assert fp_indices[2].shape == torch.Size([1, 100])\n\n\ndef test_multi_backbone():\n if not torch.cuda.is_available():\n pytest.skip()\n\n # test list config\n cfg_list = dict(\n type='MultiBackbone',\n num_streams=4,\n suffixes=['net0', 'net1', 'net2', 'net3'],\n backbones=[\n dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(256, 128, 64, 32),\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d')),\n dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(256, 128, 64, 32),\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d')),\n dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(256, 128, 64, 32),\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d')),\n dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(256, 128, 64, 32),\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d'))\n ])\n\n self = build_backbone(cfg_list)\n self.cuda()\n\n assert len(self.backbone_list) == 4\n\n xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32)\n xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6)\n # test forward\n ret_dict = self(xyz[:, :, :4])\n\n assert 
ret_dict['hd_feature'].shape == torch.Size([1, 256, 128])\n assert ret_dict['fp_xyz_net0'][-1].shape == torch.Size([1, 128, 3])\n assert ret_dict['fp_features_net0'][-1].shape == torch.Size([1, 256, 128])\n\n # test dict config\n cfg_dict = dict(\n type='MultiBackbone',\n num_streams=2,\n suffixes=['net0', 'net1'],\n aggregation_mlp_channels=[512, 128],\n backbones=dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(256, 128, 64, 32),\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d')))\n\n self = build_backbone(cfg_dict)\n self.cuda()\n\n assert len(self.backbone_list) == 2\n\n # test forward\n ret_dict = self(xyz[:, :, :4])\n\n assert ret_dict['hd_feature'].shape == torch.Size([1, 128, 128])\n assert ret_dict['fp_xyz_net0'][-1].shape == torch.Size([1, 128, 3])\n assert ret_dict['fp_features_net0'][-1].shape == torch.Size([1, 256, 128])\n\n # Length of backbone configs list should be equal to num_streams\n with pytest.raises(AssertionError):\n cfg_list['num_streams'] = 3\n build_backbone(cfg_list)\n\n # Length of suffixes list should be equal to num_streams\n with pytest.raises(AssertionError):\n cfg_dict['suffixes'] = ['net0', 'net1', 'net2']\n build_backbone(cfg_dict)\n\n # Type of 'backbones' should be Dict or List[Dict].\n with pytest.raises(AssertionError):\n cfg_dict['backbones'] = 'PointNet2SASSG'\n build_backbone(cfg_dict)\n\n\ndef test_pointnet2_sa_msg():\n if not torch.cuda.is_available():\n pytest.skip()\n cfg = dict(\n type='PointNet2SAMSG',\n in_channels=4,\n num_points=(256, 64, (32, 32)),\n radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),\n num_samples=((8, 8, 16), (8, 8, 16), (8, 8, 8)),\n sa_channels=(((8, 8, 16), (8, 8, 16),\n (8, 8, 16)), ((16, 16, 32), (16, 16, 32), (16, 24, 32)),\n ((32, 32, 64), (32, 24, 64), (32, 64, 64))),\n aggregation_channels=(16, 32, 64),\n fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')),\n fps_sample_range_lists=((-1), (-1), (64, -1)),\n norm_cfg=dict(type='BN2d'),\n sa_cfg=dict(\n type='PointSAModuleMSG',\n pool_mod='max',\n use_xyz=True,\n normalize_xyz=False))\n\n self = build_backbone(cfg)\n self.cuda()\n assert self.SA_modules[0].mlps[0].layer0.conv.in_channels == 4\n assert self.SA_modules[0].mlps[0].layer0.conv.out_channels == 8\n assert self.SA_modules[0].mlps[1].layer1.conv.out_channels == 8\n assert self.SA_modules[2].mlps[2].layer2.conv.out_channels == 64\n\n xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', dtype=np.float32)\n xyz = torch.from_numpy(xyz).view(1, -1, 6).cuda() # (B, N, 6)\n # test forward\n ret_dict = self(xyz[:, :, :4])\n sa_xyz = ret_dict['sa_xyz'][-1]\n sa_features = ret_dict['sa_features'][-1]\n sa_indices = ret_dict['sa_indices'][-1]\n\n assert sa_xyz.shape == torch.Size([1, 64, 3])\n assert sa_features.shape == torch.Size([1, 64, 64])\n assert sa_indices.shape == torch.Size([1, 64])\n\n # out_indices should smaller than the length of SA Modules.\n with pytest.raises(AssertionError):\n build_backbone(\n dict(\n type='PointNet2SAMSG',\n in_channels=4,\n num_points=(256, 64, (32, 32)),\n radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),\n num_samples=((8, 8, 16), (8, 8, 16), (8, 8, 8)),\n sa_channels=(((8, 8, 16), (8, 8, 16), (8, 8, 16)),\n ((16, 16, 32), (16, 16, 32), (16, 24, 32)),\n ((32, 32, 64), (32, 24, 64), (32, 64, 64))),\n aggregation_channels=(16, 32, 64),\n fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 
'D-FPS')),\n fps_sample_range_lists=((-1), (-1), (64, -1)),\n out_indices=(2, 3),\n norm_cfg=dict(type='BN2d'),\n sa_cfg=dict(\n type='PointSAModuleMSG',\n pool_mod='max',\n use_xyz=True,\n normalize_xyz=False)))\n" ]
[ [ "torch.Size", "torch.randint", "torch.rand" ], [ "numpy.linspace", "numpy.cos", "numpy.stack", "numpy.concatenate", "numpy.max", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "torch.nn.ModuleList" ], [ "torch.Size", "numpy.fromfile", "torch.from_numpy", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bhklab/ptl-oar-segmentation
[ "354c3ee7f042a025f74e210a7b8462beac9b727d", "354c3ee7f042a025f74e210a7b8462beac9b727d" ]
[ "utils/models/unetplusplus/model.py", "utils/sliding_window.py" ]
[ "import torch\nfrom torch import nn\nfrom .parts import *\n\n__all__ = [\"VGGUNet\", \"NestedUNet\"]\n\n\nclass VGGUNet(nn.Module):\n def __init__(self, num_classes, input_channels=3, leak_p=0.1, factor=1, **kwargs):\n super().__init__()\n\n nb_filter = [\n 32 // factor,\n 64 // factor,\n 128 // factor,\n 256 // factor,\n 512 // factor,\n ]\n\n self.pool = nn.MaxPool3d(2, 2)\n self.up = nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True)\n\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n self.conv2_2 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv1_3 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv0_4 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n\n self.final = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x2_0 = self.conv2_0(self.pool(x1_0))\n x3_0 = self.conv3_0(self.pool(x2_0))\n x4_0 = self.conv4_0(self.pool(x3_0))\n\n x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, self.up(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, self.up(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, self.up(x1_3)], 1))\n\n output = self.final(x0_4)\n return output\n\n\nclass NestedUNet(nn.Module):\n def __init__(\n self,\n num_classes,\n input_channels=1,\n deep_supervision=False,\n leak_p=0.1,\n factor=1,\n **kwargs,\n ):\n super().__init__()\n\n nb_filter = [\n 40 // factor,\n 80 // factor,\n 160 // factor,\n 320 // factor,\n 640 // factor,\n ]\n\n self.deep_supervision = deep_supervision\n self.pool = nn.MaxPool3d(2, 2)\n # self.up = nn.Upsample(scale_factor=2, mode=\"trilinear\", align_corners=True)\n self.conv0_0 = VGGBlock(input_channels, nb_filter[0], nb_filter[0])\n self.conv1_0 = VGGBlock(nb_filter[0], nb_filter[1], nb_filter[1])\n self.conv2_0 = VGGBlock(nb_filter[1], nb_filter[2], nb_filter[2])\n self.conv3_0 = VGGBlock(nb_filter[2], nb_filter[3], nb_filter[3])\n self.conv4_0 = VGGBlock(nb_filter[3], nb_filter[4], nb_filter[4])\n self.conv0_1 = VGGBlock(nb_filter[0] + nb_filter[1], nb_filter[0], nb_filter[0])\n self.conv1_1 = VGGBlock(nb_filter[1] + nb_filter[2], nb_filter[1], nb_filter[1])\n self.conv2_1 = VGGBlock(nb_filter[2] + nb_filter[3], nb_filter[2], nb_filter[2])\n self.conv3_1 = VGGBlock(nb_filter[3] + nb_filter[4], nb_filter[3], nb_filter[3])\n\n self.conv0_2 = VGGBlock(\n nb_filter[0] * 2 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n self.conv1_2 = VGGBlock(\n nb_filter[1] * 2 + nb_filter[2], nb_filter[1], nb_filter[1]\n )\n self.conv2_2 = VGGBlock(\n nb_filter[2] * 2 + nb_filter[3], nb_filter[2], nb_filter[2]\n )\n\n self.conv0_3 = VGGBlock(\n nb_filter[0] * 3 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n self.conv1_3 = VGGBlock(\n nb_filter[1] * 3 + nb_filter[2], nb_filter[1], nb_filter[1]\n )\n\n self.conv0_4 = VGGBlock(\n nb_filter[0] * 4 + nb_filter[1], nb_filter[0], nb_filter[0]\n )\n\n if self.deep_supervision:\n\n self.final1 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final2 = nn.Conv3d(nb_filter[0], num_classes, 
kernel_size=1)\n self.final3 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final4 = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n self.final_bn = nn.BatchNorm3d(num_classes * 4)\n self.final_relu = nn.LeakyReLU(leak_p, inplace=True)\n self.final = nn.Conv3d(num_classes * 4, num_classes, kernel_size=1)\n\n else:\n\n self.final = nn.Conv3d(nb_filter[0], num_classes, kernel_size=1)\n\n self.upconv1_0 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv2_0 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv2_1 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv3_0 = VGGBlockUP(nb_filter[3], nb_filter[3])\n self.upconv3_1 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv3_2 = VGGBlockUP(nb_filter[1], nb_filter[1])\n self.upconv4_0 = VGGBlockUP(nb_filter[4], nb_filter[4])\n self.upconv4_1 = VGGBlockUP(nb_filter[3], nb_filter[3])\n self.upconv4_2 = VGGBlockUP(nb_filter[2], nb_filter[2])\n self.upconv4_3 = VGGBlockUP(nb_filter[1], nb_filter[1])\n\n def forward(self, input):\n x0_0 = self.conv0_0(input)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x0_1 = self.conv0_1(torch.cat([x0_0, self.upconv1_0(x1_0)], 1))\n\n x2_0 = self.conv2_0(self.pool(x1_0))\n x1_1 = self.conv1_1(torch.cat([x1_0, self.upconv2_0(x2_0)], 1))\n x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.upconv2_1(x1_1)], 1))\n\n x3_0 = self.conv3_0(self.pool(x2_0))\n x2_1 = self.conv2_1(torch.cat([x2_0, self.upconv3_0(x3_0)], 1))\n x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.upconv3_1(x2_1)], 1))\n x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.upconv3_2(x1_2)], 1))\n\n x4_0 = self.conv4_0(self.pool(x3_0))\n x3_1 = self.conv3_1(torch.cat([x3_0, self.upconv4_0(x4_0)], 1))\n x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.upconv4_1(x3_1)], 1))\n x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.upconv4_2(x2_2)], 1))\n x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.upconv4_3(x1_3)], 1))\n\n if self.deep_supervision:\n\n output1 = self.final1(x0_1)\n output2 = self.final2(x0_2)\n output3 = self.final3(x0_3)\n output4 = self.final4(x0_4)\n\n # added this...\n final = self.final_relu(\n self.final_bn(torch.cat((output1, output2, output3, output4), 1))\n )\n final = self.final(final)\n\n return final # [output1, output2, output3, output4]\n\n else:\n output = self.final(x0_4)\n return output\n", "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Callable, List, Sequence, Tuple, Union\nimport torch\nimport torch.nn.functional as F\nfrom monai.data.utils import compute_importance_map, dense_patch_slices, get_valid_patch_size\nfrom monai.utils import BlendMode, PytorchPadMode, fall_back_tuple\n\n__all__ = [\"sliding_window_inference\"]\n\ndef sliding_window_inference(\n inputs: torch.Tensor,\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., torch.Tensor],\n overlap: float = 0.3,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: 
Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> torch.Tensor:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`\n should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];\n where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/nn.functional.html#pad\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. 
Output is on the `device`.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n if inputs.dim() < 5:\n inputs = inputs.unsqueeze(0)\n num_spatial_dims = len(inputs.shape) - 2\n assert 0 <= overlap < 1, \"overlap must be >= 0 and < 1.\"\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n image_size_ = list(inputs.shape[2:])\n batch_size = inputs.shape[0]\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=PytorchPadMode(padding_mode).value, value=cval)\n\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n importance_map = compute_importance_map(\n get_valid_patch_size(image_size, roi_size), mode=mode, sigma_scale=sigma_scale, device=device\n )\n\n # Perform predictions\n output_image, count_map = torch.tensor(0.0, device=device), torch.tensor(0.0, device=device)\n _initialized = False\n for slice_g in range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n seg_prob = predictor(window_data[0]).to(sw_device) # , *args, **kwargs).to(device) # batched patch segmentation\n\n if not _initialized: # init. buffer at the first iteration\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + list(image_size)\n # allocate memory to store the full output and the count for overlapping parts\n output_image = torch.zeros(output_shape, dtype=torch.float32, device=device)\n count_map = torch.zeros(output_shape, dtype=torch.float32, device=device)\n _initialized = True\n\n # store the result in the proper location of the full output. 
Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n output_image[original_idx] += importance_map * seg_prob[idx - slice_g]\n count_map[original_idx] += importance_map\n\n # account for any overlapping sections\n output_image = output_image / count_map\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_image.shape):\n final_slicing.insert(0, slice(None))\n return output_image[final_slicing]\n\n\n\ndef _get_scan_interval(\n image_size: Sequence[int], roi_size: Sequence[int], num_spatial_dims: int, overlap: float\n) -> Tuple[int, ...]:\n \"\"\"\n Compute scan interval according to the image size, roi size and overlap.\n Scan interval will be `int((1 - overlap) * roi_size)`, if interval is 0,\n use 1 instead to make sure sliding window works.\n\n \"\"\"\n if len(image_size) != num_spatial_dims:\n raise ValueError(\"image coord different from spatial dims.\")\n if len(roi_size) != num_spatial_dims:\n raise ValueError(\"roi coord different from spatial dims.\")\n\n scan_interval = []\n for i in range(num_spatial_dims):\n if roi_size[i] == image_size[i]:\n scan_interval.append(int(roi_size[i]))\n else:\n interval = int(roi_size[i] * (1 - overlap))\n scan_interval.append(interval if interval > 0 else 1)\n return tuple(scan_interval)\n" ]
[ [ "torch.cat", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Upsample", "torch.nn.LeakyReLU", "torch.nn.BatchNorm3d" ], [ "torch.zeros", "torch.cat", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WendyBaiYunwei/FSL
[ "e20470872d52332efdb1449b4593445c5d94e4fb", "e20470872d52332efdb1449b4593445c5d94e4fb", "e20470872d52332efdb1449b4593445c5d94e4fb", "e20470872d52332efdb1449b4593445c5d94e4fb" ]
[ "cifar/trans_trans.py", "miniimgnet/KD-gan/task_generator.py", "miniimgnet/miniimagenet_train_few_shot.py", "miniimgnet/KD/kd_train_rel.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import StepLR\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom self_attention_cv import TransformerEncoder\nimport argparse\nimport math\nimport numpy as np\nfrom torchvision import datasets, models\nimport os\nfrom cifar_generator import CIFAR10\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-test\",\"--isTest\",type = bool, default=False)\nargs = parser.parse_args()\n\ntorch.manual_seed(0)\n\nisTest = args.isTest\nCHECKTEACHER = False\nEPOCH = 1\nBATCH_SIZE = 1\nDIM = 28\nDIM2 = 6\nHIDDEN = False\nstudentPth = './trans_learnt_student.pth'\nteacherPth = './trans_teacher_test.pth'\nlFunc = nn.CrossEntropyLoss()\ntokenSize = 8\ncropIs = [tokenSize * i for i in range(1, DIM // tokenSize + 1)]\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.hidden = nn.Linear(12 * 192, 100)\n self.out = nn.Linear(100, 10)\n\n def forward(self, x):\n x = x.reshape(len(x), -1)\n x = self.hidden(x)\n x = self.out(x)\n return x\n\ndef getCrops(inputs):\n batch = np.zeros((len(inputs), (DIM ** 2) // (tokenSize ** 2), 3, tokenSize, tokenSize))\n for batchI, input in enumerate(inputs):\n tokenI = 0\n for i in cropIs:\n for j in cropIs:\n token = input[:, i - tokenSize:i, j - tokenSize:j]\n batch[batchI, tokenI, :, :, :] = token\n tokenI += 1\n batch = torch.from_numpy(batch)\n batch = torch.flatten(batch, start_dim = -3)\n return batch\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n m.weight.data.normal_(0, 0.01)\n m.bias.data = torch.ones(m.bias.data.size())\n\ndef get_loss(out, target):\n loss = torch.square(out - target)\n return loss\n\ndef train(trainloader, student, teacher, optimizer, scheduler, device):\n print(\"Training...\")\n student.train()\n\n for i in range(EPOCH):\n epoch_loss = 0\n count = 0\n for inputs, _ in trainloader:\n inputs = getCrops(inputs).float()\n sample_features = student(Variable(inputs).to(device))\n\n baseline_features = teacher(Variable(inputs).to(device)) # 16 * 32 * 7 * 7\n\n optimizer.zero_grad()\n\n loss = get_loss(sample_features, baseline_features)\n\n loss.backward(torch.ones_like(sample_features))\n\n optimizer.step()\n\n epoch_loss += torch.sum(torch.sum(loss)).item()\n if count % 1000 == 0:\n print(count, epoch_loss / (count + 1))\n count += 1\n scheduler.step()\n torch.save(student.state_dict(), studentPth)\n\ndef trainClassifier(trainloader, student, classifier, optimizer, device):\n student.train()\n\n count = 0\n for inputs, label in trainloader:\n count += 1\n if count % 100 == 0:\n print(count)\n inputs = getCrops(inputs).float()\n \n sample_features = student(Variable(inputs).to(device))\n\n # print(sample_features.shape)\n y = classifier(sample_features)\n optimizer.zero_grad()\n\n label = Variable(label).to(device)\n loss = lFunc(y, label)\n loss.backward()\n\n optimizer.step()\n\ndef test(testloader, model, classifier, device):\n print(\"Testing...\")\n model.eval()\n accuracy = 0\n count = 0\n for inputs, labels in testloader:\n inputs = getCrops(inputs).float()\n sample_features = model(Variable(inputs).to(device))\n y = classifier(sample_features)\n pred_y = torch.max(y, 1)[1].data.squeeze()\n labels = Variable(labels).to(device)\n accuracy += (pred_y == labels).sum().item()\n count += 1\n if count % 1000 == 0:\n print(count)\n print('Test Accuracy of the model on the 10000 test images:', accuracy / 10000 * 
100)\n return accuracy\n\ndef main():\n device = torch.device(\"cuda\")\n\n assert os.path.exists(teacherPth)\n teacher = TransformerEncoder(dim=tokenSize ** 2 * 3,blocks=2,heads=8)\n for param in teacher.parameters():\n param.requires_grad = False\n\n teacher.to(device)\n\n student = TransformerEncoder(dim=tokenSize ** 2 * 3,blocks=6,heads=8)\n student.to(device)\n\n classifier = Classifier()\n classifier.apply(weights_init)\n classifier.to(device)\n\n optimizer = torch.optim.Adam([\n #{\"params\": student.hidden.parameters(), \"lr\": 0.001}, ##train classifier\n {\"params\": student.parameters(), \"lr\": 0.00001},\n ])\n\n scheduler = StepLR(optimizer,step_size=10000,gamma=1.1)\n\n transform = transforms.Compose(\n [#transforms.Resize((DIM, DIM)),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n train_data = CIFAR10(\n root = 'data',\n train = True, \n transform = transform,\n download = False, \n )\n\n trainloader = torch.utils.data.DataLoader(train_data, \n batch_size=BATCH_SIZE, \n shuffle=True, \n num_workers=1)\n\n \n student.load_state_dict(torch.load(teacherPth))\n student.to(device)\n\n # train(trainloader, student, teacher, optimizer, scheduler, device)\n\n test_data = datasets.CIFAR10(\n root = 'data',\n train = False, \n transform = transforms.Compose([transforms.Resize((56, 56)), transforms.ToTensor()]),\n download = True, \n )\n\n testloader = torch.utils.data.DataLoader(test_data, \n batch_size=50, \n shuffle=True, \n num_workers=1)\n \n optimizer = torch.optim.Adam([\n #{\"params\": student.hidden.parameters(), \"lr\": 0.001}, ##train classifier\n {\"params\": student.parameters(), \"lr\": 0.001},\n {\"params\": classifier.hidden.parameters(), \"lr\": 0.01},\n {\"params\": classifier.out.parameters(), \"lr\": 0.005},\n ])\n\n trainloader = torch.utils.data.DataLoader(train_data, \n batch_size=100, \n shuffle=True, \n num_workers=1)\n for i in range(3):\n trainClassifier(trainloader, student, classifier, optimizer, device) ##try freezing encoder\n test(testloader, student, classifier, device)\n \n print('Done.')\n\nif __name__ == '__main__':\n main()", "# code is based on https://github.com/katerakelly/pytorch-maml\nimport torchvision\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torch\nfrom torch.utils.data import DataLoader,Dataset\nimport random\nimport os\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.utils.data.sampler import Sampler\n\ndef imshow(img):\n npimg = img.numpy()\n plt.axis(\"off\")\n plt.imshow(np.transpose(npimg,(1,2,0)))\n plt.show()\n\nclass Rotate(object):\n def __init__(self, angle):\n self.angle = angle\n def __call__(self, x, mode=\"reflect\"):\n x = x.rotate(self.angle)\n return x\n\ndef mini_imagenet_folders():\n train_folder = './train'\n test_folder = './test'\n\n metatrain_folders = [os.path.join(train_folder, label) \\\n for label in os.listdir(train_folder) \\\n if os.path.isdir(os.path.join(train_folder, label)) \\\n ]\n metatest_folders = [os.path.join(test_folder, label) \\\n for label in os.listdir(test_folder) \\\n if os.path.isdir(os.path.join(test_folder, label)) \\\n ]\n\n random.seed(1)\n random.shuffle(metatrain_folders)\n random.shuffle(metatest_folders)\n\n return metatrain_folders,metatest_folders\n\nclass MiniImagenetTask(object):\n\n def __init__(self, character_folders, num_classes, train_num,test_num):\n\n self.character_folders = character_folders\n self.num_classes = num_classes\n 
self.train_num = train_num\n self.test_num = test_num\n\n class_folders = random.sample(self.character_folders,self.num_classes)\n labels = np.array(range(len(class_folders)))\n labels = dict(zip(class_folders, labels))\n samples = dict()\n\n self.train_roots = []\n self.test_roots = []\n for c in class_folders:\n\n temp = [os.path.join(c, x) for x in os.listdir(c)]\n samples[c] = random.sample(temp, len(temp))\n random.shuffle(samples[c])\n\n self.train_roots += samples[c][:train_num]\n self.test_roots += samples[c][train_num:train_num+test_num]\n\n self.train_labels = [labels[self.get_class(x)] for x in self.train_roots]\n self.test_labels = [labels[self.get_class(x)] for x in self.test_roots]\n\n def get_class(self, sample):\n return os.path.join(*sample.split('/')[:-1])\n\n\nclass FewShotDataset(Dataset):\n\n def __init__(self, task, split='train', transform=None, target_transform=None):\n self.transform = transform # Torch operations on the input image\n self.target_transform = target_transform\n self.task = task\n self.split = split\n self.image_roots = self.task.train_roots if self.split == 'train' else self.task.test_roots\n self.labels = self.task.train_labels if self.split == 'train' else self.task.test_labels\n\n def __len__(self):\n return len(self.image_roots)\n\n def __getitem__(self, idx):\n raise NotImplementedError(\"This is an abstract class. Subclass this class for your particular dataset.\")\n\nclass MiniImagenet(FewShotDataset):\n\n def __init__(self, *args, **kwargs):\n super(MiniImagenet, self).__init__(*args, **kwargs)\n\n def __getitem__(self, idx):\n image_root = self.image_roots[idx]\n image = Image.open(image_root)\n image = image.convert('RGB')\n if self.transform is not None:\n image = self.transform(image)\n label = self.labels[idx]\n if self.target_transform is not None:\n label = self.target_transform(label)\n return image, label, image_root\n\n\nclass ClassBalancedSampler(Sampler):\n ''' Samples 'num_inst' examples each from 'num_cl' pools\n of examples of size 'num_per_class' '''\n\n def __init__(self, num_per_class, num_cl, num_inst,shuffle=True):\n self.num_per_class = num_per_class\n self.num_cl = num_cl\n self.num_inst = num_inst\n self.shuffle = shuffle\n\n def __iter__(self):\n # return a single list of indices, assuming that items will be grouped by class\n if self.shuffle:\n batch = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]\n else:\n batch = [[i+j*self.num_inst for i in range(self.num_inst)[:self.num_per_class]] for j in range(self.num_cl)]\n batch = [item for sublist in batch for item in sublist]\n\n if self.shuffle:\n random.shuffle(batch)\n return iter(batch)\n\n def __len__(self):\n return 1\n\n\ndef get_mini_imagenet_data_loader(task, num_per_class=1, split='train',shuffle = False):\n normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])\n\n dataset = MiniImagenet(task,split=split,transform=transforms.Compose([transforms.ToTensor(),normalize]))\n\n if split == 'train':\n sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.train_num,shuffle=shuffle)\n else:\n sampler = ClassBalancedSampler(num_per_class, task.num_classes, task.test_num,shuffle=shuffle)\n\n loader = DataLoader(dataset, batch_size=num_per_class*task.num_classes, sampler=sampler)\n\n return loader\n\n", "#-------------------------------------\n# Project: Learning to Compare: Relation Network for Few-Shot Learning\n# Date: 2017.9.21\n# Author: Flood 
Sung\n# All Rights Reserved\n#-------------------------------------\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.optim.lr_scheduler import StepLR\nimport numpy as np\nimport task_generator_test as tg\nimport os\nimport math\nimport argparse\nimport scipy as sp\nimport scipy.stats\n\nparser = argparse.ArgumentParser(description=\"One Shot Visual Recognition\")\nparser.add_argument(\"-f\",\"--feature_dim\",type = int, default = 128)\nparser.add_argument(\"-r\",\"--relation_dim\",type = int, default = 8)\nparser.add_argument(\"-w\",\"--class_num\",type = int, default = 5)\nparser.add_argument(\"-s\",\"--sample_num_per_class\",type = int, default = 5)\nparser.add_argument(\"-b\",\"--batch_num_per_class\",type = int, default = 10)\nparser.add_argument(\"-e\",\"--episode\",type = int, default= 500000)\nparser.add_argument(\"-t\",\"--test_episode\", type = int, default = 600)\nparser.add_argument(\"-l\",\"--learning_rate\", type = float, default = 0.001)\nparser.add_argument(\"-g\",\"--gpu\",type=int, default=0)\nparser.add_argument(\"-u\",\"--hidden_unit\",type=int,default=10)\nargs = parser.parse_args()\n\n\n# Hyper Parameters\nFEATURE_DIM = args.feature_dim\nRELATION_DIM = args.relation_dim\nCLASS_NUM = args.class_num\nSAMPLE_NUM_PER_CLASS = args.sample_num_per_class\nBATCH_NUM_PER_CLASS = args.batch_num_per_class\nEPISODE = args.episode\nTEST_EPISODE = args.test_episode\nLEARNING_RATE = args.learning_rate\nGPU = args.gpu\nHIDDEN_UNIT = args.hidden_unit\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0*np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * sp.stats.t._ppf((1+confidence)/2., n-1)\n return m,h\n\nclass CNNEncoder(nn.Module):\n \"\"\"\n This is the standard way to define your own network in PyTorch. You typically choose the components\n (e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers\n on the input step-by-step in the forward function. You can use torch.nn.functional to apply functions\n\n such as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each\n step. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how\n you can go about defining your own network.\n\n The documentation for all the various components available o you is here: http://pytorch.org/docs/master/nn.html\n \"\"\"\n\n def __init__(self):\n \"\"\"\n We define an convolutional network that predicts the sign from an image. The components\n required are:\n\n Args:\n params: (Params) contains num_channels\n \"\"\"\n super(CNNEncoder, self).__init__()\n self.num_channels = 32\n \n # each of the convolution layers below have the arguments (input_channels, output_channels, filter_size,\n # stride, padding). 
We also include batch normalisation layers that help stabilise training.\n # For more details on how to use these layers, check out the documentation.\n self.conv1 = nn.Conv2d(3, self.num_channels, 3, stride=1, padding=1)\n self.bn1 = nn.BatchNorm2d(self.num_channels)\n self.conv2 = nn.Conv2d(self.num_channels, self.num_channels*2, 3, stride=1, padding=1)\n self.bn2 = nn.BatchNorm2d(self.num_channels*2)\n self.conv3 = nn.Conv2d(self.num_channels*2, self.num_channels*4, 3, stride=1, padding=1)\n self.bn3 = nn.BatchNorm2d(self.num_channels*4)\n\n # 2 fully connected layers to transform the output of the convolution layers to the final output\n self.fc1 = nn.Linear(4*4*self.num_channels*4, self.num_channels*4)\n self.fcbn1 = nn.BatchNorm1d(self.num_channels*4)\n self.fc2 = nn.Linear(self.num_channels*4, 10) \n self.dropout_rate = 0.5\n\n def forward(self, s):\n \"\"\"\n This function defines how we use the components of our network to operate on an input batch.\n\n Args:\n s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .\n\n Returns:\n out: (Variable) dimension batch_size x 6 with the log probabilities for the labels of each image.\n\n Note: the dimensions after each step are provided\n \"\"\"\n # -> batch_size x 3 x 32 x 32\n # we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3\n s = self.bn1(self.conv1(s)) # batch_size x num_channels x 32 x 32\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels x 16 x 16\n s = self.bn2(self.conv2(s)) # batch_size x num_channels*2 x 16 x 16\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*2 x 8 x 8\n s = self.bn3(self.conv3(s)) # batch_size x num_channels*4 x 8 x 8\n s = F.relu(F.max_pool2d(s, 2)) # batch_size x num_channels*4 x 4 x 4\n\n # # flatten the output for each image\n # s = s.view(-1, 4*4*self.num_channels*4) # batch_size x 4*4*num_channels*4\n\n # # apply 2 fully connected layers with dropout\n # s = F.dropout(F.relu(self.fcbn1(self.fc1(s))), \n # p=self.dropout_rate, training=self.training) # batch_size x self.num_channels*4\n # s = self.fc2(s) # batch_size x 10\n\n return s\n\nclass RelationNetwork(nn.Module):\n \"\"\"docstring for RelationNetwork\"\"\"\n def __init__(self,input_size,hidden_size):\n super(RelationNetwork, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(64*2*2,64*2,kernel_size=3,padding=0),\n nn.BatchNorm2d(64*2, momentum=1, affine=True),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(64*2,64,kernel_size=3,padding=0),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(),\n nn.MaxPool2d(2))\n # self.fc1 = nn.Linear(input_size*3*3,hidden_size)\n self.fc1 = nn.Linear(64,hidden_size)\n self.fc2 = nn.Linear(hidden_size,1)\n\n def forward(self,x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0),-1)\n out = F.relu(self.fc1(out))\n out = F.sigmoid(self.fc2(out))\n return out\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif classname.find('BatchNorm') != -1:\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif classname.find('Linear') != -1:\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data = torch.ones(m.bias.data.size())\n\ndef main():\n # Step 1: init data folders\n print(\"init data folders\")\n # init character folders for dataset construction\n metatrain_folders,metatest_folders = tg.mini_imagenet_folders()\n\n # Step 2: init neural networks\n print(\"init neural networks\")\n\n feature_encoder = CNNEncoder()\n relation_network = RelationNetwork(FEATURE_DIM,RELATION_DIM)\n feature_encoder.load_state_dict(torch.load(os.path.abspath('/home/vipuser/Documents/student_entry2.pth')))\n \n # feature_encoder.load_state_dict(torch.load(os.path.abspath('/home/vipuser/Documents/knowledge-distillation-pytorch/experiments/cnn_distill/best.pth.tar'))['state_dict'])\n relation_network.apply(weights_init)\n\n feature_encoder.cuda(GPU)\n relation_network.cuda(GPU)\n\n feature_encoder_optim = torch.optim.Adam(feature_encoder.parameters(),lr=LEARNING_RATE)\n feature_encoder_scheduler = StepLR(feature_encoder_optim,step_size=100000,gamma=0.5)\n relation_network_optim = torch.optim.Adam(relation_network.parameters(),lr=LEARNING_RATE)\n relation_network_scheduler = StepLR(relation_network_optim,step_size=100000,gamma=0.5)\n\n # if os.path.exists(str(\"./models/miniimagenet_feature_encoder_\" + str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot.pkl\")):\n # feature_encoder.load_state_dict(torch.load(str(\"./models/miniimagenet_feature_encoder_\" + str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot.pkl\")))\n # print(\"load feature encoder success\")\n # if os.path.exists(str(\"./models/miniimagenet_relation_network_\"+ str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot.pkl\")):\n # relation_network.load_state_dict(torch.load(str(\"./models/miniimagenet_relation_network_\"+ str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot.pkl\")))\n # print(\"load relation network success\")\n\n # Step 3: build graph\n print(\"Training...\")\n\n last_accuracy = 0.0\n\n for episode in range(EPISODE):\n\n feature_encoder_scheduler.step(episode)\n relation_network_scheduler.step(episode)\n\n # init dataset\n # sample_dataloader is to obtain previous samples for compare\n # batch_dataloader is to batch samples for training\n task = tg.MiniImagenetTask(metatrain_folders,CLASS_NUM,SAMPLE_NUM_PER_CLASS,BATCH_NUM_PER_CLASS)\n sample_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=SAMPLE_NUM_PER_CLASS,split=\"train\",shuffle=False)\n batch_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=BATCH_NUM_PER_CLASS,split=\"test\",shuffle=True)\n\n # sample datas\n samples,sample_labels = sample_dataloader.__iter__().next() #25*3*84*84\n batches,batch_labels = batch_dataloader.__iter__().next()\n\n # calculate features\n sample_features = feature_encoder(Variable(samples).cuda(GPU)) # 25*64*19*19\n sample_features = sample_features.view(CLASS_NUM,SAMPLE_NUM_PER_CLASS,FEATURE_DIM,10,10)\n sample_features = torch.sum(sample_features,1).squeeze(1)\n batch_features = feature_encoder(Variable(batches).cuda(GPU)) # 20x64*5*5\n\n # calculate relations\n # each batch sample link to every samples to calculate relations\n # to form a 100x128 matrix for relation network\n sample_features_ext = sample_features.unsqueeze(0).repeat(BATCH_NUM_PER_CLASS*CLASS_NUM,1,1,1,1)\n batch_features_ext = 
batch_features.unsqueeze(0).repeat(CLASS_NUM,1,1,1,1)\n batch_features_ext = torch.transpose(batch_features_ext,0,1)\n relation_pairs = torch.cat((sample_features_ext,batch_features_ext),2).view(-1,FEATURE_DIM*2,10,10) #19,19\n relations = relation_network(relation_pairs).view(-1,CLASS_NUM)\n\n mse = nn.MSELoss().cuda(GPU)\n one_hot_labels = Variable(torch.zeros(BATCH_NUM_PER_CLASS*CLASS_NUM, CLASS_NUM).scatter_(1, batch_labels.view(-1,1), 1).cuda(GPU))\n loss = mse(relations,one_hot_labels)\n\n\n # training\n\n feature_encoder.zero_grad()\n relation_network.zero_grad()\n\n loss.backward()\n\n torch.nn.utils.clip_grad_norm(feature_encoder.parameters(),0.5)\n torch.nn.utils.clip_grad_norm(relation_network.parameters(),0.5)\n\n feature_encoder_optim.step()\n relation_network_optim.step()\n\n\n if (episode+1)%100 == 0:\n print(\"episode:\",episode+1,\"loss\",loss.item())\n\n if episode%5000 == 0:\n\n # test\n print(\"Testing...\")\n accuracies = []\n for i in range(TEST_EPISODE):\n total_rewards = 0\n task = tg.MiniImagenetTask(metatest_folders,CLASS_NUM,SAMPLE_NUM_PER_CLASS,15)\n sample_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=SAMPLE_NUM_PER_CLASS,split=\"train\",shuffle=False)\n num_per_class = 5\n test_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=num_per_class,split=\"test\",shuffle=False)\n\n sample_images,sample_labels = sample_dataloader.__iter__().next()\n for test_images,test_labels in test_dataloader:\n batch_size = test_labels.shape[0]\n # calculate features\n sample_features = feature_encoder(Variable(sample_images).cuda(GPU)) # 5x64\n sample_features = sample_features.view(CLASS_NUM,SAMPLE_NUM_PER_CLASS,FEATURE_DIM,10,10)\n sample_features = torch.sum(sample_features,1).squeeze(1)\n test_features = feature_encoder(Variable(test_images).cuda(GPU)) # 20x64\n\n # calculate relations\n # each batch sample link to every samples to calculate relations\n # to form a 100x128 matrix for relation network\n sample_features_ext = sample_features.unsqueeze(0).repeat(batch_size,1,1,1,1)\n\n test_features_ext = test_features.unsqueeze(0).repeat(1*CLASS_NUM,1,1,1,1)\n test_features_ext = torch.transpose(test_features_ext,0,1)\n relation_pairs = torch.cat((sample_features_ext,test_features_ext),2).view(-1,FEATURE_DIM*2,10,10)\n relations = relation_network(relation_pairs).view(-1,CLASS_NUM)\n\n _,predict_labels = torch.max(relations.data,1)\n\n rewards = [1 if predict_labels[j]==test_labels[j] else 0 for j in range(batch_size)]\n\n total_rewards += np.sum(rewards)\n\n\n accuracy = total_rewards/1.0/CLASS_NUM/15\n accuracies.append(accuracy)\n\n\n test_accuracy,h = mean_confidence_interval(accuracies)\n\n print(\"test accuracy:\",test_accuracy,\"h:\",h)\n\n if test_accuracy > last_accuracy:\n\n # save networks\n torch.save(feature_encoder.state_dict(),str(\"./models/miniimagenet_feature_encoder_\" + str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot_no_distil.pkl\"))\n torch.save(relation_network.state_dict(),str(\"./models/miniimagenet_relation_network_\"+ str(CLASS_NUM) +\"way_\" + str(SAMPLE_NUM_PER_CLASS) +\"shot_no_distil.pkl\"))\n\n print(\"save networks for episode:\",episode)\n\n last_accuracy = test_accuracy\n\n\n\n\n\nif __name__ == '__main__':\n main()\n", "#-------------------------------------\n# Project: Learning to Compare: Relation Network for Few-Shot Learning\n# Date: 2017.9.21\n# Author: Flood Sung\n# All Rights Reserved\n#-------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional 
as F\nfrom torch.autograd import Variable\nimport numpy as np\nimport KD_tg as tg\nimport math\nimport argparse\nimport scipy as sp\nimport scipy.stats\nfrom networks.res12 import ResNet\nimport neptune.new as neptune\nfrom torchvision import models\nimport cv2\n\ntorch.manual_seed(0)\n\nparser = argparse.ArgumentParser(description=\"One Shot Visual Recognition\")\nparser.add_argument(\"-f\",\"--feature_dim\",type = int, default = 640)#td, 512\nparser.add_argument(\"-r\",\"--relation_dim\",type = int, default = 8)\nparser.add_argument(\"-w\",\"--class_num\",type = int, default = 5)\nparser.add_argument(\"-s\",\"--sample_num_per_class\",type = int, default = 1)\nparser.add_argument(\"-b\",\"--batch_num_per_class\",type = int, default = 15)\nparser.add_argument(\"-e\",\"--episode\",type = int, default= 100000) #500000 ####\nparser.add_argument(\"-t\",\"--test_episode\", type = int, default = 600)\nparser.add_argument(\"-l\",\"--learning_rate\", type = float, default = 0.001)\nparser.add_argument(\"-g\",\"--gpu\",type=int, default=0)\nparser.add_argument(\"-u\",\"--hidden_unit\",type=int,default=10)\nparser.add_argument(\"-n\",\"--name\",type=str,default='51-kd')\nparser.add_argument(\"-nt\",\"--type\",type=str,default='resnet12')\nargs = parser.parse_args()\n\n# Hyper Parameters\nFEATURE_DIM = args.feature_dim\nRELATION_DIM = args.relation_dim\nCLASS_NUM = args.class_num\nSAMPLE_NUM_PER_CLASS = args.sample_num_per_class\nBATCH_NUM_PER_CLASS = args.batch_num_per_class\nEPISODE = args.episode\nTEST_EPISODE = args.test_episode\nLEARNING_RATE = args.learning_rate\nGPU = args.gpu\nHIDDEN_UNIT = args.hidden_unit\nEXPERIMENT_NAME = args.name\nNET_TYPE = args.type\nDIM = 3\nDIM2 = 14\nif NET_TYPE == 'resnet18':\n DIM = 3\n DIM2 = 7\n FEATURE_DIM = 512\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0*np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * sp.stats.t._ppf((1+confidence)/2., n-1)\n return m,h\n\nclass RelationNetwork(nn.Module):\n \"\"\"docstring for RelationNetwork\"\"\"\n def __init__(self,in_channel, hidden_size=8):\n super(RelationNetwork, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channel*2,512,kernel_size=1,padding=0),\n nn.BatchNorm2d(512, momentum=1, affine=True),\n nn.ReLU())\n self.layer2 = nn.Sequential(\n nn.Conv2d(512,256,kernel_size=1,padding=0),\n nn.BatchNorm2d(256, momentum=1, affine=True),\n nn.ReLU())\n self.layer3 = nn.Sequential(\n nn.Conv2d(256,64,kernel_size=1,padding=0),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.fc1 = nn.Linear(64*DIM*DIM,hidden_size)\n self.fc2 = nn.Linear(hidden_size,1)\n\n def forward(self,x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = out.view(out.size(0),-1)\n out = F.relu(self.fc1(out))\n out = F.sigmoid(self.fc2(out))\n return out\n\nclass StuRelationNetwork(nn.Module):\n \"\"\"docstring for StuRelationNetwork\"\"\"\n def __init__(self,hidden_size=8):\n super(StuRelationNetwork, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(640*2,512,kernel_size=1,padding=0),\n nn.BatchNorm2d(512, momentum=1, affine=True),\n nn.ReLU())\n self.layer2 = nn.Sequential(\n nn.Conv2d(512,256,kernel_size=1,padding=0),\n nn.BatchNorm2d(256, momentum=1, affine=True),\n nn.ReLU())\n self.layer3 = nn.Sequential(\n nn.Conv2d(256,64,kernel_size=1,padding=0),\n nn.BatchNorm2d(64, momentum=1, affine=True),\n nn.ReLU())\n # nn.MaxPool2d(2))\n self.fc1 = nn.Linear(64*5*5,hidden_size)\n self.fc2 = 
nn.Linear(hidden_size,1)\n\n def forward(self,x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = out.view(out.size(0),-1)\n out = F.relu(self.fc1(out))\n out = F.sigmoid(self.fc2(out))\n return out\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif classname.find('BatchNorm') != -1:\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif classname.find('Linear') != -1:\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data = torch.ones(m.bias.data.size())\n\ndef loss_fn_kd(outputs, labels, teacher_outputs):\n T = 20\n alpha = 0.9\n KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),\n F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \\\n F.cross_entropy(outputs, labels.cuda(GPU)) * (1. - alpha)\n\n return KD_loss\n\ndef getBiggerImg(imgs):\n res = []\n for img in imgs:\n img = np.swapaxes(img, 0, 1) #3,84,84 -> 84,84,3 -> 3,84,84\n img = np.swapaxes(img, 1, 2) \n img = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)\n img = np.swapaxes(img, 1, 2) #3,84,84 -> 84,84,3 -> 3,84,84\n img = np.swapaxes(img, 0, 1) \n res.append(img)\n return torch.from_numpy(np.array(res))\n\ndef get_teacher_out(teacher_enc, teacher_rel, support, query):\n sample_features = teacher_enc(Variable(support).cuda(GPU)) # 5x640*5*5\n batch_features = teacher_enc(Variable(query).cuda(GPU)) # 20x640*5*5\n sample_features_ext = sample_features.unsqueeze(0).repeat(BATCH_NUM_PER_CLASS*CLASS_NUM,1,1,1,1) #support\n batch_features_ext = batch_features.unsqueeze(0).repeat(SAMPLE_NUM_PER_CLASS*CLASS_NUM,1,1,1,1) #query\n batch_features_ext = torch.transpose(batch_features_ext,0,1)\n\n relation_pairs = torch.cat((sample_features_ext,batch_features_ext),2).view(-1,512*2,7,7) #dubious\n return teacher_rel(relation_pairs).view(-1,CLASS_NUM*SAMPLE_NUM_PER_CLASS)\n\ndef main():\n run = neptune.init(\n project=\"ywb/kd-brenaic\",\n api_token=\"eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI4Yjk0ZDlkMi04ZTBhLTQ4YzktYWE2Ni02Njg0OGQwOWFiNjkifQ==\",\n ) \n params = {\"name\": EXPERIMENT_NAME}\n print(EXPERIMENT_NAME)\n run[\"parameters\"] = params\n # Step 1: init data folders\n print(\"init data folders\")\n # init character folders for dataset construction\n metatrain_folders,metatest_folders = tg.mini_imagenet_folders()\n\n # Step 2: init neural networks\n print(\"init neural networks\")\n\n stu_enc = ResNet()\n stu_rel = StuRelationNetwork(640)\n\n stu_enc.apply(weights_init)\n stu_rel.apply(weights_init)\n\n stu_enc.cuda(GPU)\n stu_rel.cuda(GPU)\n\n stu_enc_optim = torch.optim.Adam(stu_enc.parameters(),lr=LEARNING_RATE)\n stu_rel_optim = torch.optim.Adam(stu_rel.parameters(),lr=LEARNING_RATE)\n\n model_dict = stu_enc.state_dict()\n pretrained_dict = torch.load(\"../models/Res12-pre.pth\")['params']\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n stu_enc.load_state_dict(model_dict)\n\n resnet18 = models.resnet18(pretrained=True)\n modules=list(resnet18.children())[:-2]\n teacher_enc=nn.Sequential(*modules)\n teacher_enc.load_state_dict(torch.load(\"./models/miniimagenet_feature_encoder_\" +\\\n '51-res18-baseline' +\".pkl\"))\n for param in teacher_enc.parameters():\n param.requires_grad = False\n teacher_rel = 
RelationNetwork(512)\n teacher_rel.load_state_dict(torch.load(\"./models/miniimagenet_relation_network_\" +\\\n '51-res18-baseline' +\".pkl\"))\n for param in teacher_rel.parameters():\n param.requires_grad = False\n teacher_enc.cuda(GPU)\n teacher_rel.cuda(GPU)\n\n # Step 3: build graph\n print(\"Training...\")\n\n last_accuracy = 0.0\n losses = []\n\n for episode in range(EPISODE):\n # init dataset\n # sample_dataloader is to obtain previous samples for compare\n # batch_dataloader is to batch samples for training\n task = tg.MiniImagenetTask(metatrain_folders,CLASS_NUM,SAMPLE_NUM_PER_CLASS,BATCH_NUM_PER_CLASS)\n sample_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=SAMPLE_NUM_PER_CLASS,split=\"train\",\\\n shuffle=False)\n batch_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=BATCH_NUM_PER_CLASS,split=\"test\",\\\n shuffle=True) #true\n\n # sample datas\n samples,sample_labels = sample_dataloader.__iter__().next()\n batches,batch_labels = batch_dataloader.__iter__().next()\n \n # calculate features\n sample_features = stu_enc(Variable(samples).cuda(GPU)) # 5x640*5*5\n batch_features = stu_enc(Variable(batches).cuda(GPU)) # 20x640*5*5\n\n # calculate relations\n # each batch sample link to every samples to calculate relations\n # to form a 100x128 matrix for relation network\n sample_features_ext = sample_features.unsqueeze(0).repeat(BATCH_NUM_PER_CLASS*CLASS_NUM,1,1,1,1) #support\n batch_features_ext = batch_features.unsqueeze(0).repeat(SAMPLE_NUM_PER_CLASS*CLASS_NUM,1,1,1,1) #query\n batch_features_ext = torch.transpose(batch_features_ext,0,1)\n\n relation_pairs = torch.cat((sample_features_ext,batch_features_ext),2).view(-1,FEATURE_DIM*2,5,5)\n relations = stu_rel(relation_pairs).view(-1,CLASS_NUM*SAMPLE_NUM_PER_CLASS)\n\n one_hot_labels = Variable(torch.zeros(BATCH_NUM_PER_CLASS*CLASS_NUM, CLASS_NUM).scatter_(1,\\\n batch_labels.view(-1,1), 1)).cuda(GPU)\n\n teacher_relations = get_teacher_out(teacher_enc, teacher_rel, \\\n getBiggerImg(samples.numpy()), getBiggerImg(batches.numpy()))\n loss = loss_fn_kd(relations,one_hot_labels,teacher_relations).cuda(GPU)\n\n # training\n stu_enc.zero_grad()\n stu_rel.zero_grad()\n\n loss.backward()\n losses.append(loss.item())\n\n torch.nn.utils.clip_grad_norm(stu_enc.parameters(),0.5)\n torch.nn.utils.clip_grad_norm(stu_rel.parameters(),0.5)\n\n stu_enc_optim.step()\n stu_rel_optim.step()\n\n if (episode+1)%100 == 0:\n inf = str(sum(losses)/len(losses))\n print(inf)\n run[\"loss\"].log(sum(losses)/len(losses))\n losses.clear()\n\n if (episode)%5000 == 0: #5000\n # test\n print(\"Testing...\")\n accuracies = []\n for i in range(TEST_EPISODE):\n total_rewards = 0\n counter = 0\n task = tg.MiniImagenetTask(metatest_folders,CLASS_NUM,1,15)\n sample_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=1,\\\n split=\"train\",shuffle=False)\n\n num_per_class = 3\n test_dataloader = tg.get_mini_imagenet_data_loader(task,num_per_class=num_per_class,\\\n split=\"test\",shuffle=True)\n sample_images,sample_labels = sample_dataloader.__iter__().next()\n for test_images,test_labels in test_dataloader:\n batch_size = test_labels.shape[0]\n # calculate features\n sample_features = stu_enc(Variable(sample_images).cuda(GPU)) # 5x64\n test_features = stu_enc(Variable(test_images).cuda(GPU)) # 20x64\n\n # calculate relations\n # each batch sample link to every samples to calculate relations\n # to form a 100x128 matrix for relation network\n sample_features_ext = 
sample_features.unsqueeze(0).repeat(batch_size,1,1,1,1)\n test_features_ext = test_features.unsqueeze(0).repeat(1*CLASS_NUM,1,1,1,1)\n test_features_ext = torch.transpose(test_features_ext,0,1)\n relation_pairs = torch.cat((sample_features_ext,test_features_ext),2).view(-1,FEATURE_DIM*2,5,5)\n relations = stu_rel(relation_pairs).view(-1,CLASS_NUM)\n\n _,predict_labels = torch.max(relations.data,1)\n\n rewards = [1 if predict_labels[j]==test_labels[j] else 0 for j in range(batch_size)]\n\n total_rewards += np.sum(rewards)\n counter += batch_size\n accuracy = total_rewards/1.0/counter\n accuracies.append(accuracy)\n\n test_accuracy,h = mean_confidence_interval(accuracies)\n\n print(\"test accuracy:\",test_accuracy,\"h:\",h)\n\n if test_accuracy > last_accuracy:\n\n # save networks\n torch.save(stu_enc.state_dict(),str(\"./models/miniimagenet_stu_enc_\" + EXPERIMENT_NAME +\".pkl\"))\n torch.save(stu_rel.state_dict(),str(\"./models/miniimagenet_stu_rel_\" + EXPERIMENT_NAME +\".pkl\"))\n\n print(\"save networks for episode:\",episode)\n\n inf = str(episode) + ' ' + str(test_accuracy)\n print(inf)\n run[\"val_acc\"].log(test_accuracy)\n last_accuracy = test_accuracy\n run.stop()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.load", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.sum", "torch.autograd.Variable", "torch.nn.Linear", "torch.square", "torch.flatten", "torch.device", "torch.ones_like", "torch.optim.lr_scheduler.StepLR" ], [ "torch.randperm", "torch.utils.data.DataLoader", "matplotlib.pyplot.axis", "numpy.transpose", "matplotlib.pyplot.show" ], [ "torch.transpose", "torch.max", "torch.cat", "torch.zeros", "torch.sum", "numpy.mean", "torch.autograd.Variable", "torch.nn.functional.max_pool2d", "torch.optim.lr_scheduler.StepLR", "torch.nn.BatchNorm1d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "numpy.array", "numpy.sum", "scipy.stats.t._ppf", "torch.nn.MaxPool2d", "torch.nn.ReLU", "torch.nn.MSELoss" ], [ "torch.nn.functional.softmax", "torch.transpose", "torch.max", "torch.load", "torch.cat", "torch.zeros", "numpy.mean", "torch.autograd.Variable", "numpy.swapaxes", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d", "numpy.array", "numpy.sum", "torch.nn.KLDivLoss", "scipy.stats.t._ppf", "torch.nn.functional.log_softmax", "torch.manual_seed", "torch.nn.MaxPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
k4ntz/mushroom-rl
[ "17c8e9b2a9648a59169f3599c4ef8d259afc39f4", "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5" ]
[ "mushroom_rl/algorithms/value/td/q_lambda.py", "mushroom_rl/features/tensors/random_fourier_tensor.py" ]
[ "import numpy as np\n\nfrom mushroom_rl.algorithms.value.td import TD\nfrom mushroom_rl.utils.eligibility_trace import EligibilityTrace\nfrom mushroom_rl.utils.table import Table\n\n\nclass QLambda(TD):\n \"\"\"\n Q(Lambda) algorithm.\n \"Learning from Delayed Rewards\". Watkins C.J.C.H.. 1989.\n\n \"\"\"\n def __init__(self, mdp_info, policy, learning_rate, lambda_coeff,\n trace='replacing'):\n \"\"\"\n Constructor.\n\n Args:\n lambda_coeff (float): eligibility trace coefficient;\n trace (str, 'replacing'): type of eligibility trace to use.\n\n \"\"\"\n Q = Table(mdp_info.size)\n self._lambda = lambda_coeff\n\n self.e = EligibilityTrace(Q.shape, trace)\n self._add_save_attr(\n _lambda='primitive',\n e='pickle'\n )\n\n super().__init__(mdp_info, policy, Q, learning_rate)\n\n def _update(self, state, action, reward, next_state, absorbing):\n q_current = self.Q[state, action]\n\n q_next = np.max(self.Q[next_state, :]) if not absorbing else 0.\n\n delta = reward + self.mdp_info.gamma*q_next - q_current\n self.e.update(state, action)\n\n self.Q.table += self.alpha(state, action) * delta * self.e.table\n self.e.table *= self.mdp_info.gamma * self._lambda\n\n def episode_start(self):\n self.e.reset()\n\n super().episode_start()\n", "import torch\nimport torch.nn as nn\n\nfrom mushroom_rl.features.tensors import ConstantTensor\nfrom mushroom_rl.utils.torch import to_float_tensor\n\nimport numpy as np\n\n\nclass RandomFourierBasis(nn.Module):\n r\"\"\"\n Class implementing Random Fourier basis functions. The value of the feature\n is computed using the formula:\n\n .. math::\n \\sin{\\dfrac{PX}{\\nu}+\\varphi}\n\n where X is the input, m is the vector of the minumum input values (for each\n dimensions) , \\Delta is the vector of maximum\n\n This features have been presented in:\n\n \"Towards generalization and simplicity in continuous control\". Rajeswaran A. et Al..\n 2017.\n\n \"\"\"\n def __init__(self, P, phi, nu, use_cuda):\n r\"\"\"\n Constructor.\n\n Args:\n P (np.ndarray): weights matrix, every weight should be drawn from a normal distribution;\n phi (np.ndarray): bias vector, every weight should be drawn from a uniform distribution in the interval\n [-\\pi, \\pi);\n values of the input variables, i.e. delta = high - low;\n nu (float): bandwidth parameter, it should be chosen approximately as the average pairwise distances\n between different observation vectors;\n use_cuda (bool): whether to use cuda for the computation or not.\n\n \"\"\"\n self._P = to_float_tensor(P, use_cuda)\n self._phi = to_float_tensor(phi, use_cuda)\n self._nu = nu\n\n self._use_cuda = use_cuda\n\n def forward(self, x):\n if self._use_cuda:\n x = x.cuda()\n return torch.sin(x @ self._P / self._nu + self._phi)\n\n def __str__(self):\n return str(self._P) + ' ' + str(self._phi)\n\n @staticmethod\n def generate(nu, n_output, input_size, use_cuda=False, use_bias=True):\n \"\"\"\n Factory method to build random fourier basis. 
Includes a constant tensor into the output.\n\n Args:\n nu (float): bandwidth parameter, it should be chosen approximately as the average pairwise distances\n between different observation vectors.\n n_output (int): number of basis to use;\n input_size (int): size of the input;\n use_cuda (bool): whether to use cuda for the computation or not.\n\n Returns:\n The list of the generated fourier basis functions.\n\n \"\"\"\n if use_bias:\n n_output -= 1\n\n P = np.random.randn(input_size, n_output)\n phi = np.random.uniform(-np.pi, np.pi, n_output)\n\n tensor_list = [RandomFourierBasis(P, phi, nu, use_cuda)]\n\n if use_bias:\n tensor_list.append(ConstantTensor())\n\n return tensor_list\n" ]
[ [ "numpy.max" ], [ "numpy.random.uniform", "numpy.random.randn", "torch.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
manda-creator/probability
[ "5238303f39973b7a365914732fe72f179a86cc97" ]
[ "tensorflow_probability/python/experimental/mcmc/sample_sequential_monte_carlo.py" ]
[ "# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Experimental MCMC driver, `sample_sequential_monte_carlo`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.math.generic import reduce_logmeanexp\nfrom tensorflow_probability.python.mcmc import hmc\nfrom tensorflow_probability.python.mcmc import random_walk_metropolis\nfrom tensorflow_probability.python.mcmc import transformed_kernel\nfrom tensorflow_probability.python.mcmc.internal import util as mcmc_util\nfrom tensorflow_probability.python.mcmc.sample_annealed_importance import _find_inner_mh_results\nfrom tensorflow_probability.python.util.seed_stream import SeedStream\n\n\n__all__ = [\n 'sample_sequential_monte_carlo',\n]\n\n\nPRINT_DEBUG = False\nTUNE_STEPS = True\n\nParticleInfo = collections.namedtuple(\n 'ParticleInfo',\n [\n 'accept_prob', # acceptance probability per particle\n 'scalings',\n 'tempered_log_prob',\n 'likelihood_log_prob',\n ])\n\nSMCResults = collections.namedtuple(\n 'SMCResults',\n [\n 'num_steps',\n 'inverse_temperature',\n 'log_marginal_likelihood',\n 'particle_info', # A namedtuple of ParticleInfo\n ])\n\n\ndef _make_tempered_target_log_prob_fn(\n prior_log_prob_fn, likelihood_log_prob_fn, temperatures):\n \"\"\"Helper which creates inner kernel target_log_prob_fn.\"\"\"\n def _tempered_target_log_prob(*args):\n priorlogprob = tf.identity(prior_log_prob_fn(*args),\n name='prior_log_prob')\n loglike = tf.identity(likelihood_log_prob_fn(*args),\n name='likelihood_log_prob')\n return tf.identity(priorlogprob + loglike * temperatures,\n name='tempered_logp')\n return _tempered_target_log_prob\n\n\ndef make_rwmh_kernel_fn(target_log_prob_fn, init_state, scalings, seed=None):\n \"\"\"Generate a Random Walk MH kernel.\"\"\"\n with tf.name_scope('make_rwmh_kernel_fn'):\n seed = SeedStream(seed, salt='make_rwmh_kernel_fn')\n state_std = [\n tf.math.reduce_std(x, axis=0, keepdims=True) for x in init_state\n ]\n step_size = [\n s * ps.cast( # pylint: disable=g-complex-comprehension\n mcmc_util.left_justified_expand_dims_like(scalings, s),\n s.dtype) for s in state_std\n ]\n return random_walk_metropolis.RandomWalkMetropolis(\n target_log_prob_fn,\n new_state_fn=random_walk_metropolis.random_walk_normal_fn(\n scale=step_size),\n seed=seed)\n\n\ndef compute_hmc_step_size(scalings, state_std, num_leapfrog_steps):\n return [\n s / ps.cast(num_leapfrog_steps, s.dtype) * ps.cast( # pylint: disable=g-complex-comprehension\n mcmc_util.left_justified_expand_dims_like(scalings, s),\n s.dtype) for s in state_std\n ]\n\n\ndef gen_make_transform_hmc_kernel_fn(unconstraining_bijectors,\n num_leapfrog_steps=10):\n \"\"\"Generate a transformed hmc 
kernel.\"\"\"\n\n def make_transform_hmc_kernel_fn(\n target_log_prob_fn,\n init_state,\n scalings,\n seed=None):\n \"\"\"Generate a transform hmc kernel.\"\"\"\n\n with tf.name_scope('make_transformed_hmc_kernel_fn'):\n seed = SeedStream(seed, salt='make_transformed_hmc_kernel_fn')\n state_std = [\n bij.inverse(\n tf.math.reduce_std(bij.forward(x), axis=0, keepdims=True))\n for x, bij in zip(init_state, unconstraining_bijectors)\n ]\n step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)\n return transformed_kernel.TransformedTransitionKernel(\n hmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size=step_size,\n seed=seed),\n unconstraining_bijectors)\n\n return make_transform_hmc_kernel_fn\n\n\ndef gen_make_hmc_kernel_fn(num_leapfrog_steps=10):\n \"\"\"Generate a transformed hmc kernel.\"\"\"\n def make_hmc_kernel_fn(\n target_log_prob_fn,\n init_state,\n scalings,\n seed=None):\n \"\"\"Generate a hmc without transformation kernel.\"\"\"\n\n with tf.name_scope('make_hmc_kernel_fn'):\n seed = SeedStream(seed, salt='make_hmc_kernel_fn')\n state_std = [\n tf.math.reduce_std(x, axis=0, keepdims=True)\n for x in init_state\n ]\n step_size = compute_hmc_step_size(scalings, state_std, num_leapfrog_steps)\n return hmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob_fn,\n num_leapfrog_steps=num_leapfrog_steps,\n step_size=step_size,\n seed=seed)\n\n return make_hmc_kernel_fn\n\n# Generate a default `make_hmc_kernel_fn`\ndefault_make_hmc_kernel_fn = gen_make_hmc_kernel_fn()\n\n\n# TODO(b/152412213) Experitment to improve recommendation on static parmaeters\ndef sample_sequential_monte_carlo(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n current_state,\n max_num_steps=25,\n max_stage=100,\n make_kernel_fn=make_rwmh_kernel_fn,\n optimal_accept=0.234,\n target_accept_prob=0.99,\n ess_threshold_ratio=0.5,\n parallel_iterations=10,\n seed=None,\n name=None):\n \"\"\"Runs Sequential Monte Carlo to sample from the posterior distribution.\n\n This function uses an MCMC transition operator (e.g., Hamiltonian Monte Carlo)\n to sample from a series of distributions that slowly interpolates between\n an initial 'prior' distribution:\n\n `exp(prior_log_prob_fn(x))`\n\n and the target 'posterior' distribution:\n\n `exp(prior_log_prob_fn(x) + target_log_prob_fn(x))`,\n\n by mutating a collection of MC samples (i.e., particles). The approach is also\n known as Particle Filter in some literature.\n\n Args:\n prior_log_prob_fn: Python callable that returns the log density of the\n prior distribution.\n likelihood_log_prob_fn: Python callable which takes an argument like\n `current_state` (or `*current_state` if it's a list) and returns its\n (possibly unnormalized) log-density under the likelihood distribution.\n current_state: `Tensor` or Python `list` of `Tensor`s representing the\n current state(s) of the Markov chain(s). The first `r` dimensions index\n independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.\n max_num_steps: The maximum number of kernel transition steps in one mutation\n of the MC samples. Note that the actual number of steps in one mutation is\n tuned during sampling and likely lower than the max_num_steps.\n max_stage: Integer number of the stage for increasing the temperature\n from 0 to 1.\n make_kernel_fn: Python `callable` which returns a `TransitionKernel`-like\n object. Must take one argument representing the `TransitionKernel`'s\n `target_log_prob_fn`. 
The `target_log_prob_fn` argument represents the\n `TransitionKernel`'s target log distribution. Note:\n `sample_annealed_importance_chain` creates a new `target_log_prob_fn`\n which is an interpolation between the supplied `target_log_prob_fn` and\n `proposal_log_prob_fn`; it is this interpolated function which is used as\n an argument to `make_kernel_fn`.\n optimal_accept: Optimal acceptance ratio for a Transitional Kernel. Default\n to 0.234 for Random Walk Metropolis kernel.\n target_accept_prob: Target acceptance probability at the end of one mutation\n step.\n ess_threshold_ratio: Target ratio for effective sample size.\n parallel_iterations: The number of iterations allowed to run in parallel.\n It must be a positive integer. See `tf.while_loop` for more details.\n seed: Python integer or TFP seedstream to seed the random number generator.\n name: Python `str` name prefixed to Ops created by this function.\n Default value: `None` (i.e., 'sample_annealed_importance_chain').\n\n Returns:\n n_stage: Number of the mutation stage SMC ran.\n final_state: `Tensor` or Python `list` of `Tensor`s representing the\n final state(s) of the Markov chain(s). The output are the posterior\n samples.\n final_kernel_results: `collections.namedtuple` of internal calculations used\n to advance the chain.\n\n \"\"\"\n\n with tf.name_scope(name or 'sample_sequential_monte_carlo'):\n seed_stream = SeedStream(seed, salt='smc_seed')\n\n unwrap_state_list = not tf.nest.is_nested(current_state)\n if unwrap_state_list:\n current_state = [current_state]\n current_state = [\n tf.convert_to_tensor(s, dtype_hint=tf.float32) for s in current_state\n ]\n\n num_replica = ps.size0(current_state[0])\n effective_sample_size_threshold = tf.cast(\n num_replica * ess_threshold_ratio, tf.int32)\n\n def preprocess_state(init_state):\n \"\"\"Initial preprocessing at Stage 0.\"\"\"\n dimension = ps.reduce_sum([\n ps.reduce_prod(ps.shape(x)[1:]) for x in init_state])\n likelihood_log_prob = likelihood_log_prob_fn(*init_state)\n\n # Default to the optimal for normal distributed targets.\n # TODO(b/152412213): Revisit this tuning.\n scale_start = (\n tf.constant(2.38 ** 2, dtype=likelihood_log_prob.dtype) /\n tf.constant(dimension, dtype=likelihood_log_prob.dtype))\n # TODO(b/152412213): Enable batch of batches style by using non-scalar\n # inverse_temperature\n inverse_temperature = tf.zeros([], dtype=likelihood_log_prob.dtype)\n scalings = ps.ones_like(likelihood_log_prob) * ps.minimum(scale_start, 1.)\n kernel = make_kernel_fn(\n _make_tempered_target_log_prob_fn(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n inverse_temperature),\n init_state,\n scalings,\n seed=seed_stream())\n pkr = kernel.bootstrap_results(current_state)\n mh_results = _find_inner_mh_results(pkr)\n\n particle_info = ParticleInfo(\n accept_prob=ps.ones_like(likelihood_log_prob),\n scalings=scalings,\n tempered_log_prob=mh_results.accepted_results.target_log_prob,\n likelihood_log_prob=likelihood_log_prob,\n )\n\n return SMCResults(\n num_steps=tf.convert_to_tensor(\n max_num_steps, dtype=tf.int32, name='num_steps'),\n inverse_temperature=inverse_temperature,\n log_marginal_likelihood=tf.constant(\n 0., dtype=likelihood_log_prob.dtype),\n particle_info=particle_info\n )\n\n def update_weights_temperature(inverse_temperature, likelihood_log_prob):\n \"\"\"Calculate the next inverse temperature and update weights.\"\"\"\n\n likelihood_diff = likelihood_log_prob - tf.reduce_max(likelihood_log_prob)\n\n def _body_fn(new_beta, upper_beta, lower_beta, eff_size, 
log_weights):\n \"\"\"One iteration of the temperature and weight update.\"\"\"\n new_beta = (lower_beta + upper_beta) / 2.0\n log_weights = (new_beta - inverse_temperature) * likelihood_diff\n log_weights_norm = (log_weights -\n tf.math.reduce_logsumexp(log_weights))\n eff_size = tf.cast(\n tf.exp(-tf.math.reduce_logsumexp(2 * log_weights_norm)), tf.int32)\n upper_beta = tf.where(\n eff_size < effective_sample_size_threshold,\n new_beta, upper_beta)\n lower_beta = tf.where(\n eff_size < effective_sample_size_threshold,\n lower_beta, new_beta)\n return new_beta, upper_beta, lower_beta, eff_size, log_weights\n\n (new_beta, upper_beta, lower_beta, eff_size, log_weights) = tf.while_loop( # pylint: disable=unused-variable\n cond=lambda new_beta, upper_beta, lower_beta, eff_size, *_: # pylint: disable=g-long-lambda\n (upper_beta - lower_beta > 1e-6) &\n (eff_size != effective_sample_size_threshold),\n body=_body_fn,\n loop_vars=(\n tf.zeros_like(inverse_temperature),\n tf.cast(2.0, inverse_temperature.dtype),\n inverse_temperature,\n tf.cast(0, tf.int32),\n tf.zeros_like(likelihood_diff)),\n parallel_iterations=parallel_iterations\n )\n\n log_weights = tf.where(new_beta < 1.,\n log_weights,\n (1. - inverse_temperature) * likelihood_diff)\n marginal_loglike_ = reduce_logmeanexp(\n (new_beta - inverse_temperature) * likelihood_log_prob)\n\n return marginal_loglike_, tf.clip_by_value(new_beta, 0., 1.), log_weights\n\n def resample(log_weights, current_state, particle_info, seed=None):\n \"\"\"Resample particles based on importance weights.\"\"\"\n with tf.name_scope('resample_particles'):\n seed = SeedStream(seed, salt='resample_particles')\n resampling_indexes = tf.random.categorical(\n [log_weights], ps.reduce_prod(*ps.shape(log_weights)), seed=seed())\n next_state = tf.nest.map_structure(\n lambda x: tf.reshape(tf.gather(x, resampling_indexes), ps.shape(x)),\n current_state)\n next_particle_info = tf.nest.map_structure(\n lambda x: tf.reshape(tf.gather(x, resampling_indexes), ps.shape(x)),\n particle_info)\n\n return next_state, next_particle_info\n\n def tuning(num_steps, scalings, accept_prob):\n \"\"\"Tune scaling and/or num_steps based on the acceptance rate.\"\"\"\n num_proposed = num_replica * num_steps\n accept_prob = tf.cast(accept_prob, dtype=scalings.dtype)\n avg_scaling = tf.exp(tf.math.log(tf.reduce_mean(scalings))\n + (tf.reduce_mean(accept_prob) - optimal_accept))\n scalings = 0.5 * (\n avg_scaling +\n tf.exp(tf.math.log(scalings) +\n (accept_prob - optimal_accept))\n )\n\n if TUNE_STEPS:\n avg_accept = tf.math.maximum(\n 1.0 / tf.cast(num_proposed, dtype=accept_prob.dtype),\n tf.reduce_mean(accept_prob))\n num_steps = tf.clip_by_value(\n tf.cast(\n tf.math.log1p(\n -tf.cast(target_accept_prob, dtype=avg_accept.dtype)) /\n tf.math.log1p(-avg_accept),\n dtype=num_steps.dtype), 2, max_num_steps)\n\n return num_steps, scalings\n\n def mutate(\n current_state,\n scalings,\n num_steps,\n inverse_temperature):\n \"\"\"Mutate the state using a Transition kernel.\"\"\"\n with tf.name_scope('mutate_states'):\n kernel = make_kernel_fn(\n _make_tempered_target_log_prob_fn(\n prior_log_prob_fn,\n likelihood_log_prob_fn,\n inverse_temperature),\n current_state,\n scalings,\n seed=seed_stream())\n pkr = kernel.bootstrap_results(current_state)\n mh_results = _find_inner_mh_results(pkr)\n\n def mutate_onestep(i, state, pkr, accept_count):\n next_state, next_kernel_results = kernel.one_step(state, pkr)\n mh_results = _find_inner_mh_results(pkr)\n # TODO(b/152412213) Cumulate log_acceptance_ratio 
instead.\n accept_count += tf.cast(mh_results.is_accepted, accept_count.dtype)\n return i+1, next_state, next_kernel_results, accept_count\n\n (\n _,\n next_state,\n next_kernel_results,\n accept_count\n ) = tf.while_loop(\n cond=lambda i, *args: i < num_steps,\n body=mutate_onestep,\n loop_vars=(\n tf.zeros([], dtype=tf.int32),\n current_state,\n pkr,\n tf.zeros_like(mh_results.is_accepted, tf.float32)),\n parallel_iterations=parallel_iterations\n )\n next_mh_results = _find_inner_mh_results(next_kernel_results)\n\n return (next_state,\n accept_count / tf.cast(num_steps + 1, accept_count.dtype),\n next_mh_results.accepted_results.target_log_prob)\n\n pkr = preprocess_state(current_state)\n # Run once\n new_marginal, new_inv_temperature, log_weights = update_weights_temperature(\n pkr.inverse_temperature,\n pkr.particle_info.likelihood_log_prob)\n if PRINT_DEBUG:\n tf.print(\n 'Stage:', 0,\n 'Beta:', new_inv_temperature,\n 'n_steps:', pkr.num_steps,\n 'accept:', tf.reduce_mean(\n pkr.particle_info.accept_prob),\n 'scaling:', tf.reduce_mean(pkr.particle_info.scalings)\n )\n resampled_state, resampled_particle_info = resample(\n log_weights, current_state, pkr.particle_info, seed_stream())\n next_state, acceptance_rate, tempered_log_prob = mutate(\n resampled_state,\n resampled_particle_info.scalings,\n pkr.num_steps,\n new_inv_temperature)\n next_pkr = SMCResults(\n num_steps=pkr.num_steps,\n inverse_temperature=new_inv_temperature,\n log_marginal_likelihood=pkr.log_marginal_likelihood + new_marginal,\n particle_info=ParticleInfo(\n accept_prob=acceptance_rate,\n scalings=resampled_particle_info.scalings,\n tempered_log_prob=tempered_log_prob,\n likelihood_log_prob=likelihood_log_prob_fn(*next_state),\n ))\n\n # Stage > 0\n def smc_body_fn(stage, state, smc_kernel_result):\n \"\"\"Run one stage of SMC with constant temperature.\"\"\"\n (\n new_marginal,\n new_inv_temperature,\n log_weights\n ) = update_weights_temperature(\n smc_kernel_result.inverse_temperature,\n smc_kernel_result.particle_info.likelihood_log_prob)\n # TODO(b/152412213) Use a tf.scan to better collect debug info.\n if PRINT_DEBUG:\n tf.print(\n 'Stage:', stage,\n 'Beta:', new_inv_temperature,\n 'n_steps:', smc_kernel_result.num_steps,\n 'accept:', tf.reduce_mean(\n smc_kernel_result.particle_info.accept_prob),\n 'scaling:', tf.reduce_mean(smc_kernel_result.particle_info.scalings)\n )\n resampled_state, resampled_particle_info = resample(\n log_weights, state, smc_kernel_result.particle_info, seed_stream())\n num_steps, scalings = tuning(\n smc_kernel_result.num_steps,\n resampled_particle_info.scalings,\n resampled_particle_info.accept_prob)\n next_state, acceptance_rate, tempered_log_prob = mutate(\n resampled_state, scalings, num_steps, new_inv_temperature)\n next_pkr = SMCResults(\n num_steps=num_steps,\n inverse_temperature=new_inv_temperature,\n log_marginal_likelihood=(new_marginal +\n smc_kernel_result.log_marginal_likelihood),\n particle_info=ParticleInfo(\n accept_prob=acceptance_rate,\n scalings=scalings,\n tempered_log_prob=tempered_log_prob,\n likelihood_log_prob=likelihood_log_prob_fn(*next_state),\n ))\n return stage + 1, next_state, next_pkr\n\n (\n n_stage,\n final_state,\n final_kernel_results\n ) = tf.while_loop(\n cond=lambda i, state, pkr: (i < max_stage) & ( # pylint: disable=g-long-lambda\n pkr.inverse_temperature < 1.\n ),\n body=smc_body_fn,\n loop_vars=(\n tf.ones([], dtype=tf.int32),\n next_state,\n next_pkr),\n parallel_iterations=parallel_iterations\n )\n if unwrap_state_list:\n final_state = 
final_state[0]\n return n_stage, final_state, final_kernel_results\n" ]
[ [ "tensorflow.compat.v2.zeros_like", "tensorflow.compat.v2.math.reduce_logsumexp", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.nest.is_nested", "tensorflow.compat.v2.clip_by_value", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.where", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.math.reduce_std", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.math.log1p", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.reduce_max", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.gather", "tensorflow.compat.v2.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
raj713335/AI-102
[ "15f4b61dbcbf84abf25ce2f967afc0d52795e9f8" ]
[ "15-computer-vision/Python/image-analysis/image-analysis.py" ]
[ "from dotenv import load_dotenv\nimport os\nfrom array import array\nfrom PIL import Image, ImageDraw\nimport sys\nimport time\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\n# import namespaces\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\ndef main():\n global cv_client\n\n try:\n # Get Configuration Settings\n load_dotenv()\n cog_endpoint = os.getenv('COG_SERVICE_ENDPOINT')\n cog_key = os.getenv('COG_SERVICE_KEY')\n\n # Get image\n image_file = 'images/street.jpg'\n if len(sys.argv) > 1:\n image_file = sys.argv[1]\n\n # Authenticate Computer Vision client\n credential = CognitiveServicesCredentials(cog_key) \n cv_client = ComputerVisionClient(cog_endpoint, credential)\n\n\n # Analyze image\n AnalyzeImage(image_file)\n\n # Generate thumbnail\n GetThumbnail(image_file)\n\n except Exception as ex:\n print(ex)\n\ndef AnalyzeImage(image_file):\n print('Analyzing', image_file)\n\n # Specify features to be retrieved\n features = [VisualFeatureTypes.description,\n VisualFeatureTypes.tags,\n VisualFeatureTypes.categories,\n VisualFeatureTypes.brands,\n VisualFeatureTypes.objects,\n VisualFeatureTypes.adult]\n \n \n # Get image analysis\n with open(image_file, mode=\"rb\") as image_data:\n analysis = cv_client.analyze_image_in_stream(image_data , features)\n\n # Get image description\n for caption in analysis.description.captions:\n print(\"Description: '{}' (confidence: {:.2f}%)\".format(caption.text, caption.confidence * 100))\n\n # Get image tags\n if (len(analysis.tags) > 0):\n print(\"Tags: \")\n for tag in analysis.tags:\n print(\" -'{}' (confidence: {:.2f}%)\".format(tag.name, tag.confidence * 100))\n\n # Get image categories (including celebrities and landmarks)\n if (len(analysis.categories) > 0):\n print(\"Categories:\")\n landmarks = []\n celebrities = []\n for category in analysis.categories:\n # Print the category\n print(\" -'{}' (confidence: {:.2f}%)\".format(category.name, category.score * 100))\n if category.detail:\n # Get landmarks in this category\n if category.detail.landmarks:\n for landmark in category.detail.landmarks:\n if landmark not in landmarks:\n landmarks.append(landmark)\n\n # Get celebrities in this category\n if category.detail.celebrities:\n for celebrity in category.detail.celebrities:\n if celebrity not in celebrities:\n celebrities.append(celebrity)\n\n # If there were landmarks, list them\n if len(landmarks) > 0:\n print(\"Landmarks:\")\n for landmark in landmarks:\n print(\" -'{}' (confidence: {:.2f}%)\".format(landmark.name, landmark.confidence * 100))\n\n # If there were celebrities, list them\n if len(celebrities) > 0:\n print(\"Celebrities:\")\n for celebrity in celebrities:\n print(\" -'{}' (confidence: {:.2f}%)\".format(celebrity.name, celebrity.confidence * 100))\n\n # Get brands in the image\n if (len(analysis.brands) > 0):\n print(\"Brands: \")\n for brand in analysis.brands:\n print(\" -'{}' (confidence: {:.2f}%)\".format(brand.name, brand.confidence * 100))\n\n # Get objects in the image\n if len(analysis.objects) > 0:\n print(\"Objects in image:\")\n\n # Prepare image for drawing\n fig = plt.figure(figsize=(8, 8))\n plt.axis('off')\n image = Image.open(image_file)\n draw = ImageDraw.Draw(image)\n color = 'cyan'\n for detected_object in analysis.objects:\n # Print object name\n print(\" -{} (confidence: 
{:.2f}%)\".format(detected_object.object_property, detected_object.confidence * 100))\n \n # Draw object bounding box\n r = detected_object.rectangle\n bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))\n draw.rectangle(bounding_box, outline=color, width=3)\n plt.annotate(detected_object.object_property,(r.x, r.y), backgroundcolor=color)\n # Save annotated image\n plt.imshow(image)\n outputfile = 'objects.jpg'\n fig.savefig(outputfile)\n print(' Results saved in', outputfile)\n\n # Get moderation ratings\n ratings = 'Ratings:\\n -Adult: {}\\n -Racy: {}\\n -Gore: {}'.format(analysis.adult.is_adult_content,\n analysis.adult.is_racy_content,\n analysis.adult.is_gory_content)\n print(ratings)\n\n \n\ndef GetThumbnail(image_file):\n print('Generating thumbnail')\n\n # Generate a thumbnail\n with open(image_file, mode=\"rb\") as image_data:\n # Get thumbnail data\n thumbnail_stream = cv_client.generate_thumbnail_in_stream(100, 100, image_data, True)\n\n # Save thumbnail image\n thumbnail_file_name = 'thumbnail.png'\n with open(thumbnail_file_name, \"wb\") as thumbnail_file:\n for chunk in thumbnail_stream:\n thumbnail_file.write(chunk)\n\n print('Thumbnail saved in.', thumbnail_file_name)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.annotate", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
coltonbh/QCElemental
[ "b75fb72d7f45c8b605ae1a54773d4a8be6655752", "b75fb72d7f45c8b605ae1a54773d4a8be6655752" ]
[ "qcelemental/datum.py", "qcelemental/testing.py" ]
[ "\"\"\"\nDatum Object Model\n\"\"\"\n\nfrom decimal import Decimal\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nfrom pydantic import BaseModel, validator\n\n\nclass Datum(BaseModel):\n r\"\"\"Facilitates the storage of quantum chemical results by labeling them with basic metadata.\n\n Attributes\n ----------\n label : str\n Official label for `data`, often qcvar. May contain spaces.\n units : str\n ASCII, LaTeX-like representation of units, without square brackets.\n data : float or decimal.Decimal or numpy.ndarray\n Value for `label`.\n comment : str\n Additional notes.\n doi : str\n Literature citation or definition DOI link.\n glossary : str\n Extended description or definition.\n numeric : bool\n Whether `data` is numeric. Pass `True` to disable validating `data` as float/Decimal/np.ndarray.\n\n \"\"\"\n\n numeric: bool\n label: str\n units: str\n data: Any\n comment: str = \"\"\n doi: Optional[str] = None\n glossary: str = \"\"\n\n class Config:\n extra = \"forbid\"\n allow_mutation = False\n json_encoders = {np.ndarray: lambda v: v.flatten().tolist(), complex: lambda v: (v.real, v.imag)}\n\n def __init__(self, label, units, data, *, comment=None, doi=None, glossary=None, numeric=True):\n kwargs = {\"label\": label, \"units\": units, \"data\": data, \"numeric\": numeric}\n if comment is not None:\n kwargs[\"comment\"] = comment\n if doi is not None:\n kwargs[\"doi\"] = doi\n if glossary is not None:\n kwargs[\"glossary\"] = glossary\n\n super().__init__(**kwargs)\n\n @validator(\"data\")\n def must_be_numerical(cls, v, values, **kwargs):\n try:\n 1.0 * v\n except TypeError:\n try:\n Decimal(\"1.0\") * v\n except TypeError:\n if values[\"numeric\"]:\n raise ValueError(f\"Datum data should be float, Decimal, or np.ndarray, not {type(v)}.\")\n else:\n values[\"numeric\"] = True\n else:\n values[\"numeric\"] = True\n\n return v\n\n def __str__(self, label=\"\"):\n width = 40\n text = [\"-\" * width, \"{:^{width}}\".format(\"Datum \" + self.label, width=width)]\n if label:\n text.append(\"{:^{width}}\".format(label, width=width))\n text.append(\"-\" * width)\n text.append(\"Data: {}\".format(self.data))\n text.append(\"Units: [{}]\".format(self.units))\n text.append(\"doi: {}\".format(self.doi))\n text.append(\"Comment: {}\".format(self.comment))\n text.append(\"Glossary: {}\".format(self.glossary))\n text.append(\"-\" * width)\n return \"\\n\".join(text)\n\n def dict(self, *args, **kwargs):\n return super().dict(*args, **{**kwargs, **{\"exclude_unset\": True}})\n\n def to_units(self, units=None):\n from .physical_constants import constants\n\n to_unit = self.units if units is None else units\n factor = constants.conversion_factor(self.units, to_unit)\n\n if isinstance(self.data, Decimal):\n return factor * float(self.data)\n else:\n return factor * self.data\n\n\ndef print_variables(qcvars: Dict[str, \"Datum\"]) -> str:\n r\"\"\"Form a printable representation of qcvariables.\n\n Parameters\n ----------\n qcvars\n Group of Datum objects to print.\n\n Returns\n -------\n str\n Printable string representation of label, data, and unit in Datum-s.\n\n \"\"\"\n text = [\"\\n Variable Map:\", \" ----------------------------------------------------------------------------\"]\n\n if len(qcvars) == 0:\n text.append(\" (none)\")\n return \"\\n\".join(text)\n\n largest_key = max(len(k) for k in qcvars) + 2 # for quotation marks\n largest_characteristic = 8\n for k, v in qcvars.items():\n try:\n exp = int(str(v.data).split(\"E\")[1])\n except IndexError:\n pass\n else:\n 
largest_characteristic = max(exp, largest_characteristic)\n\n for k, qca in sorted(qcvars.items()):\n # if k != qca.lbl:\n # raise ValidationError('Huh? {} != {}'.format(k, qca.label))\n\n if isinstance(qca.data, np.ndarray):\n data = np.array_str(qca.data, max_line_width=120, precision=8, suppress_small=True)\n data = \"\\n\".join(\" \" + ln for ln in data.splitlines())\n text.append(\n \"\"\" {:{keywidth}} => {:{width}} [{}]\"\"\".format(\n '\"' + k + '\"', \"\", qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n text.append(data)\n elif isinstance(qca.data, Decimal):\n text.append(\n \"\"\" {:{keywidth}} => {:{width}} [{}]\"\"\".format(\n '\"' + k + '\"', qca.data, qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n elif not qca.numeric:\n text.append(\n \"\"\" {:{keywidth}} => {:>{width}} [{}]\"\"\".format(\n '\"' + k + '\"', str(qca.data), qca.units, keywidth=largest_key, width=largest_characteristic + 14\n )\n )\n else:\n text.append(\n \"\"\" {:{keywidth}} => {:{width}.{prec}f} [{}]\"\"\".format(\n '\"' + k + '\"', qca.data, qca.units, keywidth=largest_key, width=largest_characteristic + 14, prec=12\n )\n )\n\n text.append(\"\")\n return \"\\n\".join(text)\n", "import copy\nimport logging\nimport pprint\nimport sys\nfrom typing import Callable, Dict, List, Tuple, Union\n\nimport numpy as np\nfrom pydantic import BaseModel\n\npp = pprint.PrettyPrinter(width=120)\n\n\ndef _handle_return(passfail: bool, label: str, message: str, return_message: bool, quiet: bool = False):\n \"\"\"Function to print a '*label*...PASSED' line to log.\"\"\"\n\n if not quiet:\n if passfail:\n logging.info(f\" {label:.<53}PASSED\")\n else:\n logging.error(f\" {label:.<53}FAILED\")\n logging.error(f\" {message:.<53}\")\n\n if return_message:\n return passfail, message\n else:\n return passfail\n\n\ndef tnm() -> str:\n \"\"\"Returns the name of the calling function, usually name of test case.\"\"\"\n\n return sys._getframe().f_back.f_code.co_name\n\n\ndef compare_values(\n expected: Union[float, List, np.ndarray],\n computed: Union[float, List, np.ndarray],\n label: str = None,\n *,\n atol: float = 1.0e-6,\n rtol: float = 1.0e-16,\n equal_nan: bool = False,\n equal_phase: bool = False,\n passnone: bool = False,\n quiet: bool = False,\n return_message: bool = False,\n return_handler: Callable = None,\n) -> Union[bool, Tuple[bool, str]]:\n r\"\"\"Returns True if two floats or float arrays are element-wise equal within a tolerance.\n\n Parameters\n ----------\n expected\n float or float array-like\n Reference value against which `computed` is compared.\n computed\n float or float array-like\n Input value to compare against `expected`.\n atol\n Absolute tolerance (see formula below).\n label\n Label for passed and error messages. Defaults to calling function name.\n rtol\n Relative tolerance (see formula below). By default set to zero so `atol` dominates.\n equal_nan\n Passed to np.isclose. Compare NaN's as equal.\n equal_phase\n Compare computed *or its opposite* as equal.\n passnone\n Return True when both expected and computed are None.\n quiet\n Whether to log the return message.\n return_message\n Whether to return tuple. 
See below.\n\n Returns\n -------\n allclose : bool\n Returns True if `expected` and `computed` are equal within tolerance; False otherwise.\n message : str\n When return_message=True, also return passed or error message.\n\n Other Parameters\n ----------------\n return_handler\n Function to control printing, logging, raising, and returning.\n Specialized interception for interfacing testing systems.\n\n Notes\n -----\n * Akin to np.allclose.\n * For scalar float-comparable types and for arbitrary-dimension, np.ndarray-castable, uniform-type,\n float-comparable types. For mixed types, use :py:func:`compare_recursive`.\n * Sets rtol to zero to match expected Psi4 behaviour, otherwise measured as:\n\n .. code-block:: python\n\n absolute(computed - expected) <= (atol + rtol * absolute(expected))\n\n \"\"\"\n label = label or sys._getframe().f_back.f_code.co_name\n pass_message = f\"\\t{label:.<66}PASSED\"\n if return_handler is None:\n return_handler = _handle_return\n\n if passnone:\n if expected is None and computed is None:\n return return_handler(True, label, pass_message, return_message, quiet)\n\n if np.iscomplexobj(expected):\n dtype = np.complex\n else:\n dtype = float\n\n try:\n xptd, cptd = np.array(expected, dtype=dtype), np.array(computed, dtype=dtype)\n except Exception:\n return return_handler(\n False, label, f\"\"\"\\t{label}: inputs not cast-able to ndarray of {dtype}.\"\"\", return_message, quiet\n )\n\n if xptd.shape != cptd.shape:\n return return_handler(\n False,\n label,\n f\"\"\"\\t{label}: computed shape ({cptd.shape}) does not match ({xptd.shape}).\"\"\",\n return_message,\n quiet,\n ) # lgtm: [py/syntax-error]\n\n digits1 = abs(int(np.log10(atol))) + 2\n digits_str = f\"to atol={atol}\"\n if rtol > 1.0e-12:\n digits_str += f\", rtol={rtol}\"\n\n isclose = np.isclose(cptd, xptd, rtol=rtol, atol=atol, equal_nan=equal_nan)\n allclose = bool(np.all(isclose))\n\n if not allclose and equal_phase and hasattr(cptd, \"__neg__\"):\n n_isclose = np.isclose(-cptd, xptd, rtol=rtol, atol=atol, equal_nan=equal_nan)\n allclose = bool(np.all(n_isclose))\n\n if allclose:\n message = pass_message\n\n else:\n if xptd.shape == ():\n xptd_str = f\"{float(xptd):.{digits1}f}\"\n else:\n xptd_str = np.array_str(xptd, max_line_width=120, precision=12, suppress_small=True)\n xptd_str = \"\\n\".join(\" \" + ln for ln in xptd_str.splitlines())\n\n if cptd.shape == ():\n cptd_str = f\"{float(cptd):.{digits1}f}\"\n else:\n cptd_str = np.array_str(cptd, max_line_width=120, precision=12, suppress_small=True)\n cptd_str = \"\\n\".join(\" \" + ln for ln in cptd_str.splitlines())\n\n diff = cptd - xptd\n if xptd.shape == ():\n diff_str = f\"{float(diff):.{digits1}f}\"\n message = \"\"\"\\t{}: computed value ({}) does not match ({}) {} by difference ({}).\"\"\".format(\n label, cptd_str, xptd_str, digits_str, diff_str\n )\n else:\n diff[isclose] = 0.0\n diff_str = np.array_str(diff, max_line_width=120, precision=12, suppress_small=False)\n diff_str = \"\\n\".join(\" \" + ln for ln in diff_str.splitlines())\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n diffrel = np.divide(diff, xptd)\n np.nan_to_num(diffrel, copy=False)\n diffraw = cptd - xptd\n digits_str += f\" (o-e: RMS {_rms(diffraw):.1e}, MAX {np.amax(np.absolute(diffraw)):.1e}, RMAX {np.amax(np.absolute(diffrel)):.1e})\"\n message = \"\"\"\\t{}: computed value does not match {}.\\n Expected:\\n{}\\n Observed:\\n{}\\n Difference (passed elements are zeroed):\\n{}\\n\"\"\".format(\n label, digits_str, xptd_str, cptd_str, diff_str\n 
)\n\n return return_handler(allclose, label, message, return_message, quiet)\n\n\ndef _rms(arr: np.ndarray) -> float:\n return np.sqrt(np.mean(np.square(arr)))\n\n\ndef compare(\n expected: Union[int, bool, str, List[int], np.ndarray],\n computed: Union[int, bool, str, List[int], np.ndarray],\n label: str = None,\n *,\n equal_phase: bool = False,\n quiet: bool = False,\n return_message: bool = False,\n return_handler: Callable = None,\n) -> Union[bool, Tuple[bool, str]]:\n r\"\"\"Returns True if two integers, strings, booleans, or integer arrays are element-wise equal.\n\n Parameters\n ----------\n expected\n int, bool, str or array-like of same.\n Reference value against which `computed` is compared.\n computed\n int, bool, str or array-like of same.\n Input value to compare against `expected`.\n label\n Label for passed and error messages. Defaults to calling function name.\n equal_phase\n Compare computed *or its opposite* as equal.\n quiet\n Whether to log the return message.\n return_message\n Whether to return tuple. See below.\n\n Returns\n -------\n allclose : bool\n Returns True if `expected` and `computed` are equal; False otherwise.\n message : str\n When return_message=True, also return passed or error message.\n\n Other Parameters\n ----------------\n return_handler\n Function to control printing, logging, raising, and returning.\n Specialized interception for interfacing testing systems.\n\n Notes\n -----\n * Akin to np.array_equal.\n * For scalar exactly-comparable types and for arbitrary-dimension, np.ndarray-castable, uniform-type,\n exactly-comparable types. For mixed types, use :py:func:`compare_recursive`.\n\n \"\"\"\n label = label or sys._getframe().f_back.f_code.co_name\n pass_message = f\"\\t{label:.<66}PASSED\"\n if return_handler is None:\n return_handler = _handle_return\n\n try:\n xptd, cptd = np.array(expected), np.array(computed)\n except Exception:\n return return_handler(False, label, f\"\"\"\\t{label}: inputs not cast-able to ndarray.\"\"\", return_message, quiet)\n\n if xptd.shape != cptd.shape:\n return return_handler(\n False,\n label,\n f\"\"\"\\t{label}: computed shape ({cptd.shape}) does not match ({xptd.shape}).\"\"\",\n return_message,\n quiet,\n )\n\n isclose = np.asarray(xptd == cptd)\n allclose = bool(isclose.all())\n\n if not allclose and equal_phase:\n try:\n n_isclose = np.asarray(xptd == -cptd)\n except TypeError:\n pass\n else:\n allclose = bool(n_isclose.all())\n\n if allclose:\n message = pass_message\n\n else:\n if xptd.shape == ():\n xptd_str = f\"{xptd}\"\n else:\n xptd_str = np.array_str(xptd, max_line_width=120, precision=12, suppress_small=True)\n xptd_str = \"\\n\".join(\" \" + ln for ln in xptd_str.splitlines())\n\n if cptd.shape == ():\n cptd_str = f\"{cptd}\"\n else:\n cptd_str = np.array_str(cptd, max_line_width=120, precision=12, suppress_small=True)\n cptd_str = \"\\n\".join(\" \" + ln for ln in cptd_str.splitlines())\n\n try:\n diff = cptd - xptd\n except TypeError:\n diff_str = \"(n/a)\"\n else:\n if xptd.shape == ():\n diff_str = f\"{diff}\"\n else:\n diff_str = np.array_str(diff, max_line_width=120, precision=12, suppress_small=False)\n diff_str = \"\\n\".join(\" \" + ln for ln in diff_str.splitlines())\n\n if xptd.shape == ():\n message = \"\"\"\\t{}: computed value ({}) does not match ({}) by difference ({}).\"\"\".format(\n label, cptd_str, xptd_str, diff_str\n )\n else:\n message = \"\"\"\\t{}: computed value does not match.\\n Expected:\\n{}\\n Observed:\\n{}\\n Difference:\\n{}\\n\"\"\".format(\n label, xptd_str, 
cptd_str, diff_str\n )\n\n return return_handler(allclose, label, message, return_message, quiet)\n\n\ndef _compare_recursive(expected, computed, atol, rtol, _prefix=False, equal_phase=False):\n\n errors = []\n name = _prefix or \"root\"\n prefix = name + \".\"\n\n # Initial conversions if required\n if isinstance(expected, BaseModel):\n expected = expected.dict()\n\n if isinstance(computed, BaseModel):\n computed = computed.dict()\n\n if isinstance(expected, (str, int, bool, complex)):\n if expected != computed:\n errors.append((name, \"Value {} did not match {}.\".format(expected, computed)))\n\n elif isinstance(expected, (list, tuple)):\n try:\n if len(expected) != len(computed):\n errors.append((name, \"Iterable lengths did not match\"))\n else:\n for i, item1, item2 in zip(range(len(expected)), expected, computed):\n errors.extend(\n _compare_recursive(\n item1, item2, _prefix=prefix + str(i), atol=atol, rtol=rtol, equal_phase=equal_phase\n )\n )\n except TypeError:\n errors.append((name, \"Expected computed to have a __len__()\"))\n\n elif isinstance(expected, dict):\n expected_extra = computed.keys() - expected.keys()\n computed_extra = expected.keys() - computed.keys()\n if len(expected_extra):\n errors.append((name, \"Found extra keys {}\".format(expected_extra)))\n if len(computed_extra):\n errors.append((name, \"Missing keys {}\".format(computed_extra)))\n\n for k in expected.keys() & computed.keys():\n name = prefix + str(k)\n errors.extend(\n _compare_recursive(\n expected[k], computed[k], _prefix=name, atol=atol, rtol=rtol, equal_phase=equal_phase\n )\n )\n\n elif isinstance(expected, (float, np.number)):\n passfail, msg = compare_values(\n expected, computed, atol=atol, rtol=rtol, equal_phase=equal_phase, return_message=True, quiet=True\n )\n if not passfail:\n errors.append((name, \"Arrays differ.\" + msg))\n\n elif isinstance(expected, np.ndarray):\n if np.issubdtype(expected.dtype, np.floating):\n passfail, msg = compare_values(\n expected, computed, atol=atol, rtol=rtol, equal_phase=equal_phase, return_message=True, quiet=True\n )\n else:\n passfail, msg = compare(expected, computed, equal_phase=equal_phase, return_message=True, quiet=True)\n if not passfail:\n errors.append((name, \"Arrays differ.\" + msg))\n\n elif isinstance(expected, type(None)):\n if expected is not computed:\n errors.append((name, \"'None' does not match.\"))\n\n else:\n errors.append((name, f\"Type {type(expected)} not understood -- stopping recursive compare.\"))\n\n return errors\n\n\ndef compare_recursive(\n expected: Union[Dict, BaseModel, \"ProtoModel\"], # type: ignore\n computed: Union[Dict, BaseModel, \"ProtoModel\"], # type: ignore\n label: str = None,\n *,\n atol: float = 1.0e-6,\n rtol: float = 1.0e-16,\n forgive: List[str] = None,\n equal_phase: Union[bool, List] = False,\n quiet: bool = False,\n return_message: bool = False,\n return_handler: Callable = None,\n) -> Union[bool, Tuple[bool, str]]:\n r\"\"\"\n Recursively compares nested structures such as dictionaries and lists.\n\n Parameters\n ----------\n expected\n Reference value against which `computed` is compared.\n Dict may be of any depth but should contain Plain Old Data.\n computed\n Input value to compare against `expected`.\n Dict may be of any depth but should contain Plain Old Data.\n atol\n Absolute tolerance (see formula below).\n label\n Label for passed and error messages. Defaults to calling function name.\n rtol\n Relative tolerance (see formula below). 
By default set to zero so `atol` dominates.\n forgive\n Keys in top level which may change between `expected` and `computed` without triggering failure.\n equal_phase\n Compare computed *or its opposite* as equal.\n quiet\n Whether to log the return message.\n return_message\n Whether to return tuple. See below.\n\n Returns\n -------\n allclose : bool\n Returns True if `expected` and `computed` are equal within tolerance; False otherwise.\n message : str\n When return_message=True, also return passed or error message.\n\n Notes\n -----\n\n .. code-block:: python\n\n absolute(computed - expected) <= (atol + rtol * absolute(expected))\n\n \"\"\"\n label = label or sys._getframe().f_back.f_code.co_name\n if atol >= 1:\n raise ValueError(\n \"Prior to v0.4.0, ``compare_recursive`` used to 10**-atol any atol >=1. That has ceased, so please express your atol literally.\"\n )\n if return_handler is None:\n return_handler = _handle_return\n\n errors = _compare_recursive(expected, computed, atol=atol, rtol=rtol)\n\n if errors and equal_phase:\n n_errors = _compare_recursive(expected, computed, atol=atol, rtol=rtol, equal_phase=True)\n n_errors = dict(n_errors)\n\n if equal_phase is False:\n equal_phase = []\n elif equal_phase is True:\n equal_phase = list(dict(errors).keys())\n else:\n equal_phase = [(ep if ep.startswith(\"root.\") else \"root.\" + ep) for ep in equal_phase]\n phased = []\n\n for nomatch in sorted(errors):\n for ep in equal_phase or []:\n if nomatch[0].startswith(ep):\n if nomatch[0] not in n_errors:\n phased.append(nomatch)\n errors.remove(nomatch)\n\n if forgive is None:\n forgive = []\n else:\n forgive = [(fg if fg.startswith(\"root.\") else \"root.\" + fg) for fg in forgive]\n forgiven = []\n\n for nomatch in sorted(errors):\n for fg in forgive or []:\n if nomatch[0].startswith(fg):\n forgiven.append(nomatch)\n errors.remove(nomatch)\n\n ## print if verbose >= 2 if these functions had that knob\n # forgiven_message = []\n # for e in sorted(forgiven):\n # forgiven_message.append(e[0])\n # forgiven_message.append(\"forgiven \" + e[1])\n # pprint.pprint(forgiven)\n\n message = []\n for e in sorted(errors):\n message.append(e[0])\n message.append(\" \" + e[1])\n\n ret_msg_str = \"\\n\".join(message)\n\n return return_handler(len(ret_msg_str) == 0, label, ret_msg_str, return_message, quiet)\n\n\ndef compare_molrecs(\n expected,\n computed,\n label: str = None,\n *,\n atol: float = 1.0e-6,\n rtol: float = 1.0e-16,\n forgive=None,\n verbose: int = 1,\n relative_geoms=\"exact\",\n return_message: bool = False,\n return_handler: Callable = None,\n) -> bool:\n \"\"\"Function to compare Molecule dictionaries.\"\"\"\n # Need to manipulate the dictionaries a bit, so hold values\n xptd = copy.deepcopy(expected)\n cptd = copy.deepcopy(computed)\n\n def massage_dicts(dicary):\n # if 'fix_symmetry' in dicary:\n # dicary['fix_symmetry'] = str(dicary['fix_symmetry'])\n # if 'units' in dicary:\n # dicary['units'] = str(dicary['units'])\n if \"fragment_files\" in dicary:\n dicary[\"fragment_files\"] = [str(f) for f in dicary[\"fragment_files\"]]\n # and about int vs long errors\n # if 'molecular_multiplicity' in dicary:\n # dicary['molecular_multiplicity'] = int(dicary['molecular_multiplicity'])\n # if 'fragment_multiplicities' in dicary:\n # dicary['fragment_multiplicities'] = [(m if m is None else int(m))\n # for m in dicary['fragment_multiplicities']]\n if \"fragment_separators\" in dicary:\n dicary[\"fragment_separators\"] = [(s if s is None else int(s)) for s in 
dicary[\"fragment_separators\"]]\n # forgive generator version changes\n if \"provenance\" in dicary:\n dicary[\"provenance\"].pop(\"version\")\n # regularize connectivity ordering\n if \"connectivity\" in dicary:\n conn = [(min(at1, at2), max(at1, at2), bo) for (at1, at2, bo) in dicary[\"connectivity\"]]\n conn.sort(key=lambda tup: tup[0])\n dicary[\"connectivity\"] = conn\n\n return dicary\n\n xptd = massage_dicts(xptd)\n cptd = massage_dicts(cptd)\n\n if relative_geoms == \"exact\":\n pass\n elif relative_geoms == \"align\":\n # can't just expect geometries to match, so we'll align them, check that\n # they overlap and that the translation/rotation arrays jibe with\n # fix_com/orientation, then attach the oriented geom to computed before the\n # recursive dict comparison.\n from .molutil.align import B787\n\n cgeom = np.array(cptd[\"geom\"]).reshape((-1, 3))\n rgeom = np.array(xptd[\"geom\"]).reshape((-1, 3))\n rmsd, mill = B787(\n rgeom=rgeom,\n cgeom=cgeom,\n runiq=None,\n cuniq=None,\n atoms_map=True,\n mols_align=True,\n run_mirror=False,\n verbose=0,\n )\n if cptd[\"fix_com\"]:\n return compare(\n True,\n np.allclose(np.zeros((3)), mill.shift, atol=atol),\n \"null shift\",\n quiet=(verbose == 0),\n return_message=return_message,\n return_handler=return_handler,\n )\n if cptd[\"fix_orientation\"]:\n return compare(\n True,\n np.allclose(np.identity(3), mill.rotation, atol=atol),\n \"null rotation\",\n quiet=(verbose == 0),\n return_message=return_message,\n return_handler=return_handler,\n )\n ageom = mill.align_coordinates(cgeom)\n cptd[\"geom\"] = ageom.reshape((-1))\n\n return compare_recursive(\n xptd,\n cptd,\n atol=atol,\n rtol=rtol,\n label=label,\n forgive=forgive,\n quiet=(verbose == 0),\n return_message=return_message,\n return_handler=return_handler,\n )\n" ]
[ [ "numpy.array_str" ], [ "numpy.square", "numpy.absolute", "numpy.asarray", "numpy.issubdtype", "numpy.nan_to_num", "numpy.all", "numpy.array_str", "numpy.log10", "numpy.identity", "numpy.iscomplexobj", "numpy.errstate", "numpy.array", "numpy.zeros", "numpy.divide", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jhardenberg/EnsClus
[ "c7591aa39d649fc4321ac4db219f241aabcaf295", "c7591aa39d649fc4321ac4db219f241aabcaf295" ]
[ "clus/sel_season_area.py", "clus/ens_eof_kmeans.py" ]
[ "# Standard packages\nfrom netCDF4 import Dataset, num2date\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\n\n#____________Selecting a season (DJF,DJFM,NDJFM,JJA)\ndef sel_season(var,dates,season,timestep):\n #----------------------------------------------------------------------------------------\n #print('____________________________________________________________________________________________________________________')\n #print('Selecting only {0} data'.format(season))\n #----------------------------------------------------------------------------------------\n dates_pdh = pd.to_datetime(dates)\n print(dates_pdh)\n\n mesi_short = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n if season=='DJF': #ONLY DEC-JAN-FEB\n m=[12,1,2]\n mask=(dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2)\n elif season=='DJFM': #ONLY DEC-JAN-FEB-MAR\n m=[12,1,2,3]\n mask=(dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2) | (dates_pdh.month==3)\n elif season=='NDJFM': #ONLY NOV-DEC-JAN-FEB-MAR\n m=[11,12,1,2,3]\n mask=(dates_pdh.month==11) | (dates_pdh.month==12) | (dates_pdh.month==1) | (dates_pdh.month==2) | (dates_pdh.month==3)\n elif season=='JJA': #ONLY JUN-JUL-AUG\n m=[6,7,8]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season=='MAM': #ONLY MAR-APR-MAY\n m=[3,4,5]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season=='SON': #ONLY SEP-OCT-NOV\n m=[9,10,11]\n mask=(dates_pdh.month==6) | (dates_pdh.month==7) | (dates_pdh.month==8)\n elif season in mesi_short:\n print(mesi_short.index(season)+1)\n mask = (dates_pdh.month == mesi_short.index(season)+1)\n else:\n print('season is not one of the following: DJF, DJFM, NDJFM, JJA, MAM, SON')\n #print(np.sum(mask))\n var_season = var[mask,:,:]\n dates_season=dates[mask]\n\n if var_season.ndim == 2:\n var_season = var_season[np.newaxis, :]\n\n cut = False\n if timestep == 'month':\n # count number of months\n n_months = var_season.shape[0]\n if n_months%len(season) != 0:\n cut = True\n elif timestep == 'day':\n cut = True\n\n if season in mesi_short:\n cut = False\n\n if cut:\n if (12 in m) or (1 in m):\n #REMOVING THE FIRST MONTHS (for the first year) because there is no previuos december\n print(np.where(dates_season==datetime(dates_pdh.year[0], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0]) ))\n start=int(np.where(dates_season==datetime(dates_pdh.year[0], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0]) )[0])\n #REMOVING THE LAST MONTHS (for the last year) because there is no following january\n last_sea = dates_season==datetime(dates_pdh.year[-1], m[0], dates_pdh.day[0], dates_pdh.hour[0], dates_pdh.minute[0])\n\n if np.sum(last_sea) > 0:\n end = np.argmax(last_sea)\n else:\n end = -1\n\n var_season=var_season[start:end,:,:]\n dates_season=dates_season[start:end]\n\n return var_season,dates_season\n\n#____________Selecting only [latS-latN, lonW-lonE] box region\ndef sel_area(lat,lon,var,area):\n '''\n GOAL\n Selecting the area of interest\n USAGE\n var_area, lat_area, lon_area =sel_area(lat,lon,var,area)\n area can be 'EAT', 'PNA', 'NH'\n '''\n if area=='EAT':\n printarea='Euro-Atlantic'\n latN = 87.5\n latS = 30.0\n lonW =-80.0 #280\n lonE = 40.0 #40\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n 
var_roll=var\n lon_new=lon\n\n elif area=='PNA':\n printarea='Pacific North American'\n latN = 87.5\n latS = 30.0\n lonW = 140.0\n lonE = 280.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If -180<lon<180, convert to 0<lon<360\n if lon.min() < 0:\n lon_new=lon+180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n\n elif area=='NH':\n printarea='Northern Hemisphere'\n latN = 90.0\n latS = 0.0\n lonW = lon.min()\n lonE = lon.max()\n var_roll=var\n lon_new=lon\n\n elif area=='Eu':\n printarea='Europe'\n latN = 72.0\n latS = 27.0\n lonW = -22.0\n lonE = 45.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n elif area=='Med':\n printarea='Mediterranean'\n latN = 50.0\n latS = 25.0\n lonW = -10.0\n lonE = 40.0\n # lat and lon are extracted from the netcdf file, assumed to be 1D\n #If 0<lon<360, convert to -180<lon<180\n if lon.min() >= 0:\n lon_new=lon-180\n print(var.shape)\n var_roll=np.roll(var,int(len(lon)/2),axis=-1)\n else:\n var_roll=var\n lon_new=lon\n\n #----------------------------------------------------------------------------------------\n #print('____________________________________________________________________________________________________________________')\n #print('Selecting the area of interest: {0}'.format(printarea))\n #----------------------------------------------------------------------------------------\n #-------------------------Selecting only an area\n\n latidx = (lat >= latS) & (lat <= latN)\n lonidx = (lon_new >= lonW) & (lon_new <= lonE)\n\n print(var_roll.shape, len(latidx), len(lonidx))\n if var.ndim == 3:\n var_area = var_roll[:, latidx][..., lonidx]\n elif var.ndim == 2:\n var_area = var_roll[latidx, ...][..., lonidx]\n else:\n raise ValueError('Variable has {} dimensions, should have 2 or 3.'.format(var.ndim))\n\n #print('Grid dimension of the selected area ---> {0}'.format(var_area[0].shape))\n\n return var_area,lat[latidx],lon_new[lonidx]\n", "#*********************************\n# ens_eof_kmeans *\n#*********************************\n\n# Standard packages\nimport numpy as np\nimport sys\nimport os\nfrom sklearn.cluster import KMeans\nimport datetime\nimport math\nimport pandas as pd\nimport collections\nfrom itertools import combinations\nfrom numpy import linalg as LA\n\n\ndef clus_eval_indexes(elements, centroids, labels):\n \"\"\"\n Computes clustering evaluation indexes, as the Davies-Bouldin Index, the Dunn Index, the optimal variance ratio and the Silhouette value. 
Also computes cluster sigmas and distances.\n \"\"\"\n PCs = elements\n ### Computing clustering evaluation Indexes\n numclus = len(centroids)\n inertia_i = np.empty(numclus)\n for i in range(numclus):\n lab_clus = labels == i\n inertia_i[i] = np.sum([np.sum((pcok-centroids[i])**2) for pcok in PCs[lab_clus]])\n\n clus_eval = dict()\n clus_eval['Indexes'] = dict()\n\n # Optimal ratio\n\n n_clus = np.empty(numclus)\n for i in range(numclus):\n n_clus[i] = np.sum(labels == i)\n\n mean_intra_clus_variance = np.sum(inertia_i)/len(labels)\n\n dist_couples = dict()\n coppie = list(combinations(range(numclus), 2))\n for (i,j) in coppie:\n dist_couples[(i,j)] = LA.norm(centroids[i]-centroids[j])\n\n mean_inter_clus_variance = np.sum(np.array(dist_couples.values())**2)/len(coppie)\n\n clus_eval['Indexes']['Inter-Intra Variance ratio'] = mean_inter_clus_variance/mean_intra_clus_variance\n\n sigma_clusters = np.sqrt(inertia_i/n_clus)\n clus_eval['Indexes']['Inter-Intra Distance ratio'] = np.mean(dist_couples.values())/np.mean(sigma_clusters)\n\n # Davies-Bouldin Index\n R_couples = dict()\n for (i,j) in coppie:\n R_couples[(i,j)] = (sigma_clusters[i]+sigma_clusters[j])/dist_couples[(i,j)]\n\n DBI = 0.\n for i in range(numclus):\n coppie_i = [coup for coup in coppie if i in coup]\n Di = np.max([R_couples[cop] for cop in coppie_i])\n DBI += Di\n\n DBI /= numclus\n clus_eval['Indexes']['Davies-Bouldin'] = DBI\n\n # Dunn Index\n\n Delta_clus = np.empty(numclus)\n for i in range(numclus):\n lab_clus = labels == i\n distances = [LA.norm(pcok-centroids[i]) for pcok in PCs[lab_clus]]\n Delta_clus[i] = np.sum(distances)/n_clus[i]\n\n clus_eval['Indexes']['Dunn'] = np.min(dist_couples.values())/np.max(Delta_clus)\n\n clus_eval['Indexes']['Dunn 2'] = np.min(dist_couples.values())/np.max(sigma_clusters)\n\n # Silhouette\n sils = []\n for ind, el, lab in zip(range(len(PCs)), PCs, labels):\n lab_clus = labels == lab\n lab_clus[ind] = False\n ok_Pcs = PCs[lab_clus]\n a = np.sum([LA.norm(okpc - el) for okpc in ok_Pcs])/n_clus[lab]\n\n bs = []\n others = range(numclus)\n others.remove(lab)\n for lab_b in others:\n lab_clus = labels == lab_b\n ok_Pcs = PCs[lab_clus]\n b = np.sum([LA.norm(okpc - el) for okpc in ok_Pcs])/n_clus[lab_b]\n bs.append(b)\n\n b = np.min(bs)\n sils.append((b-a)/max([a,b]))\n\n sils = np.array(sils)\n sil_clus = []\n for i in range(numclus):\n lab_clus = labels == i\n popo = np.sum(sils[lab_clus])/n_clus[i]\n sil_clus.append(popo)\n\n siltot = np.sum(sil_clus)/numclus\n\n clus_eval['Indexes']['Silhouette'] = siltot\n clus_eval['clus_silhouettes'] = sil_clus\n\n clus_eval['Indexes']['Dunn2/DB'] = clus_eval['Indexes']['Dunn 2']/clus_eval['Indexes']['Davies-Bouldin']\n\n clus_eval['R couples'] = R_couples\n clus_eval['Inter cluster distances'] = dist_couples\n clus_eval['Sigma clusters'] = sigma_clusters\n\n return clus_eval\n\n\ndef ens_eof_kmeans(inputs):\n '''\n Find the most representative ensemble member for each cluster.\n METHODS:\n - Empirical Orthogonal Function (EOF) analysis of the input file\n - K-means cluster analysis applied to the retained Principal Components (PCs)\n\n TODO:\n - Order clusters per frequency\n - Give the anomalies in input (not from file)\n\n '''\n\n # User-defined libraries\n from read_netcdf import read_N_2Dfields\n from eof_tool import eof_computation\n\n OUTPUTdir = inputs['OUTPUTdir']\n numens = inputs['numens']\n name_outputs = inputs['name_outputs']\n filenames = inputs['filenames']\n numpcs = inputs['numpcs']\n perc = inputs['perc']\n numclus = 
inputs['numclus']\n\n # Either perc (cluster analysis is applied on a number of PCs such as they explain\n # 'perc' of total variance) or numpcs (number of PCs to retain) is set:\n if numpcs is not None:\n print('Number of principal components: {0}'.format(numpcs))\n\n if perc is not None:\n print('Percentage of explained variance: {0}%'.format(int(perc)))\n\n if (perc is None and numpcs is None) or (perc is not None and numpcs is not None):\n raise ValueError('You have to specify either \"perc\" or \"numpcs\".')\n\n print('Number of clusters: {0}'.format(numclus))\n\n #____________Reading the netCDF file of N 2Dfields of anomalies, saved by ens_anom.py\n ifile=os.path.join(OUTPUTdir,'ens_anomalies_{0}.nc'.format(name_outputs))\n var, varunits, lat, lon = read_N_2Dfields(ifile)\n print('var dim: (numens x lat x lon)={0}'.format(var.shape))\n\n\n #____________Compute EOFs (Empirical Orthogonal Functions)\n #____________and PCs (Principal Components) with respect to ensemble memeber\n print('____________________________________________________________________________________________________________________')\n print('EOF analysis')\n #----------------------------------------------------------------------------------------\n solver, pcs_scal1, eofs_scal2, pcs_unscal0, eofs_unscal0, varfrac = eof_computation(var,varunits,lat,lon)\n\n acc=np.cumsum(varfrac*100)\n if perc is not None:\n # Find how many PCs explain a certain percentage of variance\n # (find the mode relative to the percentage closest to perc, but bigger than perc)\n numpcs=min(enumerate(acc), key=lambda x: x[1]<=perc)[0]+1\n print('\\nThe number of PCs that explain the percentage closest to {0}% of variance (but grater than {0}%) is {1}'.format(perc,numpcs))\n exctperc=min(enumerate(acc), key=lambda x: x[1]<=perc)[1]\n if numpcs is not None:\n exctperc=acc[numpcs-1]\n if np.isnan(exctperc):\n print(acc)\n raise ValueError('NaN in evaluation of variance explained by first pcs')\n print('(the first {0} PCs explain exactly the {1}% of variance)'.format(numpcs,\"%.2f\" %exctperc))\n\n\n #____________Compute k-means analysis using a subset of PCs\n print('__________________________________________________\\n')\n print('k-means analysis using a subset of PCs')\n print('_____________________________________________\\n')\n #----------------------------------------------------------------------------------------\n PCs=pcs_unscal0[:,:numpcs]\n\n clus=KMeans(n_clusters=numclus, n_init=600, max_iter=1000)\n\n start = datetime.datetime.now()\n clus.fit(PCs)\n end = datetime.datetime.now()\n print('k-means algorithm took me %s seconds' %(end-start))\n\n centroids=clus.cluster_centers_ # shape---> (numclus,numpcs)\n labels=clus.labels_ # shape---> (numens,)\n inertia = clus.inertia_\n\n ## Ordering clusters for number of members\n centroids = np.array(centroids)\n labels = np.array(labels)\n\n num_mem = []\n for i in range(numclus):\n num_mem.append(np.sum(labels == i))\n num_mem = np.array(num_mem)\n\n new_ord = num_mem.argsort()[::-1]\n centroids = centroids[new_ord]\n\n labels_new = np.array(labels)\n for nu, i in zip(range(numclus), new_ord):\n labels_new[labels == i] = nu\n labels = labels_new\n ###\n clus_eval = clus_eval_indexes(PCs, centroids, labels)\n for nam in clus_eval['Indexes'].keys():\n print(nam, clus_eval['Indexes'][nam])\n\n print('\\nClusters are identified for {0} PCs (explained variance {1}%)'.format(numpcs, \"%.2f\" %exctperc))\n print('PCs dim: (number of ensemble members, number of PCs)={0}, EOF dim: (number of ensemble 
members, lat, lon)={1}'.format(pcs_unscal0[:,:numpcs].shape,eofs_unscal0[:numpcs].shape))\n print('Centroid coordinates dim: (number of clusters, number of PCs)={0}, labels dim: (number of ensemble members,)={1}\\n'.format(centroids.shape,labels.shape))\n\n #____________Save labels\n namef=os.path.join(OUTPUTdir,'labels_{0}.txt'.format(name_outputs))\n #np.savetxt(namef,labels,fmt='%d')\n filo = open(namef, 'w')\n stringo = '{:6s} {:20s} {:8s}\\n'.format('#', 'filename', 'cluster')\n filo.write(stringo)\n filo.write(' \\n')\n for filnam, ii, lab in zip(inputs['filenames'], range(numens), labels):\n indr = filnam.rindex('/')\n filnam = filnam[indr+1:]\n stringo = '{:6d} {:20s} {:8d}\\n'.format(ii, filnam, lab)\n filo.write(stringo)\n filo.close()\n\n #____________Compute cluster frequencies\n L=[]\n for nclus in range(numclus):\n cl=list(np.where(labels==nclus)[0])\n fr=len(cl)*100/len(labels)\n L.append([nclus,fr,cl])\n print('Cluster labels:')\n print([L[ncl][0] for ncl in range(numclus)])\n print('Cluster frequencies (%):')\n print([round(L[ncl][1],3) for ncl in range(numclus)])\n print('Cluster members:')\n print([L[ncl][2] for ncl in range(numclus)])\n\n #____________Find the most representative ensemble member for each cluster\n print('____________________________________________________________________________________________________________________')\n print('In order to find the most representative ensemble member for each cluster\\n(which is the closest member to the cluster centroid)')\n print('the Euclidean distance between cluster centroids and each ensemble member is computed in the PC space')\n print('____________________________________________________________________________________________________________________')\n # 1)\n print('Check: cluster #1 centroid coordinates vector dim {0} should be the same as the member #1 PC vector dim {1}\\n'.format(centroids[1,:].shape,PCs[1,:].shape))\n #print('\\nIn the PC space, the distance between:')\n norm=np.empty([numclus,numens])\n finalOUTPUT=[]\n repres=[]\n\n ens_mindist = []\n ens_maxdist = []\n for nclus in range(numclus):\n for ens in range(numens):\n normens=centroids[nclus,:]-PCs[ens,:]\n norm[nclus,ens]=math.sqrt(sum(normens**2))\n #print('The distance between centroid of cluster {0} and member {1} is {2}'.format(nclus,ens,round(norm[nclus,ens],3)))\n print('The distances between centroid of cluster {0} and member #0 to #{1} are:\\n{2}'.format(nclus,numens-1,np.round(norm[nclus],3)))\n\n ens_mindist.append((np.argmin(norm[nclus,:]), norm[nclus].min()))\n\n print('MINIMUM DISTANCE FOR CLUSTER {0} IS {1} --> member #{2}'.format(nclus, round(ens_mindist[-1][1],3), ens_mindist[-1][0]))\n\n repres.append(np.where(norm[nclus] == norm[nclus].min())[0][0])\n\n ens_maxdist.append((np.argmax(norm[nclus,:]), norm[nclus].max()))\n\n print('MAXIMUM DISTANCE FOR CLUSTER {0} IS {1} --> member #{2}'.format(nclus, round(ens_maxdist[-1][1],3), ens_maxdist[-1][0]))\n\n txt='Closest ensemble member/members to centroid of cluster {0} is/are {1}\\n'.format(nclus,list(np.where(norm[nclus] == norm[nclus].min())[0]))\n finalOUTPUT.append(txt)\n with open(OUTPUTdir+'RepresentativeEnsembleMembers_{0}.txt'.format(name_outputs), \"w\") as text_file:\n text_file.write(''.join(str(e) for e in finalOUTPUT))\n\n #____________Save the most representative ensemble members\n namef=os.path.join(OUTPUTdir,'repr_ens_{0}.txt'.format(name_outputs))\n filo = open(namef, 'w')\n filo.write('List of cluster representatives\\n')\n stringo = '{:10s} {:8s} -> 
{:20s}\\n'.format('', '#', 'filename')\n filo.write(stringo)\n filo.write(' \\n')\n for ii in range(numclus):\n okin = repres[ii]\n filnam = inputs['filenames'][okin]\n indr = filnam.rindex('/')\n filnam = filnam[indr+1:]\n stringo = 'Cluster {:2d}: {:8d} -> {:20s}\\n'.format(ii, okin, filnam)\n filo.write(stringo)\n filo.close()\n #np.savetxt(namef,repres,fmt='%i')\n\n\n print('____________________________________________________________________________________________________________________')\n print('In order to study the spread of each cluster,')\n print('the standard deviation of the distances between each member in a cluster and the cluster centroid is computed in the PC space')\n print('____________________________________________________________________________________________________________________')\n print('\\nIn the PC space:')\n statOUTPUT=[]\n for nclus in range(numclus):\n members=L[nclus][2]\n norm=np.empty([numclus,len(members)])\n for mem in range(len(members)):\n #print('mem=',mem)\n ens=members[mem]\n #print('ens',ens)\n normens=centroids[nclus,:]-PCs[ens,:]\n norm[nclus,mem]=math.sqrt(sum(normens**2))\n #print('norm=',norm[nclus],norm.dtype)\n print('the distances between centroid of cluster {0} and its belonging members {1} are:\\n{2}'.format(nclus,members,np.round(norm[nclus],3)))\n print('MINIMUM DISTANCE WITHIN CLUSTER {0} IS {1} --> member #{2}'.format(nclus,round(norm[nclus].min(),3),members[np.where(norm[nclus] == norm[nclus].min())[0][0]]))\n print('MAXIMUM DISTANCE WITHIN CLUSTER {0} IS {1} --> member #{2}'.format(nclus,round(norm[nclus].max(),3),members[np.where(norm[nclus] == norm[nclus].max())[0][0]]))\n print('INTRA-CLUSTER STANDARD DEVIATION FOR CLUSTER {0} IS {1}\\n'.format(nclus,norm[nclus].std()))\n\n d_stat=collections.OrderedDict()\n d_stat['cluster']=nclus\n d_stat['member']=members\n d_stat['d_to_centroid']=np.round(norm[nclus],3)\n d_stat['intra-clus_std']=norm[nclus].std()\n d_stat['d_min']=round(norm[nclus].min(),3)\n d_stat['d_max']=round(norm[nclus].max(),3)\n d_stat['freq(%)']=round(L[nclus][1],3)\n stat=pd.DataFrame(d_stat)\n statOUTPUT.append(stat)\n statOUTPUT = pd.concat(statOUTPUT, axis=0)\n #____________Save statistics of cluster analysis\n namef=os.path.join(OUTPUTdir,'statistics_clutering_{0}.txt'.format(name_outputs))\n with open(namef, 'w') as text_file:\n text_file.write(statOUTPUT.__repr__())\n\n return centroids, labels, ens_mindist, ens_maxdist, clus_eval\n\n\n#========================================================\n\n# if __name__ == '__main__':\n# print('This program is being run by itself')\n#\n# print('**************************************************************')\n# print('Running {0}'.format(sys.argv[0]))\n# print('**************************************************************')\n# dir_OUTPUT = sys.argv[1] # OUTPUT DIRECTORY\n# name_outputs = sys.argv[2] # name of the outputs\n# numens = int(sys.argv[3]) # number of ensemble members\n# numpcs = sys.argv[4] # number of retained PCs\n# perc = sys.argv[5] # percentage of explained variance by PCs\n# numclus = int(sys.argv[6]) # number of clusters\n#\n# ens_eof_kmeans(dir_OUTPUT,name_outputs,numens,numpcs,perc,numclus)\n#\n# else:\n# print('ens_eof_kmeans is being imported from another module')\n" ]
[ [ "pandas.to_datetime", "numpy.argmax", "numpy.sum" ], [ "pandas.concat", "numpy.sqrt", "sklearn.cluster.KMeans", "numpy.min", "numpy.isnan", "numpy.cumsum", "numpy.linalg.norm", "pandas.DataFrame", "numpy.round", "numpy.max", "numpy.argmax", "numpy.mean", "numpy.argmin", "numpy.array", "numpy.where", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
pengfei-ma/Google-Play-Store-Subjects-Analysis
[ "65d224eef9c0b6a2714f329edcfd5a4c32f6a2dd" ]
[ "Gradient Descent finding parameters.py" ]
[ "import sys\nfrom operator import add\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nimport pyspark\nfrom pyspark.ml.linalg import Vectors\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as func\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import SQLContext\nimport matplotlib.pyplot as plt \nimport time\nfrom pandas import Series,DataFrame\nimport pandas as pd\nimport re\nfrom collections import Counter\nfrom sklearn.linear_model import LinearRegression\nfrom pyspark.ml.classification import LogisticRegression\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.classification import LinearSVC\n\n\n# building functions\n\ndef isfloat(value):\n try:\n float(value)\n return True\n except:\n return False\n \ndef correctRows(p):\n if isfloat(p[3]) and isfloat(p[4]) and isfloat(p[6]) and isfloat(p[7]) and isfloat(p[9]):\n return p\n \ndef to_list(a):\n return [a]\n\ndef addToList(x, y):\n x.append(y)\n return x\n\ndef extend(x,y):\n x.extend(y)\n return x\n\nspark = SparkSession.builder.master(\"local[*]\").getOrCreate()\nsc = SparkContext.getOrCreate()\nsqlContext = SQLContext(sc)\n\n# load data set\nlines2 = sc.textFile(\"Google-Playstore.csv\")\n\n\nprint(\"##### Finding Simple Linear Regression Equation #####\")\n\n# data pre-processing\ncorrectLine = lines2.map(lambda x: x.split(','))\ncleaned = correctLine.filter(correctRows)\n\nmax_install = cleaned.map(lambda p: (float(p[7])))\nrating = cleaned.map(lambda p: (float(p[3])))\n\n# apply linear regression\nx = np.array(max_install.collect())\ny = np.array(rating.collect())\n\nX = np.stack([x], axis = 1)\n\nreg = LinearRegression(fit_intercept=True).fit(X, y)\n\nprint(\"The m (coefficient) =\",reg.coef_)\nprint(\"The b (y-intercept) =\",reg.intercept_)\nprint(\"The equation is: y = \"+str(reg.coef_[0])+\"X + \"+str(reg.intercept_))\n\nprint(\"##### Finding the parameters using gradient descent #####\")\n\nstart1 = time.time()\ndf = np.stack([y, x], axis=1)\ndff = map(lambda x: (float(x[0]), Vectors.dense(x[1:])), df)\nmydf = spark.createDataFrame(dff, schema=[\"Money\", \"Distance\"])\nmyRDD=mydf.rdd.map(tuple).map(lambda x: (float(x[0]), np.array(x[1]) ))\n\nlearningRate = 0.00001\nnum_iteration = 100\nsize = float(len(y))\nbeta = np.array([0.1])\ncosts = []\n\nfor i in range(num_iteration):\n gradientCost=myRDD.map(lambda x: (x[1], (x[0] - x[1] * beta) ))\\\n .map(lambda x: (x[0]*x[1], x[1]**2 )).reduce(lambda x, y: (x[0] +y[0], x[1]+y[1] ))\n cost= gradientCost[1]\n gradient=(-1/float(size))* gradientCost[0]\n print(i, \"Beta\", beta, \" Cost\", cost)\n beta = beta - learningRate * gradient\n costs.append(cost[0])\n\nend1 = time.time()\n\nprint(f\"Computation time of BGD is {(end1 - start1)/60} Minutes\")\n\n# making plot\nxValues = [i for i in range(len(costs))]\nplt.plot(xValues, costs, 'o', markersize=2)\nplt.xlabel(\"Number of Iteration\")\nplt.ylabel(\"Cost\")\nplt.title(\"Cost with the number of iteration\")\nplt.show()\n" ]
[ [ "matplotlib.pyplot.title", "numpy.stack", "matplotlib.pyplot.plot", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ahmed-elshorbagy/dog_web_app
[ "058b79328d3ed16a77c312f39b5b150eb6423612" ]
[ "web/dog/dog/views.py" ]
[ "# import the necessary packages \r\nfrom django.shortcuts import render\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom django.http import JsonResponse,HttpResponse\r\nimport numpy as np\r\nimport urllib\r\nimport json\r\nimport cv2\r\nimport os\r\nfrom .face import dog_ear\r\nfrom glob import glob\r\nfrom .forms import ImgForm,UrlForm\r\nimport base64\r\nimport requests\r\nfrom keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions\r\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\r\nfrom keras.preprocessing import image \r\nfrom keras.models import load_model\r\n\r\nimport io\r\nimport tensorflow as tf\r\nfrom PIL import Image\r\ngraph = tf.get_default_graph()\r\n# define ResNet50 model\r\ndog_names = [item[9:-1] for item in sorted(glob(\"test/*/\"))]\r\nResNet50_model = ResNet50(weights='imagenet')\r\nInceptionV3_model=load_model('dog/saved_models/weights.best.InceptionV3.hdf5') \r\n# define the path to the face detector\r\nFACE_DETECTOR_PATH = r\"{base_path}/haarcascades/haarcascade_frontalface_alt.xml\".format(\r\n\tbase_path=os.path.abspath(os.path.dirname(__file__)))\r\n\r\ndef main(request):\r\n \tcon={'form1':ImgForm,'form2':UrlForm}\r\n \treturn render(request,'main.html',con)\r\n@csrf_exempt\r\ndef detect(request):\r\n\t# initialize the data dictionary to be returned by the request\r\n\tglobal graph\r\n\twith graph.as_default():\r\n\t\tdata = {\"success\": False}\r\n\r\n\t\t# check to see if this is a post request\r\n\t\tif request.method == \"POST\":\r\n\t\t\t# check to see if an image was uploaded\r\n\t\t\tif request.FILES.get(\"image\", None) is not None:\r\n\t\t\t\t# grab the uploaded image\r\n\t\t\t\timage,dog = _grab_image(stream=request.FILES[\"image\"])\r\n\t\t\t\tad=request.POST.get(\"overlay\", None)\r\n\t\t\t# otherwise, assume that a URL was passed in\r\n\t\t\telse:\r\n\t\t\t\t# grab the URL from the request\r\n\t\t\t\turl = request.POST.get(\"url\", None)\r\n\t\t\t\tad=request.POST.get(\"overlay\", None)\r\n\t\t\t\t# if the URL is None, then return an error\r\n\t\t\t\tif url is None:\r\n\t\t\t\t\tdata[\"error\"] = \"No URL provided.\"\r\n\t\t\t\t\treturn JsonResponse(data)\r\n\r\n\t\t\t\t# load the image and convert\r\n\t\t\t\timage,dog = _grab_image(url=url)\r\n\r\n\t\t\t# convert the image to grayscale, load the face cascade detector,\r\n\t\t\t# and detect faces in the image\r\n\t\t\t\r\n\t\t\timg = cv2.cvtColor(dog_ear(image,ad), cv2.COLOR_BGR2RGB)\r\n\t\t\timg2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n\t\t\timage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\t\t\tdetector = cv2.CascadeClassifier(FACE_DETECTOR_PATH)\r\n\t\t\trects = detector.detectMultiScale(image)\r\n\r\n\t\t\t# construct a list of bounding boxes from the detection\r\n\t\t\trects = [(int(x), int(y), int(x + w), int(y + h)) for (x, y, w, h) in rects]\r\n\t\t\t\r\n\t\t\tresponse=imgenc(img,rects)\r\n\t\t\t# if len(rects)<2:\r\n\t\t\t# \tbreed = InceptionV3_predict_breed(img2)\r\n\t\t\t\r\n\t\t\t# update the data dictionary with the faces detected\r\n\t\t\tdata.update({\"num_faces\": len(rects), \"faces\": rects, \"success\": True,\"dog\":str(dog),\"img\":response,'breed':\"breed\"})\r\n\t\t\r\n\t\treturn render(request,'main.html',data)\t\r\n\t\t# return a JSON response\r\n\t\t# return JsonResponse(data)\r\n\t\t\r\n\r\ndef _grab_image(path=None, stream=None, url=None):\r\n\t# if the path is not None, then load the image from disk\r\n\tif path is not None:\r\n\t\timage = cv2.imread(path)\r\n\r\n\t# otherwise, the image does 
not reside on disk\r\n\telse:\t\r\n\t\t# if the URL is not None, then download the image\r\n\t\tif url is not None:\r\n\t\t\tresp = urllib.request.urlopen(url)\r\n\t\t\tdata = resp.read()\r\n\t\t\t\r\n\t\t# if the stream is not None, then the image has been uploaded\r\n\t\telif stream is not None:\r\n\t\t\tdata = stream.read()\r\n\t\t\t\r\n\t\t# convert the image to a NumPy array and then read it into\r\n\t\t# OpenCV format\r\n\t\timage = np.asarray(bytearray(data), dtype=\"uint8\")\r\n\t\timage = cv2.imdecode(image, cv2.IMREAD_COLOR)\r\n\t\t\r\n\t\timg = preprocess_input(path_to_tensor(image))\r\n\t\tprediction = np.argmax(ResNet50_model.predict(img))\r\n\t\t#boolean variable of presence of dog in image or not\r\n\t\tdog=((prediction <= 268) & (prediction >= 151)) \r\n\t\t\r\n\t# return the image,and bool dog\r\n\treturn image,dog\r\n\r\ndef imgenc(image,rects):\r\n\t# for (startX, startY, endX, endY) in rects:\r\n\t# \tcv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)\r\n\r\n\t# r = 300.0 / image.shape[1]\r\n\t# dim = (300, int(image.shape[0] * r))\r\n\t\r\n\t# # perform the actual resizing of the image and show it\r\n\t# resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\r\n\tCDF=Image.fromarray(image)\r\n\tin_mem_file=io.BytesIO()\t\t\t\r\n\tCDF.save(in_mem_file, format = \"PNG\")\r\n\t# reset file pointer to start\r\n\tin_mem_file.seek(0)\r\n\timg_bytes = in_mem_file.read()\r\n\r\n\tbase64_encoded_result_bytes = base64.b64encode(img_bytes)\r\n\tbase64_encoded_result_str = base64_encoded_result_bytes.decode('ascii')\r\n\treturn \"data:image/png;base64,{0} \".format(base64_encoded_result_str)\r\n\r\ndef path_to_tensor(image):\r\n\t# resize the shape of image\r\n\timage2 =cv2.resize(image, (224,224), interpolation = cv2.INTER_AREA)\r\n\t# change the data type to float to be accepted\r\n\timage2 = image2.astype(np.float32)\r\n\t# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\r\n\treturn np.expand_dims(image2, axis=0)\t\r\ndef extract_InceptionV3(tensor):\r\n return InceptionV3(weights='imagenet', include_top=False).predict(preprocess_input(tensor))\r\ndef InceptionV3_predict_breed(image):\r\n # extract bottleneck features\r\n bottleneck_feature = extract_InceptionV3(path_to_tensor(image))\r\n # obtain predicted vector\r\n predicted_vector = InceptionV3_model.predict(bottleneck_feature)\r\n # return dog breed that is predicted by the model\r\n return dog_names[np.argmax(predicted_vector)]\r\n\r\n" ]
[ [ "tensorflow.get_default_graph", "numpy.expand_dims", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
itouchz/TRepNet
[ "5fa9f273dc57b778ac0a94fffcb926de333ecc37" ]
[ "OnlyWavenet.py" ]
[ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport os\nimport warnings\nimport time\n\nwarnings.filterwarnings('ignore') \n\nfrom tensorflow import keras\nfrom sklearn.preprocessing import RobustScaler, Normalizer, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom datasets import load_data, random_benchmark, list_datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import f1_score, accuracy_score\n\nnp.random.seed(7)\ntf.random.set_seed(7)\n\nfrom tensorflow.keras.layers import Conv1D, LSTM, GRU, Bidirectional, MaxPool1D, RepeatVector, Dense, Attention, Flatten, Dot\nfrom tensorflow.keras.layers import BatchNormalization, Input, Activation, Dropout, Lambda, Multiply, Add, Concatenate, Conv2DTranspose\nfrom tensorflow.keras.models import Model\n\ndef get_output_dim(original_dim):\n if original_dim // 1.3 >= 512:\n return 512\n elif original_dim // 1.3 <= 128:\n return 128\n else:\n return int(original_dim // 1.3)\n\ndef TRepNet(n_steps, n_features, activation='elu'):\n codings_size = get_output_dim(n_steps * n_features)\n dilation_rates = [2**i for i in range(10)] * 1\n \n skips = []\n\n encoder_input = Input(shape=[n_steps, n_features])\n # Convolution\n conv = encoder_input\n for dilation_rate in dilation_rates:\n conv = keras.layers.GaussianNoise(0.01)(conv)\n conv = Conv1D(16, 1, activation=activation, padding='same')(conv)\n \n conv_filter = Conv1D(filters=128, kernel_size=3, padding='causal', activation=activation, dilation_rate=dilation_rate)(conv)\n conv_filter = Dropout(0.1)(conv_filter)\n \n conv_gate = Conv1D(filters=128, kernel_size=3, padding='causal', activation=activation, dilation_rate=dilation_rate)(conv)\n conv_gate = Dropout(0.1)(conv_gate)\n \n mul = Multiply()([Activation('tanh')(conv_filter), Activation('sigmoid')(conv_gate)])\n skip = Conv1D(16, 1, padding='same', activation=activation)(mul)\n \n conv = Add()([conv, skip])\n \n skips.append(skip)\n \n conv = Activation(activation)(Add()(skips))\n conv = Conv1D(16, 1, activation=activation, padding='same')(conv)\n conv = MaxPool1D(pool_size=2)(conv)\n conv = Flatten()(conv)\n\n \n z = Dense(codings_size, kernel_initializer='lecun_normal', activation='selu')(conv)\n \n encoder_output = Dense(codings_size, activation='sigmoid')(z)\n encoder = Model(inputs=[encoder_input], outputs=[encoder_output])\n\n # Decoder\n decoder_input = Input(shape=[codings_size])\n noise_input = keras.layers.GaussianNoise(0.01)(decoder_input)\n dconv = keras.layers.Reshape([codings_size, 1, 1])(noise_input)\n dconv = Conv2DTranspose(filters=32, kernel_size=3, activation=activation)(dconv)\n dconv = Conv2DTranspose(filters=16, kernel_size=1, activation=activation)(dconv)\n dconv = Flatten()(dconv)\n x = Dense(n_steps * n_features)(dconv)\n decoder_output = keras.layers.Reshape([n_steps, n_features])(x)\n decoder = Model(inputs=[decoder_input], outputs=[decoder_output])\n\n return encoder, decoder" ]
[ [ "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.MaxPool1D", "numpy.random.seed", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.layers.Conv2DTranspose", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.GaussianNoise", "tensorflow.keras.layers.Multiply", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.Flatten", "tensorflow.random.set_seed", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
daniilgaltsev/ImageNet-Training
[ "9ca1d26cde07782398c7f366d5bf510c9e988236" ]
[ "imagenet_training/models/simple_cnn.py" ]
[ "\"\"\"A simple cnn model.\"\"\"\n\n\nimport argparse\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Optional\n\nimport torch\nimport torch.nn as nn\n\n\nclass SimpleCNN(nn.Module):\n \"\"\"A simple CNN model.\n\n Args:\n data_config: a dictionary containing information about data.\n args (optional): args from argparser.\n \"\"\"\n\n def __init__(\n self,\n data_config: Dict[str, Any],\n args: Optional[argparse.Namespace] = None,\n ):\n super().__init__()\n\n if args is None:\n self.args = {}\n else:\n self.args = vars(args)\n\n num_classes = len(data_config[\"mapping\"])\n\n self.cnn = nn.Sequential(OrderedDict([\n (\"conv1\", nn.Conv2d(3, 32, kernel_size=3, padding=1, bias=False)),\n (\"relu1\", nn.ReLU(inplace=True)),\n (\"bn1\", nn.BatchNorm2d(32)),\n (\"maxpool1\", nn.MaxPool2d(kernel_size=2, stride=2)),\n (\"conv2\", nn.Conv2d(32, 64, kernel_size=3, bias=False)),\n (\"relu2\", nn.ReLU(inplace=True)),\n (\"bn2\", nn.BatchNorm2d(64)),\n (\"maxpool2\", nn.MaxPool2d(kernel_size=2, stride=2)),\n (\"conv3\", nn.Conv2d(64, 128, kernel_size=3, bias=False)),\n (\"relu3\", nn.ReLU(inplace=True)),\n (\"bn3\", nn.BatchNorm2d(128))\n ]))\n self.head = nn.Sequential(OrderedDict([\n (\"avgpool\", nn.AdaptiveAvgPool2d(1)),\n (\"flatten\", nn.Flatten()),\n (\"fc1\", nn.Linear(128, 128)),\n (\"relu1\", nn.ReLU(inplace=True)),\n (\"fc2\", nn.Linear(128, num_classes))\n ]))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Performs forward operation on a given tensor.\"\"\"\n x = self.cnn(x)\n x = self.head(x)\n return x\n\n @staticmethod\n def add_to_argparse(\n parser: argparse.ArgumentParser,\n main_parser: argparse.ArgumentParser # pylint: disable=unused-argument\n ) -> argparse.ArgumentParser:\n \"\"\"Adds possible agrs to the given parser.\"\"\"\n return parser\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sgjholt/SpecMod
[ "453c77c1fa51c220470e2aa4d92ec432360bfc9f" ]
[ "specmod/Models.py" ]
[ "# MODELS contains a set of functions for minimisation to seismic spectra.\n# It can be modified as appropriate.\nimport numpy as np\nfrom . import config as cfg\n\nMODS = [\"BRUNE\", \"BOATWRIGHT\"]\n\n# UTIL FUNCS\ndef which_model(mod):\n if mod in MODS:\n if mod == \"BRUNE\":\n return BRUNE_MODEL\n if mod == \"BOATWRIGHT\":\n return BOATWRIGHT_MODEL\n else:\n raise ValueError(f\"Model {mod} not available. Choose from {MODS}.\")\n\n\ndef scale_to_motion(motion, f):\n if motion.lower() == 'displacement':\n return 0\n\n elif motion.lower() == 'velocity':\n return np.log10(2*np.pi*f)\n\n elif motion.lower() == 'acceleration':\n return np.log10(np.power(2*np.pi*f,2))\n else:\n return None\n\n# DEFAULT PARAMS FOR SOURCE MODE:\nBRUNE_MODEL = (1, 2) # omega squared\nBOATWRIGHT_MODEL = (2, 2) # omega cubed\n#\nMODEL = which_model(cfg.MODELS[\"MODEL\"])\nMOTION = cfg.MODELS[\"MOTION\"]\n\n\n\n# MINIMISATION FUNCTIONS\n## Source model\ndef source(f, llpsp, fc):\n gam, n = MODEL\n loga = llpsp - (1/gam)*np.log10((1+(f/fc)**(gam*n)))\n return loga\n\n# freq independent t-star attenuation model\ndef t_star(f, ts):\n return -(np.pi*f*ts / np.log(10))\n\n# freq dependent t-star attenuation\ndef t_star_freq(f, ts, a):\n return -(np.pi*(f**(1-a))*ts / np.log(10))\n\n# combine models\ndef simple_model(f, llpsp, fc, ts):\n global MOTION\n \"\"\"\n Simple attenuated source model to minimise.\n \"\"\"\n return source(f, llpsp, fc) + t_star(f, ts) + scale_to_motion(MOTION, f)\n\ndef simple_model_fdep(f, llpsp, fc, ts, a):\n \"\"\"\n Simple model but with frequency dependent attenuation.\n \"\"\"\n return source(f, llpsp, fc) + t_star_freq(f, ts, a) + scale_to_motion(MOTION, f)\n" ]
[ [ "numpy.log", "numpy.log10", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
egonina/svm
[ "397f6fa8d29e8299478586e88864cae095fb08c1" ]
[ "test/svm_test.py" ]
[ "import unittest2 as unittest\nimport copy\nimport numpy as np\nfrom svm_specializer.svm import * \n\nclass BasicTests(unittest.TestCase):\n def test_init(self):\n svm = SVM()\n self.assertIsNotNone(svm)\n\nclass SyntheticDataTests(unittest.TestCase):\n def read_data(self, in_file_name):\n feats = open(in_file_name, \"r\")\n labels = []\n points = {}\n self.D = 0\n first_line = 1\n\n for line in feats:\n vals = line.split(\" \")\n l = vals[0]\n labels.append(l)\n idx = 0\n for v in vals[1:]:\n if first_line:\n self.D += 1\n f = v.split(\":\")[1].strip('\\n')\n if idx not in points.keys():\n points[idx] = []\n points[idx].append(f)\n idx += 1\n if first_line:\n first_line = 0\n\n self.N = len(labels)\n return_labels = np.array(labels, dtype=np.float32)\n points_list = [] \n\n for idx in points.keys():\n points_list.append(points[idx]) \n\n return_points = np.array(points_list, dtype=np.float32)\n return_points = return_points.reshape(self.N, self.D)\n\n return return_labels, return_points\n\n def setUp(self):\n # read in training data\n self.t1_labels, self.t1_data = self.read_data(\"test/sample_data/svm_train_1.svm\")\n self.t2_labels, self.t2_data = self.read_data(\"test/sample_data/svm_train_2.svm\")\n\n # read in training data\n self.c_labels, self.c_data = self.read_data(\"test/sample_data/svm_classify.svm\")\n\n def test_training_and_classify_once(self):\n svm = SVM()\n svm.train(self.t1_data, self.t1_labels, \"linear\")\n svm.classify(self.c_data, self.c_labels)\n\n def test_training_once(self):\n svm = SVM()\n a = svm.train(self.t2_data, self.t2_labels, \"linear\")\n\n def test_training_kernels(self):\n svm = SVM()\n a = svm.train(self.t1_data, self.t1_labels, \"linear\")\n a = svm.train(self.t2_data, self.t2_labels, \"gaussian\")\n\n def test_training_and_classify_twice(self):\n svm = SVM()\n svm.train(self.t1_data, self.t1_labels, \"linear\")\n svm.classify(self.c_data, self.c_labels)\n\n svm1 = SVM()\n svm1.train(self.t2_data, self.t2_labels, \"linear\")\n svm1.classify(self.c_data, self.c_labels)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
runxuanjiang/DeepRL
[ "f5c47c52d4db50577fbada17b09d739da3da67cc", "f5c47c52d4db50577fbada17b09d739da3da67cc" ]
[ "deep_rl/agent/PPO_recurrent_agent_recurrence.py", "deep_rl/agent/BaseAgent.py" ]
[ "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\n# TODO:\n# - plot average rewards in matplotlib\n# - look at when entropy loss is recorded\n\n\nfrom ..network import *\nfrom ..component import *\nfrom .BaseAgent import *\n\nfrom torch_geometric.data import Data, Batch\nfrom torch_geometric.transforms import Distance\n\nimport numpy\nimport numpy.random\n\nimport pdb\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass PPORecurrentAgentRecurrence(BaseAgent):\n def __init__(self, config):\n BaseAgent.__init__(self, config)\n self.config = config #config file, contains hyperparameters and other info\n self.task = config.task_fn() #gym environment wrapper\n self.hidden_size = config.hidden_size\n\n if config.network: #nnet used\n self.network = config.network\n else:\n self.network = config.network_fn()\n self.network.to(device)\n\n self.optimizer = config.optimizer_fn(self.network.parameters()) #optimization function\n self.total_steps = 0\n self.states = self.task.reset()\n self.h0 = torch.zeros(self.config.num_workers, self.hidden_size).to(device) #lstm hidden states\n self.c0 = torch.zeros(self.config.num_workers, self.hidden_size).to(device) #lstm cell states\n self.recurrence = self.config.recurrence\n print(\"running PPO, tag is \" + config.tag)\n\n def step(self):\n config = self.config\n storage = Storage(config.rollout_length) \n\n states = self.states\n\n ##############################################################################################\n #Sampling Loop\n ##############################################################################################\n for _ in range(config.rollout_length):\n\n #add recurrent states (lstm hidden and lstm cell states) to storage\n storage.add({\n 'h0' : self.h0.to(device),\n 'c0' : self.c0.to(device)\n })\n\n #run the neural net once to get prediction\n prediction, (self.h0, self.c0) = self.network(states, (self.h0, self.c0))\n self.h0 = self.h0.to(device)\n self.c0 = self.c0.to(device)\n\n #step the environment with the action determined by the prediction\n next_states, rewards, terminals, info = self.task.step(to_np(prediction['a']))\n self.record_online_return(info)\n rewards = config.reward_normalizer(rewards)\n\n #add everything to storage\n storage.add(prediction)\n storage.add({\n 's' : tensor(states).to(device),\n 'r': tensor(rewards).unsqueeze(-1).to(device),\n 'm': tensor(1 - terminals).unsqueeze(-1).to(device)\n })\n states = next_states\n \n #zero out lstm recurrent state if any of the environments finish\n for i, done in enumerate(terminals):\n if done:\n self.h0[i] = torch.zeros(self.hidden_size)\n self.c0[i] = torch.zeros(self.hidden_size)\n\n self.total_steps += config.num_workers\n\n self.states = states\n\n prediction, _ = self.network(states, (self.h0, self.c0))\n\n storage.add(prediction)\n storage.placeholder()\n\n\n #############################################################################################\n #Calculate advantages and returns and set up for training\n #############################################################################################\n\n advantages = tensor(np.zeros((config.num_workers, 1))).to(device)\n returns = prediction['v'].detach()\n for i in reversed(range(config.rollout_length)):\n returns = 
storage.r[i] + config.discount * storage.m[i] * returns\n if not config.use_gae:\n advantages = returns - storage.v[i].detach()\n else:\n td_error = storage.r[i] + config.discount * storage.m[i] * storage.v[i + 1] - storage.v[i]\n advantages = advantages * config.gae_tau * config.discount * storage.m[i] + td_error\n storage.adv[i] = advantages.detach()\n storage.ret[i] = returns.detach()\n\n storage.a = storage.a[:self.config.rollout_length]\n storage.log_pi_a = storage.log_pi_a[:self.config.rollout_length]\n storage.v = storage.v[:self.config.rollout_length]\n\n\n actions = torch.stack(storage.a, 1).reshape(-1)\n log_probs_old = torch.cat(storage.log_pi_a, 1).reshape(-1)\n values = torch.cat(storage.v, 1).reshape(-1)\n returns = torch.cat(storage.ret, 1).reshape(-1)\n advantages = torch.cat(storage.adv, 1).reshape(-1)\n\n log_probs_old = log_probs_old.detach()\n values = values.detach()\n states = torch.stack(storage.s, 1).view(-1, 4)\n h0 = torch.stack(storage.h0, 1).view(-1, self.hidden_size)\n c0 = torch.stack(storage.c0, 1).view(-1, self.hidden_size)\n\n \n advantages = (advantages - advantages.mean()) / advantages.std()\n\n self.logger.add_scalar('advantages', advantages.mean(), self.total_steps)\n\n\n\n ############################################################################################\n #Training Loop\n ############################################################################################\n for _ in range(config.optimization_epochs):\n indices = numpy.arange(0, self.config.rollout_length * self.config.num_workers, self.recurrence);\n indices = numpy.random.permutation(indices);\n num_indices = config.mini_batch_size // self.recurrence\n starting_batch_indices = [indices[i:i+num_indices] for i in range(0, len(indices), num_indices)]\n for starting_indices in starting_batch_indices:\n batch_entropy = 0\n batch_value_loss = 0\n batch_policy_loss = 0\n batch_loss = 0\n\n sampled_h0 = h0[starting_indices]\n sampled_c0 = c0[starting_indices]\n\n for i in range(self.recurrence):\n sampled_actions = actions[starting_indices + i]\n sampled_log_probs_old = log_probs_old[starting_indices + i]\n sampled_values = values[starting_indices + i]\n sampled_returns = returns[starting_indices + i]\n sampled_advantages = advantages[starting_indices + i]\n sampled_states = states[starting_indices + i]\n\n prediction, (sampled_h0, sampled_c0) = self.network(sampled_states, (sampled_h0, sampled_c0), sampled_actions)\n\n entropy = prediction['ent'].mean()\n \n prediction['log_pi_a'] = prediction['log_pi_a'].reshape(-1)\n prediction['v'] = prediction['v'].reshape(-1)\n\n ratio = (prediction['log_pi_a'] - sampled_log_probs_old).exp()\n obj = ratio * sampled_advantages\n obj_clipped = ratio.clamp(1.0 - self.config.ppo_ratio_clip,\n 1.0 + self.config.ppo_ratio_clip) * sampled_advantages\n policy_loss = -torch.min(obj, obj_clipped).mean() - config.entropy_weight * prediction['ent'].mean()\n\n value_loss = 0.5 * (sampled_returns - prediction['v']).pow(2).mean()\n\n loss = policy_loss + value_loss\n\n batch_entropy += entropy.item()\n batch_policy_loss += policy_loss.item()\n batch_value_loss += value_loss.item()\n batch_loss += loss;\n\n\n batch_entropy /= self.recurrence\n batch_policy_loss /= self.recurrence\n batch_value_loss /= self.recurrence\n batch_loss /= self.recurrence\n\n self.logger.add_scalar('entropy_loss', batch_entropy, self.total_steps)\n self.logger.add_scalar('policy_loss', batch_policy_loss, self.total_steps)\n self.logger.add_scalar('value_loss', batch_value_loss, 
self.total_steps)\n\n self.optimizer.zero_grad()\n batch_loss.backward()\n nn.utils.clip_grad_norm_(self.network.parameters(), config.gradient_clip)\n self.optimizer.step()\n\n\n \n", "#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport torch\nimport numpy as np\nfrom ..utils import *\nimport torch.multiprocessing as mp\nfrom collections import deque\nfrom skimage.io import imsave\n\n\nclass BaseAgent:\n def __init__(self, config):\n self.config = config\n self.logger = get_logger(tag=config.tag, log_level=config.log_level)\n self.task_ind = 0\n\n def close(self):\n close_obj(self.task)\n\n def save(self, filename):\n torch.save(self.network.state_dict(), '%s.model' % (filename))\n with open('%s.stats' % (filename), 'wb') as f:\n pickle.dump(self.config.state_normalizer.state_dict(), f)\n\n def load(self, filename):\n state_dict = torch.load('%s.model' % filename, map_location=lambda storage, loc: storage)\n self.network.load_state_dict(state_dict)\n with open('%s.stats' % (filename), 'rb') as f:\n self.config.state_normalizer.load_state_dict(pickle.load(f))\n\n def eval_step(self, state):\n raise NotImplementedError\n\n def eval_episode(self):\n env = self.config.eval_env\n state = env.reset()\n while True:\n action = self.eval_step(state)\n state, reward, done, info = env.step(action)\n ret = info[0]['episodic_return']\n if ret is not None:\n break\n return ret\n\n def eval_episodes(self):\n episodic_returns = []\n for ep in range(self.config.eval_episodes):\n total_rewards = self.eval_episode()\n episodic_returns.append(np.sum(total_rewards))\n self.logger.info('steps %d, episodic_return_test %.2f(%.2f)' % (\n self.total_steps, np.mean(episodic_returns), np.std(episodic_returns) / np.sqrt(len(episodic_returns))\n ))\n self.logger.add_scalar('episodic_return_test', np.mean(episodic_returns), self.total_steps)\n return {\n 'episodic_return_test': np.mean(episodic_returns),\n }\n\n def record_online_return(self, info, offset=0):\n if isinstance(info, dict):\n ret = info['episodic_return']\n if ret is not None:\n self.logger.add_scalar('episodic_return_train', ret, self.total_steps + offset)\n self.logger.info('steps %d, episodic_return_train %s' % (self.total_steps + offset, ret))\n\n for key in info:\n if key == 'episodic_return' or key == 'terminal_observation':\n continue\n else:\n if key and info[key]:\n self.logger.add_scalar(key, info[key], self.total_steps + offset)\n\n elif isinstance(info, tuple):\n for i, info_ in enumerate(info):\n self.record_online_return(info_, i)\n else:\n raise NotImplementedError\n\n def switch_task(self):\n config = self.config\n if not config.tasks:\n return\n segs = np.linspace(0, config.max_steps, len(config.tasks) + 1)\n if self.total_steps > segs[self.task_ind + 1]:\n self.task_ind += 1\n self.task = config.tasks[self.task_ind]\n self.states = self.task.reset()\n self.states = config.state_normalizer(self.states)\n self.done = True\n\n def record_episode(self, dir, env):\n mkdir(dir)\n steps = 0\n state = env.reset()\n while True:\n self.record_obs(env, dir, steps)\n action = self.record_step(state)\n state, reward, done, info = env.step(action)\n ret = info[0]['episodic_return']\n steps += 1\n if ret is not None:\n break\n\n def record_step(self, state):\n raise NotImplementedError\n\n # For 
DMControl\n def record_obs(self, env, dir, steps):\n env = env.env.envs[0]\n obs = env.render(mode='rgb_array')\n imsave('%s/%04d.png' % (dir, steps), obs)\n\n\nclass BaseActor(mp.Process):\n STEP = 0\n RESET = 1\n EXIT = 2\n SPECS = 3\n NETWORK = 4\n CACHE = 5\n\n def __init__(self, config):\n mp.Process.__init__(self)\n self.config = config\n self.__pipe, self.__worker_pipe = mp.Pipe()\n\n self._state = None\n self._task = None\n self._network = None\n self._total_steps = 0\n self.__cache_len = 2\n\n if not config.async_actor:\n self.start = lambda: None\n self.step = self._sample\n self.close = lambda: None\n self._set_up()\n self._task = config.task_fn()\n\n def _sample(self):\n transitions = []\n for _ in range(self.config.sgd_update_frequency):\n transitions.append(self._transition())\n return transitions\n\n def run(self):\n self._set_up()\n config = self.config\n self._task = config.task_fn()\n\n cache = deque([], maxlen=2)\n while True:\n op, data = self.__worker_pipe.recv()\n if op == self.STEP:\n if not len(cache):\n cache.append(self._sample())\n cache.append(self._sample())\n self.__worker_pipe.send(cache.popleft())\n cache.append(self._sample())\n elif op == self.EXIT:\n self.__worker_pipe.close()\n return\n elif op == self.NETWORK:\n self._network = data\n else:\n raise NotImplementedError\n\n def _transition(self):\n raise NotImplementedError\n\n def _set_up(self):\n pass\n\n def step(self):\n self.__pipe.send([self.STEP, None])\n return self.__pipe.recv()\n\n def close(self):\n self.__pipe.send([self.EXIT, None])\n self.__pipe.close()\n\n def set_network(self, net):\n if not self.config.async_actor:\n self._network = net\n else:\n self.__pipe.send([self.NETWORK, net])\n" ]
[ [ "numpy.arange", "numpy.random.permutation" ], [ "torch.multiprocessing.Process.__init__", "torch.load", "numpy.std", "numpy.mean", "torch.multiprocessing.Pipe", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
icmaple931/facenet-pytorch
[ "555aa4bec20ca3e7c2ead14e7e39d5bbce203e4b" ]
[ "tests/travis_test.py" ]
[ "\"\"\"\nThe following code is intended to be run only by travis for continuius intengration and testing\npurposes. For implementation examples see notebooks in the examples folder.\n\"\"\"\n\nfrom PIL import Image, ImageDraw\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms, datasets\nimport numpy as np\nimport pandas as pd\nfrom time import time\nimport sys, os\nimport glob\n\nfrom models.mtcnn import MTCNN, fixed_image_standardization\nfrom models.inception_resnet_v1 import InceptionResnetV1, get_torch_home\n\n\n#### CLEAR ALL OUTPUT FILES ####\n\ncheckpoints = glob.glob(os.path.join(get_torch_home(), 'checkpoints/*'))\nfor c in checkpoints:\n print('Removing {}'.format(c))\n os.remove(c)\n\ncrop_files = glob.glob('data/test_images_aligned/**/*.png')\nfor c in crop_files:\n print('Removing {}'.format(c))\n os.remove(c)\n\n\n#### TEST EXAMPLE IPYNB'S ####\n\nos.system('jupyter nbconvert --to script --stdout examples/infer.ipynb examples/finetune.ipynb > examples/tmptest.py')\nos.chdir('examples')\ntry:\n import examples.tmptest\nexcept:\n import tmptest\nos.chdir('..')\n\n\n#### TEST MTCNN ####\n\ndef get_image(path, trans):\n img = Image.open(path)\n img = trans(img)\n return img\n\ntrans = transforms.Compose([\n transforms.Resize(512)\n])\n\ntrans_cropped = transforms.Compose([\n np.float32,\n transforms.ToTensor(),\n fixed_image_standardization\n])\n\ndataset = datasets.ImageFolder('data/test_images', transform=trans)\ndataset.idx_to_class = {k: v for v, k in dataset.class_to_idx.items()}\n\nmtcnn_pt = MTCNN(device=torch.device('cpu'))\n\nnames = []\naligned = []\naligned_fromfile = []\nfor img, idx in dataset:\n name = dataset.idx_to_class[idx]\n start = time()\n img_align = mtcnn_pt(img, save_path='data/test_images_aligned/{}/1.png'.format(name))\n print('MTCNN time: {:6f} seconds'.format(time() - start))\n \n # Comparison between types\n img_box = mtcnn_pt.detect(img)[0]\n assert (img_box - mtcnn_pt.detect(np.array(img))[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(torch.as_tensor(np.array(img)))[0]).sum() < 1e-2\n\n # Batching test\n assert (img_box - mtcnn_pt.detect([img, img])[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(np.array([np.array(img), np.array(img)]))[0]).sum() < 1e-2\n assert (img_box - mtcnn_pt.detect(torch.as_tensor([np.array(img), np.array(img)]))[0]).sum() < 1e-2\n\n # Box selection\n mtcnn_pt.selection_method = 'probability'\n print('\\nprobability - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'largest'\n print('largest - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'largest_over_theshold'\n print('largest_over_theshold - ', mtcnn_pt.detect(img))\n mtcnn_pt.selection_method = 'center_weighted_size'\n print('center_weighted_size - ', mtcnn_pt.detect(img))\n\n if img_align is not None:\n names.append(name)\n aligned.append(img_align)\n aligned_fromfile.append(get_image('data/test_images_aligned/{}/1.png'.format(name), trans_cropped))\n\naligned = torch.stack(aligned)\naligned_fromfile = torch.stack(aligned_fromfile)\n\n\n#### TEST EMBEDDINGS ####\n\nexpected = [\n [\n [0.000000, 1.482895, 0.886342, 1.438450, 1.437583],\n [1.482895, 0.000000, 1.345686, 1.029880, 1.061939],\n [0.886342, 1.345686, 0.000000, 1.363125, 1.338803],\n [1.438450, 1.029880, 1.363125, 0.000000, 1.066040],\n [1.437583, 1.061939, 1.338803, 1.066040, 0.000000]\n ],\n [\n [0.000000, 1.430769, 0.992931, 1.414197, 1.329544],\n [1.430769, 0.000000, 1.253911, 1.144899, 1.079755],\n [0.992931, 1.253911, 0.000000, 
1.358875, 1.337322],\n [1.414197, 1.144899, 1.358875, 0.000000, 1.204118],\n [1.329544, 1.079755, 1.337322, 1.204118, 0.000000]\n ]\n]\n\nfor i, ds in enumerate(['vggface2', 'casia-webface']):\n resnet_pt = InceptionResnetV1(pretrained=ds).eval()\n\n start = time()\n embs = resnet_pt(aligned)\n print('\\nResnet time: {:6f} seconds\\n'.format(time() - start))\n\n embs_fromfile = resnet_pt(aligned_fromfile)\n\n dists = [[(emb - e).norm().item() for e in embs] for emb in embs]\n dists_fromfile = [[(emb - e).norm().item() for e in embs_fromfile] for emb in embs_fromfile]\n\n print('\\nOutput:')\n print(pd.DataFrame(dists, columns=names, index=names))\n print('\\nOutput (from file):')\n print(pd.DataFrame(dists_fromfile, columns=names, index=names))\n print('\\nExpected:')\n print(pd.DataFrame(expected[i], columns=names, index=names))\n\n total_error = (torch.tensor(dists) - torch.tensor(expected[i])).norm()\n total_error_fromfile = (torch.tensor(dists_fromfile) - torch.tensor(expected[i])).norm()\n\n print('\\nTotal error: {}, {}'.format(total_error, total_error_fromfile))\n\n if sys.platform != 'win32':\n assert total_error < 1e-4\n assert total_error_fromfile < 1e-4\n\n\n#### TEST CLASSIFICATION ####\n\nresnet_pt = InceptionResnetV1(pretrained=ds, classify=True).eval()\nprob = resnet_pt(aligned)\n\n\n#### MULTI-FACE TEST ####\n\nmtcnn = MTCNN(keep_all=True)\nimg = Image.open('data/multiface.jpg')\nboxes, probs = mtcnn.detect(img)\n\ndraw = ImageDraw.Draw(img)\nfor i, box in enumerate(boxes):\n draw.rectangle(box.tolist())\n\nmtcnn(img, save_path='data/tmp.png')\n\n\n#### MTCNN TYPES TEST ####\n\nimg = Image.open('data/multiface.jpg')\n\nmtcnn = MTCNN(keep_all=True)\nboxes_ref, _ = mtcnn.detect(img)\n_ = mtcnn(img)\n\nmtcnn = MTCNN(keep_all=True).double()\nboxes_test, _ = mtcnn.detect(img)\n_ = mtcnn(img)\n\nbox_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\ntotal_error = np.sum(np.abs(box_diff))\nprint('\\nfp64 Total box error: {}'.format(total_error))\n\nassert total_error < 1e-2\n\n\n# half is not supported on CPUs, only GPUs\nif torch.cuda.is_available():\n\n mtcnn = MTCNN(keep_all=True, device='cuda').half()\n boxes_test, _ = mtcnn.detect(img)\n _ = mtcnn(img)\n\n box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\n print('fp16 Total box error: {}'.format(np.sum(np.abs(box_diff))))\n\n # test new automatic multi precision to compare\n if hasattr(torch.cuda, 'amp'):\n with torch.cuda.amp.autocast():\n mtcnn = MTCNN(keep_all=True, device='cuda')\n boxes_test, _ = mtcnn.detect(img)\n _ = mtcnn(img)\n\n box_diff = boxes_ref[np.argsort(boxes_ref[:,1])] - boxes_test[np.argsort(boxes_test[:,1])]\n print('AMP total box error: {}'.format(np.sum(np.abs(box_diff))))\n\n \n#### MULTI-IMAGE TEST ####\n\nmtcnn = MTCNN(keep_all=True)\nimg = [\n Image.open('data/multiface.jpg'),\n Image.open('data/multiface.jpg')\n]\nbatch_boxes, batch_probs = mtcnn.detect(img)\n\nmtcnn(img, save_path=['data/tmp1.png', 'data/tmp1.png'])\ntmp_files = glob.glob('data/tmp*')\nfor f in tmp_files:\n os.remove(f)\n\n\n#### NO-FACE TEST ####\n\nimg = Image.new('RGB', (512, 512))\nmtcnn(img)\nmtcnn(img, return_prob=True)\n" ]
[ [ "numpy.argsort", "numpy.abs", "pandas.DataFrame", "torch.cuda.amp.autocast", "torch.tensor", "torch.cuda.is_available", "torch.stack", "torch.device", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
SandhyaaGopchandani/PythonNetworkLibsComparion
[ "72db0cabecd0a9764663a044b19ef4dde843c402" ]
[ "net_performance_comparison.py" ]
[ "import itertools\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom graph_tool.all import *\nimport pickle\nimport networkx as nx\nimport matplotlib as mpl\n#mpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom igraph import *\n\n\ndef nodes_edges(num_nodes):\n \"\"\" this function takes number of nodes and returns nodes and edge list\"\"\"\n nodes = list(range(num_nodes))\n edges = list(itertools.combinations(nodes, 2))\n return nodes, edges\n\ndef create_graph_graphtool(node_num, edges):\n \"\"\" this function creates graph object of graphtool library\"\"\"\n g = Graph(directed=False)\n vlist = g.add_vertex(node_num)\n g.add_edge_list(edges)\n return g\n\ndef create_graph_igraph(nodes, edges):\n \"\"\" this function creates graph object of igraph library\"\"\"\n g = Graph(directed=False)\n g.add_vertices(nodes)\n g.add_edges(edges)\n return g\n\ndef create_graph_networkx(nodes, edges):\n \"\"\" this function creates graph object of networkx library\"\"\"\n g = nx.Graph(directed=False)\n g.add_nodes_from(nodes)\n g.add_edges_from(edges)\n return g\n\n\ndef get_edges(complete_edge_list, threshold=0.5):\n \"\"\" this function randomnly picks the edges in graph based on probability. 0.5 means we want to include only 50% of random \n edges of the total edges in the graph\"\"\"\n edge_list = []\n for key in complete_edge_list:\n if np.random.random() < threshold:\n edge_list.append(key)\n\n return edge_list\n\n\ndef multiple_graph(complete_edge_list, nodes, probs, netlib='networkx'):\n \"\"\"this function times the various centrality measures calculated using three different network libararies.\n The function computes various graph based on given probability of edges, computes the degree, closeness and betweenness\n centrality measure and time those. At the end, it returns the list of timestamp for each cenrality. 
\"\"\"\n print(\"total possible edges:\", len(complete_edge_list))\n time_deg_central = []\n time_closeness_central = []\n time_between_central = []\n num_edges = []\n for prob in probs:\n edges = get_edges(complete_edge_list, prob)\n if netlib == 'graph-tool':\n num_nodes = len(nodes)\n graph = create_graph_graphtool(num_nodes, edges)\n print(prob, len(graph.get_vertices()), len(graph.get_edges()))\n num_edges.append(len(graph.get_edges()))\n\n start = timer()\n doc_degree_centralities = graph.get_out_degrees(nodes)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n vertex_betweenness, edge_betweenness = graph_tool.centrality.betweenness(graph)\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = graph_tool.centrality.closeness(graph)\n end = timer()\n time_closeness_central.append(end - start)\n\n if netlib == 'networkx':\n graph = create_graph_networkx(nodes, edges)\n print(prob, len(graph.nodes()), len(graph.edges()))\n num_edges.append(len(graph.edges()))\n\n start = timer()\n doc_degree_centralities = nx.algorithms.centrality.degree_centrality(graph)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n vertex_betweenness = nx.algorithms.centrality.betweenness_centrality(graph)\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = nx.algorithms.centrality.closeness_centrality(graph)\n end = timer()\n time_closeness_central.append(end - start)\n\n if netlib == 'igraph':\n graph = create_graph_igraph(nodes, edges)\n print(prob, graph.vcount(), graph.ecount())\n num_edges.append(graph.ecount())\n\n start = timer()\n doc_degree_centralities = np.array(graph.degree(nodes), dtype='f') / (graph.vcount() - 1)\n end = timer()\n time_deg_central.append(end - start)\n\n start = timer()\n normalization_factor = 2 / (float(graph.vcount() - 1) * float(graph.vcount() - 2))\n vertex_betweenness = np.array(graph.betweenness(), dtype='f') * normalization_factor\n end = timer()\n time_between_central.append(end - start)\n\n start = timer()\n vertex_closeness = graph.closeness()\n end = timer()\n time_closeness_central.append(end - start)\n\n return num_edges, time_deg_central, time_closeness_central, time_between_central\n\n\ndef plot_result(num_nodes, x, y1, y2, y3):\n \"\"\"This function plots the timestamp for three centralities as a function of number of edges.\"\"\"\n plt.plot(x, y1)\n plt.plot(x, y2)\n plt.plot(x, y3)\n plt.legend(['degree centrality', 'closeness centrality','betweenness centrality'], loc='upper left')\n plt.xticks(x)\n plt.title('with network of nodes '+str(num_nodes))\n plt.xticks(rotation=90)\n plt.xlabel('number of edges')\n plt.ylabel('time (in seconds)')\n plt.show()\n\n\nif __name__ == '__main__':\n \n num_nodes = 500 # number of nodes\n nodes, complete_edge_list = nodes_edges(num_nodes)\n threshold = [0.05, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n num_edges, time_deg_central, time_closeness_central, time_between_central = multiple_graph(complete_edge_list,\n nodes, threshold,\n netlib='igraph')\n print(num_edges, time_deg_central, time_closeness_central, time_between_central)\n plot_result(num_nodes, num_edges, time_deg_central, time_closeness_central, time_between_central)" ]
[ [ "matplotlib.pyplot.legend", "numpy.random.random", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tsjayram/mi-prometheus
[ "cf163d9e246c3ae3c100045e58924148b2f81c39" ]
[ "miprometheus/workers/worker.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nworker.py:\n\n - Contains the definition of the ``Worker`` class, representing the base of the basic workers, such as \\\n ``OnlineTrainer`` and ``Tester``.\n\n\n\"\"\"\n__author__ = \"Vincent Marois, Tomasz Kornuta, Ryan L. McAvoy\"\n\nimport os\nimport yaml\n\nimport torch\nimport logging\nimport logging.config\nimport argparse\nimport numpy as np\nfrom random import randrange\nfrom abc import abstractmethod\n\nfrom torch.utils.data import DataLoader\nfrom miprometheus.utils.sampler_factory import SamplerFactory\nfrom miprometheus.problems.problem_factory import ProblemFactory\n\n# Import utils.\nfrom miprometheus.utils.app_state import AppState\nfrom miprometheus.utils.param_interface import ParamInterface\n\n\nclass Worker(object):\n \"\"\"\n Base abstract class for the workers.\n All base workers should subclass it and override the relevant methods.\n \"\"\"\n\n def __init__(self, name, add_default_parser_args = True):\n \"\"\"\n Base constructor for all workers:\n\n - Initializes the AppState singleton:\n\n >>> self.app_state = AppState()\n\n - Initializes the Parameter Registry:\n\n >>> self.params = ParamInterface()\n\n - Defines the logger:\n\n >>> self.logger = logging.getLogger(name=self.name)\n\n - Creates parser and adds default worker command line arguments.\n\n :param name: Name of the worker.\n :type name: str\n\n :param add_default_parser_args: If set, adds default parser arguments (DEFAULT: True).\n :type add_default_parser_args: bool\n\n \"\"\"\n # Call base constructor.\n super(Worker, self).__init__()\n\n # Set worker name.\n self.name = name\n\n # Initialize the application state singleton.\n self.app_state = AppState()\n\n # Initialize parameter interface/registry.\n self.params = ParamInterface()\n\n # Initialize logger using the configuration.\n self.initialize_logger()\n\n # Create parser with a list of runtime arguments.\n self.parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)\n\n # Add arguments to the specific parser.\n if add_default_parser_args:\n # These arguments will be shared by all basic workers.\n self.parser.add_argument('--config',\n dest='config',\n type=str,\n default='',\n help='Name of the configuration file(s) to be loaded. '\n 'If specifying more than one file, they must be separated with coma \",\".')\n\n self.parser.add_argument('--model',\n type=str,\n default='',\n dest='model',\n help='Path to the file containing the saved parameters'\n ' of the model to load (model checkpoint, should end with a .pt extension.)')\n\n self.parser.add_argument('--gpu',\n dest='use_gpu',\n action='store_true',\n help='The current worker will move the computations on GPU devices, if available '\n 'in the system. 
(Default: False)')\n\n self.parser.add_argument('--expdir',\n dest='expdir',\n type=str,\n default=\"./experiments\",\n help='Path to the directory where the experiment(s) folders are/will be stored.'\n ' (DEFAULT: ./experiments)')\n\n self.parser.add_argument('--savetag',\n dest='savetag',\n type=str,\n default='',\n help='Tag for the save directory.')\n\n self.parser.add_argument('--ll',\n action='store',\n dest='log_level',\n type=str,\n default='INFO',\n choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],\n help=\"Log level. (Default: INFO)\")\n\n self.parser.add_argument('--li',\n dest='logging_interval',\n default=100,\n type=int,\n help='Statistics logging interval. Will impact logging to the logger and '\n 'exporting to TensorBoard. Writing to the csv file is not impacted '\n '(interval of 1).(Default: 100, i.e. logs every 100 episodes).')\n\n self.parser.add_argument('--agree',\n dest='confirm',\n action='store_true',\n help='Request user confirmation just after loading the settings, '\n 'before starting training. (Default: False)')\n\n def initialize_logger(self):\n \"\"\"\n Initializes the logger, with a specific configuration:\n\n >>> logger_config = {'version': 1,\n >>> 'disable_existing_loggers': False,\n >>> 'formatters': {\n >>> 'simple': {\n >>> 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n >>> 'datefmt': '%Y-%m-%d %H:%M:%S'}},\n >>> 'handlers': {\n >>> 'console': {\n >>> 'class': 'logging.StreamHandler',\n >>> 'level': 'INFO',\n >>> 'formatter': 'simple',\n >>> 'stream': 'ext://sys.stdout'}},\n >>> 'root': {'level': 'DEBUG',\n >>> 'handlers': ['console']}}\n\n \"\"\"\n # Load the default logger configuration.\n logger_config = {'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'}},\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'simple',\n 'stream': 'ext://sys.stdout'}},\n 'root': {'level': 'DEBUG',\n 'handlers': ['console']}}\n\n logging.config.dictConfig(logger_config)\n\n # Create the Logger, set its label and logging level.\n self.logger = logging.getLogger(name=self.name)\n\n def display_parsing_results(self):\n \"\"\"\n Displays the properly & improperly parsed arguments (if any).\n\n \"\"\"\n # Log the parsed flags.\n flags_str = 'Properly parsed command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in vars(self.flags): \n flags_str += \"{}= {} \\n\".format(arg, getattr(self.flags, arg))\n flags_str += '='*80 + '\\n'\n self.logger.info(flags_str)\n\n # Log the unparsed flags if any.\n if self.unparsed:\n flags_str = 'Invalid command line arguments: \\n'\n flags_str += '='*80 + '\\n'\n for arg in self.unparsed: \n flags_str += \"{} \\n\".format(arg)\n flags_str += '='*80 + '\\n'\n self.logger.warning(flags_str)\n\n def setup_experiment(self):\n \"\"\"\n Setups a specific experiment.\n\n Base method:\n\n - Parses command line arguments.\n\n - Sets the 3 default sections (training / validation / test) and sets their dataloaders params.\n\n .. 
note::\n\n Child classes should override this method, but still call its parent to draw the basic functionality \\\n implemented here.\n\n\n \"\"\"\n # Parse arguments.\n self.flags, self.unparsed = self.parser.parse_known_args()\n\n # Set logger depending on the settings.\n self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))\n\n # add empty sections\n self.params.add_default_params({\"training\": {'terminal_conditions': {}}})\n self.params.add_default_params({\"validation\": {}})\n self.params.add_default_params({\"testing\": {}})\n\n # set a default configuration section for the DataLoaders\n dataloader_config = {'dataloader': {'shuffle': True, # shuffle set by default.\n 'batch_sampler': None,\n 'num_workers': 0, # Do not use multiprocessing by default - for now.\n 'pin_memory': False,\n 'drop_last': False,\n 'timeout': 0},\n 'sampler': {}, # not using sampler by default\n }\n\n self.params[\"training\"].add_default_params(dataloader_config)\n self.params[\"validation\"].add_default_params(dataloader_config)\n self.params[\"testing\"].add_default_params(dataloader_config)\n\n def build_problem_sampler_loader(self, params, section_name):\n \"\"\"\n Builds and returns the Problem class, alongside its DataLoader.\n\n Also builds the sampler if required.\n\n :param params: 'ParamInterface' object, referring to one of main sections (training/validation/testing).\n :type params: miprometheus.utils.ParamInterface\n\n :param section_name: name of the section that will be used by logger for display.\n\n :return: Problem instance & DataLoader instance.\n \"\"\"\n\n # Build the problem.\n problem = ProblemFactory.build(params['problem'])\n\n # Try to build the sampler.\n sampler = SamplerFactory.build(problem, params['sampler'])\n\n if sampler is not None:\n # Set shuffle to False - REQUIRED as those two are exclusive.\n params['dataloader'].add_config_params({'shuffle': False})\n\n # build the DataLoader on top of the validation problem\n loader = DataLoader(dataset=problem,\n batch_size=params['problem']['batch_size'],\n shuffle=params['dataloader']['shuffle'],\n sampler=sampler,\n batch_sampler=params['dataloader']['batch_sampler'],\n num_workers=params['dataloader']['num_workers'],\n collate_fn=problem.collate_fn,\n pin_memory=params['dataloader']['pin_memory'],\n drop_last=params['dataloader']['drop_last'],\n timeout=params['dataloader']['timeout'],\n worker_init_fn=problem.worker_init_fn)\n\n # Display sizes.\n self.logger.info(\"Problem for '{}' loaded (size: {})\".format(section_name, len(problem)))\n if (sampler is not None):\n self.logger.info(\"Sampler for '{}' created (size: {})\".format(section_name, len(sampler)))\n\n\n # Return sampler - even if it is none :]\n return problem, sampler, loader\n\n\n def get_epoch_size(self, problem, sampler, batch_size, drop_last):\n \"\"\"\n Compute the number of iterations ('episodes') to run given the size of the dataset and the batch size to cover\n the entire dataset once.\n\n Takes into account whether one used sampler or not.\n\n :param problem: Object derived from the ''Problem'' class\n\n :param sampler: Sampler (may be None)\n\n :param batch_size: Batch size.\n :type batch_size: int\n\n :param drop_last: If True then last batch (if incomplete) will not be counted\n :type drop_last: bool\n\n .. note::\n\n If the last batch is incomplete we are counting it in when ``drop_last`` in ``DataLoader()`` is set to Ttrue.\n\n .. 
warning::\n\n Leaving this method 'just in case', in most cases one might simply use ''len(dataloader)''.\n\n :return: Number of iterations to perform to go though the entire dataset once.\n\n \"\"\"\n # \"Estimate\" dataset size.\n if (sampler is not None):\n problem_size = len(sampler)\n else:\n problem_size = len(problem)\n\n # If problem_size is a multiciplity of batch_size OR drop last is set.\n if (problem_size % batch_size) == 0 or drop_last:\n return problem_size // batch_size\n else:\n return (problem_size // batch_size) + 1\n\n\n def export_experiment_configuration(self, log_dir, filename, user_confirm):\n \"\"\"\n Dumps the configuration to ``yaml`` file.\n\n :param log_dir: Directory used to host log files (such as the collected statistics).\n :type log_dir: str\n\n :param filename: Name of the ``yaml`` file to write to.\n :type filename: str\n\n :param user_confirm: Whether to request user confirmation.\n :type user_confirm: bool\n\n\n \"\"\"\n # -> At this point, all configuration for experiment is complete.\n\n # Display results of parsing.\n self.display_parsing_results()\n\n # Log the resulting training configuration.\n conf_str = 'Final parameter registry configuration:\\n'\n conf_str += '='*80 + '\\n'\n conf_str += yaml.safe_dump(self.params.to_dict(), default_flow_style=False)\n conf_str += '='*80 + '\\n'\n self.logger.info(conf_str)\n\n # Save the resulting configuration into a .yaml settings file, under log_dir\n with open(log_dir + filename, 'w') as yaml_backup_file:\n yaml.dump(self.params.to_dict(), yaml_backup_file, default_flow_style=False)\n\n # Ask for confirmation - optional.\n if user_confirm:\n try:\n input('Press <Enter> to confirm and start the experiment\\n')\n except KeyboardInterrupt:\n exit(0) \n\n\n def add_statistics(self, stat_col):\n \"\"\"\n Adds most elementary shared statistics to ``StatisticsCollector``: episode and loss.\n\n :param stat_col: ``StatisticsCollector``.\n\n \"\"\"\n # Add default statistics with formatting.\n stat_col.add_statistic('loss', '{:12.10f}')\n stat_col.add_statistic('episode', '{:06d}')\n\n def add_aggregators(self, stat_agg):\n \"\"\"\n Adds basic statistical aggregators to ``StatisticsAggregator``: episode, \\\n episodes_aggregated and loss derivatives.\n\n :param stat_agg: ``StatisticsAggregator``.\n\n \"\"\"\n # add 'aggregators' for the episode.\n stat_agg.add_aggregator('episode', '{:06d}')\n # Number of aggregated episodes.\n stat_agg.add_aggregator('episodes_aggregated', '{:06d}')\n\n # Add default statistical aggregators for the loss (indicating a formatting).\n # Represents the average loss, but stying with loss for TensorBoard \"variable compatibility\".\n stat_agg.add_aggregator('loss', '{:12.10f}') \n stat_agg.add_aggregator('loss_min', '{:12.10f}')\n stat_agg.add_aggregator('loss_max', '{:12.10f}')\n stat_agg.add_aggregator('loss_std', '{:12.10f}')\n\n def aggregate_statistics(self, stat_col, stat_agg):\n \"\"\"\n Aggregates the default statistics collected by the ``StatisticsCollector``.\n\n\n .. note::\n Only computes the min, max, mean, std of the loss as these are basic statistical aggregator by default.\n\n Given that the ``StatisticsAggregator`` uses the statistics collected by the ``StatisticsCollector``, \\\n It should be ensured that these statistics are correctly collected (i.e. 
use of ``self.add_statistics()`` \\\n and ``collect_statistics()``).\n\n :param stat_col: ``StatisticsCollector``\n\n :param stat_agg: ``StatisticsAggregator``\n\n \"\"\"\n # By default, copy the last value for all variables have matching names.\n # (will work well for e.g. episode or epoch)\n for k, v in stat_col.items():\n if k in stat_agg.aggregators:\n # Copy last collected value.\n stat_agg.aggregators[k] = v[-1]\n\n # Get loss values.\n loss_values = stat_col['loss']\n\n # Calculate default aggregates.\n stat_agg.aggregators['loss'] = torch.mean(torch.tensor(loss_values))\n stat_agg.aggregators['loss_min'] = min(loss_values)\n stat_agg.aggregators['loss_max'] = max(loss_values)\n stat_agg.aggregators['loss_std'] = 0.0 if len(loss_values) <= 1 else torch.std(torch.tensor(loss_values))\n stat_agg.aggregators['episodes_aggregated'] = len(loss_values)\n\n @abstractmethod\n def run_experiment(self):\n \"\"\"\n Main function of the worker which executes a specific experiment.\n\n .. note::\n\n Abstract. Should be implemented in the subclasses.\n\n\n \"\"\"\n\n def add_file_handler_to_logger(self, logfile):\n \"\"\"\n Add a ``logging.FileHandler`` to the logger of the current ``Worker``.\n\n Specifies a ``logging.Formatter``:\n\n >>> logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n >>> datefmt='%Y-%m-%d %H:%M:%S')\n\n\n :param logfile: File used by the ``FileHandler``.\n\n \"\"\"\n # create file handler which logs even DEBUG messages\n fh = logging.FileHandler(logfile)\n\n # set logging level for this file\n fh.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n fh.setFormatter(formatter)\n\n # add the handler to the logger\n self.logger.addHandler(fh)\n\n def recurrent_config_parse(self, configs: str, configs_parsed: list):\n \"\"\"\n Parses names of configuration files in a recursive manner, i.e. 
\\\n by looking for ``default_config`` sections and trying to load and parse those \\\n files one by one.\n\n :param configs: String containing names of configuration files (with paths), separated by comas.\n :type configs: str\n\n :param configs_parsed: Configurations that were already parsed (so we won't parse them many times).\n :type configs_parsed: list\n\n\n :return: list of parsed configuration files.\n\n \"\"\"\n # Split and remove spaces.\n configs_to_parse = configs.replace(\" \", \"\").split(',')\n\n # Terminal condition.\n while len(configs_to_parse) > 0:\n\n # Get config.\n config = configs_to_parse.pop(0)\n\n # Skip empty names (after lose comas).\n if config == '':\n continue\n print(\"Info: Parsing the {} configuration file\".format(config))\n\n # Check if it was already loaded.\n if config in configs_parsed:\n print('Warning: Configuration file {} already parsed - skipping'.format(config))\n continue\n\n # Check if file exists.\n if not os.path.isfile(config):\n print('Error: Configuration file {} does not exist'.format(config))\n exit(-1)\n\n try:\n # Open file and get parameter dictionary.\n with open(config, 'r') as stream:\n param_dict = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n print(\"Error: Couldn't properly parse the {} configuration file\".format(config))\n print('yaml.YAMLERROR:', e)\n exit(-1)\n\n # Remember that we loaded that config.\n configs_parsed.append(config)\n\n # Check if there are any default configs to load.\n if 'default_configs' in param_dict:\n # If there are - recursion!\n configs_parsed = self.recurrent_config_parse(\n param_dict['default_configs'], configs_parsed)\n\n # Done, return list of loaded configs.\n return configs_parsed\n\n def recurrent_config_load(self,configs_to_load):\n for config in reversed(configs_to_load):\n # Load params from YAML file.\n self.params.add_config_params_from_yaml(config)\n print('Loaded configuration from file {}'.format(config))\n\n def check_and_set_cuda(self, use_gpu):\n \"\"\"\n Enables computations on CUDA if GPU is available.\n Sets the default data types.\n\n :param use_gpu: Command line flag indicating whether use GPU/CUDA or not. 
\n\n \"\"\"\n # Determine if GPU/CUDA is available.\n if torch.cuda.is_available():\n if use_gpu:\n self.app_state.convert_cuda_types()\n self.logger.info('Running computations on GPU using CUDA enabled')\n elif use_gpu:\n self.logger.warning('GPU flag is enabled but there are no available GPU devices, using CPU instead')\n else:\n self.logger.warning('GPU flag is disabled, using CPU.')\n\n def predict_evaluate_collect(self, model, problem, data_dict, stat_col, episode, epoch=None):\n \"\"\"\n Function that performs the following:\n\n - passes samples through the model,\n - computes loss using the problem\n - collects problem and model statistics,\n\n\n :param model: trainable model.\n :type model: ``models.model.Model`` or a subclass\n\n :param problem: problem generating samples.\n :type problem: ``problems.problem.problem`` or a subclass\n\n :param data_dict: contains the batch of samples to pass to the model.\n :type data_dict: ``DataDict``\n\n :param stat_col: statistics collector used for logging accuracy etc.\n :type stat_col: ``StatisticsCollector``\n\n :param episode: current episode index\n :type episode: int\n\n :param epoch: current epoch index.\n :type epoch: int, optional\n\n\n :return:\n\n - logits,\n - loss\n\n\n \"\"\"\n # Convert to CUDA.\n if self.app_state.use_CUDA:\n data_dict = data_dict.cuda()\n\n # Perform forward calculation.\n logits = model(data_dict)\n\n # Evaluate loss function.\n loss = problem.evaluate_loss(data_dict, logits)\n\n # Collect \"elementary\" statistics - episode and loss.\n if ('epoch' in stat_col) and (epoch is not None):\n stat_col['epoch'] = epoch\n\n stat_col['episode'] = episode\n # Collect loss as float.\n stat_col['loss'] = loss.item()\n\n # Collect other (potential) statistics from problem & model.\n problem.collect_statistics(stat_col, data_dict, logits)\n model.collect_statistics(stat_col, data_dict, logits)\n\n # Return tuple: logits, loss.\n return logits, loss\n\n def export_statistics(self, stat_obj, tag='', export_to_log = True):\n \"\"\"\n Export the statistics/aggregations to logger, csv and TB.\n\n :param stat_obj: ``StatisticsCollector`` or ``StatisticsAggregato`` object.\n\n :param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').\n :type tag: str\n\n :param export_to_log: If True, exports statistics to logger (DEFAULT: True)\n :type export_to_log: bool\n\n \"\"\" \n # Log to logger\n if export_to_log:\n self.logger.info(stat_obj.export_to_string(tag))\n\n # Export to csv\n stat_obj.export_to_csv()\n\n # Export to TensorBoard.\n stat_obj.export_to_tensorboard()\n\n def aggregate_and_export_statistics(self, problem, model, stat_col, stat_agg, episode, tag='', export_to_log = True):\n \"\"\"\n Aggregates the collected statistics. Exports the aggregations to logger, csv and TB. 
\\\n Empties statistics collector for the next episode.\n\n :param model: trainable model.\n :type model: ``models.model.Model`` or a subclass\n\n :param problem: problem generating samples.\n :type problem: ``problems.problem.problem`` or a subclass\n\n :param stat_col: ``StatisticsCollector`` object.\n\n :param stat_agg: ``StatisticsAggregator`` object.\n\n :param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').\n :type tag: str\n\n :param export_to_log: If True, exports statistics to logger (DEFAULT: True)\n :type export_to_log: bool\n\n \"\"\" \n # Aggregate statistics.\n self.aggregate_statistics(stat_col, stat_agg)\n problem.aggregate_statistics(stat_col, stat_agg)\n model.aggregate_statistics(stat_col, stat_agg)\n\n # Set episode, so the datapoint will appear in the right place in TB.\n stat_agg[\"episode\"] = episode\n\n # Export to logger, cvs and TB.\n self.export_statistics(stat_agg, tag, export_to_log)\n\n def cycle(self, iterable):\n \"\"\"\n Cycle an iterator to prevent its exhaustion.\n This function is used in the (online) trainer to reuse the same ``DataLoader`` for a number of episodes\\\n > len(dataset)/batch_size.\n\n :param iterable: iterable.\n :type iterable: iter\n\n \"\"\"\n while True:\n for x in iterable:\n yield x\n\n def set_random_seeds(self, params, section_name):\n \"\"\"\n Set ``torch`` & ``NumPy`` random seeds from the ``ParamRegistry``: \\\n If one was indicated, use it, or set a random one.\n\n :param params: Section in config/param registry that will be changed \\\n (\"training\" or \"testing\" only will be taken into account.)\n\n :param section_name: Name of the section (for logging purposes only).\n :type section_name: str\n\n \"\"\"\n # Set the random seeds: either from the loaded configuration or a default randomly selected one.\n params.add_default_params({\"seed_numpy\": -1})\n if params[\"seed_numpy\"] == -1:\n seed = randrange(0, 2 ** 32)\n # Overwrite the config param!\n params.add_config_params({\"seed_numpy\": seed})\n\n self.logger.info(\"Setting numpy random seed in {} to: {}\".format(section_name, params[\"seed_numpy\"]))\n np.random.seed(params[\"seed_numpy\"])\n\n params.add_default_params({\"seed_torch\": -1})\n if params[\"seed_torch\"] == -1:\n seed = randrange(0, 2 ** 32)\n # Overwrite the config param!\n params.add_config_params({\"seed_torch\": seed})\n\n self.logger.info(\"Setting torch random seed in {} to: {}\".format(section_name, params[\"seed_torch\"]))\n torch.manual_seed(params[\"seed_torch\"])\n torch.cuda.manual_seed_all(params[\"seed_torch\"])\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.tensor", "torch.cuda.is_available", "torch.cuda.manual_seed_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
laughingwithu/jesse
[ "c21adf59074ad62e4aa775261b4ad86c542ec4d5", "c21adf59074ad62e4aa775261b4ad86c542ec4d5" ]
[ "jesse/indicators/mom.py", "jesse/indicators/bop.py" ]
[ "import numpy as np\nimport talib\n\nfrom typing import Union\n\n\ndef mom(candles: np.ndarray, period=10, sequential=False) -> Union[float, np.ndarray]:\n \"\"\"\n MOM - Momentum\n\n :param candles: np.ndarray\n :param period: int - default=10\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n res = talib.MOM(candles[:, 2], timeperiod=period)\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n", "import numpy as np\nimport talib\n\nfrom typing import Union\n\n\ndef bop(candles: np.ndarray, sequential=False) -> Union[float, np.ndarray]:\n \"\"\"\n BOP - Balance Of Power\n\n :param candles: np.ndarray\n :param sequential: bool - default=False\n\n :return: float | np.ndarray\n \"\"\"\n if not sequential and len(candles) > 240:\n candles = candles[-240:]\n\n res = talib.BOP(candles[:, 1], candles[:, 3], candles[:, 4], candles[:, 2])\n\n if sequential:\n return res\n else:\n return None if np.isnan(res[-1]) else res[-1]\n" ]
[ [ "numpy.isnan" ], [ "numpy.isnan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LJ-LiJiahe/cnn_pytorch
[ "abddc46240a2c7da9818c1cb945d951a8e3b107f" ]
[ "plot_loss_accuracy.py" ]
[ "import os\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib\n\nimport config as cfg\nfrom utils import read_from_pickle_file\n\n# Server to Dell box\nmatplotlib.use('TkAgg')\ntrain_loss = []\nvalidation_loss = []\n\ntrain_loss_loc = os.path.join(cfg.loss_dir, 'train_loss')\nvalidation_loss_loc = os.path.join(cfg.loss_dir, 'validation_loss')\n\n\nfor item in read_from_pickle_file(train_loss_loc):\n train_loss.append(item)\n\nfor item in read_from_pickle_file(validation_loss_loc):\n validation_loss.append(item)\n\ntrain_loss = np.array(train_loss)\nvalidation_loss = np.array(validation_loss)\nplt.plot(train_loss[1:-1, 0],train_loss[1:-1, 1],label=\"Train Loss\")\nplt.plot(validation_loss[1:-1, 0],validation_loss[1:-1, 1],label=\"Validation Loss\")\nplt.ylabel(\"Loss\")\nplt.xlabel(\"iterations\")\nplt.legend(loc='upper left')\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.use", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
antonkulaga/DeepAb
[ "51a32d06d19815705bdbfb35a8a9518c17ec313a" ]
[ "deepab/resnets/CrissCrossResNet2D.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.checkpoint import checkpoint\nfrom einops import rearrange, repeat\n\n\nclass CrissCrossAttention(nn.Module):\n def __init__(self, in_dim):\n super(CrissCrossAttention, self).__init__()\n self.query_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim // 8,\n kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim // 8,\n kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim,\n out_channels=in_dim,\n kernel_size=1)\n self.softmax = nn.Softmax(dim=3)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n def forward(self, x):\n device = x.device\n b, _, h, w = x.shape\n\n q = self.query_conv(x)\n q_h = rearrange(q, \"b c h w -> (b w) h c\")\n q_w = rearrange(q, \"b c h w -> (b h) w c\")\n\n k = self.key_conv(x)\n k_h = rearrange(k, \"b c h w -> (b w) c h\")\n k_w = rearrange(k, \"b c h w -> (b h) c w\")\n\n v = self.value_conv(x)\n v_h = rearrange(v, \"b c h w -> (b w) c h\")\n v_w = rearrange(v, \"b c h w -> (b h) c w\")\n\n inf = repeat(torch.diag(\n torch.tensor(float(\"-inf\"), device=device).repeat(h), 0),\n \"h1 h2 -> (b w) h1 h2\",\n b=b,\n w=w)\n e_h = rearrange(torch.bmm(q_h, k_h) + inf,\n \"(b w) h1 h2 -> b h1 w h2\",\n b=b)\n e_w = rearrange(torch.bmm(q_w, k_w), \"(b h) w1 w2 -> b h w1 w2\", b=b)\n\n attn = self.softmax(torch.cat([e_h, e_w], 3))\n attn_h, attn_w = attn.chunk(2, dim=-1)\n attn_h = rearrange(attn_h, \"b h1 w h2 -> (b w) h1 h2\")\n attn_w = rearrange(attn_w, \"b h w1 w2 -> (b h) w1 w2\")\n\n out_h = torch.bmm(v_h, rearrange(attn_h, \"bw h1 h2 -> bw h2 h1\"))\n out_h = rearrange(out_h, \"(b w) c h -> b c h w\", b=b)\n out_w = torch.bmm(v_w, rearrange(attn_w, \"bh w1 w2 -> bh w2 w1\"))\n out_w = rearrange(out_w, \"(b h) c w -> b c h w\", b=b)\n\n return_attn = torch.stack([\n rearrange(attn_h, \"(b w) h1 h2 -> b h2 h1 w\", b=b),\n rearrange(attn_w, \"(b h) w1 w2 -> b w2 h w1\", b=b)\n ],\n dim=1)\n\n return self.gamma * (out_h + out_w) + x, return_attn\n\n\nclass RCCAModule(nn.Module):\n def __init__(self, in_channels, kernel_size=3, return_attn=False):\n super(RCCAModule, self).__init__()\n self.return_attn = return_attn\n inter_channels = in_channels // 4\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels,\n inter_channels,\n kernel_size=(kernel_size, kernel_size),\n stride=(1, 1),\n padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),\n bias=False), nn.BatchNorm2d(inter_channels), nn.ReLU())\n self.cca = CrissCrossAttention(inter_channels)\n self.conv2 = nn.Sequential(\n nn.Conv2d(inter_channels,\n in_channels,\n kernel_size=(kernel_size, kernel_size),\n stride=(1, 1),\n padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),\n bias=False), nn.BatchNorm2d(in_channels), nn.ReLU())\n\n def forward(self, x):\n output = self.conv1(x)\n attns = []\n for _ in range(2):\n output, attn = checkpoint(self.cca, output)\n attns.append(attn)\n output = self.conv2(output)\n\n if self.return_attn:\n return output, attns\n else:\n return output\n" ]
[ [ "torch.nn.Softmax", "torch.cat", "torch.zeros", "torch.nn.Conv2d", "torch.utils.checkpoint.checkpoint", "torch.bmm", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eugeniu1994/Update_CV
[ "562b646e02ffb374dae428a7b6f3ae1debecc997" ]
[ "stuff/scripts/stuff/PointCloudViz.py" ]
[ "from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nskip = 15\n#source = '/home/eugen/catkin_ws/src/Camera_Lidar/DATA/pcd/0002.csv'\n#data = np.genfromtxt(source, delimiter=',')[1::skip,:3]\n#print ('data ', np.shape(data))\n\n#x,y,z = data[:,0],data[:,1],data[:,2]\n\n'''fig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n#ax.scatter(x, y, z, c=c, marker=m)\nax.scatter(x, y, z, s=0.01)\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\n\nplt.show()'''\n\n#---------------------------------------------------------\ndef fit_plane_scipy(P=None):\n from skspatial.objects import Points, Plane\n from skspatial.plotting import plot_3d\n\n points = Points([[0, 0, 0], [1, 3, 5], [-5, 6, 3], [3, 6, 7], [-2, 6, 7]]) if P is None else Points(P)\n\n plane = Plane.best_fit(points)\n plot_3d(\n points.plotter(c='k', s=0.1, depthshade=False),\n plane.plotter(alpha=0.8, lims_x=(-5, 5), lims_y=(-5, 5)),\n )\n plt.show()\n\n#fit_plane_scipy(data)\n\n#---------------------------------------------------------\ndef fit_plane_1():\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n import numpy as np\n\n '''N_POINTS = 10\n TARGET_X_SLOPE = 2\n TARGET_y_SLOPE = 3\n TARGET_OFFSET = 5\n EXTENTS = 5\n NOISE = 5\n \n # create random data\n xs = [np.random.uniform(2*EXTENTS)-EXTENTS for i in range(N_POINTS)]\n ys = [np.random.uniform(2*EXTENTS)-EXTENTS for i in range(N_POINTS)]\n zs = []\n for i in range(N_POINTS):\n zs.append(xs[i]*TARGET_X_SLOPE + ys[i]*TARGET_y_SLOPE + TARGET_OFFSET + np.random.normal(scale=NOISE))'''\n\n xs,ys,zs = x,y,z\n\n # plot raw data\n plt.figure()\n ax = plt.subplot(111, projection='3d')\n ax.scatter(xs, ys, zs,s=0.05)\n\n # do fit\n tmp_A = []\n tmp_b = []\n for i in range(len(xs)):\n tmp_A.append([xs[i], ys[i], 1])\n tmp_b.append(zs[i])\n b = np.matrix(tmp_b).T\n A = np.matrix(tmp_A)\n\n # Manual solution\n fit = (A.T * A).I * A.T * b\n errors = b - A * fit\n residual = np.linalg.norm(errors)\n\n # Or use Scipy\n # from scipy.linalg import lstsq\n # fit, residual, rnk, s = lstsq(A, b)\n\n print(\"solution:\")\n print (\"%f x + %f y + %f = z\" % (fit[0], fit[1], fit[2]))\n print (\"errors:\")\n print (errors)\n print (\"residual:\")\n print (residual)\n\n # plot plane\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n X,Y = np.meshgrid(np.arange(xlim[0], xlim[1]),\n np.arange(ylim[0], ylim[1]))\n Z = np.zeros(X.shape)\n for r in range(X.shape[0]):\n for c in range(X.shape[1]):\n Z[r,c] = fit[0] * X[r,c] + fit[1] * Y[r,c] + fit[2]\n ax.plot_wireframe(X,Y,Z, color='k')\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()\n#fit_plane_1()\n\ndef test():\n import matplotlib.pyplot as plt\n import numpy as np\n import pandas as pd\n import mpld3\n from mpld3 import plugins\n\n css = \"\"\"\n table\n {\n border-collapse: collapse;\n }\n th\n {\n color: #ffffff;\n background-color: #000000;\n }\n td\n {\n background-color: #cccccc;\n }\n table, th, td\n {\n font-family:Arial, Helvetica, sans-serif;\n border: 1px solid black;\n text-align: right;\n }\n \"\"\"\n\n fig, ax = plt.subplots()\n ax.grid(True, alpha=0.3)\n\n N = 50\n df = pd.DataFrame(index=range(N))\n df['x'] = np.random.randn(N)\n df['y'] = np.random.randn(N)\n df['z'] = np.random.randn(N)\n\n labels = []\n for i in range(N):\n #label = df.ix[[i], :].T\n label = df.iloc[[i], :].T\n label.columns = ['Row {0}'.format(i)]\n # .to_html() is unicode; so make leading 'u' go away with str()\n 
labels.append(str(label.to_html()))\n\n points = ax.plot(df.x, df.y, 'o', color='b',\n mec='k', ms=15, mew=1, alpha=.6)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_title('HTML tooltips', size=20)\n\n tooltip = plugins.PointHTMLTooltip(points[0], labels, voffset=10, hoffset=10, css=css)\n plugins.connect(fig, tooltip)\n\n mpld3.show()\n#test()\n\ndef pick():\n import matplotlib.pyplot as plt, numpy as np\n from mpl_toolkits.mplot3d import proj3d\n\n def visualize3DData(X):\n fig = plt.figure(figsize=(16, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], depthshade=False, s=2, picker=True)\n global idx\n def distance(point, event):\n assert point.shape == (3,), \"distance: point.shape is wrong: %s, must be (3,)\" % point.shape\n # Project 3d data space to 2d data space\n x2, y2, _ = proj3d.proj_transform(point[0], point[1], point[2], plt.gca().get_proj())\n # Convert 2d data space to 2d screen space\n x3, y3 = ax.transData.transform((x2, y2))\n\n return np.sqrt((x3 - event.x) ** 2 + (y3 - event.y) ** 2)\n\n def calcClosestDatapoint(X, event):\n distances = [distance(X[i, 0:3], event) for i in range(X.shape[0])]\n return np.argmin(distances)\n\n def annotatePlot(X, index):\n # If we have previously displayed another label, remove it first\n if hasattr(annotatePlot, 'label'):\n annotatePlot.label.remove()\n # Get data point from array of points X, at position index\n x2, y2, _ = proj3d.proj_transform(X[index, 0], X[index, 1], X[index, 2], ax.get_proj())\n annotatePlot.label = plt.annotate(\"Value %d\" % index,\n xy=(x2, y2), xytext=(-20, 20), textcoords='offset points', ha='right',\n va='bottom',\n bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),\n arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))\n fig.canvas.draw()\n\n def onMouseMotion(event):\n \"\"\"Event that is triggered when mouse is moved. 
Shows text annotation over data point closest to mouse.\"\"\"\n closestIndex = calcClosestDatapoint(X, event)\n annotatePlot(X, closestIndex)\n global idx\n idx = closestIndex\n\n # Pick points\n picked, corners = [], []\n def onpick(event):\n #ind = event.ind[0]\n #closestIndex = calcClosestDatapoint(X, event)\n #print('ind ', ind)\n global idx\n #print('closestIndex ',idx)\n ind = idx\n x, y, z = event.artist._offsets3d\n # Ignore if same point selected again\n if picked and (x[ind] == picked[-1][0] and y[ind] == picked[-1][1] and z[ind] == picked[-1][2]):\n return\n\n # Display picked point\n picked.append((x[ind], y[ind], z[ind]))\n corners.append((x[ind], y[ind], z[ind]))\n print('PCL: %s', str(picked[-1]))\n\n if len(picked) > 1:\n # Draw the line\n temp = np.array(picked)\n ax.plot(temp[:, 0], temp[:, 1], temp[:, 2])\n ax.figure.canvas.draw_idle()\n # Reset list for future pick events\n del picked[0]\n\n # Display GUI\n fig.canvas.mpl_connect('pick_event', onpick)\n fig.canvas.mpl_connect('motion_notify_event', onMouseMotion) # on mouse motion\n plt.show()\n\n velodyne = '/home/eugen/catkin_ws/src/Camera_Lidar/scripts/pcl_frame.csv'\n points = np.genfromtxt(velodyne, delimiter=',')[1::skip, :3]\n points = np.asarray(points.tolist())\n print('points ', np.shape(points))\n\n # Select points within chessboard range\n inrange = np.where((points[:, 0] > 0) &\n (points[:, 0] < 2.5) &\n (np.abs(points[:, 1]) < 2.5) &\n (points[:, 2] < 2))\n points = points[inrange[0]]\n if points.shape[0] > 5:\n print('PCL points available: %d', points.shape)\n else:\n print('Very few PCL points available in range')\n return\n\n X = np.random.random((30, 3))\n X = points\n visualize3DData(X)\n\n#pick()\n\ndef test2():\n import numpy as np\n import scipy.optimize\n\n from mpl_toolkits.mplot3d import Axes3D\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n def fitPlaneLTSQ(XYZ):\n (rows, cols) = XYZ.shape\n G = np.ones((rows, 3))\n G[:, 0] = XYZ[:, 0] # X\n G[:, 1] = XYZ[:, 1] # Y\n Z = XYZ[:, 2]\n (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z, rcond=-1)\n normal = (a, b, -1)\n nn = np.linalg.norm(normal)\n normal = normal / nn\n return (c, normal)\n\n data = np.random.randn(100, 3) / 3\n data[:, 2] /= 10\n c, normal = fitPlaneLTSQ(data)\n\n # plot fitted plane\n maxx = np.max(data[:, 0])\n maxy = np.max(data[:, 1])\n minx = np.min(data[:, 0])\n miny = np.min(data[:, 1])\n\n point = np.array([0.0, 0.0, c])\n d = -point.dot(normal)\n\n # plot original points\n ax.scatter(data[:, 0], data[:, 1], data[:, 2])\n\n # compute needed points for plane plotting\n xx, yy = np.meshgrid([minx, maxx], [miny, maxy])\n z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]\n\n # plot plane\n ax.plot_surface(xx, yy, z, alpha=0.2)\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()\n\ntest2()\n" ]
[ [ "numpy.matrix", "numpy.sqrt", "numpy.max", "numpy.random.randn", "numpy.argmin", "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.subplot", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.min", "numpy.genfromtxt", "numpy.linalg.lstsq", "numpy.meshgrid", "numpy.array", "matplotlib.pyplot.show", "numpy.random.random", "numpy.abs", "numpy.linalg.norm", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sandbox/pandas
[ "fd5471208244ae1cb9cb426d6aa02ab408cfacba", "fd5471208244ae1cb9cb426d6aa02ab408cfacba", "fd5471208244ae1cb9cb426d6aa02ab408cfacba", "fd5471208244ae1cb9cb426d6aa02ab408cfacba", "fd5471208244ae1cb9cb426d6aa02ab408cfacba" ]
[ "pandas/tests/test_base.py", "pandas/tests/plotting/test_boxplot_method.py", "pandas/tests/test_generic.py", "pandas/tools/tests/test_hashing.py", "pandas/tests/frame/test_axis_select_reindex.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\nimport re\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nimport pandas as pd\nimport pandas.compat as compat\nfrom pandas.types.common import (is_object_dtype, is_datetimetz,\n needs_i8_conversion)\nimport pandas.util.testing as tm\nfrom pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, PeriodIndex,\n Timedelta)\nfrom pandas.compat import u, StringIO\nfrom pandas.compat.numpy import np_array_datetime64_compat\nfrom pandas.core.base import (FrozenList, FrozenNDArray, PandasDelegate,\n NoNewAttributesMixin)\nfrom pandas.tseries.base import DatetimeIndexOpsMixin\n\n\nclass CheckStringMixin(object):\n\n def test_string_methods_dont_fail(self):\n repr(self.container)\n str(self.container)\n bytes(self.container)\n if not compat.PY3:\n unicode(self.container) # noqa\n\n def test_tricky_container(self):\n if not hasattr(self, 'unicode_container'):\n raise nose.SkipTest('Need unicode_container to test with this')\n repr(self.unicode_container)\n str(self.unicode_container)\n bytes(self.unicode_container)\n if not compat.PY3:\n unicode(self.unicode_container) # noqa\n\n\nclass CheckImmutable(object):\n mutable_regex = re.compile('does not support mutable operations')\n\n def check_mutable_error(self, *args, **kwargs):\n # pass whatever functions you normally would to assertRaises (after the\n # Exception kind)\n tm.assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)\n\n def test_no_mutable_funcs(self):\n def setitem():\n self.container[0] = 5\n\n self.check_mutable_error(setitem)\n\n def setslice():\n self.container[1:2] = 3\n\n self.check_mutable_error(setslice)\n\n def delitem():\n del self.container[0]\n\n self.check_mutable_error(delitem)\n\n def delslice():\n del self.container[0:3]\n\n self.check_mutable_error(delslice)\n mutable_methods = getattr(self, \"mutable_methods\", [])\n for meth in mutable_methods:\n self.check_mutable_error(getattr(self.container, meth))\n\n def test_slicing_maintains_type(self):\n result = self.container[1:2]\n expected = self.lst[1:2]\n self.check_result(result, expected)\n\n def check_result(self, result, expected, klass=None):\n klass = klass or self.klass\n self.assertIsInstance(result, klass)\n self.assertEqual(result, expected)\n\n\nclass TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):\n mutable_methods = ('extend', 'pop', 'remove', 'insert')\n unicode_container = FrozenList([u(\"\\u05d0\"), u(\"\\u05d1\"), \"c\"])\n\n def setUp(self):\n self.lst = [1, 2, 3, 4, 5]\n self.container = FrozenList(self.lst)\n self.klass = FrozenList\n\n def test_add(self):\n result = self.container + (1, 2, 3)\n expected = FrozenList(self.lst + [1, 2, 3])\n self.check_result(result, expected)\n\n result = (1, 2, 3) + self.container\n expected = FrozenList([1, 2, 3] + self.lst)\n self.check_result(result, expected)\n\n def test_inplace(self):\n q = r = self.container\n q += [5]\n self.check_result(q, self.lst + [5])\n # other shouldn't be mutated\n self.check_result(r, self.lst)\n\n\nclass TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):\n mutable_methods = ('put', 'itemset', 'fill')\n unicode_container = FrozenNDArray([u(\"\\u05d0\"), u(\"\\u05d1\"), \"c\"])\n\n def setUp(self):\n self.lst = [3, 5, 7, -2]\n self.container = FrozenNDArray(self.lst)\n self.klass = FrozenNDArray\n\n def test_shallow_copying(self):\n original = self.container.copy()\n self.assertIsInstance(self.container.view(), FrozenNDArray)\n 
self.assertFalse(isinstance(\n self.container.view(np.ndarray), FrozenNDArray))\n self.assertIsNot(self.container.view(), self.container)\n self.assert_numpy_array_equal(self.container, original)\n # shallow copy should be the same too\n self.assertIsInstance(self.container._shallow_copy(), FrozenNDArray)\n\n # setting should not be allowed\n def testit(container):\n container[0] = 16\n\n self.check_mutable_error(testit, self.container)\n\n def test_values(self):\n original = self.container.view(np.ndarray).copy()\n n = original[0] + 15\n vals = self.container.values()\n self.assert_numpy_array_equal(original, vals)\n self.assertIsNot(original, vals)\n vals[0] = n\n self.assertIsInstance(self.container, pd.core.base.FrozenNDArray)\n self.assert_numpy_array_equal(self.container.values(), original)\n self.assertEqual(vals[0], n)\n\n\nclass TestPandasDelegate(tm.TestCase):\n\n class Delegator(object):\n _properties = ['foo']\n _methods = ['bar']\n\n def _set_foo(self, value):\n self.foo = value\n\n def _get_foo(self):\n return self.foo\n\n foo = property(_get_foo, _set_foo, doc=\"foo property\")\n\n def bar(self, *args, **kwargs):\n \"\"\" a test bar method \"\"\"\n pass\n\n class Delegate(PandasDelegate):\n\n def __init__(self, obj):\n self.obj = obj\n\n def setUp(self):\n pass\n\n def test_invalida_delgation(self):\n # these show that in order for the delegation to work\n # the _delegate_* methods need to be overriden to not raise a TypeError\n\n self.Delegate._add_delegate_accessors(\n delegate=self.Delegator,\n accessors=self.Delegator._properties,\n typ='property'\n )\n self.Delegate._add_delegate_accessors(\n delegate=self.Delegator,\n accessors=self.Delegator._methods,\n typ='method'\n )\n\n delegate = self.Delegate(self.Delegator())\n\n def f():\n delegate.foo\n\n self.assertRaises(TypeError, f)\n\n def f():\n delegate.foo = 5\n\n self.assertRaises(TypeError, f)\n\n def f():\n delegate.foo()\n\n self.assertRaises(TypeError, f)\n\n def test_memory_usage(self):\n # Delegate does not implement memory_usage.\n # Check that we fall back to in-built `__sizeof__`\n # GH 12924\n delegate = self.Delegate(self.Delegator())\n sys.getsizeof(delegate)\n\n\nclass Ops(tm.TestCase):\n\n def _allow_na_ops(self, obj):\n \"\"\"Whether to skip test cases including NaN\"\"\"\n if (isinstance(obj, Index) and\n (obj.is_boolean() or not obj._can_hold_na)):\n # don't test boolean / int64 index\n return False\n return True\n\n def setUp(self):\n self.bool_index = tm.makeBoolIndex(10, name='a')\n self.int_index = tm.makeIntIndex(10, name='a')\n self.float_index = tm.makeFloatIndex(10, name='a')\n self.dt_index = tm.makeDateIndex(10, name='a')\n self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(\n tz='US/Eastern')\n self.period_index = tm.makePeriodIndex(10, name='a')\n self.string_index = tm.makeStringIndex(10, name='a')\n self.unicode_index = tm.makeUnicodeIndex(10, name='a')\n\n arr = np.random.randn(10)\n self.int_series = Series(arr, index=self.int_index, name='a')\n self.float_series = Series(arr, index=self.float_index, name='a')\n self.dt_series = Series(arr, index=self.dt_index, name='a')\n self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)\n self.period_series = Series(arr, index=self.period_index, name='a')\n self.string_series = Series(arr, index=self.string_index, name='a')\n\n types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',\n 'unicode']\n fmts = [\"{0}_{1}\".format(t, f)\n for t in types for f in ['index', 'series']]\n self.objs = [getattr(self, f)\n for f 
in fmts if getattr(self, f, None) is not None]\n\n def check_ops_properties(self, props, filter=None, ignore_failures=False):\n for op in props:\n for o in self.is_valid_objs:\n\n # if a filter, skip if it doesn't match\n if filter is not None:\n filt = o.index if isinstance(o, Series) else o\n if not filter(filt):\n continue\n\n try:\n if isinstance(o, Series):\n expected = Series(\n getattr(o.index, op), index=o.index, name='a')\n else:\n expected = getattr(o, op)\n except (AttributeError):\n if ignore_failures:\n continue\n\n result = getattr(o, op)\n\n # these couuld be series, arrays or scalars\n if isinstance(result, Series) and isinstance(expected, Series):\n tm.assert_series_equal(result, expected)\n elif isinstance(result, Index) and isinstance(expected, Index):\n tm.assert_index_equal(result, expected)\n elif isinstance(result, np.ndarray) and isinstance(expected,\n np.ndarray):\n self.assert_numpy_array_equal(result, expected)\n else:\n self.assertEqual(result, expected)\n\n # freq raises AttributeError on an Int64Index because its not\n # defined we mostly care about Series hwere anyhow\n if not ignore_failures:\n for o in self.not_valid_objs:\n\n # an object that is datetimelike will raise a TypeError,\n # otherwise an AttributeError\n if issubclass(type(o), DatetimeIndexOpsMixin):\n self.assertRaises(TypeError, lambda: getattr(o, op))\n else:\n self.assertRaises(AttributeError,\n lambda: getattr(o, op))\n\n def test_binary_ops_docs(self):\n from pandas import DataFrame, Panel\n op_map = {'add': '+',\n 'sub': '-',\n 'mul': '*',\n 'mod': '%',\n 'pow': '**',\n 'truediv': '/',\n 'floordiv': '//'}\n for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',\n 'floordiv']:\n for klass in [Series, DataFrame, Panel]:\n operand1 = klass.__name__.lower()\n operand2 = 'other'\n op = op_map[op_name]\n expected_str = ' '.join([operand1, op, operand2])\n self.assertTrue(expected_str in getattr(klass,\n op_name).__doc__)\n\n # reverse version of the binary ops\n expected_str = ' '.join([operand2, op, operand1])\n self.assertTrue(expected_str in getattr(klass, 'r' +\n op_name).__doc__)\n\n\nclass TestIndexOps(Ops):\n\n def setUp(self):\n super(TestIndexOps, self).setUp()\n self.is_valid_objs = [o for o in self.objs if o._allow_index_ops]\n self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops]\n\n def test_none_comparison(self):\n\n # bug brought up by #1079\n # changed from TypeError in 0.17.0\n for o in self.is_valid_objs:\n if isinstance(o, Series):\n\n o[0] = np.nan\n\n # noinspection PyComparisonWithNone\n result = o == None # noqa\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n # noinspection PyComparisonWithNone\n result = o != None # noqa\n self.assertTrue(result.iat[0])\n self.assertTrue(result.iat[1])\n\n result = None == o # noqa\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n # this fails for numpy < 1.9\n # and oddly for *some* platforms\n # result = None != o # noqa\n # self.assertTrue(result.iat[0])\n # self.assertTrue(result.iat[1])\n\n result = None > o\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n result = o < None\n self.assertFalse(result.iat[0])\n self.assertFalse(result.iat[1])\n\n def test_ndarray_compat_properties(self):\n\n for o in self.objs:\n\n # check that we work\n for p in ['shape', 'dtype', 'flags', 'T', 'strides', 'itemsize',\n 'nbytes']:\n self.assertIsNotNone(getattr(o, p, None))\n self.assertTrue(hasattr(o, 'base'))\n\n # if we have a datetimelike dtype then needs 
a view to work\n # but the user is responsible for that\n try:\n self.assertIsNotNone(o.data)\n except ValueError:\n pass\n\n self.assertRaises(ValueError, o.item) # len > 1\n self.assertEqual(o.ndim, 1)\n self.assertEqual(o.size, len(o))\n\n self.assertEqual(Index([1]).item(), 1)\n self.assertEqual(Series([1]).item(), 1)\n\n def test_ops(self):\n for op in ['max', 'min']:\n for o in self.objs:\n result = getattr(o, op)()\n if not isinstance(o, PeriodIndex):\n expected = getattr(o.values, op)()\n else:\n expected = pd.Period(ordinal=getattr(o._values, op)(),\n freq=o.freq)\n try:\n self.assertEqual(result, expected)\n except TypeError:\n # comparing tz-aware series with np.array results in\n # TypeError\n expected = expected.astype('M8[ns]').astype('int64')\n self.assertEqual(result.value, expected)\n\n def test_nanops(self):\n # GH 7261\n for op in ['max', 'min']:\n for klass in [Index, Series]:\n\n obj = klass([np.nan, 2.0])\n self.assertEqual(getattr(obj, op)(), 2.0)\n\n obj = klass([np.nan])\n self.assertTrue(pd.isnull(getattr(obj, op)()))\n\n obj = klass([])\n self.assertTrue(pd.isnull(getattr(obj, op)()))\n\n obj = klass([pd.NaT, datetime(2011, 11, 1)])\n # check DatetimeIndex monotonic path\n self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))\n\n obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])\n # check DatetimeIndex non-monotonic path\n self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))\n\n # argmin/max\n obj = Index(np.arange(5, dtype='int64'))\n self.assertEqual(obj.argmin(), 0)\n self.assertEqual(obj.argmax(), 4)\n\n obj = Index([np.nan, 1, np.nan, 2])\n self.assertEqual(obj.argmin(), 1)\n self.assertEqual(obj.argmax(), 3)\n\n obj = Index([np.nan])\n self.assertEqual(obj.argmin(), -1)\n self.assertEqual(obj.argmax(), -1)\n\n obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),\n pd.NaT])\n self.assertEqual(obj.argmin(), 1)\n self.assertEqual(obj.argmax(), 2)\n\n obj = Index([pd.NaT])\n self.assertEqual(obj.argmin(), -1)\n self.assertEqual(obj.argmax(), -1)\n\n def test_value_counts_unique_nunique(self):\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._values\n\n if isinstance(values, Index):\n # reset name not to affect latter process\n values.name = None\n\n # create repeated values, 'n'th element is repeated by n+1 times\n # skip boolean, because it only has 2 values at most\n if isinstance(o, Index) and o.is_boolean():\n continue\n elif isinstance(o, Index):\n expected_index = pd.Index(o[::-1])\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = 'a'\n else:\n expected_index = pd.Index(values[::-1])\n idx = o.index.repeat(range(1, len(o) + 1))\n rep = np.repeat(values, range(1, len(o) + 1))\n o = klass(rep, index=idx, name='a')\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n\n expected_s = Series(range(10, 0, -1), index=expected_index,\n dtype='int64', name='a')\n\n result = o.value_counts()\n tm.assert_series_equal(result, expected_s)\n self.assertTrue(result.index.name is None)\n self.assertEqual(result.name, 'a')\n\n result = o.unique()\n if isinstance(o, Index):\n self.assertTrue(isinstance(result, o.__class__))\n self.assert_index_equal(result, orig)\n elif is_datetimetz(o):\n # datetimetz Series returns array of Timestamp\n self.assertEqual(result[0], orig[0])\n for r in result:\n self.assertIsInstance(r, pd.Timestamp)\n tm.assert_numpy_array_equal(result,\n orig._values.asobject.values)\n else:\n tm.assert_numpy_array_equal(result, 
orig.values)\n\n self.assertEqual(o.nunique(), len(np.unique(o.values)))\n\n def test_value_counts_unique_nunique_null(self):\n\n for null_obj in [np.nan, None]:\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n values = o._values\n\n if not self._allow_na_ops(o):\n continue\n\n # special assign to the numpy array\n if is_datetimetz(o):\n if isinstance(o, DatetimeIndex):\n v = o.asi8\n v[0:2] = pd.tslib.iNaT\n values = o._shallow_copy(v)\n else:\n o = o.copy()\n o[0:2] = pd.tslib.iNaT\n values = o._values\n\n elif needs_i8_conversion(o):\n values[0:2] = pd.tslib.iNaT\n values = o._shallow_copy(values)\n else:\n values[0:2] = null_obj\n # check values has the same dtype as the original\n\n self.assertEqual(values.dtype, o.dtype)\n\n # create repeated values, 'n'th element is repeated by n+1\n # times\n if isinstance(o, (DatetimeIndex, PeriodIndex)):\n expected_index = o.copy()\n expected_index.name = None\n\n # attach name to klass\n o = klass(values.repeat(range(1, len(o) + 1)))\n o.name = 'a'\n else:\n if is_datetimetz(o):\n expected_index = orig._values._shallow_copy(values)\n else:\n expected_index = pd.Index(values)\n expected_index.name = None\n o = o.repeat(range(1, len(o) + 1))\n o.name = 'a'\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n # check values correctly have NaN\n nanloc = np.zeros(len(o), dtype=np.bool)\n nanloc[:3] = True\n if isinstance(o, Index):\n self.assert_numpy_array_equal(pd.isnull(o), nanloc)\n else:\n exp = pd.Series(nanloc, o.index, name='a')\n self.assert_series_equal(pd.isnull(o), exp)\n\n expected_s_na = Series(list(range(10, 2, -1)) + [3],\n index=expected_index[9:0:-1],\n dtype='int64', name='a')\n expected_s = Series(list(range(10, 2, -1)),\n index=expected_index[9:1:-1],\n dtype='int64', name='a')\n\n result_s_na = o.value_counts(dropna=False)\n tm.assert_series_equal(result_s_na, expected_s_na)\n self.assertTrue(result_s_na.index.name is None)\n self.assertEqual(result_s_na.name, 'a')\n result_s = o.value_counts()\n tm.assert_series_equal(o.value_counts(), expected_s)\n self.assertTrue(result_s.index.name is None)\n self.assertEqual(result_s.name, 'a')\n\n result = o.unique()\n if isinstance(o, Index):\n tm.assert_index_equal(result,\n Index(values[1:], name='a'))\n elif is_datetimetz(o):\n # unable to compare NaT / nan\n tm.assert_numpy_array_equal(result[1:],\n values[2:].asobject.values)\n self.assertIs(result[0], pd.NaT)\n else:\n tm.assert_numpy_array_equal(result[1:], values[2:])\n\n self.assertTrue(pd.isnull(result[0]))\n self.assertEqual(result.dtype, orig.dtype)\n\n self.assertEqual(o.nunique(), 8)\n self.assertEqual(o.nunique(dropna=False), 9)\n\n def test_value_counts_inferred(self):\n klasses = [Index, Series]\n for klass in klasses:\n s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']\n s = klass(s_values)\n expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index(np.unique(np.array(s_values, dtype=np.object_)))\n tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.unique(np.array(s_values, dtype=np.object_))\n tm.assert_numpy_array_equal(s.unique(), exp)\n\n self.assertEqual(s.nunique(), 4)\n # don't sort, have to sort after the fact as not sorting is\n # platform-dep\n hist = s.value_counts(sort=False).sort_values()\n expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()\n tm.assert_series_equal(hist, expected)\n\n # sort ascending\n hist = 
s.value_counts(ascending=True)\n expected = Series([1, 2, 3, 4], index=list('cdab'))\n tm.assert_series_equal(hist, expected)\n\n # relative histogram.\n hist = s.value_counts(normalize=True)\n expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])\n tm.assert_series_equal(hist, expected)\n\n def test_value_counts_bins(self):\n klasses = [Index, Series]\n for klass in klasses:\n s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']\n s = klass(s_values)\n\n # bins\n self.assertRaises(TypeError,\n lambda bins: s.value_counts(bins=bins), 1)\n\n s1 = Series([1, 1, 2, 3])\n res1 = s1.value_counts(bins=1)\n exp1 = Series({0.998: 4})\n tm.assert_series_equal(res1, exp1)\n res1n = s1.value_counts(bins=1, normalize=True)\n exp1n = Series({0.998: 1.0})\n tm.assert_series_equal(res1n, exp1n)\n\n if isinstance(s1, Index):\n tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))\n else:\n exp = np.array([1, 2, 3], dtype=np.int64)\n tm.assert_numpy_array_equal(s1.unique(), exp)\n\n self.assertEqual(s1.nunique(), 3)\n\n res4 = s1.value_counts(bins=4)\n exp4 = Series({0.998: 2,\n 1.5: 1,\n 2.0: 0,\n 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])\n tm.assert_series_equal(res4, exp4)\n res4n = s1.value_counts(bins=4, normalize=True)\n exp4n = Series(\n {0.998: 0.5,\n 1.5: 0.25,\n 2.0: 0.0,\n 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])\n tm.assert_series_equal(res4n, exp4n)\n\n # handle NA's properly\n s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,\n 'd', 'd', 'a', 'a', 'b']\n s = klass(s_values)\n expected = Series([4, 3, 2], index=['b', 'a', 'd'])\n tm.assert_series_equal(s.value_counts(), expected)\n\n if isinstance(s, Index):\n exp = Index(['a', 'b', np.nan, 'd'])\n tm.assert_index_equal(s.unique(), exp)\n else:\n exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)\n tm.assert_numpy_array_equal(s.unique(), exp)\n self.assertEqual(s.nunique(), 3)\n\n s = klass({})\n expected = Series([], dtype=np.int64)\n tm.assert_series_equal(s.value_counts(), expected,\n check_index_type=False)\n # returned dtype differs depending on original\n if isinstance(s, Index):\n self.assert_index_equal(s.unique(), Index([]),\n exact=False)\n else:\n self.assert_numpy_array_equal(s.unique(), np.array([]),\n check_dtype=False)\n\n self.assertEqual(s.nunique(), 0)\n\n def test_value_counts_datetime64(self):\n klasses = [Index, Series]\n for klass in klasses:\n # GH 3002, datetime64[ns]\n # don't test names though\n txt = \"\\n\".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',\n 'xxyyzz20100101EGG', 'xxyyww20090101EGG',\n 'foofoo20080909PIE', 'foofoo20080909GUM'])\n f = StringIO(txt)\n df = pd.read_fwf(f, widths=[6, 8, 3],\n names=[\"person_id\", \"dt\", \"food\"],\n parse_dates=[\"dt\"])\n\n s = klass(df['dt'].copy())\n s.name = None\n\n idx = pd.to_datetime(['2010-01-01 00:00:00Z',\n '2008-09-09 00:00:00Z',\n '2009-01-01 00:00:00X'])\n expected_s = Series([3, 2, 1], index=idx)\n tm.assert_series_equal(s.value_counts(), expected_s)\n\n expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',\n '2009-01-01 00:00:00Z',\n '2008-09-09 00:00:00Z'],\n dtype='datetime64[ns]')\n if isinstance(s, Index):\n tm.assert_index_equal(s.unique(), DatetimeIndex(expected))\n else:\n tm.assert_numpy_array_equal(s.unique(), expected)\n\n self.assertEqual(s.nunique(), 3)\n\n # with NaT\n s = df['dt'].copy()\n s = klass([v for v in s.values] + [pd.NaT])\n\n result = s.value_counts()\n self.assertEqual(result.index.dtype, 'datetime64[ns]')\n tm.assert_series_equal(result, expected_s)\n\n result = s.value_counts(dropna=False)\n 
expected_s[pd.NaT] = 1\n tm.assert_series_equal(result, expected_s)\n\n unique = s.unique()\n self.assertEqual(unique.dtype, 'datetime64[ns]')\n\n # numpy_array_equal cannot compare pd.NaT\n if isinstance(s, Index):\n exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])\n tm.assert_index_equal(unique, exp_idx)\n else:\n tm.assert_numpy_array_equal(unique[:3], expected)\n self.assertTrue(pd.isnull(unique[3]))\n\n self.assertEqual(s.nunique(), 3)\n self.assertEqual(s.nunique(dropna=False), 4)\n\n # timedelta64[ns]\n td = df.dt - df.dt + timedelta(1)\n td = klass(td, name='dt')\n\n result = td.value_counts()\n expected_s = Series([6], index=[Timedelta('1day')], name='dt')\n tm.assert_series_equal(result, expected_s)\n\n expected = TimedeltaIndex(['1 days'], name='dt')\n if isinstance(td, Index):\n tm.assert_index_equal(td.unique(), expected)\n else:\n tm.assert_numpy_array_equal(td.unique(), expected.values)\n\n td2 = timedelta(1) + (df.dt - df.dt)\n td2 = klass(td2, name='dt')\n result2 = td2.value_counts()\n tm.assert_series_equal(result2, expected_s)\n\n def test_factorize(self):\n for orig in self.objs:\n o = orig.copy()\n\n if isinstance(o, Index) and o.is_boolean():\n exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)\n exp_uniques = o\n exp_uniques = Index([False, True])\n else:\n exp_arr = np.array(range(len(o)), dtype=np.intp)\n exp_uniques = o\n labels, uniques = o.factorize()\n\n self.assert_numpy_array_equal(labels, exp_arr)\n if isinstance(o, Series):\n self.assert_index_equal(uniques, Index(orig),\n check_names=False)\n else:\n # factorize explicitly resets name\n self.assert_index_equal(uniques, exp_uniques,\n check_names=False)\n\n def test_factorize_repeated(self):\n for orig in self.objs:\n o = orig.copy()\n\n # don't test boolean\n if isinstance(o, Index) and o.is_boolean():\n continue\n\n # sort by value, and create duplicates\n if isinstance(o, Series):\n o = o.sort_values()\n n = o.iloc[5:].append(o)\n else:\n indexer = o.argsort()\n o = o.take(indexer)\n n = o[5:].append(o)\n\n exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n dtype=np.intp)\n labels, uniques = n.factorize(sort=True)\n\n self.assert_numpy_array_equal(labels, exp_arr)\n if isinstance(o, Series):\n self.assert_index_equal(uniques, Index(orig).sort_values(),\n check_names=False)\n else:\n self.assert_index_equal(uniques, o, check_names=False)\n\n exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],\n np.intp)\n labels, uniques = n.factorize(sort=False)\n self.assert_numpy_array_equal(labels, exp_arr)\n\n if isinstance(o, Series):\n expected = Index(o.iloc[5:10].append(o.iloc[:5]))\n self.assert_index_equal(uniques, expected, check_names=False)\n else:\n expected = o[5:10].append(o[:5])\n self.assert_index_equal(uniques, expected, check_names=False)\n\n def test_duplicated_drop_duplicates_index(self):\n # GH 4060\n for original in self.objs:\n if isinstance(original, Index):\n\n # special case\n if original.is_boolean():\n result = original.drop_duplicates()\n expected = Index([False, True], name='a')\n tm.assert_index_equal(result, expected)\n continue\n\n # original doesn't have duplicates\n expected = np.array([False] * len(original), dtype=bool)\n duplicated = original.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = original.drop_duplicates()\n tm.assert_index_equal(result, original)\n self.assertFalse(result is original)\n\n # has_duplicates\n self.assertFalse(original.has_duplicates)\n\n # create 
repeated values, 3rd and 5th values are duplicated\n idx = original[list(range(len(original))) + [5, 3]]\n expected = np.array([False] * len(original) + [True, True],\n dtype=bool)\n duplicated = idx.duplicated()\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n tm.assert_index_equal(idx.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep='last')\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = idx.drop_duplicates(keep='last')\n tm.assert_index_equal(result, idx[~expected])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n duplicated = idx.duplicated(take_last=True)\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n with tm.assert_produces_warning(FutureWarning):\n result = idx.drop_duplicates(take_last=True)\n tm.assert_index_equal(result, idx[~expected])\n\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = np.array(base)\n\n duplicated = idx.duplicated(keep=False)\n tm.assert_numpy_array_equal(duplicated, expected)\n self.assertTrue(duplicated.dtype == bool)\n result = idx.drop_duplicates(keep=False)\n tm.assert_index_equal(result, idx[~expected])\n\n with tm.assertRaisesRegexp(\n TypeError, r\"drop_duplicates\\(\\) got an unexpected \"\n \"keyword argument\"):\n idx.drop_duplicates(inplace=True)\n\n else:\n expected = Series([False] * len(original),\n index=original.index, name='a')\n tm.assert_series_equal(original.duplicated(), expected)\n result = original.drop_duplicates()\n tm.assert_series_equal(result, original)\n self.assertFalse(result is original)\n\n idx = original.index[list(range(len(original))) + [5, 3]]\n values = original._values[list(range(len(original))) + [5, 3]]\n s = Series(values, index=idx, name='a')\n\n expected = Series([False] * len(original) + [True, True],\n index=idx, name='a')\n tm.assert_series_equal(s.duplicated(), expected)\n tm.assert_series_equal(s.drop_duplicates(), original)\n\n base = [False] * len(idx)\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name='a')\n\n tm.assert_series_equal(s.duplicated(keep='last'), expected)\n tm.assert_series_equal(s.drop_duplicates(keep='last'),\n s[~np.array(base)])\n\n # deprecate take_last\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(\n s.duplicated(take_last=True), expected)\n with tm.assert_produces_warning(FutureWarning):\n tm.assert_series_equal(s.drop_duplicates(take_last=True),\n s[~np.array(base)])\n base = [False] * len(original) + [True, True]\n base[3] = True\n base[5] = True\n expected = Series(base, index=idx, name='a')\n\n tm.assert_series_equal(s.duplicated(keep=False), expected)\n tm.assert_series_equal(s.drop_duplicates(keep=False),\n s[~np.array(base)])\n\n s.drop_duplicates(inplace=True)\n tm.assert_series_equal(s, original)\n\n def test_drop_duplicates_series_vs_dataframe(self):\n # GH 14192\n df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],\n 'b': [2, 2, np.nan, np.nan, np.nan],\n 'c': [3, 3, np.nan, np.nan, 'three'],\n 'd': [1, 2, 3, 4, 4],\n 'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),\n datetime(2015, 2, 1), pd.NaT, pd.NaT]\n })\n for column in df.columns:\n for keep in ['first', 'last', False]:\n dropped_frame = df[[column]].drop_duplicates(keep=keep)\n dropped_series = 
df[column].drop_duplicates(keep=keep)\n tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())\n\n def test_fillna(self):\n # # GH 11343\n # though Index.fillna and Series.fillna has separate impl,\n # test here to confirm these works as the same\n\n for orig in self.objs:\n\n o = orig.copy()\n values = o.values\n\n # values will not be changed\n result = o.fillna(o.astype(object).values[0])\n if isinstance(o, Index):\n self.assert_index_equal(o, result)\n else:\n self.assert_series_equal(o, result)\n # check shallow_copied\n self.assertFalse(o is result)\n\n for null_obj in [np.nan, None]:\n for orig in self.objs:\n o = orig.copy()\n klass = type(o)\n\n if not self._allow_na_ops(o):\n continue\n\n if needs_i8_conversion(o):\n\n values = o.astype(object).values\n fill_value = values[0]\n values[0:2] = pd.NaT\n else:\n values = o.values.copy()\n fill_value = o.values[0]\n values[0:2] = null_obj\n\n expected = [fill_value] * 2 + list(values[2:])\n\n expected = klass(expected)\n o = klass(values)\n\n # check values has the same dtype as the original\n self.assertEqual(o.dtype, orig.dtype)\n\n result = o.fillna(fill_value)\n if isinstance(o, Index):\n self.assert_index_equal(result, expected)\n else:\n self.assert_series_equal(result, expected)\n # check shallow_copied\n self.assertFalse(o is result)\n\n def test_memory_usage(self):\n for o in self.objs:\n res = o.memory_usage()\n res_deep = o.memory_usage(deep=True)\n\n if (is_object_dtype(o) or (isinstance(o, Series) and\n is_object_dtype(o.index))):\n # if there are objects, only deep will pick them up\n self.assertTrue(res_deep > res)\n else:\n self.assertEqual(res, res_deep)\n\n if isinstance(o, Series):\n self.assertEqual(\n (o.memory_usage(index=False) +\n o.index.memory_usage()),\n o.memory_usage(index=True)\n )\n\n # sys.getsizeof will call the .memory_usage with\n # deep=True, and add on some GC overhead\n diff = res_deep - sys.getsizeof(o)\n self.assertTrue(abs(diff) < 100)\n\n def test_searchsorted(self):\n # See gh-12238\n for o in self.objs:\n index = np.searchsorted(o, max(o))\n self.assertTrue(0 <= index <= len(o))\n\n index = np.searchsorted(o, max(o), sorter=range(len(o)))\n self.assertTrue(0 <= index <= len(o))\n\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n\n for value in invalid_values:\n with self.assertRaises(ValueError):\n self.int_series.drop_duplicates(inplace=value)\n\n\nclass TestTranspose(Ops):\n errmsg = \"the 'axes' parameter is not supported\"\n\n def test_transpose(self):\n for obj in self.objs:\n if isinstance(obj, Index):\n tm.assert_index_equal(obj.transpose(), obj)\n else:\n tm.assert_series_equal(obj.transpose(), obj)\n\n def test_transpose_non_default_axes(self):\n for obj in self.objs:\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n obj.transpose, 1)\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n obj.transpose, axes=1)\n\n def test_numpy_transpose(self):\n for obj in self.objs:\n if isinstance(obj, Index):\n tm.assert_index_equal(np.transpose(obj), obj)\n else:\n tm.assert_series_equal(np.transpose(obj), obj)\n\n tm.assertRaisesRegexp(ValueError, self.errmsg,\n np.transpose, obj, axes=1)\n\n\nclass TestNoNewAttributesMixin(tm.TestCase):\n\n def test_mixin(self):\n class T(NoNewAttributesMixin):\n pass\n\n t = T()\n self.assertFalse(hasattr(t, \"__frozen\"))\n t.a = \"test\"\n self.assertEqual(t.a, \"test\")\n t._freeze()\n # self.assertTrue(\"__frozen\" not in dir(t))\n self.assertIs(getattr(t, \"__frozen\"), True)\n\n def f():\n t.b = 
\"test\"\n\n self.assertRaises(AttributeError, f)\n self.assertFalse(hasattr(t, \"b\"))\n\n\nif __name__ == '__main__':\n import nose\n\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n # '--with-coverage', '--cover-package=pandas.core'],\n exit=False)\n", "# coding: utf-8\n\nimport nose\nimport itertools\nimport string\nfrom distutils.version import LooseVersion\n\nfrom pandas import Series, DataFrame, MultiIndex\nfrom pandas.compat import range, lzip\nimport pandas.util.testing as tm\nfrom pandas.util.testing import slow\n\nimport numpy as np\nfrom numpy import random\nfrom numpy.random import randn\n\nimport pandas.tools.plotting as plotting\n\nfrom pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)\n\n\n\"\"\" Test cases for .boxplot method \"\"\"\n\n\ndef _skip_if_mpl_14_or_dev_boxplot():\n # GH 8382\n # Boxplot failures on 1.4 and 1.4.1\n # Don't need try / except since that's done at class level\n import matplotlib\n if str(matplotlib.__version__) >= LooseVersion('1.4'):\n raise nose.SkipTest(\"Matplotlib Regression in 1.4 and current dev.\")\n\n\[email protected]\nclass TestDataFramePlots(TestPlotBase):\n\n @slow\n def test_boxplot_legacy(self):\n df = DataFrame(randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=['one', 'two', 'three', 'four'])\n df['indic'] = ['foo', 'bar'] * 3\n df['indic2'] = ['foo', 'bar', 'foo'] * 2\n\n _check_plot_works(df.boxplot, return_type='dict')\n _check_plot_works(df.boxplot, column=[\n 'one', 'two'], return_type='dict')\n # _check_plot_works adds an ax so catch warning. see GH #13188\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, column=['one', 'two'],\n by='indic')\n _check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='indic')\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by=['indic', 'indic2'])\n _check_plot_works(plotting.boxplot, data=df['one'], return_type='dict')\n _check_plot_works(df.boxplot, notch=1, return_type='dict')\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='indic', notch=1)\n\n df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])\n df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])\n df['Y'] = Series(['A'] * 10)\n with tm.assert_produces_warning(UserWarning):\n _check_plot_works(df.boxplot, by='X')\n\n # When ax is supplied and required number of axes is 1,\n # passed ax should be used:\n fig, ax = self.plt.subplots()\n axes = df.boxplot('Col1', by='X', ax=ax)\n ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()\n self.assertIs(ax_axes, axes)\n\n fig, ax = self.plt.subplots()\n axes = df.groupby('Y').boxplot(ax=ax, return_type='axes')\n ax_axes = ax.axes if self.mpl_ge_1_5_0 else ax.get_axes()\n self.assertIs(ax_axes, axes['A'])\n\n # Multiple columns with an ax argument should use same figure\n fig, ax = self.plt.subplots()\n with tm.assert_produces_warning(UserWarning):\n axes = df.boxplot(column=['Col1', 'Col2'],\n by='X', ax=ax, return_type='axes')\n self.assertIs(axes['Col1'].get_figure(), fig)\n\n # When by is None, check that all relevant lines are present in the\n # dict\n fig, ax = self.plt.subplots()\n d = df.boxplot(ax=ax, return_type='dict')\n lines = list(itertools.chain.from_iterable(d.values()))\n self.assertEqual(len(ax.get_lines()), len(lines))\n\n @slow\n def test_boxplot_return_type_none(self):\n # GH 12216; return_type=None 
& by=None -> axes\n result = self.hist_df.boxplot()\n self.assertTrue(isinstance(result, self.plt.Axes))\n\n @slow\n def test_boxplot_return_type_legacy(self):\n # API change in https://github.com/pandas-dev/pandas/pull/7096\n import matplotlib as mpl # noqa\n\n df = DataFrame(randn(6, 4),\n index=list(string.ascii_letters[:6]),\n columns=['one', 'two', 'three', 'four'])\n with tm.assertRaises(ValueError):\n df.boxplot(return_type='NOTATYPE')\n\n result = df.boxplot()\n self._check_box_return_type(result, 'axes')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='dict')\n self._check_box_return_type(result, 'dict')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='axes')\n self._check_box_return_type(result, 'axes')\n\n with tm.assert_produces_warning(False):\n result = df.boxplot(return_type='both')\n self._check_box_return_type(result, 'both')\n\n @slow\n def test_boxplot_axis_limits(self):\n\n def _check_ax_limits(col, ax):\n y_min, y_max = ax.get_ylim()\n self.assertTrue(y_min <= col.min())\n self.assertTrue(y_max >= col.max())\n\n df = self.hist_df.copy()\n df['age'] = np.random.randint(1, 20, df.shape[0])\n # One full row\n height_ax, weight_ax = df.boxplot(['height', 'weight'], by='category')\n _check_ax_limits(df['height'], height_ax)\n _check_ax_limits(df['weight'], weight_ax)\n self.assertEqual(weight_ax._sharey, height_ax)\n\n # Two rows, one partial\n p = df.boxplot(['height', 'weight', 'age'], by='category')\n height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]\n dummy_ax = p[1, 1]\n\n _check_ax_limits(df['height'], height_ax)\n _check_ax_limits(df['weight'], weight_ax)\n _check_ax_limits(df['age'], age_ax)\n self.assertEqual(weight_ax._sharey, height_ax)\n self.assertEqual(age_ax._sharey, height_ax)\n self.assertIsNone(dummy_ax._sharey)\n\n @slow\n def test_boxplot_empty_column(self):\n _skip_if_mpl_14_or_dev_boxplot()\n df = DataFrame(np.random.randn(20, 4))\n df.loc[:, 0] = np.nan\n _check_plot_works(df.boxplot, return_type='axes')\n\n\[email protected]\nclass TestDataFrameGroupByPlots(TestPlotBase):\n\n @slow\n def test_boxplot_legacy(self):\n grouped = self.hist_df.groupby(by='gender')\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n tuples = lzip(string.ascii_letters[:10], range(10))\n df = DataFrame(np.random.rand(10, 3),\n index=MultiIndex.from_tuples(tuples))\n\n grouped = df.groupby(level=1)\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))\n\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n grouped = df.unstack(level=1).groupby(level=0, axis=1)\n with tm.assert_produces_warning(UserWarning):\n axes = _check_plot_works(grouped.boxplot, return_type='axes')\n self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))\n axes = _check_plot_works(grouped.boxplot, subplots=False,\n return_type='axes')\n self._check_axes_shape(axes, axes_num=1, layout=(1, 1))\n\n @slow\n def test_grouped_plot_fignums(self):\n n = 10\n weight = Series(np.random.normal(166, 20, size=n))\n height = Series(np.random.normal(60, 
10, size=n))\n with tm.RNGContext(42):\n gender = np.random.choice(['male', 'female'], size=n)\n df = DataFrame({'height': height, 'weight': weight, 'gender': gender})\n gb = df.groupby('gender')\n\n res = gb.plot()\n self.assertEqual(len(self.plt.get_fignums()), 2)\n self.assertEqual(len(res), 2)\n tm.close()\n\n res = gb.boxplot(return_type='axes')\n self.assertEqual(len(self.plt.get_fignums()), 1)\n self.assertEqual(len(res), 2)\n tm.close()\n\n # now works with GH 5610 as gender is excluded\n res = df.groupby('gender').hist()\n tm.close()\n\n @slow\n def test_grouped_box_return_type(self):\n df = self.hist_df\n\n # old style: return_type=None\n result = df.boxplot(by='gender')\n self.assertIsInstance(result, np.ndarray)\n self._check_box_return_type(\n result, None,\n expected_keys=['height', 'weight', 'category'])\n\n # now for groupby\n result = df.groupby('gender').boxplot(return_type='dict')\n self._check_box_return_type(\n result, 'dict', expected_keys=['Male', 'Female'])\n\n columns2 = 'X B C D A G Y N Q O'.split()\n df2 = DataFrame(random.randn(50, 10), columns=columns2)\n categories2 = 'A B C D E F G H I J'.split()\n df2['category'] = categories2 * 5\n\n for t in ['dict', 'axes', 'both']:\n returned = df.groupby('classroom').boxplot(return_type=t)\n self._check_box_return_type(\n returned, t, expected_keys=['A', 'B', 'C'])\n\n returned = df.boxplot(by='classroom', return_type=t)\n self._check_box_return_type(\n returned, t,\n expected_keys=['height', 'weight', 'category'])\n\n returned = df2.groupby('category').boxplot(return_type=t)\n self._check_box_return_type(returned, t, expected_keys=categories2)\n\n returned = df2.boxplot(by='category', return_type=t)\n self._check_box_return_type(returned, t, expected_keys=columns2)\n\n @slow\n def test_grouped_box_layout(self):\n df = self.hist_df\n\n self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],\n by=df.gender, layout=(1, 1))\n self.assertRaises(ValueError, df.boxplot,\n column=['height', 'weight', 'category'],\n layout=(2, 1), return_type='dict')\n self.assertRaises(ValueError, df.boxplot, column=['weight', 'height'],\n by=df.gender, layout=(-1, -1))\n\n # _check_plot_works adds an ax so catch warning. 
see GH #13188\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('gender').boxplot,\n column='height', return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))\n\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))\n\n # GH 6769\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('classroom').boxplot,\n column='height', return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n\n # GH 5897\n axes = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n return_type='axes')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n for ax in [axes['height']]:\n self._check_visible(ax.get_xticklabels(), visible=False)\n self._check_visible([ax.xaxis.get_label()], visible=False)\n for ax in [axes['weight'], axes['category']]:\n self._check_visible(ax.get_xticklabels())\n self._check_visible([ax.xaxis.get_label()])\n\n box = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'], return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))\n\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n layout=(3, 2), return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))\n with tm.assert_produces_warning(UserWarning):\n box = _check_plot_works(df.groupby('category').boxplot,\n column='height',\n layout=(3, -1), return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))\n\n box = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n layout=(4, 1))\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))\n\n box = df.boxplot(column=['height', 'weight', 'category'], by='gender',\n layout=(-1, 1))\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))\n\n box = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'], layout=(1, 4),\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))\n\n box = df.groupby('classroom').boxplot( # noqa\n column=['height', 'weight', 'category'], layout=(1, -1),\n return_type='dict')\n self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))\n\n @slow\n def test_grouped_box_multiple_axes(self):\n # GH 6970, GH 7069\n df = self.hist_df\n\n # check warning to ignore sharex / sharey\n # this check should be done in the first function which\n # passes multiple axes to plot, hist or boxplot\n # location should be changed if other test is added\n # which has earlier alphabetical order\n with tm.assert_produces_warning(UserWarning):\n fig, axes = self.plt.subplots(2, 2)\n df.groupby('category').boxplot(\n column='height', return_type='axes', ax=axes)\n self._check_axes_shape(self.plt.gcf().axes,\n axes_num=4, layout=(2, 2))\n\n fig, axes = self.plt.subplots(2, 3)\n with tm.assert_produces_warning(UserWarning):\n returned = df.boxplot(column=['height', 'weight', 'category'],\n by='gender', return_type='axes', ax=axes[0])\n returned = np.array(list(returned.values))\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n self.assert_numpy_array_equal(returned, axes[0])\n self.assertIs(returned[0].figure, fig)\n\n # draw on second row\n 
with tm.assert_produces_warning(UserWarning):\n returned = df.groupby('classroom').boxplot(\n column=['height', 'weight', 'category'],\n return_type='axes', ax=axes[1])\n returned = np.array(list(returned.values))\n self._check_axes_shape(returned, axes_num=3, layout=(1, 3))\n self.assert_numpy_array_equal(returned, axes[1])\n self.assertIs(returned[0].figure, fig)\n\n with tm.assertRaises(ValueError):\n fig, axes = self.plt.subplots(2, 3)\n # pass different number of axes from required\n with tm.assert_produces_warning(UserWarning):\n axes = df.groupby('classroom').boxplot(ax=axes)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", "# -*- coding: utf-8 -*-\n# pylint: disable-msg=E1101,W0612\n\nfrom operator import methodcaller\nimport nose\nimport numpy as np\nfrom numpy import nan\nimport pandas as pd\n\nfrom pandas.types.common import is_scalar\nfrom pandas import (Index, Series, DataFrame, Panel, isnull,\n date_range, period_range, Panel4D)\nfrom pandas.core.index import MultiIndex\n\nimport pandas.formats.printing as printing\n\nfrom pandas.compat import range, zip, PY3\nfrom pandas import compat\nfrom pandas.util.testing import (assertRaisesRegexp,\n assert_series_equal,\n assert_frame_equal,\n assert_panel_equal,\n assert_panel4d_equal,\n assert_almost_equal)\n\nimport pandas.util.testing as tm\n\n\n# ----------------------------------------------------------------------\n# Generic types test cases\n\n\nclass Generic(object):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n pass\n\n @property\n def _ndim(self):\n return self._typ._AXIS_LEN\n\n def _axes(self):\n \"\"\" return the axes for my object typ \"\"\"\n return self._typ._AXIS_ORDERS\n\n def _construct(self, shape, value=None, dtype=None, **kwargs):\n \"\"\" construct an object for the given shape\n if value is specified use that if its a scalar\n if value is an array, repeat it as needed \"\"\"\n\n if isinstance(shape, int):\n shape = tuple([shape] * self._ndim)\n if value is not None:\n if is_scalar(value):\n if value == 'empty':\n arr = None\n\n # remove the info axis\n kwargs.pop(self._typ._info_axis_name, None)\n else:\n arr = np.empty(shape, dtype=dtype)\n arr.fill(value)\n else:\n fshape = np.prod(shape)\n arr = value.ravel()\n new_shape = fshape / arr.shape[0]\n if fshape % arr.shape[0] != 0:\n raise Exception(\"invalid value passed in _construct\")\n\n arr = np.repeat(arr, new_shape).reshape(shape)\n else:\n arr = np.random.randn(*shape)\n return self._typ(arr, dtype=dtype, **kwargs)\n\n def _compare(self, result, expected):\n self._comparator(result, expected)\n\n def test_rename(self):\n\n # single axis\n idx = list('ABCD')\n # relabeling values passed into self.rename\n args = [\n str.lower,\n {x: x.lower() for x in idx},\n Series({x: x.lower() for x in idx}),\n ]\n\n for axis in self._axes():\n kwargs = {axis: idx}\n obj = self._construct(4, **kwargs)\n\n for arg in args:\n # rename a single axis\n result = obj.rename(**{axis: arg})\n expected = obj.copy()\n setattr(expected, axis, list('abcd'))\n self._compare(result, expected)\n\n # multiple axes at once\n\n def test_rename_axis(self):\n idx = list('ABCD')\n # relabeling values passed into self.rename\n args = [\n str.lower,\n {x: x.lower() for x in idx},\n Series({x: x.lower() for x in idx}),\n ]\n\n for axis in self._axes():\n kwargs = {axis: idx}\n obj = self._construct(4, **kwargs)\n\n for arg in args:\n # rename a single axis\n result = obj.rename_axis(arg, axis=axis)\n 
expected = obj.copy()\n setattr(expected, axis, list('abcd'))\n self._compare(result, expected)\n # scalar values\n for arg in ['foo', None]:\n result = obj.rename_axis(arg, axis=axis)\n expected = obj.copy()\n getattr(expected, axis).name = arg\n self._compare(result, expected)\n\n def test_get_numeric_data(self):\n\n n = 4\n kwargs = {}\n for i in range(self._ndim):\n kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))\n\n # get the numeric data\n o = self._construct(n, **kwargs)\n result = o._get_numeric_data()\n self._compare(result, o)\n\n # non-inclusion\n result = o._get_bool_data()\n expected = self._construct(n, value='empty', **kwargs)\n self._compare(result, expected)\n\n # get the bool data\n arr = np.array([True, True, False, True])\n o = self._construct(n, value=arr, **kwargs)\n result = o._get_numeric_data()\n self._compare(result, o)\n\n # _get_numeric_data is includes _get_bool_data, so can't test for\n # non-inclusion\n\n def test_get_default(self):\n\n # GH 7725\n d0 = \"a\", \"b\", \"c\", \"d\"\n d1 = np.arange(4, dtype='int64')\n others = \"e\", 10\n\n for data, index in ((d0, d1), (d1, d0)):\n s = Series(data, index=index)\n for i, d in zip(index, data):\n self.assertEqual(s.get(i), d)\n self.assertEqual(s.get(i, d), d)\n self.assertEqual(s.get(i, \"z\"), d)\n for other in others:\n self.assertEqual(s.get(other, \"z\"), \"z\")\n self.assertEqual(s.get(other, other), other)\n\n def test_nonzero(self):\n\n # GH 4633\n # look at the boolean/nonzero behavior for objects\n obj = self._construct(shape=4)\n self.assertRaises(ValueError, lambda: bool(obj == 0))\n self.assertRaises(ValueError, lambda: bool(obj == 1))\n self.assertRaises(ValueError, lambda: bool(obj))\n\n obj = self._construct(shape=4, value=1)\n self.assertRaises(ValueError, lambda: bool(obj == 0))\n self.assertRaises(ValueError, lambda: bool(obj == 1))\n self.assertRaises(ValueError, lambda: bool(obj))\n\n obj = self._construct(shape=4, value=np.nan)\n self.assertRaises(ValueError, lambda: bool(obj == 0))\n self.assertRaises(ValueError, lambda: bool(obj == 1))\n self.assertRaises(ValueError, lambda: bool(obj))\n\n # empty\n obj = self._construct(shape=0)\n self.assertRaises(ValueError, lambda: bool(obj))\n\n # invalid behaviors\n\n obj1 = self._construct(shape=4, value=1)\n obj2 = self._construct(shape=4, value=1)\n\n def f():\n if obj1:\n printing.pprint_thing(\"this works and shouldn't\")\n\n self.assertRaises(ValueError, f)\n self.assertRaises(ValueError, lambda: obj1 and obj2)\n self.assertRaises(ValueError, lambda: obj1 or obj2)\n self.assertRaises(ValueError, lambda: not obj1)\n\n def test_numpy_1_7_compat_numeric_methods(self):\n # GH 4435\n # numpy in 1.7 tries to pass addtional arguments to pandas functions\n\n o = self._construct(shape=4)\n for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',\n 'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',\n 'cummin', 'all', 'any']:\n f = getattr(np, op, None)\n if f is not None:\n f(o)\n\n def test_downcast(self):\n # test close downcasting\n\n o = self._construct(shape=4, value=9, dtype=np.int64)\n result = o.copy()\n result._data = o._data.downcast(dtypes='infer')\n self._compare(result, o)\n\n o = self._construct(shape=4, value=9.)\n expected = o.astype(np.int64)\n result = o.copy()\n result._data = o._data.downcast(dtypes='infer')\n self._compare(result, expected)\n\n o = self._construct(shape=4, value=9.5)\n result = o.copy()\n result._data = o._data.downcast(dtypes='infer')\n self._compare(result, o)\n\n # are close\n o = 
self._construct(shape=4, value=9.000000000005)\n result = o.copy()\n result._data = o._data.downcast(dtypes='infer')\n expected = o.astype(np.int64)\n self._compare(result, expected)\n\n def test_constructor_compound_dtypes(self):\n # GH 5191\n # compound dtypes should raise not-implementederror\n\n def f(dtype):\n return self._construct(shape=3, dtype=dtype)\n\n self.assertRaises(NotImplementedError, f, [(\"A\", \"datetime64[h]\"),\n (\"B\", \"str\"),\n (\"C\", \"int32\")])\n\n # these work (though results may be unexpected)\n f('int64')\n f('float64')\n f('M8[ns]')\n\n def check_metadata(self, x, y=None):\n for m in x._metadata:\n v = getattr(x, m, None)\n if y is None:\n self.assertIsNone(v)\n else:\n self.assertEqual(v, getattr(y, m, None))\n\n def test_metadata_propagation(self):\n # check that the metadata matches up on the resulting ops\n\n o = self._construct(shape=3)\n o.name = 'foo'\n o2 = self._construct(shape=3)\n o2.name = 'bar'\n\n # TODO\n # Once panel can do non-trivial combine operations\n # (currently there is an a raise in the Panel arith_ops to prevent\n # this, though it actually does work)\n # can remove all of these try: except: blocks on the actual operations\n\n # ----------\n # preserving\n # ----------\n\n # simple ops with scalars\n for op in ['__add__', '__sub__', '__truediv__', '__mul__']:\n result = getattr(o, op)(1)\n self.check_metadata(o, result)\n\n # ops with like\n for op in ['__add__', '__sub__', '__truediv__', '__mul__']:\n try:\n result = getattr(o, op)(o)\n self.check_metadata(o, result)\n except (ValueError, AttributeError):\n pass\n\n # simple boolean\n for op in ['__eq__', '__le__', '__ge__']:\n v1 = getattr(o, op)(o)\n self.check_metadata(o, v1)\n\n try:\n self.check_metadata(o, v1 & v1)\n except (ValueError):\n pass\n\n try:\n self.check_metadata(o, v1 | v1)\n except (ValueError):\n pass\n\n # combine_first\n try:\n result = o.combine_first(o2)\n self.check_metadata(o, result)\n except (AttributeError):\n pass\n\n # ---------------------------\n # non-preserving (by default)\n # ---------------------------\n\n # add non-like\n try:\n result = o + o2\n self.check_metadata(result)\n except (ValueError, AttributeError):\n pass\n\n # simple boolean\n for op in ['__eq__', '__le__', '__ge__']:\n\n # this is a name matching op\n v1 = getattr(o, op)(o)\n\n v2 = getattr(o, op)(o2)\n self.check_metadata(v2)\n\n try:\n self.check_metadata(v1 & v2)\n except (ValueError):\n pass\n\n try:\n self.check_metadata(v1 | v2)\n except (ValueError):\n pass\n\n def test_head_tail(self):\n # GH5370\n\n o = self._construct(shape=10)\n\n # check all index types\n for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,\n tm.makeUnicodeIndex, tm.makeDateIndex,\n tm.makePeriodIndex]:\n axis = o._get_axis_name(0)\n setattr(o, axis, index(len(getattr(o, axis))))\n\n # Panel + dims\n try:\n o.head()\n except (NotImplementedError):\n raise nose.SkipTest('not implemented on {0}'.format(\n o.__class__.__name__))\n\n self._compare(o.head(), o.iloc[:5])\n self._compare(o.tail(), o.iloc[-5:])\n\n # 0-len\n self._compare(o.head(0), o.iloc[0:0])\n self._compare(o.tail(0), o.iloc[0:0])\n\n # bounded\n self._compare(o.head(len(o) + 1), o)\n self._compare(o.tail(len(o) + 1), o)\n\n # neg index\n self._compare(o.head(-3), o.head(7))\n self._compare(o.tail(-3), o.tail(7))\n\n def test_sample(self):\n # Fixes issue: 2419\n\n o = self._construct(shape=10)\n\n ###\n # Check behavior of random_state argument\n ###\n\n # Check for stability when receives seed or random state -- 
run 10\n # times.\n for test in range(10):\n seed = np.random.randint(0, 100)\n self._compare(\n o.sample(n=4, random_state=seed), o.sample(n=4,\n random_state=seed))\n self._compare(\n o.sample(frac=0.7, random_state=seed), o.sample(\n frac=0.7, random_state=seed))\n\n self._compare(\n o.sample(n=4, random_state=np.random.RandomState(test)),\n o.sample(n=4, random_state=np.random.RandomState(test)))\n\n self._compare(\n o.sample(frac=0.7, random_state=np.random.RandomState(test)),\n o.sample(frac=0.7, random_state=np.random.RandomState(test)))\n\n os1, os2 = [], []\n for _ in range(2):\n np.random.seed(test)\n os1.append(o.sample(n=4))\n os2.append(o.sample(frac=0.7))\n self._compare(*os1)\n self._compare(*os2)\n\n # Check for error when random_state argument invalid.\n with tm.assertRaises(ValueError):\n o.sample(random_state='astring!')\n\n ###\n # Check behavior of `frac` and `N`\n ###\n\n # Giving both frac and N throws error\n with tm.assertRaises(ValueError):\n o.sample(n=3, frac=0.3)\n\n # Check that raises right error for negative lengths\n with tm.assertRaises(ValueError):\n o.sample(n=-3)\n with tm.assertRaises(ValueError):\n o.sample(frac=-0.3)\n\n # Make sure float values of `n` give error\n with tm.assertRaises(ValueError):\n o.sample(n=3.2)\n\n # Check lengths are right\n self.assertTrue(len(o.sample(n=4) == 4))\n self.assertTrue(len(o.sample(frac=0.34) == 3))\n self.assertTrue(len(o.sample(frac=0.36) == 4))\n\n ###\n # Check weights\n ###\n\n # Weight length must be right\n with tm.assertRaises(ValueError):\n o.sample(n=3, weights=[0, 1])\n\n with tm.assertRaises(ValueError):\n bad_weights = [0.5] * 11\n o.sample(n=3, weights=bad_weights)\n\n with tm.assertRaises(ValueError):\n bad_weight_series = Series([0, 0, 0.2])\n o.sample(n=4, weights=bad_weight_series)\n\n # Check won't accept negative weights\n with tm.assertRaises(ValueError):\n bad_weights = [-0.1] * 10\n o.sample(n=3, weights=bad_weights)\n\n # Check inf and -inf throw errors:\n with tm.assertRaises(ValueError):\n weights_with_inf = [0.1] * 10\n weights_with_inf[0] = np.inf\n o.sample(n=3, weights=weights_with_inf)\n\n with tm.assertRaises(ValueError):\n weights_with_ninf = [0.1] * 10\n weights_with_ninf[0] = -np.inf\n o.sample(n=3, weights=weights_with_ninf)\n\n # All zeros raises errors\n zero_weights = [0] * 10\n with tm.assertRaises(ValueError):\n o.sample(n=3, weights=zero_weights)\n\n # All missing weights\n nan_weights = [np.nan] * 10\n with tm.assertRaises(ValueError):\n o.sample(n=3, weights=nan_weights)\n\n # Check np.nan are replaced by zeros.\n weights_with_nan = [np.nan] * 10\n weights_with_nan[5] = 0.5\n self._compare(\n o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])\n\n # Check None are also replaced by zeros.\n weights_with_None = [None] * 10\n weights_with_None[5] = 0.5\n self._compare(\n o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])\n\n def test_size_compat(self):\n # GH8846\n # size property should be defined\n\n o = self._construct(shape=10)\n self.assertTrue(o.size == np.prod(o.shape))\n self.assertTrue(o.size == 10 ** len(o.axes))\n\n def test_split_compat(self):\n # xref GH8846\n o = self._construct(shape=10)\n self.assertTrue(len(np.array_split(o, 5)) == 5)\n self.assertTrue(len(np.array_split(o, 2)) == 2)\n\n def test_unexpected_keyword(self): # GH8597\n df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])\n ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])\n ts = df['joe'].copy()\n ts[2] = np.nan\n\n with assertRaisesRegexp(TypeError, 'unexpected 
keyword'):\n df.drop('joe', axis=1, in_place=True)\n\n with assertRaisesRegexp(TypeError, 'unexpected keyword'):\n df.reindex([1, 0], inplace=True)\n\n with assertRaisesRegexp(TypeError, 'unexpected keyword'):\n ca.fillna(0, inplace=True)\n\n with assertRaisesRegexp(TypeError, 'unexpected keyword'):\n ts.fillna(0, in_place=True)\n\n # See gh-12301\n def test_stat_unexpected_keyword(self):\n obj = self._construct(5)\n starwars = 'Star Wars'\n errmsg = 'unexpected keyword'\n\n with assertRaisesRegexp(TypeError, errmsg):\n obj.max(epic=starwars) # stat_function\n with assertRaisesRegexp(TypeError, errmsg):\n obj.var(epic=starwars) # stat_function_ddof\n with assertRaisesRegexp(TypeError, errmsg):\n obj.sum(epic=starwars) # cum_function\n with assertRaisesRegexp(TypeError, errmsg):\n obj.any(epic=starwars) # logical_function\n\n def test_api_compat(self):\n\n # GH 12021\n # compat for __name__, __qualname__\n\n obj = self._construct(5)\n for func in ['sum', 'cumsum', 'any', 'var']:\n f = getattr(obj, func)\n self.assertEqual(f.__name__, func)\n if PY3:\n self.assertTrue(f.__qualname__.endswith(func))\n\n def test_stat_non_defaults_args(self):\n obj = self._construct(5)\n out = np.array([0])\n errmsg = \"the 'out' parameter is not supported\"\n\n with assertRaisesRegexp(ValueError, errmsg):\n obj.max(out=out) # stat_function\n with assertRaisesRegexp(ValueError, errmsg):\n obj.var(out=out) # stat_function_ddof\n with assertRaisesRegexp(ValueError, errmsg):\n obj.sum(out=out) # cum_function\n with assertRaisesRegexp(ValueError, errmsg):\n obj.any(out=out) # logical_function\n\n def test_clip(self):\n lower = 1\n upper = 3\n col = np.arange(5)\n\n obj = self._construct(len(col), value=col)\n\n if isinstance(obj, Panel):\n msg = \"clip is not supported yet for panels\"\n tm.assertRaisesRegexp(NotImplementedError, msg,\n obj.clip, lower=lower,\n upper=upper)\n\n else:\n out = obj.clip(lower=lower, upper=upper)\n expected = self._construct(len(col), value=col\n .clip(lower, upper))\n self._compare(out, expected)\n\n bad_axis = 'foo'\n msg = ('No axis named {axis} '\n 'for object').format(axis=bad_axis)\n assertRaisesRegexp(ValueError, msg, obj.clip,\n lower=lower, upper=upper,\n axis=bad_axis)\n\n def test_truncate_out_of_bounds(self):\n # GH11382\n\n # small\n shape = [int(2e3)] + ([1] * (self._ndim - 1))\n small = self._construct(shape, dtype='int8')\n self._compare(small.truncate(), small)\n self._compare(small.truncate(before=0, after=3e3), small)\n self._compare(small.truncate(before=-1, after=2e3), small)\n\n # big\n shape = [int(2e6)] + ([1] * (self._ndim - 1))\n big = self._construct(shape, dtype='int8')\n self._compare(big.truncate(), big)\n self._compare(big.truncate(before=0, after=3e6), big)\n self._compare(big.truncate(before=-1, after=2e6), big)\n\n def test_numpy_clip(self):\n lower = 1\n upper = 3\n col = np.arange(5)\n\n obj = self._construct(len(col), value=col)\n\n if isinstance(obj, Panel):\n msg = \"clip is not supported yet for panels\"\n tm.assertRaisesRegexp(NotImplementedError, msg,\n np.clip, obj,\n lower, upper)\n else:\n out = np.clip(obj, lower, upper)\n expected = self._construct(len(col), value=col\n .clip(lower, upper))\n self._compare(out, expected)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg,\n np.clip, obj,\n lower, upper, out=col)\n\n def test_validate_bool_args(self):\n df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n\n for value in invalid_values:\n with 
self.assertRaises(ValueError):\n super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'},\n axis=1, inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).drop('a', axis=1, inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).sort_index(inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).consolidate(inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).fillna(value=0, inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).replace(to_replace=1, value=7,\n inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).interpolate(inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df)._where(cond=df.a > 2, inplace=value)\n\n with self.assertRaises(ValueError):\n super(DataFrame, df).mask(cond=df.a > 2, inplace=value)\n\n\nclass TestSeries(tm.TestCase, Generic):\n _typ = Series\n _comparator = lambda self, x, y: assert_series_equal(x, y)\n\n def setUp(self):\n self.ts = tm.makeTimeSeries() # Was at top level in test_series\n self.ts.name = 'ts'\n\n self.series = tm.makeStringSeries()\n self.series.name = 'series'\n\n def test_rename_mi(self):\n s = Series([11, 21, 31],\n index=MultiIndex.from_tuples(\n [(\"A\", x) for x in [\"a\", \"B\", \"c\"]]))\n s.rename(str.lower)\n\n def test_set_axis_name(self):\n s = Series([1, 2, 3], index=['a', 'b', 'c'])\n funcs = ['rename_axis', '_set_axis_name']\n name = 'foo'\n for func in funcs:\n result = methodcaller(func, name)(s)\n self.assertTrue(s.index.name is None)\n self.assertEqual(result.index.name, name)\n\n def test_set_axis_name_mi(self):\n s = Series([11, 21, 31], index=MultiIndex.from_tuples(\n [(\"A\", x) for x in [\"a\", \"B\", \"c\"]],\n names=['l1', 'l2'])\n )\n funcs = ['rename_axis', '_set_axis_name']\n for func in funcs:\n result = methodcaller(func, ['L1', 'L2'])(s)\n self.assertTrue(s.index.name is None)\n self.assertEqual(s.index.names, ['l1', 'l2'])\n self.assertTrue(result.index.name is None)\n self.assertTrue(result.index.names, ['L1', 'L2'])\n\n def test_set_axis_name_raises(self):\n s = pd.Series([1])\n with tm.assertRaises(ValueError):\n s._set_axis_name(name='a', axis=1)\n\n def test_get_numeric_data_preserve_dtype(self):\n\n # get the numeric data\n o = Series([1, 2, 3])\n result = o._get_numeric_data()\n self._compare(result, o)\n\n o = Series([1, '2', 3.])\n result = o._get_numeric_data()\n expected = Series([], dtype=object, index=pd.Index([], dtype=object))\n self._compare(result, expected)\n\n o = Series([True, False, True])\n result = o._get_numeric_data()\n self._compare(result, o)\n\n o = Series([True, False, True])\n result = o._get_bool_data()\n self._compare(result, o)\n\n o = Series(date_range('20130101', periods=3))\n result = o._get_numeric_data()\n expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))\n self._compare(result, expected)\n\n def test_nonzero_single_element(self):\n\n # allow single item via bool method\n s = Series([True])\n self.assertTrue(s.bool())\n\n s = Series([False])\n self.assertFalse(s.bool())\n\n # single item nan to raise\n for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),\n Series([False])]:\n self.assertRaises(ValueError, lambda: bool(s))\n\n for s in [Series([np.nan]), Series([pd.NaT])]:\n self.assertRaises(ValueError, lambda: s.bool())\n\n # multiple bool are still an error\n for s in [Series([True, True]), Series([False, False])]:\n self.assertRaises(ValueError, lambda: bool(s))\n 
self.assertRaises(ValueError, lambda: s.bool())\n\n # single non-bool are an error\n for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:\n self.assertRaises(ValueError, lambda: bool(s))\n self.assertRaises(ValueError, lambda: s.bool())\n\n def test_metadata_propagation_indiv(self):\n # check that the metadata matches up on the resulting ops\n\n o = Series(range(3), range(3))\n o.name = 'foo'\n o2 = Series(range(3), range(3))\n o2.name = 'bar'\n\n result = o.T\n self.check_metadata(o, result)\n\n # resample\n ts = Series(np.random.rand(1000),\n index=date_range('20130101', periods=1000, freq='s'),\n name='foo')\n result = ts.resample('1T').mean()\n self.check_metadata(ts, result)\n\n result = ts.resample('1T').min()\n self.check_metadata(ts, result)\n\n result = ts.resample('1T').apply(lambda x: x.sum())\n self.check_metadata(ts, result)\n\n _metadata = Series._metadata\n _finalize = Series.__finalize__\n Series._metadata = ['name', 'filename']\n o.filename = 'foo'\n o2.filename = 'bar'\n\n def finalize(self, other, method=None, **kwargs):\n for name in self._metadata:\n if method == 'concat' and name == 'filename':\n value = '+'.join([getattr(\n o, name) for o in other.objs if getattr(o, name, None)\n ])\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(self, name, getattr(other, name, None))\n\n return self\n\n Series.__finalize__ = finalize\n\n result = pd.concat([o, o2])\n self.assertEqual(result.filename, 'foo+bar')\n self.assertIsNone(result.name)\n\n # reset\n Series._metadata = _metadata\n Series.__finalize__ = _finalize\n\n def test_describe(self):\n self.series.describe()\n self.ts.describe()\n\n def test_describe_objects(self):\n s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])\n result = s.describe()\n expected = Series({'count': 7, 'unique': 4,\n 'top': 'a', 'freq': 3, 'second': 'b',\n 'second_freq': 2}, index=result.index)\n assert_series_equal(result, expected)\n\n dt = list(self.ts.index)\n dt.append(dt[0])\n ser = Series(dt)\n rs = ser.describe()\n min_date = min(dt)\n max_date = max(dt)\n xp = Series({'count': len(dt),\n 'unique': len(self.ts.index),\n 'first': min_date, 'last': max_date, 'freq': 2,\n 'top': min_date}, index=rs.index)\n assert_series_equal(rs, xp)\n\n def test_describe_empty(self):\n result = pd.Series().describe()\n\n self.assertEqual(result['count'], 0)\n self.assertTrue(result.drop('count').isnull().all())\n\n nanSeries = Series([np.nan])\n nanSeries.name = 'NaN'\n result = nanSeries.describe()\n self.assertEqual(result['count'], 0)\n self.assertTrue(result.drop('count').isnull().all())\n\n def test_describe_none(self):\n noneSeries = Series([None])\n noneSeries.name = 'None'\n expected = Series([0, 0], index=['count', 'unique'], name='None')\n assert_series_equal(noneSeries.describe(), expected)\n\n def test_to_xarray(self):\n\n tm._skip_if_no_xarray()\n from xarray import DataArray\n\n s = Series([])\n s.index.name = 'foo'\n result = s.to_xarray()\n self.assertEqual(len(result), 0)\n self.assertEqual(len(result.coords), 1)\n assert_almost_equal(list(result.coords.keys()), ['foo'])\n self.assertIsInstance(result, DataArray)\n\n def testit(index, check_index_type=True, check_categorical=True):\n s = Series(range(6), index=index(6))\n s.index.name = 'foo'\n result = s.to_xarray()\n repr(result)\n self.assertEqual(len(result), 6)\n self.assertEqual(len(result.coords), 1)\n assert_almost_equal(list(result.coords.keys()), ['foo'])\n self.assertIsInstance(result, DataArray)\n\n # idempotency\n 
assert_series_equal(result.to_series(), s,\n check_index_type=check_index_type,\n check_categorical=check_categorical)\n\n for index in [tm.makeFloatIndex, tm.makeIntIndex,\n tm.makeStringIndex, tm.makeUnicodeIndex,\n tm.makeDateIndex, tm.makePeriodIndex,\n tm.makeTimedeltaIndex]:\n testit(index)\n\n # not idempotent\n testit(tm.makeCategoricalIndex, check_index_type=False,\n check_categorical=False)\n\n s = Series(range(6))\n s.index.name = 'foo'\n s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)],\n names=['one', 'two'])\n result = s.to_xarray()\n self.assertEqual(len(result), 2)\n assert_almost_equal(list(result.coords.keys()), ['one', 'two'])\n self.assertIsInstance(result, DataArray)\n assert_series_equal(result.to_series(), s)\n\n\nclass TestDataFrame(tm.TestCase, Generic):\n _typ = DataFrame\n _comparator = lambda self, x, y: assert_frame_equal(x, y)\n\n def test_rename_mi(self):\n df = DataFrame([\n 11, 21, 31\n ], index=MultiIndex.from_tuples([(\"A\", x) for x in [\"a\", \"B\", \"c\"]]))\n df.rename(str.lower)\n\n def test_set_axis_name(self):\n df = pd.DataFrame([[1, 2], [3, 4]])\n funcs = ['_set_axis_name', 'rename_axis']\n for func in funcs:\n result = methodcaller(func, 'foo')(df)\n self.assertTrue(df.index.name is None)\n self.assertEqual(result.index.name, 'foo')\n\n result = methodcaller(func, 'cols', axis=1)(df)\n self.assertTrue(df.columns.name is None)\n self.assertEqual(result.columns.name, 'cols')\n\n def test_set_axis_name_mi(self):\n df = DataFrame(\n np.empty((3, 3)),\n index=MultiIndex.from_tuples([(\"A\", x) for x in list('aBc')]),\n columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')])\n )\n\n level_names = ['L1', 'L2']\n funcs = ['_set_axis_name', 'rename_axis']\n for func in funcs:\n result = methodcaller(func, level_names)(df)\n self.assertEqual(result.index.names, level_names)\n self.assertEqual(result.columns.names, [None, None])\n\n result = methodcaller(func, level_names, axis=1)(df)\n self.assertEqual(result.columns.names, [\"L1\", \"L2\"])\n self.assertEqual(result.index.names, [None, None])\n\n def test_nonzero_single_element(self):\n\n # allow single item via bool method\n df = DataFrame([[True]])\n self.assertTrue(df.bool())\n\n df = DataFrame([[False]])\n self.assertFalse(df.bool())\n\n df = DataFrame([[False, False]])\n self.assertRaises(ValueError, lambda: df.bool())\n self.assertRaises(ValueError, lambda: bool(df))\n\n def test_get_numeric_data_preserve_dtype(self):\n\n # get the numeric data\n o = DataFrame({'A': [1, '2', 3.]})\n result = o._get_numeric_data()\n expected = DataFrame(index=[0, 1, 2], dtype=object)\n self._compare(result, expected)\n\n def test_describe(self):\n tm.makeDataFrame().describe()\n tm.makeMixedDataFrame().describe()\n tm.makeTimeDataFrame().describe()\n\n def test_describe_percentiles_percent_or_raw(self):\n msg = 'percentiles should all be in the interval \\\\[0, 1\\\\]'\n\n df = tm.makeDataFrame()\n with tm.assertRaisesRegexp(ValueError, msg):\n df.describe(percentiles=[10, 50, 100])\n\n with tm.assertRaisesRegexp(ValueError, msg):\n df.describe(percentiles=[2])\n\n with tm.assertRaisesRegexp(ValueError, msg):\n df.describe(percentiles=[-2])\n\n def test_describe_percentiles_equivalence(self):\n df = tm.makeDataFrame()\n d1 = df.describe()\n d2 = df.describe(percentiles=[.25, .75])\n assert_frame_equal(d1, d2)\n\n def test_describe_percentiles_insert_median(self):\n df = tm.makeDataFrame()\n d1 = df.describe(percentiles=[.25, .75])\n d2 = df.describe(percentiles=[.25, .5, .75])\n 
assert_frame_equal(d1, d2)\n self.assertTrue('25%' in d1.index)\n self.assertTrue('75%' in d2.index)\n\n # none above\n d1 = df.describe(percentiles=[.25, .45])\n d2 = df.describe(percentiles=[.25, .45, .5])\n assert_frame_equal(d1, d2)\n self.assertTrue('25%' in d1.index)\n self.assertTrue('45%' in d2.index)\n\n # none below\n d1 = df.describe(percentiles=[.75, 1])\n d2 = df.describe(percentiles=[.5, .75, 1])\n assert_frame_equal(d1, d2)\n self.assertTrue('75%' in d1.index)\n self.assertTrue('100%' in d2.index)\n\n # edge\n d1 = df.describe(percentiles=[0, 1])\n d2 = df.describe(percentiles=[0, .5, 1])\n assert_frame_equal(d1, d2)\n self.assertTrue('0%' in d1.index)\n self.assertTrue('100%' in d2.index)\n\n def test_describe_percentiles_insert_median_ndarray(self):\n # GH14908\n df = tm.makeDataFrame()\n result = df.describe(percentiles=np.array([.25, .75]))\n expected = df.describe(percentiles=[.25, .75])\n assert_frame_equal(result, expected)\n\n def test_describe_percentiles_unique(self):\n # GH13104\n df = tm.makeDataFrame()\n with self.assertRaises(ValueError):\n df.describe(percentiles=[0.1, 0.2, 0.4, 0.5, 0.2, 0.6])\n with self.assertRaises(ValueError):\n df.describe(percentiles=[0.1, 0.2, 0.4, 0.2, 0.6])\n\n def test_describe_percentiles_formatting(self):\n # GH13104\n df = tm.makeDataFrame()\n\n # default\n result = df.describe().index\n expected = Index(['count', 'mean', 'std', 'min', '25%', '50%', '75%',\n 'max'],\n dtype='object')\n tm.assert_index_equal(result, expected)\n\n result = df.describe(percentiles=[0.0001, 0.0005, 0.001, 0.999,\n 0.9995, 0.9999]).index\n expected = Index(['count', 'mean', 'std', 'min', '0.01%', '0.05%',\n '0.1%', '50%', '99.9%', '99.95%', '99.99%', 'max'],\n dtype='object')\n tm.assert_index_equal(result, expected)\n\n result = df.describe(percentiles=[0.00499, 0.005, 0.25, 0.50,\n 0.75]).index\n expected = Index(['count', 'mean', 'std', 'min', '0.499%', '0.5%',\n '25%', '50%', '75%', 'max'],\n dtype='object')\n tm.assert_index_equal(result, expected)\n\n result = df.describe(percentiles=[0.00499, 0.01001, 0.25, 0.50,\n 0.75]).index\n expected = Index(['count', 'mean', 'std', 'min', '0.5%', '1.0%',\n '25%', '50%', '75%', 'max'],\n dtype='object')\n tm.assert_index_equal(result, expected)\n\n def test_describe_column_index_type(self):\n # GH13288\n df = pd.DataFrame([1, 2, 3, 4])\n df.columns = pd.Index([0], dtype=object)\n result = df.describe().columns\n expected = Index([0], dtype=object)\n tm.assert_index_equal(result, expected)\n\n df = pd.DataFrame({'A': list(\"BCDE\"), 0: [1, 2, 3, 4]})\n result = df.describe().columns\n expected = Index([0], dtype=object)\n tm.assert_index_equal(result, expected)\n\n def test_describe_no_numeric(self):\n df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,\n 'B': ['a', 'b', 'c', 'd'] * 6})\n desc = df.describe()\n expected = DataFrame(dict((k, v.describe())\n for k, v in compat.iteritems(df)),\n columns=df.columns)\n assert_frame_equal(desc, expected)\n\n ts = tm.makeTimeSeries()\n df = DataFrame({'time': ts.index})\n desc = df.describe()\n self.assertEqual(desc.time['first'], min(ts.index))\n\n def test_describe_empty(self):\n df = DataFrame()\n tm.assertRaisesRegexp(ValueError, 'DataFrame without columns',\n df.describe)\n\n df = DataFrame(columns=['A', 'B'])\n result = df.describe()\n expected = DataFrame(0, columns=['A', 'B'], index=['count', 'unique'])\n tm.assert_frame_equal(result, expected)\n\n def test_describe_empty_int_columns(self):\n df = DataFrame([[0, 1], [1, 2]])\n desc = df[df[0] < 0].describe() # 
works\n assert_series_equal(desc.xs('count'),\n Series([0, 0], dtype=float, name='count'))\n self.assertTrue(isnull(desc.iloc[1:]).all().all())\n\n def test_describe_objects(self):\n df = DataFrame({\"C1\": ['a', 'a', 'c'], \"C2\": ['d', 'd', 'f']})\n result = df.describe()\n expected = DataFrame({\"C1\": [3, 2, 'a', 2], \"C2\": [3, 2, 'd', 2]},\n index=['count', 'unique', 'top', 'freq'])\n assert_frame_equal(result, expected)\n\n df = DataFrame({\"C1\": pd.date_range('2010-01-01', periods=4, freq='D')\n })\n df.loc[4] = pd.Timestamp('2010-01-04')\n result = df.describe()\n expected = DataFrame({\"C1\": [5, 4, pd.Timestamp('2010-01-04'), 2,\n pd.Timestamp('2010-01-01'),\n pd.Timestamp('2010-01-04')]},\n index=['count', 'unique', 'top', 'freq',\n 'first', 'last'])\n assert_frame_equal(result, expected)\n\n # mix time and str\n df['C2'] = ['a', 'a', 'b', 'c', 'a']\n result = df.describe()\n expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]\n assert_frame_equal(result, expected)\n\n # just str\n expected = DataFrame({'C2': [5, 3, 'a', 4]},\n index=['count', 'unique', 'top', 'freq'])\n result = df[['C2']].describe()\n\n # mix of time, str, numeric\n df['C3'] = [2, 4, 6, 8, 2]\n result = df.describe()\n expected = DataFrame({\"C3\": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n assert_frame_equal(result, expected)\n assert_frame_equal(df.describe(), df[['C3']].describe())\n\n assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())\n assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())\n\n def test_describe_typefiltering(self):\n df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,\n 'catB': ['a', 'b', 'c', 'd'] * 6,\n 'numC': np.arange(24, dtype='int64'),\n 'numD': np.arange(24.) 
+ .5,\n 'ts': tm.makeTimeSeries()[:24].index})\n\n descN = df.describe()\n expected_cols = ['numC', 'numD', ]\n expected = DataFrame(dict((k, df[k].describe())\n for k in expected_cols),\n columns=expected_cols)\n assert_frame_equal(descN, expected)\n\n desc = df.describe(include=['number'])\n assert_frame_equal(desc, descN)\n desc = df.describe(exclude=['object', 'datetime'])\n assert_frame_equal(desc, descN)\n desc = df.describe(include=['float'])\n assert_frame_equal(desc, descN.drop('numC', 1))\n\n descC = df.describe(include=['O'])\n expected_cols = ['catA', 'catB']\n expected = DataFrame(dict((k, df[k].describe())\n for k in expected_cols),\n columns=expected_cols)\n assert_frame_equal(descC, expected)\n\n descD = df.describe(include=['datetime'])\n assert_series_equal(descD.ts, df.ts.describe())\n\n desc = df.describe(include=['object', 'number', 'datetime'])\n assert_frame_equal(desc.loc[:, [\"numC\", \"numD\"]].dropna(), descN)\n assert_frame_equal(desc.loc[:, [\"catA\", \"catB\"]].dropna(), descC)\n descDs = descD.sort_index() # the index order change for mixed-types\n assert_frame_equal(desc.loc[:, \"ts\":].dropna().sort_index(), descDs)\n\n desc = df.loc[:, 'catA':'catB'].describe(include='all')\n assert_frame_equal(desc, descC)\n desc = df.loc[:, 'numC':'numD'].describe(include='all')\n assert_frame_equal(desc, descN)\n\n desc = df.describe(percentiles=[], include='all')\n cnt = Series(data=[4, 4, 6, 6, 6],\n index=['catA', 'catB', 'numC', 'numD', 'ts'])\n assert_series_equal(desc.count(), cnt)\n self.assertTrue('count' in desc.index)\n self.assertTrue('unique' in desc.index)\n self.assertTrue('50%' in desc.index)\n self.assertTrue('first' in desc.index)\n\n desc = df.drop(\"ts\", 1).describe(percentiles=[], include='all')\n assert_series_equal(desc.count(), cnt.drop(\"ts\"))\n self.assertTrue('first' not in desc.index)\n desc = df.drop([\"numC\", \"numD\"], 1).describe(percentiles=[],\n include='all')\n assert_series_equal(desc.count(), cnt.drop([\"numC\", \"numD\"]))\n self.assertTrue('50%' not in desc.index)\n\n def test_describe_typefiltering_category_bool(self):\n df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),\n 'B_str': ['a', 'b', 'c', 'd'] * 6,\n 'C_bool': [True] * 12 + [False] * 12,\n 'D_num': np.arange(24.) + .5,\n 'E_ts': tm.makeTimeSeries()[:24].index})\n\n desc = df.describe()\n expected_cols = ['D_num']\n expected = DataFrame(dict((k, df[k].describe())\n for k in expected_cols),\n columns=expected_cols)\n assert_frame_equal(desc, expected)\n\n desc = df.describe(include=[\"category\"])\n self.assertTrue(desc.columns.tolist() == [\"A_cat\"])\n\n # 'all' includes numpy-dtypes + category\n desc1 = df.describe(include=\"all\")\n desc2 = df.describe(include=[np.generic, \"category\"])\n assert_frame_equal(desc1, desc2)\n\n def test_describe_timedelta(self):\n df = DataFrame({\"td\": pd.to_timedelta(np.arange(24) % 20, \"D\")})\n self.assertTrue(df.describe().loc[\"mean\"][0] == pd.to_timedelta(\n \"8d4h\"))\n\n def test_describe_typefiltering_dupcol(self):\n df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,\n 'catB': ['a', 'b', 'c', 'd'] * 6,\n 'numC': np.arange(24),\n 'numD': np.arange(24.) 
+ .5,\n 'ts': tm.makeTimeSeries()[:24].index})\n s = df.describe(include='all').shape[1]\n df = pd.concat([df, df], axis=1)\n s2 = df.describe(include='all').shape[1]\n self.assertTrue(s2 == 2 * s)\n\n def test_describe_typefiltering_groupby(self):\n df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,\n 'catB': ['a', 'b', 'c', 'd'] * 6,\n 'numC': np.arange(24),\n 'numD': np.arange(24.) + .5,\n 'ts': tm.makeTimeSeries()[:24].index})\n G = df.groupby('catA')\n self.assertTrue(G.describe(include=['number']).shape == (16, 2))\n self.assertTrue(G.describe(include=['number', 'object']).shape == (22,\n 3))\n self.assertTrue(G.describe(include='all').shape == (26, 4))\n\n def test_describe_multi_index_df_column_names(self):\n \"\"\" Test that column names persist after the describe operation.\"\"\"\n\n df = pd.DataFrame(\n {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n # GH 11517\n # test for hierarchical index\n hierarchical_index_df = df.groupby(['A', 'B']).mean().T\n self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])\n self.assertTrue(hierarchical_index_df.describe().columns.names ==\n ['A', 'B'])\n\n # test for non-hierarchical index\n non_hierarchical_index_df = df.groupby(['A']).mean().T\n self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])\n self.assertTrue(non_hierarchical_index_df.describe().columns.names ==\n ['A'])\n\n def test_metadata_propagation_indiv(self):\n\n # groupby\n df = DataFrame(\n {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n result = df.groupby('A').sum()\n self.check_metadata(df, result)\n\n # resample\n df = DataFrame(np.random.randn(1000, 2),\n index=date_range('20130101', periods=1000, freq='s'))\n result = df.resample('1T')\n self.check_metadata(df, result)\n\n # merging with override\n # GH 6923\n _metadata = DataFrame._metadata\n _finalize = DataFrame.__finalize__\n\n np.random.seed(10)\n df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])\n df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])\n DataFrame._metadata = ['filename']\n df1.filename = 'fname1.csv'\n df2.filename = 'fname2.csv'\n\n def finalize(self, other, method=None, **kwargs):\n\n for name in self._metadata:\n if method == 'merge':\n left, right = other.left, other.right\n value = getattr(left, name, '') + '|' + getattr(right,\n name, '')\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(self, name, getattr(other, name, ''))\n\n return self\n\n DataFrame.__finalize__ = finalize\n result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')\n self.assertEqual(result.filename, 'fname1.csv|fname2.csv')\n\n # concat\n # GH 6927\n DataFrame._metadata = ['filename']\n df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))\n df1.filename = 'foo'\n\n def finalize(self, other, method=None, **kwargs):\n for name in self._metadata:\n if method == 'concat':\n value = '+'.join([getattr(\n o, name) for o in other.objs if getattr(o, name, None)\n ])\n object.__setattr__(self, name, value)\n else:\n object.__setattr__(self, name, getattr(other, name, None))\n\n return self\n\n DataFrame.__finalize__ = finalize\n\n result = pd.concat([df1, df1])\n self.assertEqual(result.filename, 'foo+foo')\n\n # reset\n DataFrame._metadata = 
_metadata\n DataFrame.__finalize__ = _finalize\n\n def test_tz_convert_and_localize(self):\n l0 = date_range('20140701', periods=5, freq='D')\n\n # TODO: l1 should be a PeriodIndex for testing\n # after GH2106 is addressed\n with tm.assertRaises(NotImplementedError):\n period_range('20140701', periods=1).tz_convert('UTC')\n with tm.assertRaises(NotImplementedError):\n period_range('20140701', periods=1).tz_localize('UTC')\n # l1 = period_range('20140701', periods=5, freq='D')\n l1 = date_range('20140701', periods=5, freq='D')\n\n int_idx = Index(range(5))\n\n for fn in ['tz_localize', 'tz_convert']:\n\n if fn == 'tz_convert':\n l0 = l0.tz_localize('UTC')\n l1 = l1.tz_localize('UTC')\n\n for idx in [l0, l1]:\n\n l0_expected = getattr(idx, fn)('US/Pacific')\n l1_expected = getattr(idx, fn)('US/Pacific')\n\n df1 = DataFrame(np.ones(5), index=l0)\n df1 = getattr(df1, fn)('US/Pacific')\n self.assert_index_equal(df1.index, l0_expected)\n\n # MultiIndex\n # GH7846\n df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))\n\n df3 = getattr(df2, fn)('US/Pacific', level=0)\n self.assertFalse(df3.index.levels[0].equals(l0))\n self.assert_index_equal(df3.index.levels[0], l0_expected)\n self.assert_index_equal(df3.index.levels[1], l1)\n self.assertFalse(df3.index.levels[1].equals(l1_expected))\n\n df3 = getattr(df2, fn)('US/Pacific', level=1)\n self.assert_index_equal(df3.index.levels[0], l0)\n self.assertFalse(df3.index.levels[0].equals(l0_expected))\n self.assert_index_equal(df3.index.levels[1], l1_expected)\n self.assertFalse(df3.index.levels[1].equals(l1))\n\n df4 = DataFrame(np.ones(5),\n MultiIndex.from_arrays([int_idx, l0]))\n\n # TODO: untested\n df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa\n\n self.assert_index_equal(df3.index.levels[0], l0)\n self.assertFalse(df3.index.levels[0].equals(l0_expected))\n self.assert_index_equal(df3.index.levels[1], l1_expected)\n self.assertFalse(df3.index.levels[1].equals(l1))\n\n # Bad Inputs\n for fn in ['tz_localize', 'tz_convert']:\n # Not DatetimeIndex / PeriodIndex\n with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):\n df = DataFrame(index=int_idx)\n df = getattr(df, fn)('US/Pacific')\n\n # Not DatetimeIndex / PeriodIndex\n with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):\n df = DataFrame(np.ones(5),\n MultiIndex.from_arrays([int_idx, l0]))\n df = getattr(df, fn)('US/Pacific', level=0)\n\n # Invalid level\n with tm.assertRaisesRegexp(ValueError, 'not valid'):\n df = DataFrame(index=l0)\n df = getattr(df, fn)('US/Pacific', level=1)\n\n def test_set_attribute(self):\n # Test for consistent setattr behavior when an attribute and a column\n # have the same name (Issue #8994)\n df = DataFrame({'x': [1, 2, 3]})\n\n df.y = 2\n df['y'] = [2, 4, 6]\n df.y = 5\n\n self.assertEqual(df.y, 5)\n assert_series_equal(df['y'], Series([2, 4, 6], name='y'))\n\n def test_pct_change(self):\n # GH 11150\n pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(\n 0, 40, 10)]).astype(np.float64)\n pnl.iat[1, 0] = np.nan\n pnl.iat[1, 1] = np.nan\n pnl.iat[2, 3] = 60\n\n mask = pnl.isnull()\n\n for axis in range(2):\n expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(\n axis=axis) - 1\n expected[mask] = np.nan\n result = pnl.pct_change(axis=axis, fill_method='pad')\n\n self.assert_frame_equal(result, expected)\n\n def test_to_xarray(self):\n\n tm._skip_if_no_xarray()\n from xarray import Dataset\n\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, 
dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc')),\n 'g': pd.date_range('20130101', periods=3),\n 'h': pd.date_range('20130101',\n periods=3,\n tz='US/Eastern')}\n )\n\n df.index.name = 'foo'\n result = df[0:0].to_xarray()\n self.assertEqual(result.dims['foo'], 0)\n self.assertIsInstance(result, Dataset)\n\n for index in [tm.makeFloatIndex, tm.makeIntIndex,\n tm.makeStringIndex, tm.makeUnicodeIndex,\n tm.makeDateIndex, tm.makePeriodIndex,\n tm.makeCategoricalIndex, tm.makeTimedeltaIndex]:\n df.index = index(3)\n df.index.name = 'foo'\n df.columns.name = 'bar'\n result = df.to_xarray()\n self.assertEqual(result.dims['foo'], 3)\n self.assertEqual(len(result.coords), 1)\n self.assertEqual(len(result.data_vars), 8)\n assert_almost_equal(list(result.coords.keys()), ['foo'])\n self.assertIsInstance(result, Dataset)\n\n # idempotency\n # categoricals are not preserved\n # datetimes w/tz are not preserved\n # column names are lost\n expected = df.copy()\n expected['f'] = expected['f'].astype(object)\n expected['h'] = expected['h'].astype('datetime64[ns]')\n expected.columns.name = None\n assert_frame_equal(result.to_dataframe(), expected,\n check_index_type=False, check_categorical=False)\n\n # available in 0.7.1\n # MultiIndex\n df.index = pd.MultiIndex.from_product([['a'], range(3)],\n names=['one', 'two'])\n result = df.to_xarray()\n self.assertEqual(result.dims['one'], 1)\n self.assertEqual(result.dims['two'], 3)\n self.assertEqual(len(result.coords), 2)\n self.assertEqual(len(result.data_vars), 8)\n assert_almost_equal(list(result.coords.keys()), ['one', 'two'])\n self.assertIsInstance(result, Dataset)\n\n result = result.to_dataframe()\n expected = df.copy()\n expected['f'] = expected['f'].astype(object)\n expected['h'] = expected['h'].astype('datetime64[ns]')\n expected.columns.name = None\n assert_frame_equal(result,\n expected,\n check_index_type=False)\n\n\nclass TestPanel(tm.TestCase, Generic):\n _typ = Panel\n _comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True)\n\n def test_to_xarray(self):\n\n tm._skip_if_no_xarray()\n from xarray import DataArray\n\n p = tm.makePanel()\n\n result = p.to_xarray()\n self.assertIsInstance(result, DataArray)\n self.assertEqual(len(result.coords), 3)\n assert_almost_equal(list(result.coords.keys()),\n ['items', 'major_axis', 'minor_axis'])\n self.assertEqual(len(result.dims), 3)\n\n # idempotency\n assert_panel_equal(result.to_pandas(), p)\n\n\nclass TestPanel4D(tm.TestCase, Generic):\n _typ = Panel4D\n _comparator = lambda self, x, y: assert_panel4d_equal(x, y, by_blocks=True)\n\n def test_sample(self):\n raise nose.SkipTest(\"sample on Panel4D\")\n\n def test_to_xarray(self):\n\n tm._skip_if_no_xarray()\n from xarray import DataArray\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p = tm.makePanel4D()\n\n result = p.to_xarray()\n self.assertIsInstance(result, DataArray)\n self.assertEqual(len(result.coords), 4)\n assert_almost_equal(list(result.coords.keys()),\n ['labels', 'items', 'major_axis',\n 'minor_axis'])\n self.assertEqual(len(result.dims), 4)\n\n # non-convertible\n self.assertRaises(ValueError, lambda: result.to_pandas())\n\n# run all the tests, but wrap each in a warning catcher\nfor t in ['test_rename', 'test_rename_axis', 'test_get_numeric_data',\n 'test_get_default', 'test_nonzero',\n 'test_numpy_1_7_compat_numeric_methods',\n 'test_downcast', 'test_constructor_compound_dtypes',\n 'test_head_tail',\n 'test_size_compat', 'test_split_compat',\n 
'test_unexpected_keyword',\n 'test_stat_unexpected_keyword', 'test_api_compat',\n 'test_stat_non_defaults_args',\n 'test_clip', 'test_truncate_out_of_bounds', 'test_numpy_clip',\n 'test_metadata_propagation']:\n\n def f():\n def tester(self):\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n return getattr(super(TestPanel4D, self), t)()\n return tester\n\n setattr(TestPanel4D, t, f())\n\n\nclass TestNDFrame(tm.TestCase):\n # tests that don't fit elsewhere\n\n def test_sample(sel):\n # Fixes issue: 2419\n # additional specific object based tests\n\n # A few dataframe test with degenerate weights.\n easy_weight_list = [0] * 10\n easy_weight_list[5] = 1\n\n df = pd.DataFrame({'col1': range(10, 20),\n 'col2': range(20, 30),\n 'colString': ['a'] * 10,\n 'easyweights': easy_weight_list})\n sample1 = df.sample(n=1, weights='easyweights')\n assert_frame_equal(sample1, df.iloc[5:6])\n\n # Ensure proper error if string given as weight for Series, panel, or\n # DataFrame with axis = 1.\n s = Series(range(10))\n with tm.assertRaises(ValueError):\n s.sample(n=3, weights='weight_column')\n\n panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],\n minor_axis=[3, 4, 5])\n with tm.assertRaises(ValueError):\n panel.sample(n=1, weights='weight_column')\n\n with tm.assertRaises(ValueError):\n df.sample(n=1, weights='weight_column', axis=1)\n\n # Check weighting key error\n with tm.assertRaises(KeyError):\n df.sample(n=3, weights='not_a_real_column_name')\n\n # Check that re-normalizes weights that don't sum to one.\n weights_less_than_1 = [0] * 10\n weights_less_than_1[0] = 0.5\n tm.assert_frame_equal(\n df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])\n\n ###\n # Test axis argument\n ###\n\n # Test axis argument\n df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})\n second_column_weight = [0, 1]\n assert_frame_equal(\n df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])\n\n # Different axis arg types\n assert_frame_equal(df.sample(n=1, axis='columns',\n weights=second_column_weight),\n df[['col2']])\n\n weight = [0] * 10\n weight[5] = 0.5\n assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),\n df.iloc[5:6])\n assert_frame_equal(df.sample(n=1, axis='index', weights=weight),\n df.iloc[5:6])\n\n # Check out of range axis values\n with tm.assertRaises(ValueError):\n df.sample(n=1, axis=2)\n\n with tm.assertRaises(ValueError):\n df.sample(n=1, axis='not_a_name')\n\n with tm.assertRaises(ValueError):\n s = pd.Series(range(10))\n s.sample(n=1, axis=1)\n\n # Test weight length compared to correct axis\n with tm.assertRaises(ValueError):\n df.sample(n=1, axis=1, weights=[0.5] * 10)\n\n # Check weights with axis = 1\n easy_weight_list = [0] * 3\n easy_weight_list[2] = 1\n\n df = pd.DataFrame({'col1': range(10, 20),\n 'col2': range(20, 30),\n 'colString': ['a'] * 10})\n sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)\n assert_frame_equal(sample1, df[['colString']])\n\n # Test default axes\n p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],\n minor_axis=[1, 3, 5])\n assert_panel_equal(\n p.sample(n=3, random_state=42), p.sample(n=3, axis=1,\n random_state=42))\n assert_frame_equal(\n df.sample(n=3, random_state=42), df.sample(n=3, axis=0,\n random_state=42))\n\n # Test that function aligns weights with frame\n df = DataFrame(\n {'col1': [5, 6, 7],\n 'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])\n s = Series([1, 0, 0], index=[3, 5, 9])\n assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))\n\n # Weights have index values to 
be dropped because not in\n # sampled DataFrame\n s2 = Series([0.001, 0, 10000], index=[3, 5, 10])\n assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))\n\n # Weights have empty values to be filed with zeros\n s3 = Series([0.01, 0], index=[3, 5])\n assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))\n\n # No overlap in weight and sampled DataFrame indices\n s4 = Series([1, 0], index=[1, 2])\n with tm.assertRaises(ValueError):\n df.sample(1, weights=s4)\n\n def test_squeeze(self):\n # noop\n for s in [tm.makeFloatSeries(), tm.makeStringSeries(),\n tm.makeObjectSeries()]:\n tm.assert_series_equal(s.squeeze(), s)\n for df in [tm.makeTimeDataFrame()]:\n tm.assert_frame_equal(df.squeeze(), df)\n for p in [tm.makePanel()]:\n tm.assert_panel_equal(p.squeeze(), p)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n for p4d in [tm.makePanel4D()]:\n tm.assert_panel4d_equal(p4d.squeeze(), p4d)\n\n # squeezing\n df = tm.makeTimeDataFrame().reindex(columns=['A'])\n tm.assert_series_equal(df.squeeze(), df['A'])\n\n p = tm.makePanel().reindex(items=['ItemA'])\n tm.assert_frame_equal(p.squeeze(), p['ItemA'])\n\n p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])\n tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p4d = tm.makePanel4D().reindex(labels=['label1'])\n tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])\n tm.assert_frame_equal(p4d.squeeze(), p4d.loc['label1', 'ItemA'])\n\n # don't fail with 0 length dimensions GH11229 & GH8999\n empty_series = pd.Series([], name='five')\n empty_frame = pd.DataFrame([empty_series])\n empty_panel = pd.Panel({'six': empty_frame})\n\n [tm.assert_series_equal(empty_series, higher_dim.squeeze())\n for higher_dim in [empty_series, empty_frame, empty_panel]]\n\n def test_numpy_squeeze(self):\n s = tm.makeFloatSeries()\n tm.assert_series_equal(np.squeeze(s), s)\n\n df = tm.makeTimeDataFrame().reindex(columns=['A'])\n tm.assert_series_equal(np.squeeze(df), df['A'])\n\n msg = \"the 'axis' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg,\n np.squeeze, s, axis=0)\n\n def test_transpose(self):\n msg = (r\"transpose\\(\\) got multiple values for \"\n r\"keyword argument 'axes'\")\n for s in [tm.makeFloatSeries(), tm.makeStringSeries(),\n tm.makeObjectSeries()]:\n # calls implementation in pandas/core/base.py\n tm.assert_series_equal(s.transpose(), s)\n for df in [tm.makeTimeDataFrame()]:\n tm.assert_frame_equal(df.transpose().transpose(), df)\n for p in [tm.makePanel()]:\n tm.assert_panel_equal(p.transpose(2, 0, 1)\n .transpose(1, 2, 0), p)\n tm.assertRaisesRegexp(TypeError, msg, p.transpose,\n 2, 0, 1, axes=(2, 0, 1))\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n for p4d in [tm.makePanel4D()]:\n tm.assert_panel4d_equal(p4d.transpose(2, 0, 3, 1)\n .transpose(1, 3, 0, 2), p4d)\n tm.assertRaisesRegexp(TypeError, msg, p4d.transpose,\n 2, 0, 3, 1, axes=(2, 0, 3, 1))\n\n def test_numpy_transpose(self):\n msg = \"the 'axes' parameter is not supported\"\n\n s = tm.makeFloatSeries()\n tm.assert_series_equal(\n np.transpose(s), s)\n tm.assertRaisesRegexp(ValueError, msg,\n np.transpose, s, axes=1)\n\n df = tm.makeTimeDataFrame()\n tm.assert_frame_equal(np.transpose(\n np.transpose(df)), df)\n tm.assertRaisesRegexp(ValueError, msg,\n np.transpose, df, 
axes=1)\n\n p = tm.makePanel()\n tm.assert_panel_equal(np.transpose(\n np.transpose(p, axes=(2, 0, 1)),\n axes=(1, 2, 0)), p)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p4d = tm.makePanel4D()\n tm.assert_panel4d_equal(np.transpose(\n np.transpose(p4d, axes=(2, 0, 3, 1)),\n axes=(1, 3, 0, 2)), p4d)\n\n def test_take(self):\n indices = [1, 5, -2, 6, 3, -1]\n for s in [tm.makeFloatSeries(), tm.makeStringSeries(),\n tm.makeObjectSeries()]:\n out = s.take(indices)\n expected = Series(data=s.values.take(indices),\n index=s.index.take(indices))\n tm.assert_series_equal(out, expected)\n for df in [tm.makeTimeDataFrame()]:\n out = df.take(indices)\n expected = DataFrame(data=df.values.take(indices, axis=0),\n index=df.index.take(indices),\n columns=df.columns)\n tm.assert_frame_equal(out, expected)\n\n indices = [-3, 2, 0, 1]\n for p in [tm.makePanel()]:\n out = p.take(indices)\n expected = Panel(data=p.values.take(indices, axis=0),\n items=p.items.take(indices),\n major_axis=p.major_axis,\n minor_axis=p.minor_axis)\n tm.assert_panel_equal(out, expected)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n for p4d in [tm.makePanel4D()]:\n out = p4d.take(indices)\n expected = Panel4D(data=p4d.values.take(indices, axis=0),\n labels=p4d.labels.take(indices),\n major_axis=p4d.major_axis,\n minor_axis=p4d.minor_axis,\n items=p4d.items)\n tm.assert_panel4d_equal(out, expected)\n\n def test_take_invalid_kwargs(self):\n indices = [-3, 2, 0, 1]\n s = tm.makeFloatSeries()\n df = tm.makeTimeDataFrame()\n p = tm.makePanel()\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n p4d = tm.makePanel4D()\n\n for obj in (s, df, p, p4d):\n msg = r\"take\\(\\) got an unexpected keyword argument 'foo'\"\n tm.assertRaisesRegexp(TypeError, msg, obj.take,\n indices, foo=2)\n\n msg = \"the 'out' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, obj.take,\n indices, out=indices)\n\n msg = \"the 'mode' parameter is not supported\"\n tm.assertRaisesRegexp(ValueError, msg, obj.take,\n indices, mode='clip')\n\n def test_equals(self):\n s1 = pd.Series([1, 2, 3], index=[0, 2, 1])\n s2 = s1.copy()\n self.assertTrue(s1.equals(s2))\n\n s1[1] = 99\n self.assertFalse(s1.equals(s2))\n\n # NaNs compare as equal\n s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])\n s2 = s1.copy()\n self.assertTrue(s1.equals(s2))\n\n s2[0] = 9.9\n self.assertFalse(s1.equals(s2))\n\n idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])\n s1 = Series([1, 2, np.nan], index=idx)\n s2 = s1.copy()\n self.assertTrue(s1.equals(s2))\n\n # Add object dtype column with nans\n index = np.random.random(10)\n df1 = DataFrame(\n np.random.random(10, ), index=index, columns=['floats'])\n df1['text'] = 'the sky is so blue. 
we could use more chocolate.'.split(\n )\n df1['start'] = date_range('2000-1-1', periods=10, freq='T')\n df1['end'] = date_range('2000-1-1', periods=10, freq='D')\n df1['diff'] = df1['end'] - df1['start']\n df1['bool'] = (np.arange(10) % 3 == 0)\n df1.loc[::2] = nan\n df2 = df1.copy()\n self.assertTrue(df1['text'].equals(df2['text']))\n self.assertTrue(df1['start'].equals(df2['start']))\n self.assertTrue(df1['end'].equals(df2['end']))\n self.assertTrue(df1['diff'].equals(df2['diff']))\n self.assertTrue(df1['bool'].equals(df2['bool']))\n self.assertTrue(df1.equals(df2))\n self.assertFalse(df1.equals(object))\n\n # different dtype\n different = df1.copy()\n different['floats'] = different['floats'].astype('float32')\n self.assertFalse(df1.equals(different))\n\n # different index\n different_index = -index\n different = df2.set_index(different_index)\n self.assertFalse(df1.equals(different))\n\n # different columns\n different = df2.copy()\n different.columns = df2.columns[::-1]\n self.assertFalse(df1.equals(different))\n\n # DatetimeIndex\n index = pd.date_range('2000-1-1', periods=10, freq='T')\n df1 = df1.set_index(index)\n df2 = df1.copy()\n self.assertTrue(df1.equals(df2))\n\n # MultiIndex\n df3 = df1.set_index(['text'], append=True)\n df2 = df1.set_index(['text'], append=True)\n self.assertTrue(df3.equals(df2))\n\n df2 = df1.set_index(['floats'], append=True)\n self.assertFalse(df3.equals(df2))\n\n # NaN in index\n df3 = df1.set_index(['floats'], append=True)\n df2 = df1.set_index(['floats'], append=True)\n self.assertTrue(df3.equals(df2))\n\n # GH 8437\n a = pd.Series([False, np.nan])\n b = pd.Series([False, np.nan])\n c = pd.Series(index=range(2))\n d = pd.Series(index=range(2))\n e = pd.Series(index=range(2))\n f = pd.Series(index=range(2))\n c[:-1] = d[:-1] = e[0] = f[0] = False\n self.assertTrue(a.equals(a))\n self.assertTrue(a.equals(b))\n self.assertTrue(a.equals(c))\n self.assertTrue(a.equals(d))\n self.assertFalse(a.equals(e))\n self.assertTrue(e.equals(f))\n\n def test_describe_raises(self):\n with tm.assertRaises(NotImplementedError):\n tm.makePanel().describe()\n\n def test_pipe(self):\n df = DataFrame({'A': [1, 2, 3]})\n f = lambda x, y: x ** y\n result = df.pipe(f, 2)\n expected = DataFrame({'A': [1, 4, 9]})\n self.assert_frame_equal(result, expected)\n\n result = df.A.pipe(f, 2)\n self.assert_series_equal(result, expected.A)\n\n def test_pipe_tuple(self):\n df = DataFrame({'A': [1, 2, 3]})\n f = lambda x, y: y\n result = df.pipe((f, 'y'), 0)\n self.assert_frame_equal(result, df)\n\n result = df.A.pipe((f, 'y'), 0)\n self.assert_series_equal(result, df.A)\n\n def test_pipe_tuple_error(self):\n df = DataFrame({\"A\": [1, 2, 3]})\n f = lambda x, y: y\n with tm.assertRaises(ValueError):\n df.pipe((f, 'y'), x=1, y=0)\n\n with tm.assertRaises(ValueError):\n df.A.pipe((f, 'y'), x=1, y=0)\n\n def test_pipe_panel(self):\n wp = Panel({'r1': DataFrame({\"A\": [1, 2, 3]})})\n f = lambda x, y: x + y\n result = wp.pipe(f, 2)\n expected = wp + 2\n assert_panel_equal(result, expected)\n\n result = wp.pipe((f, 'y'), x=1)\n expected = wp + 1\n assert_panel_equal(result, expected)\n\n with tm.assertRaises(ValueError):\n result = wp.pipe((f, 'y'), x=1, y=1)\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n", "import numpy as np\nimport pandas as pd\n\nfrom pandas import DataFrame, Series, Index\nfrom pandas.tools.hashing import hash_array, hash_pandas_object\nimport pandas.util.testing as tm\n\n\nclass 
TestHashing(tm.TestCase):\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n self.df = DataFrame(\n {'i32': np.array([1, 2, 3] * 3, dtype='int32'),\n 'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),\n 'cat': Series(['a', 'b', 'c'] * 3).astype('category'),\n 'obj': Series(['d', 'e', 'f'] * 3),\n 'bool': np.array([True, False, True] * 3),\n 'dt': Series(pd.date_range('20130101', periods=9)),\n 'dt_tz': Series(pd.date_range('20130101', periods=9,\n tz='US/Eastern')),\n 'td': Series(pd.timedelta_range('2000', periods=9))})\n\n def test_consistency(self):\n # check that our hash doesn't change because of a mistake\n # in the actual code; this is the ground truth\n result = hash_pandas_object(Index(['foo', 'bar', 'baz']))\n expected = Series(np.array([3600424527151052760, 1374399572096150070,\n 477881037637427054], dtype='uint64'),\n index=['foo', 'bar', 'baz'])\n tm.assert_series_equal(result, expected)\n\n def test_hash_array(self):\n for name, s in self.df.iteritems():\n a = s.values\n tm.assert_numpy_array_equal(hash_array(a), hash_array(a))\n\n def check_equal(self, obj, **kwargs):\n a = hash_pandas_object(obj, **kwargs)\n b = hash_pandas_object(obj, **kwargs)\n tm.assert_series_equal(a, b)\n\n kwargs.pop('index', None)\n a = hash_pandas_object(obj, **kwargs)\n b = hash_pandas_object(obj, **kwargs)\n tm.assert_series_equal(a, b)\n\n def check_not_equal_with_index(self, obj):\n\n # check that we are not hashing the same if\n # we include the index\n if not isinstance(obj, Index):\n a = hash_pandas_object(obj, index=True)\n b = hash_pandas_object(obj, index=False)\n self.assertFalse((a == b).all())\n\n def test_hash_pandas_object(self):\n\n for obj in [Series([1, 2, 3]),\n Series([1.0, 1.5, 3.2]),\n Series([1.0, 1.5, np.nan]),\n Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n Series(['a', 'b', 'c']),\n Series(['a', np.nan, 'c']),\n Series(['a', None, 'c']),\n Series([True, False, True]),\n Index([1, 2, 3]),\n Index([True, False, True]),\n DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),\n tm.makeMissingDataframe(),\n tm.makeMixedDataFrame(),\n tm.makeTimeDataFrame(),\n tm.makeTimeSeries(),\n tm.makeTimedeltaIndex()]:\n self.check_equal(obj)\n self.check_not_equal_with_index(obj)\n\n def test_hash_pandas_object2(self):\n for name, s in self.df.iteritems():\n self.check_equal(s)\n self.check_not_equal_with_index(s)\n\n def test_hash_pandas_empty_object(self):\n for obj in [Series([], dtype='float64'),\n Series([], dtype='object'),\n Index([])]:\n self.check_equal(obj)\n\n # these are by-definition the same with\n # or w/o the index as the data is empty\n\n def test_categorical_consistency(self):\n # GH15143\n # Check that categoricals hash consistent with their values, not codes\n # This should work for categoricals of any dtype\n for s1 in [Series(['a', 'b', 'c', 'd']),\n Series([1000, 2000, 3000, 4000]),\n Series(pd.date_range(0, periods=4))]:\n s2 = s1.astype('category').cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n for categorize in [True, False]:\n # These should all hash identically\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)\n\n def test_errors(self):\n\n for obj in [pd.Timestamp('20130101'), tm.makePanel()]:\n def f():\n hash_pandas_object(f)\n\n self.assertRaises(TypeError, f)\n\n def test_hash_keys(self):\n # using different hash keys, should have different 
hashes\n # for the same data\n\n # this only matters for object dtypes\n obj = Series(list('abc'))\n a = hash_pandas_object(obj, hash_key='9876543210123456')\n b = hash_pandas_object(obj, hash_key='9876543210123465')\n self.assertTrue((a != b).all())\n\n def test_invalid_key(self):\n # this only matters for object dtypes\n def f():\n hash_pandas_object(Series(list('abc')), hash_key='foo')\n self.assertRaises(ValueError, f)\n\n def test_unsupported_objects(self):\n\n # mixed objects are not supported\n obj = Series(['1', 2, 3])\n\n def f():\n hash_pandas_object(obj)\n self.assertRaises(TypeError, f)\n\n # MultiIndex are represented as tuples\n obj = Series([1, 2, 3], index=pd.MultiIndex.from_tuples(\n [('a', 1), ('a', 2), ('b', 1)]))\n\n def f():\n hash_pandas_object(obj)\n self.assertRaises(TypeError, f)\n\n def test_alread_encoded(self):\n # if already encoded then ok\n\n obj = Series(list('abc')).str.encode('utf8')\n self.check_equal(obj)\n\n def test_alternate_encoding(self):\n\n obj = Series(list('abc'))\n self.check_equal(obj, encoding='ascii')\n\n def test_same_len_hash_collisions(self):\n\n for l in range(8):\n length = 2**(l + 8) + 1\n s = tm.rands_array(length, 2)\n result = hash_array(s, 'utf8')\n self.assertFalse(result[0] == result[1])\n\n for l in range(8):\n length = 2**(l + 8)\n s = tm.rands_array(length, 2)\n result = hash_array(s, 'utf8')\n self.assertFalse(result[0] == result[1])\n\n def test_hash_collisions(self):\n\n # hash collisions are bad\n # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726\n L = ['Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9', # noqa\n 'Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe'] # noqa\n\n # these should be different!\n result1 = hash_array(np.asarray(L[0:1], dtype=object), 'utf8')\n expected1 = np.array([14963968704024874985], dtype=np.uint64)\n self.assert_numpy_array_equal(result1, expected1)\n\n result2 = hash_array(np.asarray(L[1:2], dtype=object), 'utf8')\n expected2 = np.array([16428432627716348016], dtype=np.uint64)\n self.assert_numpy_array_equal(result2, expected2)\n\n result = hash_array(np.asarray(L, dtype=object), 'utf8')\n self.assert_numpy_array_equal(\n result, np.concatenate([expected1, expected2], axis=0))\n", "# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom datetime import datetime\n\nfrom numpy import random\nimport numpy as np\n\nfrom pandas.compat import lrange, lzip, u\nfrom pandas import (compat, DataFrame, Series, Index, MultiIndex,\n date_range, isnull)\nimport pandas as pd\n\nfrom pandas.util.testing import (assert_series_equal,\n assert_frame_equal,\n 
assertRaisesRegexp)\n\nfrom pandas.core.common import PerformanceWarning\nimport pandas.util.testing as tm\n\nfrom pandas.tests.frame.common import TestData\n\n\nclass TestDataFrameSelectReindex(tm.TestCase, TestData):\n # These are specific reindex-based tests; other indexing tests should go in\n # test_indexing\n\n _multiprocess_can_split_ = True\n\n def test_drop_names(self):\n df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],\n index=['a', 'b', 'c'],\n columns=['d', 'e', 'f'])\n df.index.name, df.columns.name = 'first', 'second'\n df_dropped_b = df.drop('b')\n df_dropped_e = df.drop('e', axis=1)\n df_inplace_b, df_inplace_e = df.copy(), df.copy()\n df_inplace_b.drop('b', inplace=True)\n df_inplace_e.drop('e', axis=1, inplace=True)\n for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):\n self.assertEqual(obj.index.name, 'first')\n self.assertEqual(obj.columns.name, 'second')\n self.assertEqual(list(df.columns), ['d', 'e', 'f'])\n\n self.assertRaises(ValueError, df.drop, ['g'])\n self.assertRaises(ValueError, df.drop, ['g'], 1)\n\n # errors = 'ignore'\n dropped = df.drop(['g'], errors='ignore')\n expected = Index(['a', 'b', 'c'], name='first')\n self.assert_index_equal(dropped.index, expected)\n\n dropped = df.drop(['b', 'g'], errors='ignore')\n expected = Index(['a', 'c'], name='first')\n self.assert_index_equal(dropped.index, expected)\n\n dropped = df.drop(['g'], axis=1, errors='ignore')\n expected = Index(['d', 'e', 'f'], name='second')\n self.assert_index_equal(dropped.columns, expected)\n\n dropped = df.drop(['d', 'g'], axis=1, errors='ignore')\n expected = Index(['e', 'f'], name='second')\n self.assert_index_equal(dropped.columns, expected)\n\n def test_drop_col_still_multiindex(self):\n arrays = [['a', 'b', 'c', 'top'],\n ['', '', '', 'OD'],\n ['', '', '', 'wx']]\n\n tuples = sorted(zip(*arrays))\n index = MultiIndex.from_tuples(tuples)\n\n df = DataFrame(np.random.randn(3, 4), columns=index)\n del df[('a', '', '')]\n assert(isinstance(df.columns, MultiIndex))\n\n def test_drop(self):\n simple = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [0, 1, 2, 3]})\n assert_frame_equal(simple.drop(\"A\", axis=1), simple[['B']])\n assert_frame_equal(simple.drop([\"A\", \"B\"], axis='columns'),\n simple[[]])\n assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])\n assert_frame_equal(simple.drop(\n [0, 3], axis='index'), simple.loc[[1, 2], :])\n\n self.assertRaises(ValueError, simple.drop, 5)\n self.assertRaises(ValueError, simple.drop, 'C', 1)\n self.assertRaises(ValueError, simple.drop, [1, 5])\n self.assertRaises(ValueError, simple.drop, ['A', 'C'], 1)\n\n # errors = 'ignore'\n assert_frame_equal(simple.drop(5, errors='ignore'), simple)\n assert_frame_equal(simple.drop([0, 5], errors='ignore'),\n simple.loc[[1, 2, 3], :])\n assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)\n assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),\n simple[['B']])\n\n # non-unique - wheee!\n nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),\n columns=['a', 'a', 'b'])\n assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])\n assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])\n\n nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))\n nu_df.columns = list('abc')\n assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[[\"Y\"], :])\n assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])\n\n # inplace cache issue\n # GH 5628\n df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))\n expected = 
df[~(df.b > 0)]\n df.drop(labels=df[df.b > 0].index, inplace=True)\n assert_frame_equal(df, expected)\n\n def test_drop_multiindex_not_lexsorted(self):\n # GH 11640\n\n # define the lexsorted version\n lexsorted_mi = MultiIndex.from_tuples(\n [('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])\n lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)\n self.assertTrue(lexsorted_df.columns.is_lexsorted())\n\n # define the non-lexsorted version\n not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],\n data=[[1, 'b1', 'c1', 3],\n [1, 'b2', 'c2', 4]])\n not_lexsorted_df = not_lexsorted_df.pivot_table(\n index='a', columns=['b', 'c'], values='d')\n not_lexsorted_df = not_lexsorted_df.reset_index()\n self.assertFalse(not_lexsorted_df.columns.is_lexsorted())\n\n # compare the results\n tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)\n\n expected = lexsorted_df.drop('a', axis=1)\n with tm.assert_produces_warning(PerformanceWarning):\n result = not_lexsorted_df.drop('a', axis=1)\n\n tm.assert_frame_equal(result, expected)\n\n def test_merge_join_different_levels(self):\n # GH 9455\n\n # first dataframe\n df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])\n\n # second dataframe\n columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])\n df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])\n\n # merge\n columns = ['a', 'b', ('c', 'c1')]\n expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])\n with tm.assert_produces_warning(UserWarning):\n result = pd.merge(df1, df2, on='a')\n tm.assert_frame_equal(result, expected)\n\n # join, see discussion in GH 12219\n columns = ['a', 'b', ('a', ''), ('c', 'c1')]\n expected = DataFrame(columns=columns,\n data=[[1, 11, 0, 44], [0, 22, 1, 33]])\n with tm.assert_produces_warning(UserWarning):\n result = df1.join(df2, on='a')\n tm.assert_frame_equal(result, expected)\n\n def test_reindex(self):\n newFrame = self.frame.reindex(self.ts1.index)\n\n for col in newFrame.columns:\n for idx, val in compat.iteritems(newFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(newFrame):\n self.assertTrue(tm.equalContents(series.index, newFrame.index))\n emptyFrame = self.frame.reindex(Index([]))\n self.assertEqual(len(emptyFrame.index), 0)\n\n # Cython code should be unit-tested directly\n nonContigFrame = self.frame.reindex(self.ts1.index[::2])\n\n for col in nonContigFrame.columns:\n for idx, val in compat.iteritems(nonContigFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(nonContigFrame):\n self.assertTrue(tm.equalContents(series.index,\n nonContigFrame.index))\n\n # corner cases\n\n # Same index, copies values but not index if copy=False\n newFrame = self.frame.reindex(self.frame.index, copy=False)\n self.assertIs(newFrame.index, self.frame.index)\n\n # length zero\n newFrame = self.frame.reindex([])\n self.assertTrue(newFrame.empty)\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # length zero with columns reindexed with non-empty index\n newFrame = self.frame.reindex([])\n newFrame = newFrame.reindex(self.frame.index)\n self.assertEqual(len(newFrame.index), len(self.frame.index))\n 
self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # pass non-Index\n newFrame = self.frame.reindex(list(self.ts1.index))\n self.assert_index_equal(newFrame.index, self.ts1.index)\n\n # copy with no axes\n result = self.frame.reindex()\n assert_frame_equal(result, self.frame)\n self.assertFalse(result is self.frame)\n\n def test_reindex_nan(self):\n df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],\n index=[2, np.nan, 1, 5],\n columns=['joe', 'jim'])\n\n i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]\n assert_frame_equal(df.reindex(i), df.iloc[j])\n\n df.index = df.index.astype('object')\n assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)\n\n # GH10388\n df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],\n 'date': ['2015-03-22', np.nan,\n '2012-01-08', np.nan],\n 'amount': [2, 3, 4, 5]})\n\n df['date'] = pd.to_datetime(df.date)\n df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)\n\n left = df.set_index(['delta', 'other', 'date']).reset_index()\n right = df.reindex(columns=['delta', 'other', 'date', 'amount'])\n assert_frame_equal(left, right)\n\n def test_reindex_name_remains(self):\n s = Series(random.rand(10))\n df = DataFrame(s, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n\n df = df.reindex(i)\n self.assertEqual(df.index.name, 'iname')\n\n df = df.reindex(Index(np.arange(10), name='tmpname'))\n self.assertEqual(df.index.name, 'tmpname')\n\n s = Series(random.rand(10))\n df = DataFrame(s.T, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n df = df.reindex(columns=i)\n self.assertEqual(df.columns.name, 'iname')\n\n def test_reindex_int(self):\n smaller = self.intframe.reindex(self.intframe.index[::2])\n\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n bigger = smaller.reindex(self.intframe.index)\n self.assertEqual(bigger['A'].dtype, np.float64)\n\n smaller = self.intframe.reindex(columns=['A', 'B'])\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n def test_reindex_like(self):\n other = self.frame.reindex(index=self.frame.index[:10],\n columns=['C', 'B'])\n\n assert_frame_equal(other, self.frame.reindex_like(other))\n\n def test_reindex_columns(self):\n newFrame = self.frame.reindex(columns=['A', 'B', 'E'])\n\n assert_series_equal(newFrame['B'], self.frame['B'])\n self.assertTrue(np.isnan(newFrame['E']).all())\n self.assertNotIn('C', newFrame)\n\n # length zero\n newFrame = self.frame.reindex(columns=[])\n self.assertTrue(newFrame.empty)\n\n def test_reindex_columns_method(self):\n\n # GH 14992, reindexing over columns ignored method\n df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],\n index=[1, 2, 4],\n columns=[1, 2, 4],\n dtype=float)\n\n # default method\n result = df.reindex(columns=range(6))\n expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],\n [np.nan, 21, 22, np.nan, 23, np.nan],\n [np.nan, 31, 32, np.nan, 33, np.nan]],\n index=[1, 2, 4],\n columns=range(6),\n dtype=float)\n assert_frame_equal(result, expected)\n\n # method='ffill'\n result = df.reindex(columns=range(6), method='ffill')\n expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],\n [np.nan, 21, 22, 22, 23, 23],\n [np.nan, 31, 32, 32, 33, 33]],\n index=[1, 2, 4],\n columns=range(6),\n dtype=float)\n assert_frame_equal(result, expected)\n\n # method='bfill'\n result = df.reindex(columns=range(6), method='bfill')\n expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],\n [21, 21, 22, 23, 23, np.nan],\n [31, 31, 32, 33, 33, np.nan]],\n index=[1, 2, 4],\n 
columns=range(6),\n dtype=float)\n assert_frame_equal(result, expected)\n\n def test_reindex_axes(self):\n # GH 3317, reindexing by both axes loses freq of the index\n df = DataFrame(np.ones((3, 3)),\n index=[datetime(2012, 1, 1),\n datetime(2012, 1, 2),\n datetime(2012, 1, 3)],\n columns=['a', 'b', 'c'])\n time_freq = date_range('2012-01-01', '2012-01-03', freq='d')\n some_cols = ['a', 'b']\n\n index_freq = df.reindex(index=time_freq).index.freq\n both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq\n seq_freq = df.reindex(index=time_freq).reindex(\n columns=some_cols).index.freq\n self.assertEqual(index_freq, both_freq)\n self.assertEqual(index_freq, seq_freq)\n\n def test_reindex_fill_value(self):\n df = DataFrame(np.random.randn(10, 4))\n\n # axis=0\n result = df.reindex(lrange(15))\n self.assertTrue(np.isnan(result.values[-5:]).all())\n\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n # axis=1\n result = df.reindex(columns=lrange(5), fill_value=0.)\n expected = df.copy()\n expected[4] = 0.\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value=0)\n expected = df.copy()\n expected[4] = 0\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value='foo')\n expected = df.copy()\n expected[4] = 'foo'\n assert_frame_equal(result, expected)\n\n # reindex_axis\n result = df.reindex_axis(lrange(15), fill_value=0., axis=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n result = df.reindex_axis(lrange(5), fill_value=0., axis=1)\n expected = df.reindex(columns=lrange(5)).fillna(0)\n assert_frame_equal(result, expected)\n\n # other dtypes\n df['foo'] = 'foo'\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n def test_reindex_dups(self):\n\n # GH4746, reindex on duplicate index error messages\n arr = np.random.randn(10)\n df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])\n\n # set index is ok\n result = df.copy()\n result.index = list(range(len(df)))\n expected = DataFrame(arr, index=list(range(len(df))))\n assert_frame_equal(result, expected)\n\n # reindex fails\n self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))\n\n def test_align(self):\n af, bf = self.frame.align(self.frame)\n self.assertIsNot(af._data, self.frame._data)\n\n af, bf = self.frame.align(self.frame, copy=False)\n self.assertIs(af._data, self.frame._data)\n\n # axis = 0\n other = self.frame.iloc[:-5, :3]\n af, bf = self.frame.align(other, axis=0, fill_value=-1)\n self.assert_index_equal(bf.columns, other.columns)\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='right', axis=0)\n self.assert_index_equal(bf.columns, other.columns)\n self.assert_index_equal(bf.index, other.index)\n self.assert_index_equal(af.index, other.index)\n\n # axis = 1\n other = self.frame.iloc[:-5, :3].copy()\n af, bf = self.frame.align(other, axis=1)\n self.assert_index_equal(bf.columns, self.frame.columns)\n self.assert_index_equal(bf.index, other.index)\n\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = 
self.frame.index.difference(join_idx)\n diff_b = other.index.difference(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n\n # TODO(wesm): unused?\n diff_b_vals = bf.reindex(diff_b).values # noqa\n\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='inner', axis=1)\n self.assert_index_equal(bf.columns, other.columns)\n\n af, bf = self.frame.align(other, join='inner', axis=1, method='pad')\n self.assert_index_equal(bf.columns, other.columns)\n\n # test other non-float types\n af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')\n self.assert_index_equal(bf.columns, other.columns)\n\n af, bf = self.mixed_frame.align(self.mixed_frame,\n join='inner', axis=1, method='pad')\n self.assert_index_equal(bf.columns, self.mixed_frame.columns)\n\n af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,\n method=None, fill_value=None)\n self.assert_index_equal(bf.index, Index([]))\n\n af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assert_index_equal(bf.index, Index([]))\n\n # mixed floats/ints\n af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assert_index_equal(bf.index, Index([]))\n\n af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assert_index_equal(bf.index, Index([]))\n\n # try to align dataframe to series along bad axis\n self.assertRaises(ValueError, self.frame.align, af.iloc[0, :3],\n join='inner', axis=2)\n\n # align dataframe to series with broadcast or not\n idx = self.frame.index\n s = Series(range(len(idx)), index=idx)\n\n left, right = self.frame.align(s, axis=0)\n tm.assert_index_equal(left.index, self.frame.index)\n tm.assert_index_equal(right.index, self.frame.index)\n self.assertTrue(isinstance(right, Series))\n\n left, right = self.frame.align(s, broadcast_axis=1)\n tm.assert_index_equal(left.index, self.frame.index)\n expected = {}\n for c in self.frame.columns:\n expected[c] = s\n expected = DataFrame(expected, index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(right, expected)\n\n # GH 9558\n df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})\n result = df[df['a'] == 2]\n expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])\n assert_frame_equal(result, expected)\n\n result = df.where(df['a'] == 2, 0)\n expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})\n assert_frame_equal(result, expected)\n\n def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):\n aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,\n fill_axis=fill_axis)\n\n join_index, join_columns = None, None\n\n ea, eb = a, b\n if axis is None or axis == 0:\n join_index = a.index.join(b.index, how=how)\n ea = ea.reindex(index=join_index)\n eb = eb.reindex(index=join_index)\n\n if axis is None or axis == 1:\n join_columns = a.columns.join(b.columns, how=how)\n ea = ea.reindex(columns=join_columns)\n eb = eb.reindex(columns=join_columns)\n\n ea = ea.fillna(axis=fill_axis, method=method, limit=limit)\n eb = eb.fillna(axis=fill_axis, method=method, limit=limit)\n\n assert_frame_equal(aa, ea)\n assert_frame_equal(ab, eb)\n\n def test_align_fill_method_inner(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('inner', meth, ax, fax)\n\n def test_align_fill_method_outer(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n 
self._check_align_fill('outer', meth, ax, fax)\n\n def test_align_fill_method_left(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('left', meth, ax, fax)\n\n def test_align_fill_method_right(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('right', meth, ax, fax)\n\n def _check_align_fill(self, kind, meth, ax, fax):\n left = self.frame.iloc[0:4, :10]\n right = self.frame.iloc[2:, 6:]\n empty = self.frame.iloc[:0, :0]\n\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty left\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty right\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # both empty\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n def test_align_int_fill_bug(self):\n # GH #910\n X = np.arange(10 * 10, dtype='float64').reshape(10, 10)\n Y = np.ones((10, 1), dtype=int)\n\n df1 = DataFrame(X)\n df1['0.X'] = Y.squeeze()\n\n df2 = df1.astype(float)\n\n result = df1 - df1.mean()\n expected = df2 - df2.mean()\n assert_frame_equal(result, expected)\n\n def test_align_multiindex(self):\n # GH 10665\n # same test cases as test_align_multiindex in test_series.py\n\n midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],\n names=('a', 'b', 'c'))\n idx = pd.Index(range(2), name='b')\n df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)\n df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)\n\n # these must be the same results (but flipped)\n res1l, res1r = df1.align(df2, join='left')\n res2l, res2r = df2.align(df1, join='right')\n\n expl = df1\n assert_frame_equal(expl, res1l)\n assert_frame_equal(expl, res2r)\n expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)\n assert_frame_equal(expr, res1r)\n assert_frame_equal(expr, res2l)\n\n res1l, res1r = df1.align(df2, join='right')\n res2l, res2r = df2.align(df1, join='left')\n\n exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],\n names=('a', 'b', 'c'))\n expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)\n assert_frame_equal(expl, res1l)\n assert_frame_equal(expl, res2r)\n expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)\n assert_frame_equal(expr, res1r)\n assert_frame_equal(expr, res2l)\n\n def test_align_series_combinations(self):\n df = pd.DataFrame({'a': [1, 3, 5],\n 'b': [1, 3, 5]}, index=list('ACE'))\n s = pd.Series([1, 2, 4], index=list('ABD'), name='x')\n\n # frame + series\n res1, res2 = df.align(s, axis=0)\n exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],\n 'b': [1, np.nan, 3, np.nan, 5]},\n index=list('ABCDE'))\n exp2 = pd.Series([1, 2, np.nan, 4, np.nan],\n index=list('ABCDE'), name='x')\n\n tm.assert_frame_equal(res1, exp1)\n tm.assert_series_equal(res2, exp2)\n\n # series + frame\n res1, res2 = s.align(df)\n tm.assert_series_equal(res1, exp2)\n tm.assert_frame_equal(res2, exp1)\n\n def test_filter(self):\n # items\n filtered = self.frame.filter(['A', 'B', 'E'])\n self.assertEqual(len(filtered.columns), 2)\n 
self.assertNotIn('E', filtered)\n\n filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n # other axis\n idx = self.frame.index[0:4]\n filtered = self.frame.filter(idx, axis='index')\n expected = self.frame.reindex(index=idx)\n assert_frame_equal(filtered, expected)\n\n # like\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n filtered = fcopy.filter(like='A')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # like with ints in column names\n df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])\n filtered = df.filter(like='_')\n self.assertEqual(len(filtered.columns), 2)\n\n # regex with ints in column names\n # from PR #10384\n df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])\n expected = DataFrame(\n 0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))\n filtered = df.filter(regex='^[0-9]+$')\n assert_frame_equal(filtered, expected)\n\n expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])\n # shouldn't remove anything\n filtered = expected.filter(regex='^[0-9]+$')\n assert_frame_equal(filtered, expected)\n\n # pass in None\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter()\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter(items=None)\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter(axis=1)\n\n # test mutually exclusive arguments\n with assertRaisesRegexp(TypeError, 'mutually exclusive'):\n self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')\n with assertRaisesRegexp(TypeError, 'mutually exclusive'):\n self.frame.filter(items=['one', 'three'], regex='e$', axis=1)\n with assertRaisesRegexp(TypeError, 'mutually exclusive'):\n self.frame.filter(items=['one', 'three'], regex='e$')\n with assertRaisesRegexp(TypeError, 'mutually exclusive'):\n self.frame.filter(items=['one', 'three'], like='bbi', axis=0)\n with assertRaisesRegexp(TypeError, 'mutually exclusive'):\n self.frame.filter(items=['one', 'three'], like='bbi')\n\n # objects\n filtered = self.mixed_frame.filter(like='foo')\n self.assertIn('foo', filtered)\n\n # unicode columns, won't ascii-encode\n df = self.frame.rename(columns={'B': u('\\u2202')})\n filtered = df.filter(like='C')\n self.assertTrue('C' in filtered)\n\n def test_filter_regex_search(self):\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n # regex\n filtered = fcopy.filter(regex='[A]+')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # doesn't have to be at beginning\n df = DataFrame({'aBBa': [1, 2],\n 'BBaBB': [1, 2],\n 'aCCa': [1, 2],\n 'aCCaBB': [1, 2]})\n\n result = df.filter(regex='BB')\n exp = df[[x for x in df.columns if 'BB' in x]]\n assert_frame_equal(result, exp)\n\n def test_filter_corner(self):\n empty = DataFrame()\n\n result = empty.filter([])\n assert_frame_equal(result, empty)\n\n result = empty.filter(like='foo')\n assert_frame_equal(result, empty)\n\n def test_select(self):\n f = lambda x: x.weekday() == 2\n result = self.tsframe.select(f, axis=0)\n expected = self.tsframe.reindex(\n index=self.tsframe.index[[f(x) for x in self.tsframe.index]])\n assert_frame_equal(result, expected)\n\n result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)\n expected = self.frame.reindex(columns=['B', 'D'])\n\n # TODO should reindex check_names?\n assert_frame_equal(result, expected, check_names=False)\n\n def test_take(self):\n # homogeneous\n order = [3, 1, 2, 0]\n for df in 
[self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.loc[:, ['D', 'B', 'C', 'A']]\n assert_frame_equal(result, expected, check_names=False)\n\n # neg indicies\n order = [2, 1, -1]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.loc[:, ['C', 'B', 'D']]\n assert_frame_equal(result, expected, check_names=False)\n\n # illegal indices\n self.assertRaises(IndexError, df.take, [3, 1, 2, 30], axis=0)\n self.assertRaises(IndexError, df.take, [3, 1, 2, -31], axis=0)\n self.assertRaises(IndexError, df.take, [3, 1, 2, 5], axis=1)\n self.assertRaises(IndexError, df.take, [3, 1, 2, -5], axis=1)\n\n # mixed-dtype\n order = [4, 1, 2, 0, 3]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n # neg indicies\n order = [4, 1, -2]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.loc[:, ['foo', 'B', 'D']]\n assert_frame_equal(result, expected)\n\n # by dtype\n order = [1, 2, 0, 3]\n for df in [self.mixed_float, self.mixed_int]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.loc[:, ['B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n def test_reindex_boolean(self):\n frame = DataFrame(np.ones((10, 2), dtype=bool),\n index=np.arange(0, 20, 2),\n columns=[0, 2])\n\n reindexed = frame.reindex(np.arange(10))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[0][1]))\n\n reindexed = frame.reindex(columns=lrange(3))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[1]).all())\n\n def test_reindex_objects(self):\n reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])\n self.assertIn('foo', reindexed)\n\n reindexed = self.mixed_frame.reindex(columns=['A', 'B'])\n self.assertNotIn('foo', reindexed)\n\n def test_reindex_corner(self):\n index = Index(['a', 'b', 'c'])\n dm = self.empty.reindex(index=[1, 2, 3])\n reindexed = dm.reindex(columns=index)\n self.assert_index_equal(reindexed.columns, index)\n\n # ints are weird\n smaller = self.intframe.reindex(columns=['A', 'B', 'E'])\n self.assertEqual(smaller['E'].dtype, np.float64)\n\n def test_reindex_axis(self):\n cols = ['A', 'B', 'E']\n reindexed1 = self.intframe.reindex_axis(cols, axis=1)\n reindexed2 = self.intframe.reindex(columns=cols)\n assert_frame_equal(reindexed1, reindexed2)\n\n rows = self.intframe.index[0:5]\n reindexed1 = self.intframe.reindex_axis(rows, axis=0)\n reindexed2 = self.intframe.reindex(index=rows)\n assert_frame_equal(reindexed1, reindexed2)\n\n self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)\n\n # no-op case\n cols = self.frame.columns.copy()\n newFrame = self.frame.reindex_axis(cols, axis=1)\n assert_frame_equal(newFrame, self.frame)\n\n def test_reindex_with_nans(self):\n df 
= DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],\n columns=['a', 'b'],\n index=[100.0, 101.0, np.nan, 102.0, 103.0])\n\n result = df.reindex(index=[101.0, 102.0, 103.0])\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[103.0])\n expected = df.iloc[[4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[101.0])\n expected = df.iloc[[1]]\n assert_frame_equal(result, expected)\n\n def test_reindex_multi(self):\n df = DataFrame(np.random.randn(3, 3))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(2), lrange(2))\n expected = df.reindex(lrange(2)).reindex(columns=lrange(2))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])\n\n result = df.reindex(index=[0, 1], columns=['a', 'b'])\n expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])\n\n assert_frame_equal(result, expected)\n" ]
[ [ "pandas.types.common.is_datetimetz", "pandas.types.common.needs_i8_conversion", "pandas.to_datetime", "pandas.Series", "pandas.util.testing.assert_produces_warning", "pandas.core.base.FrozenNDArray", "pandas.util.testing.assert_index_equal", "pandas.util.testing.makeUnicodeIndex", "numpy.random.randn", "pandas.util.testing.makeBoolIndex", "pandas.util.testing.assert_numpy_array_equal", "numpy.unique", "numpy.arange", "pandas.compat.StringIO", "pandas.Index", "pandas.util.testing.assert_series_equal", "pandas.DatetimeIndex", "pandas.util.testing.makeDateIndex", "pandas.read_fwf", "pandas.compat.u", "pandas.util.testing.makeStringIndex", "pandas.Timedelta", "numpy.transpose", "numpy.array", "pandas.types.common.is_object_dtype", "pandas.TimedeltaIndex", "pandas.isnull", "pandas.util.testing.assertRaisesRegexp", "pandas.compat.numpy.np_array_datetime64_compat", "pandas.util.testing.makeIntIndex", "pandas.core.base.FrozenList", "pandas.util.testing.makeFloatIndex", "pandas.util.testing.makePeriodIndex" ], [ "pandas.util.testing.close", "pandas.util.testing.RNGContext", "pandas.Series", "numpy.random.choice", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.util.testing.assertRaises", "pandas.compat.range", "numpy.random.normal", "numpy.random.randn", "numpy.random.rand", "pandas.tests.plotting.common._check_plot_works", "numpy.random.randint" ], [ "pandas.Series", "pandas.util.testing.makeObjectSeries", "numpy.squeeze", "pandas.util.testing.assert_produces_warning", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "pandas.util.testing.makePanel", "numpy.random.randn", "pandas.compat.iteritems", "pandas.util.testing.makeDataFrame", "numpy.random.randint", "pandas.util.testing.makeTimeDataFrame", "pandas.util.testing.makePanel4D", "numpy.clip", "numpy.arange", "pandas.Panel", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.util.testing.assert_panel4d_equal", "pandas.core.index.MultiIndex.from_tuples", "numpy.array_split", "pandas.util.testing.assert_panel_equal", "numpy.repeat", "pandas.types.common.is_scalar", "pandas.concat", "pandas.core.index.MultiIndex.from_arrays", "pandas.Categorical", "pandas.util.testing._skip_if_no_xarray", "numpy.random.rand", "pandas.date_range", "numpy.transpose", "numpy.array", "numpy.random.RandomState", "numpy.random.random", "pandas.util.testing.makeTimeSeries", "numpy.random.seed", "pandas.util.testing.assertRaisesRegexp", "pandas.util.testing.makeFloatSeries", "pandas.period_range", "pandas.util.testing.makeMixedDataFrame", "pandas.isnull", "pandas.formats.printing.pprint_thing", "pandas.util.testing.assertRaises", "pandas.util.testing.makeStringSeries", "numpy.ones", "pandas.compat.zip", "numpy.prod", "pandas.to_timedelta", "pandas.Timestamp", "numpy.empty", "pandas.compat.range" ], [ "pandas.util.testing.makeMissingDataframe", "pandas.Series", "numpy.asarray", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.concatenate", "pandas.util.testing.makePanel", "pandas.util.testing.makeTimeDataFrame", "pandas.tools.hashing.hash_pandas_object", "pandas.Index", "pandas.util.testing.assert_series_equal", "pandas.util.testing.rands_array", "pandas.date_range", "pandas.util.testing.makeTimedeltaIndex", "numpy.array", "pandas.timedelta_range", "pandas.util.testing.makeTimeSeries", "pandas.util.testing.makeMixedDataFrame", "pandas.tools.hashing.hash_array", "pandas.Timestamp" ], [ "pandas.merge", "pandas.to_datetime", 
"pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas.compat.iteritems", "numpy.random.randint", "numpy.arange", "pandas.Index", "pandas.util.testing.assert_series_equal", "pandas.util.testing.equalContents", "pandas.compat.u", "numpy.isnan", "numpy.random.rand", "pandas.date_range", "pandas.isnull", "pandas.util.testing.assertRaisesRegexp", "numpy.ones", "pandas.compat.lrange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.19" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.19" ], "scipy": [], "tensorflow": [] } ]
dgoodwin208/6.883ProteinDocking
[ "07f33688bd5ec8c5ae6d4d4113eb64b0f2352e9e" ]
[ "config.py" ]
[ "import torch\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nFloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor\n" ]
[ [ "torch.device", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ilya-fedin/tg_owt
[ "d5c3d43b959c7e9e7d8004b9b7fdadd12ce7d589" ]
[ "src/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation_unittest.py" ]
[ "# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.\n#\n# Use of this source code is governed by a BSD-style license\n# that can be found in the LICENSE file in the root of the source\n# tree. An additional intellectual property rights grant can be found\n# in the file PATENTS. All contributing project authors may\n# be found in the AUTHORS file in the root of the source tree.\n\"\"\"Unit tests for the test_data_generation module.\n\"\"\"\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport scipy.io\n\nfrom . import test_data_generation\nfrom . import test_data_generation_factory\nfrom . import signal_processing\n\n\nclass TestTestDataGenerators(unittest.TestCase):\n \"\"\"Unit tests for the test_data_generation module.\n \"\"\"\n\n def setUp(self):\n \"\"\"Create temporary folders.\"\"\"\n self._base_output_path = tempfile.mkdtemp()\n self._test_data_cache_path = tempfile.mkdtemp()\n self._fake_air_db_path = tempfile.mkdtemp()\n\n # Fake AIR DB impulse responses.\n # TODO(alessiob): ReverberationTestDataGenerator will change to allow custom\n # impulse responses. When changed, the coupling below between\n # impulse_response_mat_file_names and\n # ReverberationTestDataGenerator._IMPULSE_RESPONSES can be removed.\n impulse_response_mat_file_names = [\n 'air_binaural_lecture_0_0_1.mat',\n 'air_binaural_booth_0_0_1.mat',\n ]\n for impulse_response_mat_file_name in impulse_response_mat_file_names:\n data = {'h_air': np.random.rand(1, 1000).astype('<f8')}\n scipy.io.savemat(\n os.path.join(self._fake_air_db_path,\n impulse_response_mat_file_name), data)\n\n def tearDown(self):\n \"\"\"Recursively delete temporary folders.\"\"\"\n shutil.rmtree(self._base_output_path)\n shutil.rmtree(self._test_data_cache_path)\n shutil.rmtree(self._fake_air_db_path)\n\n def testTestDataGenerators(self):\n # Preliminary check.\n self.assertTrue(os.path.exists(self._base_output_path))\n self.assertTrue(os.path.exists(self._test_data_cache_path))\n\n # Check that there is at least one registered test data generator.\n registered_classes = (\n test_data_generation.TestDataGenerator.REGISTERED_CLASSES)\n self.assertIsInstance(registered_classes, dict)\n self.assertGreater(len(registered_classes), 0)\n\n # Instance generators factory.\n generators_factory = test_data_generation_factory.TestDataGeneratorFactory(\n aechen_ir_database_path=self._fake_air_db_path,\n noise_tracks_path=test_data_generation. \\\n AdditiveNoiseTestDataGenerator. 
\\\n DEFAULT_NOISE_TRACKS_PATH,\n copy_with_identity=False)\n generators_factory.SetOutputDirectoryPrefix('datagen-')\n\n # Use a simple input file as clean input signal.\n input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',\n 'tone-880.wav')\n self.assertTrue(os.path.exists(input_signal_filepath))\n\n # Load input signal.\n input_signal = signal_processing.SignalProcessingUtils.LoadWav(\n input_signal_filepath)\n\n # Try each registered test data generator.\n for generator_name in registered_classes:\n # Instance test data generator.\n generator = generators_factory.GetInstance(\n registered_classes[generator_name])\n\n # Generate the noisy input - reference pairs.\n generator.Generate(input_signal_filepath=input_signal_filepath,\n test_data_cache_path=self._test_data_cache_path,\n base_output_path=self._base_output_path)\n\n # Perform checks.\n self._CheckGeneratedPairsListSizes(generator)\n self._CheckGeneratedPairsSignalDurations(generator, input_signal)\n self._CheckGeneratedPairsOutputPaths(generator)\n\n def testTestidentityDataGenerator(self):\n # Preliminary check.\n self.assertTrue(os.path.exists(self._base_output_path))\n self.assertTrue(os.path.exists(self._test_data_cache_path))\n\n # Use a simple input file as clean input signal.\n input_signal_filepath = os.path.join(os.getcwd(), 'probing_signals',\n 'tone-880.wav')\n self.assertTrue(os.path.exists(input_signal_filepath))\n\n def GetNoiseReferenceFilePaths(identity_generator):\n noisy_signal_filepaths = identity_generator.noisy_signal_filepaths\n reference_signal_filepaths = identity_generator.reference_signal_filepaths\n assert noisy_signal_filepaths.keys(\n ) == reference_signal_filepaths.keys()\n assert len(noisy_signal_filepaths.keys()) == 1\n key = noisy_signal_filepaths.keys()[0]\n return noisy_signal_filepaths[key], reference_signal_filepaths[key]\n\n # Test the `copy_with_identity` flag.\n for copy_with_identity in [False, True]:\n # Instance the generator through the factory.\n factory = test_data_generation_factory.TestDataGeneratorFactory(\n aechen_ir_database_path='',\n noise_tracks_path='',\n copy_with_identity=copy_with_identity)\n factory.SetOutputDirectoryPrefix('datagen-')\n generator = factory.GetInstance(\n test_data_generation.IdentityTestDataGenerator)\n # Check `copy_with_identity` is set correctly.\n self.assertEqual(copy_with_identity, generator.copy_with_identity)\n\n # Generate test data and extract the paths to the noise and the reference\n # files.\n generator.Generate(input_signal_filepath=input_signal_filepath,\n test_data_cache_path=self._test_data_cache_path,\n base_output_path=self._base_output_path)\n noisy_signal_filepath, reference_signal_filepath = (\n GetNoiseReferenceFilePaths(generator))\n\n # Check that a copy is made if and only if `copy_with_identity` is True.\n if copy_with_identity:\n self.assertNotEqual(noisy_signal_filepath,\n input_signal_filepath)\n self.assertNotEqual(reference_signal_filepath,\n input_signal_filepath)\n else:\n self.assertEqual(noisy_signal_filepath, input_signal_filepath)\n self.assertEqual(reference_signal_filepath,\n input_signal_filepath)\n\n def _CheckGeneratedPairsListSizes(self, generator):\n config_names = generator.config_names\n number_of_pairs = len(config_names)\n self.assertEqual(number_of_pairs,\n len(generator.noisy_signal_filepaths))\n self.assertEqual(number_of_pairs, len(generator.apm_output_paths))\n self.assertEqual(number_of_pairs,\n len(generator.reference_signal_filepaths))\n\n def 
_CheckGeneratedPairsSignalDurations(self, generator, input_signal):\n \"\"\"Checks duration of the generated signals.\n\n Checks that the noisy input and the reference tracks are audio files\n with duration equal to or greater than that of the input signal.\n\n Args:\n generator: TestDataGenerator instance.\n input_signal: AudioSegment instance.\n \"\"\"\n input_signal_length = (\n signal_processing.SignalProcessingUtils.CountSamples(input_signal))\n\n # Iterate over the noisy signal - reference pairs.\n for config_name in generator.config_names:\n # Load the noisy input file.\n noisy_signal_filepath = generator.noisy_signal_filepaths[\n config_name]\n noisy_signal = signal_processing.SignalProcessingUtils.LoadWav(\n noisy_signal_filepath)\n\n # Check noisy input signal length.\n noisy_signal_length = (signal_processing.SignalProcessingUtils.\n CountSamples(noisy_signal))\n self.assertGreaterEqual(noisy_signal_length, input_signal_length)\n\n # Load the reference file.\n reference_signal_filepath = generator.reference_signal_filepaths[\n config_name]\n reference_signal = signal_processing.SignalProcessingUtils.LoadWav(\n reference_signal_filepath)\n\n # Check noisy input signal length.\n reference_signal_length = (signal_processing.SignalProcessingUtils.\n CountSamples(reference_signal))\n self.assertGreaterEqual(reference_signal_length,\n input_signal_length)\n\n def _CheckGeneratedPairsOutputPaths(self, generator):\n \"\"\"Checks that the output path created by the generator exists.\n\n Args:\n generator: TestDataGenerator instance.\n \"\"\"\n # Iterate over the noisy signal - reference pairs.\n for config_name in generator.config_names:\n output_path = generator.apm_output_paths[config_name]\n self.assertTrue(os.path.exists(output_path))\n" ]
[ [ "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexandgu/hyperopt
[ "cfb7a89d689ea8102b90b20daefd390d526eb131" ]
[ "hyperopt/tpe.py" ]
[ "\"\"\"\nGraphical model (GM)-based optimization algorithm using Theano\n\"\"\"\nfrom past.utils import old_div\nimport logging\nimport time\n\nimport numpy as np\nfrom scipy.special import erf\nfrom . import pyll\nfrom .pyll import scope\nfrom .pyll.stochastic import implicit_stochastic\n\nfrom .base import miscs_to_idxs_vals\nfrom .base import miscs_update_idxs_vals\n\n# from .base import Trials\nfrom . import rand\n\n__authors__ = \"James Bergstra\"\n__license__ = \"3-clause BSD License\"\n__contact__ = \"github.com/jaberg/hyperopt\"\nlogger = logging.getLogger(__name__)\n\nEPS = 1e-12\n\n# -- default linear forgetting. don't try to change by writing this variable\n# because it's captured in function default args when this file is read\nDEFAULT_LF = 25\n\n\nadaptive_parzen_samplers = {}\n\n\n# a decorator to register functions to the dict `adaptive_parzen_samplers`\ndef adaptive_parzen_sampler(name):\n def wrapper(f):\n assert name not in adaptive_parzen_samplers\n adaptive_parzen_samplers[name] = f\n return f\n\n return wrapper\n\n\n#\n# These are some custom distributions\n# that are used to represent posterior distributions.\n#\n\n# -- Categorical\n\n\[email protected]\ndef categorical_lpdf(sample, p):\n if sample.size:\n return np.log(np.asarray(p)[sample])\n return np.asarray([])\n\n\[email protected]\ndef randint_via_categorical_lpdf(sample, p):\n if sample.size:\n return np.log(np.asarray(p)[sample])\n return np.asarray([])\n\n\n# -- Bounded Gaussian Mixture Model (BGMM)\n\n\n@implicit_stochastic\[email protected]\ndef GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):\n \"\"\"Sample from truncated 1-D Gaussian Mixture Model\"\"\"\n weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))\n assert len(weights) == len(mus) == len(sigmas)\n n_samples = int(np.prod(size))\n # n_components = len(weights)\n if low is None and high is None:\n # -- draw from a standard GMM\n active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)\n samples = rng.normal(loc=mus[active], scale=sigmas[active])\n else:\n # -- draw from truncated components, handling one-sided truncation\n low = float(low) if low is not None else -float(\"Inf\")\n high = float(high) if high is not None else float(\"Inf\")\n if low >= high:\n raise ValueError(\"low >= high\", (low, high))\n samples = []\n while len(samples) < n_samples:\n active = np.argmax(rng.multinomial(1, weights))\n draw = rng.normal(loc=mus[active], scale=sigmas[active])\n if low <= draw < high:\n samples.append(draw)\n samples = np.reshape(np.asarray(samples), size)\n if q is None:\n return samples\n return np.round(old_div(samples, q)) * q\n\n\[email protected]\ndef normal_cdf(x, mu, sigma):\n top = x - mu\n bottom = np.maximum(np.sqrt(2) * sigma, EPS)\n z = old_div(top, bottom)\n return 0.5 * (1 + erf(z))\n\n\[email protected]\ndef GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):\n def print_verbose(s, x):\n return print(f\"GMM1_lpdf:{s}\", x)\n\n verbose = 0\n samples, weights, mus, sigmas = list(\n map(np.asarray, (samples, weights, mus, sigmas))\n )\n if samples.size == 0:\n return np.asarray([])\n if weights.ndim != 1:\n raise TypeError(\"need vector of weights\", weights.shape)\n if mus.ndim != 1:\n raise TypeError(\"need vector of mus\", mus.shape)\n if sigmas.ndim != 1:\n raise TypeError(\"need vector of sigmas\", sigmas.shape)\n assert len(weights) == len(mus) == len(sigmas)\n _samples = samples\n samples = _samples.flatten()\n\n if verbose:\n 
print_verbose(\"samples\", set(samples))\n print_verbose(\"weights\", weights)\n print_verbose(\"mus\", mus)\n print_verbose(\"sigmas\", sigmas)\n print_verbose(\"low\", low)\n print_verbose(\"high\", high)\n print_verbose(\"q\", q)\n\n if low is None and high is None:\n p_accept = 1\n else:\n p_accept = np.sum(\n weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))\n )\n\n if q is None:\n dist = samples[:, None] - mus\n mahal = (old_div(dist, np.maximum(sigmas, EPS))) ** 2\n # mahal shape is (n_samples, n_components)\n Z = np.sqrt(2 * np.pi * sigmas ** 2)\n coef = weights / Z / p_accept\n rval = logsum_rows(-0.5 * mahal + np.log(coef))\n else:\n prob = np.zeros(samples.shape, dtype=\"float64\")\n for w, mu, sigma in zip(weights, mus, sigmas):\n if high is None:\n ubound = samples + old_div(q, 2.0)\n else:\n ubound = np.minimum(samples + old_div(q, 2.0), high)\n if low is None:\n lbound = samples - old_div(q, 2.0)\n else:\n lbound = np.maximum(samples - old_div(q, 2.0), low)\n # -- two-stage addition is slightly more numerically accurate\n inc_amt = w * normal_cdf(ubound, mu, sigma)\n inc_amt -= w * normal_cdf(lbound, mu, sigma)\n prob += inc_amt\n rval = np.log(prob) - np.log(p_accept)\n\n if verbose:\n print_verbose(\"rval:\", dict(list(zip(samples, rval))))\n\n rval.shape = _samples.shape\n return rval\n\n\n# -- Mixture of Log-Normals\n\n\[email protected]\ndef lognormal_cdf(x, mu, sigma):\n # wikipedia claims cdf is\n # .5 + .5 erf( log(x) - mu / sqrt(2 sigma^2))\n #\n # the maximum is used to move negative values and 0 up to a point\n # where they do not cause nan or inf, but also don't contribute much\n # to the cdf.\n if len(x) == 0:\n return np.asarray([])\n if x.min() < 0:\n raise ValueError(\"negative arg to lognormal_cdf\", x)\n olderr = np.seterr(divide=\"ignore\")\n try:\n top = np.log(np.maximum(x, EPS)) - mu\n bottom = np.maximum(np.sqrt(2) * sigma, EPS)\n z = old_div(top, bottom)\n return 0.5 + 0.5 * erf(z)\n finally:\n np.seterr(**olderr)\n\n\[email protected]\ndef lognormal_lpdf(x, mu, sigma):\n # formula copied from wikipedia\n # http://en.wikipedia.org/wiki/Log-normal_distribution\n assert np.all(sigma >= 0)\n sigma = np.maximum(sigma, EPS)\n Z = sigma * x * np.sqrt(2 * np.pi)\n E = 0.5 * (old_div((np.log(x) - mu), sigma)) ** 2\n rval = -E - np.log(Z)\n return rval\n\n\[email protected]\ndef qlognormal_lpdf(x, mu, sigma, q):\n # casting rounds up to nearest step multiple.\n # so lpdf is log of integral from x-step to x+1 of P(x)\n\n # XXX: subtracting two numbers potentially very close together.\n return np.log(lognormal_cdf(x, mu, sigma) - lognormal_cdf(x - q, mu, sigma))\n\n\n@implicit_stochastic\[email protected]\ndef LGMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):\n weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))\n n_samples = np.prod(size)\n # n_components = len(weights)\n if low is None and high is None:\n active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)\n assert len(active) == n_samples\n samples = np.exp(rng.normal(loc=mus[active], scale=sigmas[active]))\n else:\n # -- draw from truncated components\n # TODO: one-sided-truncation\n low = float(low)\n high = float(high)\n if low >= high:\n raise ValueError(\"low >= high\", (low, high))\n samples = []\n while len(samples) < n_samples:\n active = np.argmax(rng.multinomial(1, weights))\n draw = rng.normal(loc=mus[active], scale=sigmas[active])\n if low <= draw < high:\n samples.append(np.exp(draw))\n samples = 
np.asarray(samples)\n\n samples = np.reshape(np.asarray(samples), size)\n if q is not None:\n samples = np.round(old_div(samples, q)) * q\n return samples\n\n\ndef logsum_rows(x):\n m = x.max(axis=1)\n return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m\n\n\[email protected]\ndef LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):\n samples, weights, mus, sigmas = list(\n map(np.asarray, (samples, weights, mus, sigmas))\n )\n assert weights.ndim == 1\n assert mus.ndim == 1\n assert sigmas.ndim == 1\n _samples = samples\n if samples.ndim != 1:\n samples = samples.flatten()\n\n if low is None and high is None:\n p_accept = 1\n else:\n p_accept = np.sum(\n weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))\n )\n\n if q is None:\n # compute the lpdf of each sample under each component\n lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)\n rval = logsum_rows(lpdfs + np.log(weights))\n else:\n # compute the lpdf of each sample under each component\n prob = np.zeros(samples.shape, dtype=\"float64\")\n for w, mu, sigma in zip(weights, mus, sigmas):\n if high is None:\n ubound = samples + old_div(q, 2.0)\n else:\n ubound = np.minimum(samples + old_div(q, 2.0), np.exp(high))\n if low is None:\n lbound = samples - old_div(q, 2.0)\n else:\n lbound = np.maximum(samples - old_div(q, 2.0), np.exp(low))\n lbound = np.maximum(0, lbound)\n # -- two-stage addition is slightly more numerically accurate\n inc_amt = w * lognormal_cdf(ubound, mu, sigma)\n inc_amt -= w * lognormal_cdf(lbound, mu, sigma)\n prob += inc_amt\n rval = np.log(prob) - np.log(p_accept)\n rval.shape = _samples.shape\n return rval\n\n\n#\n# This is the weird heuristic ParzenWindow estimator used for continuous\n# distributions in various ways.\n#\n\n\[email protected]_info(o_len=3)\ndef adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):\n \"\"\"\n A heuristic estimator for the mu and sigma values of a GMM\n TODO: try to find this heuristic in the literature, and cite it - Yoshua\n mentioned the term 'elastic' I think?\n\n mus - matrix (N, M) of M, N-dimensional component centers\n \"\"\"\n mus_orig = np.array(mus)\n mus = np.array(mus)\n assert str(mus.dtype) != \"object\"\n\n if mus.ndim != 1:\n raise TypeError(\"mus must be vector\", mus)\n if len(mus) == 0:\n mus = np.asarray([prior_mu])\n sigma = np.asarray([prior_sigma])\n elif len(mus) == 1:\n mus = np.asarray([prior_mu] + [mus[0]])\n sigma = np.asarray([prior_sigma, prior_sigma * 0.5])\n elif len(mus) >= 2:\n order = np.argsort(mus)\n mus = mus[order]\n sigma = np.zeros_like(mus)\n sigma[1:-1] = np.maximum(mus[1:-1] - mus[0:-2], mus[2:] - mus[1:-1])\n if len(mus) > 2:\n lsigma = mus[2] - mus[0]\n usigma = mus[-1] - mus[-3]\n else:\n lsigma = mus[1] - mus[0]\n usigma = mus[-1] - mus[-2]\n\n sigma[0] = lsigma\n sigma[-1] = usigma\n\n # XXX: is sorting them necessary anymore?\n # un-sort the mus and sigma\n mus[order] = mus.copy()\n sigma[order] = sigma.copy()\n\n if not np.all(mus_orig == mus):\n print(\"orig\", mus_orig)\n print(\"mus\", mus)\n assert np.all(mus_orig == mus)\n\n # put the prior back in\n mus = np.asarray([prior_mu] + list(mus))\n sigma = np.asarray([prior_sigma] + list(sigma))\n\n maxsigma = prior_sigma\n # -- magic formula:\n minsigma = old_div(prior_sigma, np.sqrt(1 + len(mus)))\n\n sigma = np.clip(sigma, minsigma, maxsigma)\n\n weights = np.ones(len(mus), dtype=mus.dtype)\n weights[0] = prior_weight\n\n weights = old_div(weights, weights.sum())\n\n return weights, mus, sigma\n\n\[email 
protected]\ndef linear_forgetting_weights(N, LF):\n assert N >= 0\n assert LF > 0\n if N == 0:\n return np.asarray([])\n if N < LF:\n return np.ones(N)\n ramp = np.linspace(old_div(1.0, N), 1.0, num=N - LF)\n flat = np.ones(LF)\n weights = np.concatenate([ramp, flat], axis=0)\n assert weights.shape == (N,), (weights.shape, N)\n return weights\n\n\n# XXX: make TPE do a post-inference pass over the pyll graph and insert\n# non-default LF argument\n\n\[email protected]_info(o_len=3)\ndef adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma, LF=DEFAULT_LF):\n \"\"\"\n mus - matrix (N, M) of M, N-dimensional component centers\n \"\"\"\n mus = np.array(mus)\n assert str(mus.dtype) != \"object\"\n\n if mus.ndim != 1:\n raise TypeError(\"mus must be vector\", mus)\n if len(mus) == 0:\n srtd_mus = np.asarray([prior_mu])\n sigma = np.asarray([prior_sigma])\n prior_pos = 0\n elif len(mus) == 1:\n if prior_mu < mus[0]:\n prior_pos = 0\n srtd_mus = np.asarray([prior_mu, mus[0]])\n sigma = np.asarray([prior_sigma, prior_sigma * 0.5])\n else:\n prior_pos = 1\n srtd_mus = np.asarray([mus[0], prior_mu])\n sigma = np.asarray([prior_sigma * 0.5, prior_sigma])\n elif len(mus) >= 2:\n\n # create new_mus, which is sorted, and in which\n # the prior has been inserted\n order = np.argsort(mus)\n prior_pos = np.searchsorted(mus[order], prior_mu)\n srtd_mus = np.zeros(len(mus) + 1)\n srtd_mus[:prior_pos] = mus[order[:prior_pos]]\n srtd_mus[prior_pos] = prior_mu\n srtd_mus[prior_pos + 1 :] = mus[order[prior_pos:]]\n sigma = np.zeros_like(srtd_mus)\n sigma[1:-1] = np.maximum(\n srtd_mus[1:-1] - srtd_mus[0:-2], srtd_mus[2:] - srtd_mus[1:-1]\n )\n lsigma = srtd_mus[1] - srtd_mus[0]\n usigma = srtd_mus[-1] - srtd_mus[-2]\n sigma[0] = lsigma\n sigma[-1] = usigma\n\n if LF and LF < len(mus):\n unsrtd_weights = linear_forgetting_weights(len(mus), LF)\n srtd_weights = np.zeros_like(srtd_mus)\n assert len(unsrtd_weights) + 1 == len(srtd_mus)\n srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]\n srtd_weights[prior_pos] = prior_weight\n srtd_weights[prior_pos + 1 :] = unsrtd_weights[order[prior_pos:]]\n\n else:\n srtd_weights = np.ones(len(srtd_mus))\n srtd_weights[prior_pos] = prior_weight\n\n # -- magic formula:\n maxsigma = old_div(prior_sigma, 1.0)\n minsigma = old_div(prior_sigma, min(100.0, (1.0 + len(srtd_mus))))\n\n sigma = np.clip(sigma, minsigma, maxsigma)\n\n sigma[prior_pos] = prior_sigma\n assert prior_sigma > 0\n assert maxsigma > 0\n assert minsigma > 0\n assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)\n\n srtd_weights /= srtd_weights.sum()\n\n return srtd_weights, srtd_mus, sigma\n\n\n#\n# Adaptive Parzen Samplers\n# These produce conditional estimators for various prior distributions\n#\n# NOTE: These are actually used in a fairly complicated way.\n# They are actually returning pyll.Apply AST (Abstract Syntax Tree) objects.\n# This AST is then manipulated and the corresponding _lpdf function is called\n# (e.g GMM1_lpdf)\n#\n# Please see the build_posterior function for details\n\n# -- Uniform\n\n\n@adaptive_parzen_sampler(\"uniform\")\ndef ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n obs, prior_weight, prior_mu, prior_sigma\n )\n return scope.GMM1(\n weights, mus, sigmas, low=low, high=high, q=None, size=size, rng=rng\n )\n\n\n@adaptive_parzen_sampler(\"quniform\")\ndef ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), 
rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n obs, prior_weight, prior_mu, prior_sigma\n )\n return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"loguniform\")\ndef ap_loguniform_sampler(obs, prior_weight, low, high, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(obs), prior_weight, prior_mu, prior_sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high, size=size, rng=rng)\n return rval\n\n\n@adaptive_parzen_sampler(\"qloguniform\")\ndef ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):\n prior_mu = 0.5 * (high + low)\n prior_sigma = 1.0 * (high - low)\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(\n # -- map observations that were quantized to be below exp(low)\n # (particularly 0) back up to exp(low) where they will\n # interact in a reasonable way with the AdaptiveParzen\n # thing.\n scope.maximum(\n obs,\n scope.maximum( # -- protect against exp(low) underflow\n EPS, scope.exp(low)\n ),\n )\n ),\n prior_weight,\n prior_mu,\n prior_sigma,\n )\n return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)\n\n\n# -- Normal\n\n\n@adaptive_parzen_sampler(\"normal\")\ndef ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)\n return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"qnormal\")\ndef ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)\n return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)\n\n\n@adaptive_parzen_sampler(\"lognormal\")\ndef ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n scope.log(obs), prior_weight, mu, sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)\n return rval\n\n\n@adaptive_parzen_sampler(\"qlognormal\")\ndef ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):\n log_obs = scope.log(scope.maximum(obs, EPS))\n weights, mus, sigmas = scope.adaptive_parzen_normal(\n log_obs, prior_weight, mu, sigma\n )\n rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)\n return rval\n\n\n# -- Categorical\n\n\n@adaptive_parzen_sampler(\"randint\")\ndef ap_randint_sampler(\n obs, prior_weight, low, high=None, size=(), rng=None, LF=DEFAULT_LF\n):\n # randint can be seen as a categorical with high - low categories\n weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)\n # if high is None, then low represents high and there is no offset\n domain_size = low if high is None else high - low\n offset = pyll.Literal(0) if high is None else low\n counts = scope.bincount(obs, offset=offset, minlength=domain_size, weights=weights)\n # -- add in some prior pseudocounts\n pseudocounts = counts + prior_weight\n random_variable = scope.randint_via_categorical(\n old_div(pseudocounts, scope.sum(pseudocounts)), size=size, rng=rng\n )\n return random_variable\n\n\[email protected]\ndef tpe_cat_pseudocounts(counts, prior_weight, p, size):\n if np.prod(size) == 0:\n return []\n if p.ndim == 2:\n assert np.all(p == p[0])\n p = p[0]\n pseudocounts = counts + p.size * 
(prior_weight * p)\n return old_div(pseudocounts, np.sum(pseudocounts))\n\n\n@adaptive_parzen_sampler(\"categorical\")\ndef ap_categorical_sampler(obs, prior_weight, p, size=(), rng=None, LF=DEFAULT_LF):\n weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)\n # in order to support pchoice here, we need to find the size of p,\n # but p can have p.ndim == 2, so we pass p to bincount and unpack it\n # (if required) there\n counts = scope.bincount(obs, p=p, weights=weights)\n pseudocounts = scope.tpe_cat_pseudocounts(counts, prior_weight, p, size)\n return scope.categorical(pseudocounts, size=size, rng=rng)\n\n\n#\n# Posterior clone performs symbolic inference on the pyll graph of priors.\n#\n\n\[email protected]_info(o_len=2)\ndef ap_split_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):\n \"\"\"Split the elements of `o_vals` (observations values) into two groups: those for\n trials whose losses (`l_vals`) were above gamma, and those below gamma. Note that\n only unique elements are returned, so the total number of returned elements might\n be lower than `len(o_vals)`\n \"\"\"\n o_idxs, o_vals, l_idxs, l_vals = list(\n map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals])\n )\n\n # XXX if this is working, refactor this sort for efficiency\n\n # Splitting is done this way to cope with duplicate loss values.\n n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)\n l_order = np.argsort(l_vals)\n\n keep_idxs = set(l_idxs[l_order[:n_below]])\n below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]\n\n keep_idxs = set(l_idxs[l_order[n_below:]])\n above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]\n\n return np.asarray(below), np.asarray(above)\n\n\[email protected]\ndef broadcast_best(samples, below_llik, above_llik):\n if len(samples):\n score = below_llik - above_llik\n if len(samples) != len(score):\n raise ValueError()\n best = np.argmax(score)\n return [samples[best]] * len(samples)\n else:\n return []\n\n\ndef build_posterior(\n specs,\n prior_idxs,\n prior_vals,\n obs_idxs,\n obs_vals,\n obs_loss_idxs,\n obs_loss_vals,\n oloss_gamma,\n prior_weight,\n):\n \"\"\"\n This method clones a posterior inference graph by iterating forward in\n topological order, and replacing prior random-variables (prior_idxs, prior_vals)\n with new posterior distributions (post_specs, post_idxs, post_vals) that make use\n of observations (obs_idxs, obs_vals).\n\n \"\"\"\n assert all(\n isinstance(arg, pyll.Apply)\n for arg in [obs_loss_idxs, obs_loss_vals, oloss_gamma]\n )\n assert set(prior_idxs.keys()) == set(prior_vals.keys())\n\n expr = pyll.as_apply([specs, prior_idxs, prior_vals])\n nodes = pyll.dfs(expr)\n\n # build the joint posterior distribution as the values in this memo\n memo = {}\n # map prior RVs to observations\n obs_memo = {}\n\n for nid in prior_vals:\n # construct the leading args for each call to adaptive_parzen_sampler\n # which will permit the \"adaptive parzen samplers\" to adapt to the\n # correct samples.\n obs_below, obs_above = scope.ap_split_trials(\n obs_idxs[nid], obs_vals[nid], obs_loss_idxs, obs_loss_vals, oloss_gamma\n )\n obs_memo[prior_vals[nid]] = [obs_below, obs_above]\n for node in nodes:\n if node not in memo:\n new_inputs = [memo[arg] for arg in node.inputs()]\n if node in obs_memo:\n # -- this case corresponds to an observed Random Var\n # node.name is a distribution like \"normal\", \"randint\", etc.\n obs_below, obs_above = obs_memo[node]\n aa = [memo[a] for a in node.pos_args]\n fn = 
adaptive_parzen_samplers[node.name]\n b_args = [obs_below, prior_weight] + aa\n named_args = {kw: memo[arg] for (kw, arg) in node.named_args}\n b_post = fn(*b_args, **named_args)\n a_args = [obs_above, prior_weight] + aa\n a_post = fn(*a_args, **named_args)\n\n # fn is a function e.g ap_uniform_sampler, ap_normal_sampler, etc\n # b_post and a_post are pyll.Apply objects that are\n # AST (Abstract Syntax Trees). They create the distribution,\n # (e.g. using adaptive_parzen_normal), and then\n # call a function to sample randomly from that distribution\n # (e.g. using scope.GMM1) which return those samples.\n #\n # However we are only interested in using the samples from b_post.\n # This code looks at the AST and grabs the function name that we used\n # for sampling (e.g. scope.GMM1) and modifies it, e.g. to\n # \"scope.GMM1_lpdf\". It then calls this function, passing in the\n # samples as the first parameter.a_args\n #\n # The result is that we are effectively calling, for example:\n # below_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_below, ...))\n # above_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_above, ...))\n\n assert a_post.name == b_post.name\n fn_lpdf = getattr(scope, a_post.name + \"_lpdf\")\n a_kwargs = {\n n: a for n, a in a_post.named_args if n not in (\"rng\", \"size\")\n }\n b_kwargs = {\n n: a for n, a in b_post.named_args if n not in (\"rng\", \"size\")\n }\n\n # calculate the log likelihood of b_post under both distributions\n below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)\n above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)\n # compute new_node based on below & above log likelihood\n new_node = scope.broadcast_best(b_post, below_llik, above_llik)\n elif hasattr(node, \"obj\"):\n # -- keep same literals in the graph\n new_node = node\n else:\n # -- this case is for all the other stuff in the graph\n new_node = node.clone_from_inputs(new_inputs)\n memo[node] = new_node\n\n post_idxs = {nid: memo[idxs] for nid, idxs in prior_idxs.items()}\n post_vals = {nid: memo[vals] for nid, vals in prior_vals.items()}\n return post_idxs, post_vals\n\n\n# TODO: is this used?\n# @scope.define\n# def idxs_prod(full_idxs, idxs_by_label, llik_by_label):\n# \"\"\"Add all of the log-likelihoods together by id.\n#\n# Example arguments:\n# full_idxs = [0, 1, ... N-1]\n# idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}\n# llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}\n#\n# This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... 
]\n# \"\"\"\n# assert len(set(full_idxs)) == len(full_idxs)\n# full_idxs = list(full_idxs)\n# rval = np.zeros(len(full_idxs))\n# pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs))))))\n# assert set(idxs_by_label.keys()) == set(llik_by_label.keys())\n# for nid in idxs_by_label:\n# idxs = idxs_by_label[nid]\n# llik = llik_by_label[nid]\n# assert np.all(np.asarray(idxs) > 1)\n# assert len(set(idxs)) == len(idxs)\n# assert len(idxs) == len(llik)\n# for ii, ll in zip(idxs, llik):\n# rval[pos_of_tid[ii]] += ll\n# return rval\n\n\n_default_prior_weight = 1.0\n\n# -- suggest best of this many draws on every iteration\n_default_n_EI_candidates = 120\n\n# -- gamma * sqrt(n_trials) is fraction of to use as good\n_default_gamma = 0.25\n\n_default_n_startup_jobs = 100\n\n_default_linear_forgetting = DEFAULT_LF\n\n\ndef build_posterior_wrapper(domain, prior_weight, gamma):\n \"\"\"\n Calls build_posterior\n Args:\n domain (hyperopt.base.Domain): contains info about the obj function and the hp\n space passed to fmin\n prior_weight (float): smoothing factor for counts, to avoid having 0 prob\n # TODO: consider renaming or improving documentation for suggest\n gamma (float): the threshold to split between l(x) and g(x), see eq. 2 in\n https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf\n\n Returns:\n\n \"\"\"\n\n # -- these dummy values will be replaced in build_posterior() and never used\n observed = {\"idxs\": pyll.Literal(), \"vals\": pyll.Literal()}\n observed_loss = {\"idxs\": pyll.Literal(), \"vals\": pyll.Literal()}\n\n posterior = build_posterior(\n # -- vectorized clone of bandit template\n domain.vh.v_expr,\n # -- this dict and next represent prior dists\n domain.vh.idxs_by_label(),\n domain.vh.vals_by_label(),\n observed[\"idxs\"],\n observed[\"vals\"],\n observed_loss[\"idxs\"],\n observed_loss[\"vals\"],\n pyll.Literal(gamma),\n pyll.Literal(float(prior_weight)),\n )\n\n return observed, observed_loss, posterior\n\n\ndef suggest(\n new_ids,\n domain,\n trials,\n seed,\n prior_weight=_default_prior_weight,\n n_startup_jobs=_default_n_startup_jobs,\n n_EI_candidates=_default_n_EI_candidates,\n gamma=_default_gamma,\n verbose=True,\n):\n \"\"\"\n Given previous trials and the domain, suggest the best expected hp point\n according to the TPE-EI algo\n\n\n Args:\n prior_weight(\n n_startup_jobs:\n n_EI_candidates:\n gamma:\n verbose:\n\n Returns:\n\n \"\"\"\n\n t0 = time.time()\n # use build_posterior_wrapper to create the pyll nodes\n observed, observed_loss, posterior = build_posterior_wrapper(\n domain, prior_weight, gamma\n )\n tt = time.time() - t0\n if verbose:\n logger.info(\"build_posterior_wrapper took %f seconds\" % tt)\n\n # Loop over previous trials to collect best_docs and best_docs_loss\n best_docs = dict()\n best_docs_loss = dict()\n for doc in trials.trials:\n\n # get either these docs own tid or the one that it's from\n tid = doc[\"misc\"].get(\"from_tid\", doc[\"tid\"])\n\n # associate infinite loss to new/running/failed jobs\n loss = doc[\"result\"].get(\"loss\")\n loss = float(\"inf\") if loss is None else float(loss)\n\n # if set, update loss for this tid if it's higher than current loss\n # otherwise, set it\n best_docs_loss.setdefault(tid, loss)\n if loss <= best_docs_loss[tid]:\n best_docs_loss[tid] = loss\n best_docs[tid] = doc\n\n # -- sort docs by order of suggestion\n # so that linear_forgetting removes the oldest ones\n tid_docs = sorted(best_docs.items())\n losses = [best_docs_loss[tid] for tid, doc in tid_docs]\n tids, docs 
= list(zip(*tid_docs)) if tid_docs else ([], [])\n\n if verbose:\n if docs:\n s = \"%i/%i trials with best loss %f\" % (\n len(docs),\n len(trials),\n np.nanmin(losses),\n )\n else:\n s = \"0 trials\"\n logger.info(\"TPE using %s\" % s)\n\n if len(docs) < n_startup_jobs:\n # N.B. THIS SEEDS THE RNG BASED ON THE new_id\n return rand.suggest(new_ids, domain, trials, seed)\n\n # Sample and compute log-probability.\n first_new_id = new_ids[0]\n if tids:\n # -- the +2 coordinates with an assertion above\n # to ensure that fake ids are used during sampling\n # TODO: not sure what assertion this refers to...\n fake_id_0 = max(max(tids), first_new_id) + 2\n else:\n # -- weird - we're running the TPE algo from scratch\n assert n_startup_jobs <= 0\n fake_id_0 = first_new_id + 2\n\n fake_ids = list(range(fake_id_0, fake_id_0 + n_EI_candidates))\n\n # -- this dictionary will map pyll nodes to the values\n # they should take during the evaluation of the pyll program\n memo = {domain.s_new_ids: fake_ids, domain.s_rng: np.random.default_rng(seed)}\n\n memo[observed_loss[\"idxs\"]] = tids\n memo[observed_loss[\"vals\"]] = losses\n\n observed_idxs_dict, observed_vals_dict = miscs_to_idxs_vals(\n [doc[\"misc\"] for doc in docs], keys=list(domain.params.keys())\n )\n memo[observed[\"idxs\"]] = observed_idxs_dict\n memo[observed[\"vals\"]] = observed_vals_dict\n\n # evaluate `n_EI_candidates` pyll nodes in `posterior` using `memo`\n # TODO: it seems to return idxs, vals, all the same. Is this correct?\n idxs, vals = pyll.rec_eval(posterior, memo=memo, print_node_on_error=False)\n\n # hack to add offset again for randint params\n for label, param in domain.params.items():\n if param.name == \"randint\" and len(param.pos_args) == 2:\n offset = param.pos_args[0].obj\n vals[label] = [val + offset for val in vals[label]]\n\n # -- retrieve the best of the samples and form the return tuple\n\n # specs are deprecated since build_posterior makes all the same\n rval_specs = [None]\n rval_results = [domain.new_result()]\n rval_miscs = [{\"tid\": first_new_id, \"cmd\": domain.cmd, \"workdir\": domain.workdir}]\n\n miscs_update_idxs_vals(\n rval_miscs,\n idxs,\n vals,\n idxs_map={fake_ids[0]: first_new_id},\n assert_all_vals_used=False,\n )\n # return the doc for the best new trial\n return trials.new_trial_docs([first_new_id], rval_specs, rval_results, rval_miscs)\n" ]
[ [ "numpy.sqrt", "numpy.asarray", "numpy.nanmin", "numpy.all", "numpy.seterr", "numpy.concatenate", "numpy.zeros_like", "numpy.searchsorted", "numpy.exp", "numpy.random.default_rng", "numpy.clip", "numpy.argmax", "scipy.special.erf", "numpy.zeros", "numpy.log", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.maximum", "numpy.ones", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
selflein/nn_uncertainty_eval
[ "94a7f2292b8db2197cd55fab57324d438618ae06" ]
[ "uncertainty_eval/datasets/other.py" ]
[ "import json\nfrom pathlib import Path\n\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset, TensorDataset\nfrom tfrecord.torch.dataset import MultiTFRecordDataset\n\nfrom uncertainty_eval.datasets.tabular import TabularDataset\nfrom uncertainty_eval.datasets.abstract_datasplit import DatasetSplit\n\n\nclass GaussianNoise(DatasetSplit):\n def __init__(self, data_root, mean, std, length=10_000):\n self.data_root = data_root\n self.mean = mean\n self.std = std\n self.length = length\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return GaussianNoiseDataset(self.length, self.mean, self.std, transform)\n\n\nclass GaussianNoiseDataset(Dataset):\n \"\"\"\n Use CIFAR-10 mean and standard deviation as default values.\n mean=(125.3, 123.0, 113.9), std=(63.0, 62.1, 66.7)\n \"\"\"\n\n def __init__(self, length, mean, std, transform=None):\n self.transform = transform\n self.mean = mean\n self.std = std\n self.length = length\n self.dist = torch.distributions.Normal(mean, std)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n img = self.dist.sample()\n if len(self.mean.shape) == 3:\n img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))\n if self.transform is not None:\n img = self.transform(img)\n return img, -1\n\n\nclass Constant(DatasetSplit):\n def __init__(self, data_root, low, high, shape, length=10_000):\n self.low = low\n self.high = high\n self.length = length\n self.shape = shape\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return ConstantDataset(self.length, self.low, self.high, self.shape, transform)\n\n\nclass ConstantDataset(Dataset):\n def __init__(self, length, low, high, shape, transform=None):\n assert isinstance(low, float) and isinstance(high, float)\n\n self.low = low\n self.high = high\n self.transform = transform\n self.length = length\n self.shape = shape\n self.dist = torch.distributions.Uniform(low, high)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n sample = self.dist.sample().item()\n sample = torch.empty(self.shape).fill_(sample)\n\n if len(self.shape) == 3:\n sample = Image.fromarray(sample.numpy().squeeze().astype(np.uint8))\n\n if self.transform is not None:\n sample = self.transform(sample)\n return sample, -1\n\n\nclass UniformNoise(DatasetSplit):\n def __init__(self, data_root, low, high, length=10_000):\n self.low = low\n self.high = high\n self.length = length\n\n def train(self, transform):\n return self.test(transform)\n\n def val(self, transform):\n return self.test(transform)\n\n def test(self, transform):\n return UniformNoiseDataset(self.length, self.low, self.high, transform)\n\n\nclass UniformNoiseDataset(Dataset):\n def __init__(self, length, low, high, transform=None):\n self.low = low\n self.high = high\n self.transform = transform\n self.length = length\n self.dist = torch.distributions.Uniform(low, high)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n img = self.dist.sample()\n if len(self.low.shape) == 3:\n img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))\n if self.transform is not None:\n img = self.transform(img)\n return img, -1\n\n\nclass OODGenomics(torch.utils.data.IterableDataset):\n \"\"\"PyTorch Dataset implementation for the Bacteria Genomics OOD dataset 
(https://github.com/google-research/google-research/tree/master/genomics_ood) proposed in\n\n J. Ren et al., “Likelihood Ratios for Out-of-Distribution Detection,” arXiv:1906.02845 [cs, stat], Available: http://arxiv.org/abs/1906.02845.\n \"\"\"\n\n splits = {\n \"train\": \"before_2011_in_tr\",\n \"val\": \"between_2011-2016_in_val\",\n \"test\": \"after_2016_in_test\",\n \"val_ood\": \"between_2011-2016_ood_val\",\n \"test_ood\": \"after_2016_ood_test\",\n }\n\n def __init__(self, data_root, split=\"train\", transform=None, target_transform=None):\n if isinstance(data_root, str):\n data_root = Path(data_root)\n self.data_root = data_root / \"llr_ood_genomics\"\n\n assert split in self.splits, f\"Split '{split}' does not exist.\"\n split_dir = self.data_root / self.splits[split]\n\n tf_record_ids = [f.stem for f in split_dir.iterdir() if f.suffix == \".tfrecord\"]\n\n self.ds = MultiTFRecordDataset(\n data_pattern=str(split_dir / \"{}.tfrecord\"),\n index_pattern=str(split_dir / \"{}.index\"),\n splits={id_: 1 / len(tf_record_ids) for id_ in tf_record_ids},\n description={\"x\": \"byte\", \"y\": \"int\", \"z\": \"byte\"},\n )\n\n with open(self.data_root / \"label_dict.json\") as f:\n label_dict = json.load(f)\n self.label_dict = {v: k for k, v in label_dict.items()}\n\n transform = transform if transform is not None else lambda x: x\n target_transform = (\n target_transform if target_transform is not None else lambda x: x\n )\n self.data_transform = lambda x: self.full_transform(\n x, transform, target_transform\n )\n\n @staticmethod\n def full_transform(item, transform, target_transform):\n dec = np.array([int(i) for i in item[\"x\"].tobytes().decode(\"utf-8\").split(\" \")])\n x = torch.from_numpy(transform(dec.copy()))\n x = torch.nn.functional.one_hot(x.long(), 4).float()\n y = torch.from_numpy(target_transform(item[\"y\"].copy())).long().squeeze()\n return x, y\n\n def __iter__(self):\n return map(self.data_transform, self.ds.__iter__())\n\n\nclass GenomicsDataset(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n\n def train(self, transform):\n return OODGenomics(self.data_root, split=\"train\", transform=transform)\n\n def val(self, transform):\n return OODGenomics(self.data_root, split=\"val\", transform=transform)\n\n def test(self, transform):\n return OODGenomics(self.data_root, split=\"test\", transform=transform)\n\n\nclass OODGenomicsDataset(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n\n def train(self, transform):\n raise NotImplementedError\n\n def val(self, transform):\n return OODGenomics(self.data_root, split=\"val_ood\", transform=transform)\n\n def test(self, transform):\n return OODGenomics(self.data_root, split=\"test_ood\", transform=transform)\n\n\nclass ImageEmbeddingDataset(DatasetSplit):\n data_shape = (640,)\n\n def __init__(self, data_root, dataset_name):\n self.data_root = data_root\n self.dataset_name = dataset_name\n\n def load_split(self, split):\n data = np.load(\n self.data_root / \"embeddings\" / f\"{self.dataset_name}_{split}.npz\"\n )\n return torch.from_numpy(data[\"x\"]), torch.from_numpy(data[\"y\"])\n\n def train(self, transform):\n return TabularDataset(*self.load_split(\"train\"), transforms=transform)\n\n def val(self, transform):\n return TabularDataset(*self.load_split(\"val\"), transforms=transform)\n\n def test(self, transform):\n return TabularDataset(*self.load_split(\"test\"), transforms=transform)\n\n\nclass 
GenomicsNoise(DatasetSplit):\n data_shape = (250,)\n\n def __init__(self, data_root):\n self.data_root = data_root\n data = np.load(self.data_root / \"genomics_noise.npz\")\n self.x = torch.from_numpy(data[\"x\"])\n self.y = torch.from_numpy(data[\"y\"])\n\n def train(self, transform):\n raise NotImplementedError\n\n def val(self, transform):\n raise NotImplementedError\n\n def test(self, transform):\n return TensorDataset(self.x, self.y)\n\n\nclass GenomicsEmbeddingsDataset(ImageEmbeddingDataset):\n data_shape = (128,)\n" ]
[ [ "torch.empty", "torch.utils.data.TensorDataset", "numpy.load", "torch.from_numpy", "torch.distributions.Normal", "torch.distributions.Uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
atakanokan/flair
[ "d33aa6a007384da76d1ae8dac6f4fc61bc652ce7" ]
[ "flair/embeddings.py" ]
[ "import os\nimport re\nimport logging\nfrom abc import abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import List, Union, Dict\n\nimport gensim\nimport numpy as np\nimport torch\nfrom bpemb import BPEmb\nfrom deprecated import deprecated\n\nfrom pytorch_pretrained_bert import (\n BertTokenizer,\n BertModel,\n TransfoXLTokenizer,\n TransfoXLModel,\n OpenAIGPTModel,\n OpenAIGPTTokenizer,\n)\n\nfrom pytorch_pretrained_bert.modeling_openai import (\n PRETRAINED_MODEL_ARCHIVE_MAP as OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,\n)\n\nfrom pytorch_pretrained_bert.modeling_transfo_xl import (\n PRETRAINED_MODEL_ARCHIVE_MAP as TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP,\n)\n\nimport flair\nfrom flair.data import Corpus\nfrom .nn import LockedDropout, WordDropout\nfrom .data import Dictionary, Token, Sentence\nfrom .file_utils import cached_path, open_inside_zip\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Embeddings(torch.nn.Module):\n \"\"\"Abstract base class for all embeddings. Every new type of embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n @abstractmethod\n def embedding_type(self) -> str:\n pass\n\n def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings\n are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n everything_embedded: bool = True\n\n if self.embedding_type == \"word-level\":\n for sentence in sentences:\n for token in sentence.tokens:\n if self.name not in token._embeddings.keys():\n everything_embedded = False\n else:\n for sentence in sentences:\n if self.name not in sentence._embeddings.keys():\n everything_embedded = False\n\n if not everything_embedded or not self.static_embeddings:\n self._add_embeddings_internal(sentences)\n\n return sentences\n\n @abstractmethod\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Private method for adding embeddings to all words in a list of sentences.\"\"\"\n pass\n\n\nclass TokenEmbeddings(Embeddings):\n \"\"\"Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"word-level\"\n\n\nclass DocumentEmbeddings(Embeddings):\n \"\"\"Abstract base class for all document-level embeddings. 
Ever new type of document embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n def embedding_type(self) -> str:\n return \"sentence-level\"\n\n\nclass StackedEmbeddings(TokenEmbeddings):\n \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n def __init__(self, embeddings: List[TokenEmbeddings], detach: bool = True):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings = embeddings\n\n # IMPORTANT: add embeddings as torch modules\n for i, embedding in enumerate(embeddings):\n self.add_module(\"list_embedding_{}\".format(i), embedding)\n\n self.detach: bool = detach\n self.name: str = \"Stack\"\n self.static_embeddings: bool = True\n\n self.__embedding_type: str = embeddings[0].embedding_type\n\n self.__embedding_length: int = 0\n for embedding in embeddings:\n self.__embedding_length += embedding.embedding_length\n\n def embed(\n self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True\n ):\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n @property\n def embedding_type(self) -> str:\n return self.__embedding_type\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for embedding in self.embeddings:\n embedding._add_embeddings_internal(sentences)\n\n return sentences\n\n def __str__(self):\n return f'StackedEmbeddings [{\",\".join([str(e) for e in self.embeddings])}]'\n\n\nclass WordEmbeddings(TokenEmbeddings):\n \"\"\"Standard static word embeddings, such as GloVe or FastText.\"\"\"\n\n def __init__(self, embeddings: str, field: str = None):\n \"\"\"\n Initializes classic word embeddings. 
Constructor downloads required files if not there.\n :param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom\n If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.\n \"\"\"\n self.embeddings = embeddings\n\n old_base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/\"\n )\n base_path = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/\"\n )\n embeddings_path_v4 = (\n \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/\"\n )\n embeddings_path_v4_1 = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/\"\n\n cache_dir = Path(\"embeddings\")\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\" or embeddings.lower() == \"en-glove\":\n cached_path(f\"{old_base_path}glove.gensim.vectors.npy\", cache_dir=cache_dir)\n embeddings = cached_path(\n f\"{old_base_path}glove.gensim\", cache_dir=cache_dir\n )\n\n # TURIAN embeddings\n elif embeddings.lower() == \"turian\" or embeddings.lower() == \"en-turian\":\n cached_path(\n f\"{embeddings_path_v4_1}turian.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4_1}turian\", cache_dir=cache_dir\n )\n\n # KOMNINOS embeddings\n elif embeddings.lower() == \"extvec\" or embeddings.lower() == \"en-extvec\":\n cached_path(\n f\"{old_base_path}extvec.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}extvec.gensim\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif embeddings.lower() == \"crawl\" or embeddings.lower() == \"en-crawl\":\n cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-crawl-300d-1M\", cache_dir=cache_dir\n )\n\n # FT-CRAWL embeddings\n elif (\n embeddings.lower() == \"news\"\n or embeddings.lower() == \"en-news\"\n or embeddings.lower() == \"en\"\n ):\n cached_path(\n f\"{base_path}en-fasttext-news-300d-1M.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}en-fasttext-news-300d-1M\", cache_dir=cache_dir\n )\n\n # twitter embeddings\n elif embeddings.lower() == \"twitter\" or embeddings.lower() == \"en-twitter\":\n cached_path(\n f\"{old_base_path}twitter.gensim.vectors.npy\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{old_base_path}twitter.gensim\", cache_dir=cache_dir\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 2:\n cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code wiki embeddings\n elif len(embeddings.lower()) == 7 and embeddings.endswith(\"-wiki\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n # two-letter language code crawl embeddings\n elif len(embeddings.lower()) == 8 and embeddings.endswith(\"-crawl\"):\n cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy\",\n cache_dir=cache_dir,\n )\n embeddings = cached_path(\n f\"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M\",\n cache_dir=cache_dir,\n )\n\n elif 
not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n if str(embeddings).endswith(\".bin\"):\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n str(embeddings), binary=True\n )\n else:\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(\n str(embeddings)\n )\n\n self.field = field\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word]\n elif word.lower() in self.precomputed_word_embeddings:\n word_embedding = self.precomputed_word_embeddings[word.lower()]\n elif (\n re.sub(r\"\\d\", \"#\", word.lower()) in self.precomputed_word_embeddings\n ):\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"#\", word.lower())\n ]\n elif (\n re.sub(r\"\\d\", \"0\", word.lower()) in self.precomputed_word_embeddings\n ):\n word_embedding = self.precomputed_word_embeddings[\n re.sub(r\"\\d\", \"0\", word.lower())\n ]\n else:\n word_embedding = np.zeros(self.embedding_length, dtype=\"float\")\n\n word_embedding = torch.FloatTensor(word_embedding)\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return f\"'{self.embeddings}'\"\n\n\nclass OneHotEmbeddings(TokenEmbeddings):\n \"\"\"One-hot encoded embeddings.\"\"\"\n\n def __init__(\n self,\n corpus=Union[Corpus, List[Sentence]],\n field: str = \"text\",\n embedding_length: int = 300,\n min_freq: int = 3,\n ):\n\n super().__init__()\n self.name = \"one-hot\"\n self.static_embeddings = False\n self.min_freq = min_freq\n\n tokens = list(map((lambda s: s.tokens), corpus.train))\n tokens = [token for sublist in tokens for token in sublist]\n\n if field == \"text\":\n most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()\n else:\n most_common = Counter(\n list(map((lambda t: t.get_tag(field)), tokens))\n ).most_common()\n\n tokens = []\n for token, freq in most_common:\n if freq < min_freq:\n break\n tokens.append(token)\n\n self.vocab_dictionary: Dictionary = Dictionary()\n for token in tokens:\n self.vocab_dictionary.add_item(token)\n\n # max_tokens = 500\n self.__embedding_length = embedding_length\n\n print(self.vocab_dictionary.idx2item)\n print(f\"vocabulary size of {len(self.vocab_dictionary)}\")\n\n # model architecture\n self.embedding_layer = torch.nn.Embedding(\n len(self.vocab_dictionary), self.__embedding_length\n )\n torch.nn.init.xavier_uniform_(self.embedding_layer.weight)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n one_hot_sentences = []\n for i, sentence in enumerate(sentences):\n context_idxs = [\n self.vocab_dictionary.get_idx_for_item(t.text) for t in sentence.tokens\n ]\n\n 
one_hot_sentences.extend(context_idxs)\n\n one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(\n flair.device\n )\n\n embedded = self.embedding_layer.forward(one_hot_sentences)\n\n index = 0\n for sentence in sentences:\n for token in sentence:\n embedding = embedded[index]\n token.set_embedding(self.name, embedding)\n index += 1\n\n return sentences\n\n def __str__(self):\n return self.name\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def extra_repr(self):\n return \"min_freq={}\".format(self.min_freq)\n\n\nclass BPEmbSerializable(BPEmb):\n def __getstate__(self):\n state = self.__dict__.copy()\n # save the sentence piece model as binary file (not as path which may change)\n state[\"spm_model_binary\"] = open(self.model_file, mode=\"rb\").read()\n state[\"spm\"] = None\n return state\n\n def __setstate__(self, state):\n from bpemb.util import sentencepiece_load\n\n model_file = self.model_tpl.format(lang=state[\"lang\"], vs=state[\"vs\"])\n self.__dict__ = state\n\n # write out the binary sentence piece model into the expected directory\n self.cache_dir: Path = Path(flair.cache_root) / \"embeddings\"\n if \"spm_model_binary\" in self.__dict__:\n # if the model was saved as binary and it is not found on disk, write to appropriate path\n if not os.path.exists(self.cache_dir / state[\"lang\"]):\n os.makedirs(self.cache_dir / state[\"lang\"])\n self.model_file = self.cache_dir / model_file\n with open(self.model_file, \"wb\") as out:\n out.write(self.__dict__[\"spm_model_binary\"])\n else:\n # otherwise, use normal process and potentially trigger another download\n self.model_file = self._load_file(model_file)\n\n # once the modes if there, load it with sentence piece\n state[\"spm\"] = sentencepiece_load(self.model_file)\n\n\nclass BytePairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n language: str,\n dim: int = 50,\n syllables: int = 100000,\n cache_dir=Path(flair.cache_root) / \"embeddings\",\n ):\n \"\"\"\n Initializes BP embeddings. 
Constructor downloads required files if not there.\n \"\"\"\n\n self.name: str = f\"bpe-{language}-{syllables}-{dim}\"\n self.static_embeddings = True\n self.embedder = BPEmbSerializable(\n lang=language, vs=syllables, dim=dim, cache_dir=cache_dir\n )\n\n self.__embedding_length: int = self.embedder.emb.vector_size * 2\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word.strip() == \"\":\n # empty words get no embedding\n token.set_embedding(\n self.name, torch.zeros(self.embedding_length, dtype=torch.float)\n )\n else:\n # all other words get embedded\n embeddings = self.embedder.embed(word.lower())\n embedding = np.concatenate(\n (embeddings[0], embeddings[len(embeddings) - 1])\n )\n token.set_embedding(\n self.name, torch.tensor(embedding, dtype=torch.float)\n )\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n\nclass ELMoEmbeddings(TokenEmbeddings):\n \"\"\"Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018.\"\"\"\n\n def __init__(\n self, model: str = \"original\", options_file: str = None, weight_file: str = None\n ):\n super().__init__()\n\n try:\n import allennlp.commands.elmo\n except:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! The library \"allennlp\" is not installed!')\n log.warning(\n 'To use ELMoEmbeddings, please first install with \"pip install allennlp\"'\n )\n log.warning(\"-\" * 100)\n pass\n\n self.name = \"elmo-\" + model\n self.static_embeddings = True\n\n if not options_file or not weight_file:\n # the default model for ELMo is the 'original' model, which is very large\n options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE\n weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE\n # alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name\n if model == \"small\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5\"\n if model == \"medium\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5\"\n if model == \"pt\" or model == \"portuguese\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5\"\n if model == \"pubmed\":\n options_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json\"\n weight_file = \"https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5\"\n\n # put on Cuda if 
available\n from flair import device\n\n if re.fullmatch(r'cuda:[0-9]+', str(device)):\n cuda_device = int(str(device).split(':')[-1])\n elif str(device) == \"cpu\":\n cuda_device = -1\n else:\n cuda_device = 0\n\n self.ee = allennlp.commands.elmo.ElmoEmbedder(\n options_file=options_file, weight_file=weight_file, cuda_device=cuda_device\n )\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n sentence_words: List[List[str]] = []\n for sentence in sentences:\n sentence_words.append([token.text for token in sentence])\n\n embeddings = self.ee.embed_batch(sentence_words)\n\n for i, sentence in enumerate(sentences):\n\n sentence_embeddings = embeddings[i]\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embedding = torch.cat(\n [\n torch.FloatTensor(sentence_embeddings[0, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[1, token_idx, :]),\n torch.FloatTensor(sentence_embeddings[2, token_idx, :]),\n ],\n 0,\n )\n\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass ELMoTransformerEmbeddings(TokenEmbeddings):\n \"\"\"Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018.\"\"\"\n\n def __init__(self, model_file: str):\n super().__init__()\n\n try:\n from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (\n BidirectionalLanguageModelTokenEmbedder,\n )\n from allennlp.data.token_indexers.elmo_indexer import (\n ELMoTokenCharactersIndexer,\n )\n except:\n log.warning(\"-\" * 100)\n log.warning('ATTENTION! 
The library \"allennlp\" is not installed!')\n log.warning(\n \"To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp\"\n )\n log.warning(\"-\" * 100)\n pass\n\n self.name = \"elmo-transformer\"\n self.static_embeddings = True\n self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(\n archive_file=model_file,\n dropout=0.2,\n bos_eos_tokens=(\"<S>\", \"</S>\"),\n remove_bos_eos=True,\n requires_grad=False,\n )\n self.lm_embedder = self.lm_embedder.to(device=flair.device)\n self.vocab = self.lm_embedder._lm.vocab\n self.indexer = ELMoTokenCharactersIndexer()\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n # Avoid conflicts with flair's Token class\n import allennlp.data.tokenizers.token as allen_nlp_token\n\n indexer = self.indexer\n vocab = self.vocab\n\n for sentence in sentences:\n character_indices = indexer.tokens_to_indices(\n [allen_nlp_token.Token(token.text) for token in sentence], vocab, \"elmo\"\n )[\"elmo\"]\n\n indices_tensor = torch.LongTensor([character_indices])\n indices_tensor = indices_tensor.to(device=flair.device)\n embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n embedding = embeddings[token_idx]\n word_embedding = torch.FloatTensor(embedding)\n token.set_embedding(self.name, word_embedding)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass TransformerXLEmbeddings(TokenEmbeddings):\n def __init__(self, model: str = \"transfo-xl-wt103\"):\n \"\"\"Transformer-XL embeddings, as proposed in Dai et al., 2019.\n :param model: name of Transformer-XL model\n \"\"\"\n super().__init__()\n\n if model not in TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys():\n raise ValueError(\"Provided Transformer-XL model is not available.\")\n\n self.tokenizer = TransfoXLTokenizer.from_pretrained(model)\n self.model = TransfoXLModel.from_pretrained(model)\n self.name = model\n self.static_embeddings = True\n\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n self.model.to(flair.device)\n self.model.eval()\n\n with torch.no_grad():\n for sentence in sentences:\n token_strings = [token.text for token in sentence.tokens]\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(token_strings)\n\n tokens_tensor = torch.tensor([indexed_tokens])\n tokens_tensor = tokens_tensor.to(flair.device)\n\n hidden_states, _ = self.model(tokens_tensor)\n\n for token, token_idx in zip(\n sentence.tokens, range(len(sentence.tokens))\n ):\n token.set_embedding(self.name, hidden_states[0][token_idx])\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return 
self.name\n\n\nclass OpenAIGPTEmbeddings(TokenEmbeddings):\n def __init__(\n self, model: str = \"openai-gpt\", pooling_operation: str = \"first_last\"\n ):\n \"\"\"OpenAI GPT embeddings, as proposed in Radford et al. 2018.\n :param model: name of OpenAI GPT model\n :param pooling_operation: defines pooling operation for subwords\n \"\"\"\n super().__init__()\n\n if model not in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys():\n raise ValueError(\"Provided OpenAI GPT model is not available.\")\n\n self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model)\n self.model = OpenAIGPTModel.from_pretrained(model)\n self.name = model\n self.static_embeddings = True\n self.pooling_operation = pooling_operation\n\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n self.model.to(flair.device)\n self.model.eval()\n\n with torch.no_grad():\n for sentence in sentences:\n for token in sentence.tokens:\n token_text = token.text\n\n subwords = self.tokenizer.tokenize(token_text)\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(subwords)\n tokens_tensor = torch.tensor([indexed_tokens])\n tokens_tensor = tokens_tensor.to(flair.device)\n\n hidden_states = self.model(tokens_tensor)\n\n if self.pooling_operation == \"first\":\n # Use embedding of first subword\n token.set_embedding(self.name, hidden_states[0][0])\n elif self.pooling_operation == \"last\":\n last_embedding = hidden_states[0][len(hidden_states[0]) - 1]\n token.set_embedding(self.name, last_embedding)\n elif self.pooling_operation == \"first_last\":\n # Use embedding of first and last subword\n first_embedding = hidden_states[0][0]\n last_embedding = hidden_states[0][len(hidden_states[0]) - 1]\n final_embedding = torch.cat([first_embedding, last_embedding])\n token.set_embedding(self.name, final_embedding)\n else:\n # Otherwise, use mean over all subwords in token\n all_embeddings = [\n embedding.unsqueeze(0) for embedding in hidden_states[0]\n ]\n mean = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)\n token.set_embedding(self.name, mean)\n\n return sentences\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n def __str__(self):\n return self.name\n\n\nclass CharacterEmbeddings(TokenEmbeddings):\n \"\"\"Character embeddings of words, as proposed in Lample et al., 2016.\"\"\"\n\n def __init__(self, path_to_char_dict: str = None, char_embedding_dim: int = 25, hidden_size_char: int = 25):\n \"\"\"Uses the default character dictionary if none provided.\"\"\"\n\n super().__init__()\n self.name = \"Char\"\n self.static_embeddings = False\n\n # use list of common characters if none provided\n if path_to_char_dict is None:\n self.char_dictionary: Dictionary = Dictionary.load(\"common-chars\")\n else:\n self.char_dictionary: Dictionary = Dictionary.load_from_file(\n path_to_char_dict\n )\n\n self.char_embedding_dim: int = char_embedding_dim\n self.hidden_size_char: int = hidden_size_char\n self.char_embedding = torch.nn.Embedding(\n len(self.char_dictionary.item2idx), self.char_embedding_dim\n )\n self.char_rnn = torch.nn.LSTM(\n self.char_embedding_dim,\n self.hidden_size_char,\n num_layers=1,\n bidirectional=True,\n )\n\n self.__embedding_length = self.char_embedding_dim * 2\n\n 
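# note: the vectors produced per token have size hidden_size_char * 2 (the concatenated\n        # final states of the bidirectional character LSTM); char_embedding_dim * 2 only\n        # coincides with that value because both parameters default to 25.\n        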
self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for sentence in sentences:\n\n tokens_char_indices = []\n\n # translate words in sentence into ints using dictionary\n for token in sentence.tokens:\n char_indices = [\n self.char_dictionary.get_idx_for_item(char) for char in token.text\n ]\n tokens_char_indices.append(char_indices)\n\n # sort words by length, for batching and masking\n tokens_sorted_by_length = sorted(\n tokens_char_indices, key=lambda p: len(p), reverse=True\n )\n d = {}\n for i, ci in enumerate(tokens_char_indices):\n for j, cj in enumerate(tokens_sorted_by_length):\n if ci == cj:\n d[j] = i\n continue\n chars2_length = [len(c) for c in tokens_sorted_by_length]\n longest_token_in_sentence = max(chars2_length)\n tokens_mask = torch.zeros(\n (len(tokens_sorted_by_length), longest_token_in_sentence),\n dtype=torch.long,\n device=flair.device,\n )\n\n for i, c in enumerate(tokens_sorted_by_length):\n tokens_mask[i, : chars2_length[i]] = torch.tensor(\n c, dtype=torch.long, device=flair.device\n )\n\n # chars for rnn processing\n chars = tokens_mask\n\n character_embeddings = self.char_embedding(chars).transpose(0, 1)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(\n character_embeddings, chars2_length\n )\n\n lstm_out, self.hidden = self.char_rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n outputs = outputs.transpose(0, 1)\n chars_embeds_temp = torch.zeros(\n (outputs.size(0), outputs.size(2)),\n dtype=torch.float,\n device=flair.device,\n )\n for i, index in enumerate(output_lengths):\n chars_embeds_temp[i] = outputs[i, index - 1]\n character_embeddings = chars_embeds_temp.clone()\n for i in range(character_embeddings.size(0)):\n character_embeddings[d[i]] = chars_embeds_temp[i]\n\n for token_number, token in enumerate(sentence.tokens):\n token.set_embedding(self.name, character_embeddings[token_number])\n\n def __str__(self):\n return self.name\n\n\nclass FlairEmbeddings(TokenEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018.\"\"\"\n\n def __init__(\n self,\n model: str,\n use_cache: bool = False,\n cache_directory: Path = None,\n chars_per_chunk: int = 512,\n ):\n \"\"\"\n initializes contextual string embeddings using a character-level language model.\n :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',\n 'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'\n depending on which character language model is desired.\n :param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will\n not allow re-use of once computed embeddings that do not fit into memory\n :param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache\n is written to the provided directory.\n :param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster but requires\n more memory. 
Lower means slower but less memory.\n \"\"\"\n super().__init__()\n\n cache_dir = Path(\"embeddings\")\n\n aws_path: str = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources\"\n\n self.PRETRAINED_MODEL_ARCHIVE_MAP = {\n # multilingual models\n \"multi-forward\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt\",\n \"multi-backward\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt\",\n \"multi-forward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt\",\n \"multi-backward-fast\": f\"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt\",\n # English models\n \"news-forward\": f\"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt\",\n \"news-backward\": f\"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt\",\n \"news-forward-fast\": f\"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt\",\n \"news-backward-fast\": f\"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt\",\n \"mix-forward\": f\"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt\",\n \"mix-backward\": f\"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt\",\n # Arabic\n \"ar-forward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt\",\n \"ar-backward\": f\"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt\",\n # Bulgarian\n \"bg-forward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt\",\n \"bg-backward-fast\": f\"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt\",\n \"bg-forward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt\",\n \"bg-backward\": f\"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt\",\n # Czech\n \"cs-forward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt\",\n \"cs-backward\": f\"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt\",\n \"cs-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt\",\n \"cs-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt\",\n # Danish\n \"da-forward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt\",\n \"da-backward\": f\"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt\",\n # German\n \"de-forward\": f\"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt\",\n \"de-backward\": f\"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt\",\n \"de-historic-ha-forward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt\",\n \"de-historic-ha-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt\",\n \"de-historic-wz-forward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt\",\n \"de-historic-wz-backward\": f\"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt\",\n # Spanish\n \"es-forward\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt\",\n \"es-backward\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt\",\n \"es-forward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt\",\n \"es-backward-fast\": f\"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt\",\n # Basque\n \"eu-forward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt\",\n \"eu-backward\": f\"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt\",\n 
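# keys carrying a -v0 suffix appear to be earlier snapshots of the corresponding\n            # language models, presumably kept so that older configurations keep working;\n            # the unsuffixed keys point at the newer releases.\n            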
\"eu-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt\",\n \"eu-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt\",\n # Persian\n \"fa-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt\",\n \"fa-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt\",\n # Finnish\n \"fi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt\",\n \"fi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt\",\n # French\n \"fr-forward\": f\"{aws_path}/embeddings/lm-fr-charlm-forward.pt\",\n \"fr-backward\": f\"{aws_path}/embeddings/lm-fr-charlm-backward.pt\",\n # Hebrew\n \"he-forward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt\",\n \"he-backward\": f\"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt\",\n # Hindi\n \"hi-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt\",\n \"hi-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt\",\n # Croatian\n \"hr-forward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt\",\n \"hr-backward\": f\"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt\",\n # Indonesian\n \"id-forward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt\",\n \"id-backward\": f\"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt\",\n # Italian\n \"it-forward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt\",\n \"it-backward\": f\"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt\",\n # Japanese\n \"ja-forward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt\",\n \"ja-backward\": f\"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt\",\n # Dutch\n \"nl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt\",\n \"nl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt\",\n \"nl-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt\",\n \"nl-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt\",\n # Norwegian\n \"no-forward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt\",\n \"no-backward\": f\"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt\",\n # Polish\n \"pl-forward\": f\"{aws_path}/embeddings/lm-polish-forward-v0.2.pt\",\n \"pl-backward\": f\"{aws_path}/embeddings/lm-polish-backward-v0.2.pt\",\n \"pl-opus-forward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt\",\n \"pl-opus-backward\": f\"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt\",\n # Portuguese\n \"pt-forward\": f\"{aws_path}/embeddings-v0.4/lm-pt-forward.pt\",\n \"pt-backward\": f\"{aws_path}/embeddings-v0.4/lm-pt-backward.pt\",\n # Pubmed\n \"pubmed-forward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt\",\n \"pubmed-backward\": f\"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt\",\n # Slovenian\n \"sl-forward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt\",\n \"sl-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt\",\n \"sl-v0-forward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt\",\n \"sl-v0-backward\": f\"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt\",\n # Swedish\n \"sv-forward\": 
f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt\",\n \"sv-backward\": f\"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt\",\n \"sv-v0-forward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt\",\n \"sv-v0-backward\": f\"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt\",\n }\n\n # load model if in pretrained model map\n if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:\n base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[\n replace_with_language_code(model)\n ]\n model = cached_path(base_path, cache_dir=cache_dir)\n\n elif not Path(model).exists():\n raise ValueError(\n f'The given model \"{model}\" is not available or is not a valid path.'\n )\n\n self.name = str(model)\n self.static_embeddings = True\n\n from flair.models import LanguageModel\n\n self.lm = LanguageModel.load_language_model(model)\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n self.chars_per_chunk: int = chars_per_chunk\n\n # initialize cache if use_cache set\n self.cache = None\n if use_cache:\n cache_path = (\n Path(f\"{self.name}-tmp-cache.sqllite\")\n if not cache_directory\n else cache_directory / f\"{self.name}-tmp-cache.sqllite\"\n )\n from sqlitedict import SqliteDict\n\n self.cache = SqliteDict(str(cache_path), autocommit=True)\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n # set to eval mode\n self.eval()\n\n def train(self, mode=True):\n pass\n\n def __getstate__(self):\n # Copy the object's state from self.__dict__ which contains\n # all our instance attributes. Always use the dict.copy()\n # method to avoid modifying the original state.\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n state[\"cache\"] = None\n return state\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # make compatible with serialized models\n if \"chars_per_chunk\" not in self.__dict__:\n self.chars_per_chunk = 512\n\n # if cache is used, try setting embeddings from cache first\n if \"cache\" in self.__dict__ and self.cache is not None:\n\n # try populating embeddings from cache\n all_embeddings_retrieved_from_cache: bool = True\n for sentence in sentences:\n key = sentence.to_tokenized_string()\n embeddings = self.cache.get(key)\n\n if not embeddings:\n all_embeddings_retrieved_from_cache = False\n break\n else:\n for token, embedding in zip(sentence, embeddings):\n token.set_embedding(self.name, torch.FloatTensor(embedding))\n\n if all_embeddings_retrieved_from_cache:\n return sentences\n\n with torch.no_grad():\n\n # if this is not possible, use LM to generate embedding. 
First, get text sentences\n text_sentences = [sentence.to_tokenized_string() for sentence in sentences]\n\n longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))\n\n # pad strings with whitespaces to longest sentence\n sentences_padded: List[str] = []\n append_padded_sentence = sentences_padded.append\n\n start_marker = \"\\n\"\n\n end_marker = \" \"\n extra_offset = len(start_marker)\n for sentence_text in text_sentences:\n pad_by = longest_character_sequence_in_batch - len(sentence_text)\n if self.is_forward_lm:\n padded = \"{}{}{}{}\".format(\n start_marker, sentence_text, end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n else:\n padded = \"{}{}{}{}\".format(\n start_marker, sentence_text[::-1], end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n\n # get hidden states from language model\n all_hidden_states_in_lm = self.lm.get_representation(\n sentences_padded, self.chars_per_chunk\n )\n\n # take first or last hidden states from language model as word representation\n for i, sentence in enumerate(sentences):\n sentence_text = sentence.to_tokenized_string()\n\n offset_forward: int = extra_offset\n offset_backward: int = len(sentence_text) + extra_offset\n\n for token in sentence.tokens:\n\n offset_forward += len(token.text)\n\n if self.is_forward_lm:\n offset = offset_forward\n else:\n offset = offset_backward\n\n embedding = all_hidden_states_in_lm[offset, i, :]\n\n # if self.tokenized_lm or token.whitespace_after:\n offset_forward += 1\n offset_backward -= 1\n\n offset_backward -= len(token.text)\n\n token.set_embedding(self.name, embedding.clone().detach())\n\n all_hidden_states_in_lm = None\n\n if \"cache\" in self.__dict__ and self.cache is not None:\n for sentence in sentences:\n self.cache[sentence.to_tokenized_string()] = [\n token._embeddings[self.name].tolist() for token in sentence\n ]\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass PooledFlairEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n contextual_embeddings: Union[str, FlairEmbeddings],\n pooling: str = \"min\",\n only_capitalized: bool = False,\n **kwargs,\n ):\n\n super().__init__()\n\n # use the character language model embeddings as basis\n if type(contextual_embeddings) is str:\n self.context_embeddings: FlairEmbeddings = FlairEmbeddings(\n contextual_embeddings, **kwargs\n )\n else:\n self.context_embeddings: FlairEmbeddings = contextual_embeddings\n\n # length is twice the original character LM embedding length\n self.embedding_length = self.context_embeddings.embedding_length * 2\n self.name = self.context_embeddings.name + \"-context\"\n\n # these fields are for the embedding memory\n self.word_embeddings = {}\n self.word_count = {}\n\n # whether to add only capitalized words to memory (faster runtime and lower memory consumption)\n self.only_capitalized = only_capitalized\n\n # we re-compute embeddings dynamically at each epoch\n self.static_embeddings = False\n\n # set the memory method\n self.pooling = pooling\n if pooling == \"mean\":\n self.aggregate_op = torch.add\n elif pooling == \"fade\":\n self.aggregate_op = torch.add\n elif pooling == \"max\":\n self.aggregate_op = torch.max\n elif pooling == \"min\":\n self.aggregate_op = torch.min\n\n def train(self, mode=True):\n super().train(mode=mode)\n if mode:\n # memory is wiped each time we do a training run\n print(\"train mode resetting embeddings\")\n self.word_embeddings = {}\n self.word_count = {}\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> 
List[Sentence]:\n\n self.context_embeddings.embed(sentences)\n\n # if we keep a pooling, it needs to be updated continuously\n for sentence in sentences:\n for token in sentence.tokens:\n\n # update embedding\n local_embedding = token._embeddings[self.context_embeddings.name]\n local_embedding = local_embedding.to(flair.device)\n\n if token.text[0].isupper() or not self.only_capitalized:\n\n if token.text not in self.word_embeddings:\n self.word_embeddings[token.text] = local_embedding\n self.word_count[token.text] = 1\n else:\n aggregated_embedding = self.aggregate_op(\n self.word_embeddings[token.text], local_embedding\n )\n if self.pooling == \"fade\":\n aggregated_embedding /= 2\n self.word_embeddings[token.text] = aggregated_embedding\n self.word_count[token.text] += 1\n\n # add embeddings after updating\n for sentence in sentences:\n for token in sentence.tokens:\n if token.text in self.word_embeddings:\n base = (\n self.word_embeddings[token.text] / self.word_count[token.text]\n if self.pooling == \"mean\"\n else self.word_embeddings[token.text]\n )\n else:\n base = token._embeddings[self.context_embeddings.name]\n\n token.set_embedding(self.name, base)\n\n return sentences\n\n def embedding_length(self) -> int:\n return self.embedding_length\n\n\nclass BertEmbeddings(TokenEmbeddings):\n def __init__(\n self,\n bert_model_or_path: str = \"bert-base-uncased\",\n layers: str = \"-1,-2,-3,-4\",\n pooling_operation: str = \"first\",\n ):\n \"\"\"\n Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.\n :param bert_model_or_path: name of BERT model ('') or directory path containing custom model, configuration file\n and vocab file (names of three files should be - bert_config.json, pytorch_model.bin/model.chkpt, vocab.txt)\n :param layers: string indicating which layers to take for embedding\n :param pooling_operation: how to get from token piece embeddings to token embedding. 
Either pool them and take\n the average ('mean') or use first word piece embedding as token embedding ('first)\n \"\"\"\n super().__init__()\n\n self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)\n self.model = BertModel.from_pretrained(bert_model_or_path)\n self.layer_indexes = [int(x) for x in layers.split(\",\")]\n self.pooling_operation = pooling_operation\n self.name = str(bert_model_or_path)\n self.static_embeddings = True\n\n class BertInputFeatures(object):\n \"\"\"Private helper class for holding BERT-formatted features\"\"\"\n\n def __init__(\n self,\n unique_id,\n tokens,\n input_ids,\n input_mask,\n input_type_ids,\n token_subtoken_count,\n ):\n self.unique_id = unique_id\n self.tokens = tokens\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.input_type_ids = input_type_ids\n self.token_subtoken_count = token_subtoken_count\n\n def _convert_sentences_to_features(\n self, sentences, max_sequence_length: int\n ) -> [BertInputFeatures]:\n\n max_sequence_length = max_sequence_length + 2\n\n features: List[BertEmbeddings.BertInputFeatures] = []\n for (sentence_index, sentence) in enumerate(sentences):\n\n bert_tokenization: List[str] = []\n token_subtoken_count: Dict[int, int] = {}\n\n for token in sentence:\n subtokens = self.tokenizer.tokenize(token.text)\n bert_tokenization.extend(subtokens)\n token_subtoken_count[token.idx] = len(subtokens)\n\n if len(bert_tokenization) > max_sequence_length - 2:\n bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]\n\n tokens = []\n input_type_ids = []\n tokens.append(\"[CLS]\")\n input_type_ids.append(0)\n for token in bert_tokenization:\n tokens.append(token)\n input_type_ids.append(0)\n tokens.append(\"[SEP]\")\n input_type_ids.append(0)\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_sequence_length:\n input_ids.append(0)\n input_mask.append(0)\n input_type_ids.append(0)\n\n features.append(\n BertEmbeddings.BertInputFeatures(\n unique_id=sentence_index,\n tokens=tokens,\n input_ids=input_ids,\n input_mask=input_mask,\n input_type_ids=input_type_ids,\n token_subtoken_count=token_subtoken_count,\n )\n )\n\n return features\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. 
If embeddings are already added,\n updates only if embeddings are non-static.\"\"\"\n\n # first, find longest sentence in batch\n longest_sentence_in_batch: int = len(\n max(\n [\n self.tokenizer.tokenize(sentence.to_tokenized_string())\n for sentence in sentences\n ],\n key=len,\n )\n )\n\n # prepare id maps for BERT model\n features = self._convert_sentences_to_features(\n sentences, longest_sentence_in_batch\n )\n all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(\n flair.device\n )\n all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(\n flair.device\n )\n\n # put encoded batch through BERT model to get all hidden states of all encoder layers\n self.model.to(flair.device)\n self.model.eval()\n all_encoder_layers, _ = self.model(\n all_input_ids, token_type_ids=None, attention_mask=all_input_masks\n )\n\n with torch.no_grad():\n\n for sentence_index, sentence in enumerate(sentences):\n\n feature = features[sentence_index]\n\n # get aggregated embeddings for each BERT-subtoken in sentence\n subtoken_embeddings = []\n for token_index, _ in enumerate(feature.tokens):\n all_layers = []\n for layer_index in self.layer_indexes:\n layer_output = (\n all_encoder_layers[int(layer_index)]\n .detach()\n .cpu()[sentence_index]\n )\n all_layers.append(layer_output[token_index])\n\n subtoken_embeddings.append(torch.cat(all_layers))\n\n # get the current sentence object\n token_idx = 0\n for token in sentence:\n # add concatenated embedding to sentence\n token_idx += 1\n\n if self.pooling_operation == \"first\":\n # use first subword embedding if pooling operation is 'first'\n token.set_embedding(self.name, subtoken_embeddings[token_idx])\n else:\n # otherwise, do a mean over all subwords in token\n embeddings = subtoken_embeddings[\n token_idx : token_idx\n + feature.token_subtoken_count[token.idx]\n ]\n embeddings = [\n embedding.unsqueeze(0) for embedding in embeddings\n ]\n mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)\n token.set_embedding(self.name, mean)\n\n token_idx += feature.token_subtoken_count[token.idx] - 1\n\n return sentences\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n return len(self.layer_indexes) * self.model.config.hidden_size\n\n\nclass CharLMEmbeddings(TokenEmbeddings):\n \"\"\"Contextual string embeddings of words, as proposed in Akbik et al., 2018. \"\"\"\n\n @deprecated(version=\"0.4\", reason=\"Use 'FlairEmbeddings' instead.\")\n def __init__(\n self,\n model: str,\n detach: bool = True,\n use_cache: bool = False,\n cache_directory: Path = None,\n ):\n \"\"\"\n initializes contextual string embeddings using a character-level language model.\n :param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',\n 'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'\n depending on which character language model is desired.\n :param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down\n training and often leads to worse results, so not recommended.\n :param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will\n not allow re-use of once computed embeddings that do not fit into memory\n :param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. 
otherwise the cache\n is written to the provided directory.\n \"\"\"\n super().__init__()\n\n cache_dir = Path(\"embeddings\")\n\n # multilingual forward (English, German, French, Italian, Dutch, Polish)\n if model.lower() == \"multi-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # multilingual backward (English, German, French, Italian, Dutch, Polish)\n elif model.lower() == \"multi-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-forward\n elif model.lower() == \"news-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-backward\n elif model.lower() == \"news-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-forward\n elif model.lower() == \"news-forward-fast\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # news-english-backward\n elif model.lower() == \"news-backward-fast\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-english-forward\n elif model.lower() == \"mix-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-english-backward\n elif model.lower() == \"mix-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-german-forward\n elif model.lower() == \"german-forward\" or model.lower() == \"de-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # mix-german-backward\n elif model.lower() == \"german-backward\" or model.lower() == \"de-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # common crawl Polish forward\n elif model.lower() == \"polish-forward\" or model.lower() == \"pl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # common crawl Polish backward\n elif model.lower() == \"polish-backward\" or model.lower() == \"pl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Slovenian forward\n elif model.lower() == \"slovenian-forward\" or model.lower() == \"sl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt\"\n model = 
cached_path(base_path, cache_dir=cache_dir)\n # Slovenian backward\n elif model.lower() == \"slovenian-backward\" or model.lower() == \"sl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Bulgarian forward\n elif model.lower() == \"bulgarian-forward\" or model.lower() == \"bg-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Bulgarian backward\n elif model.lower() == \"bulgarian-backward\" or model.lower() == \"bg-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Dutch forward\n elif model.lower() == \"dutch-forward\" or model.lower() == \"nl-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Dutch backward\n elif model.lower() == \"dutch-backward\" or model.lower() == \"nl-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Swedish forward\n elif model.lower() == \"swedish-forward\" or model.lower() == \"sv-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Swedish backward\n elif model.lower() == \"swedish-backward\" or model.lower() == \"sv-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # French forward\n elif model.lower() == \"french-forward\" or model.lower() == \"fr-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # French backward\n elif model.lower() == \"french-backward\" or model.lower() == \"fr-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Czech forward\n elif model.lower() == \"czech-forward\" or model.lower() == \"cs-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Czech backward\n elif model.lower() == \"czech-backward\" or model.lower() == \"cs-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n\n # Portuguese forward\n elif model.lower() == \"portuguese-forward\" or model.lower() == \"pt-forward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt\"\n model = cached_path(base_path, cache_dir=cache_dir)\n # Portuguese backward\n elif model.lower() == \"portuguese-backward\" or model.lower() == \"pt-backward\":\n base_path = \"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt\"\n model = 
cached_path(base_path, cache_dir=cache_dir)\n\n elif not Path(model).exists():\n raise ValueError(\n f'The given model \"{model}\" is not available or is not a valid path.'\n )\n\n self.name = str(model)\n self.static_embeddings = detach\n\n from flair.models import LanguageModel\n\n self.lm = LanguageModel.load_language_model(model)\n self.detach = detach\n\n self.is_forward_lm: bool = self.lm.is_forward_lm\n\n # initialize cache if use_cache set\n self.cache = None\n if use_cache:\n cache_path = (\n Path(f\"{self.name}-tmp-cache.sqllite\")\n if not cache_directory\n else cache_directory / f\"{self.name}-tmp-cache.sqllite\"\n )\n from sqlitedict import SqliteDict\n\n self.cache = SqliteDict(str(cache_path), autocommit=True)\n\n # embed a dummy sentence to determine embedding_length\n dummy_sentence: Sentence = Sentence()\n dummy_sentence.add_token(Token(\"hello\"))\n embedded_dummy = self.embed(dummy_sentence)\n self.__embedding_length: int = len(\n embedded_dummy[0].get_token(1).get_embedding()\n )\n\n # set to eval mode\n self.eval()\n\n def train(self, mode=True):\n pass\n\n def __getstate__(self):\n # Copy the object's state from self.__dict__ which contains\n # all our instance attributes. Always use the dict.copy()\n # method to avoid modifying the original state.\n state = self.__dict__.copy()\n # Remove the unpicklable entries.\n state[\"cache\"] = None\n return state\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n # if cache is used, try setting embeddings from cache first\n if \"cache\" in self.__dict__ and self.cache is not None:\n\n # try populating embeddings from cache\n all_embeddings_retrieved_from_cache: bool = True\n for sentence in sentences:\n key = sentence.to_tokenized_string()\n embeddings = self.cache.get(key)\n\n if not embeddings:\n all_embeddings_retrieved_from_cache = False\n break\n else:\n for token, embedding in zip(sentence, embeddings):\n token.set_embedding(self.name, torch.FloatTensor(embedding))\n\n if all_embeddings_retrieved_from_cache:\n return sentences\n\n # if this is not possible, use LM to generate embedding. 
First, get text sentences\n text_sentences = [sentence.to_tokenized_string() for sentence in sentences]\n\n longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))\n\n # pad strings with whitespaces to longest sentence\n sentences_padded: List[str] = []\n append_padded_sentence = sentences_padded.append\n\n end_marker = \" \"\n extra_offset = 1\n for sentence_text in text_sentences:\n pad_by = longest_character_sequence_in_batch - len(sentence_text)\n if self.is_forward_lm:\n padded = \"\\n{}{}{}\".format(sentence_text, end_marker, pad_by * \" \")\n append_padded_sentence(padded)\n else:\n padded = \"\\n{}{}{}\".format(\n sentence_text[::-1], end_marker, pad_by * \" \"\n )\n append_padded_sentence(padded)\n\n # get hidden states from language model\n all_hidden_states_in_lm = self.lm.get_representation(sentences_padded)\n\n # take first or last hidden states from language model as word representation\n for i, sentence in enumerate(sentences):\n sentence_text = sentence.to_tokenized_string()\n\n offset_forward: int = extra_offset\n offset_backward: int = len(sentence_text) + extra_offset\n\n for token in sentence.tokens:\n\n offset_forward += len(token.text)\n\n if self.is_forward_lm:\n offset = offset_forward\n else:\n offset = offset_backward\n\n embedding = all_hidden_states_in_lm[offset, i, :]\n\n # if self.tokenized_lm or token.whitespace_after:\n offset_forward += 1\n offset_backward -= 1\n\n offset_backward -= len(token.text)\n\n token.set_embedding(self.name, embedding)\n\n if \"cache\" in self.__dict__ and self.cache is not None:\n for sentence in sentences:\n self.cache[sentence.to_tokenized_string()] = [\n token._embeddings[self.name].tolist() for token in sentence\n ]\n\n return sentences\n\n def __str__(self):\n return self.name\n\n\nclass DocumentMeanEmbeddings(DocumentEmbeddings):\n @deprecated(\n version=\"0.3.1\",\n reason=\"The functionality of this class is moved to 'DocumentPoolEmbeddings'\",\n )\n def __init__(self, token_embeddings: List[TokenEmbeddings]):\n \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(\n embeddings=token_embeddings\n )\n self.name: str = \"document_mean\"\n\n self.__embedding_length: int = self.embeddings.embedding_length\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. 
If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n everything_embedded: bool = True\n\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for sentence in sentences:\n if self.name not in sentence._embeddings.keys():\n everything_embedded = False\n\n if not everything_embedded:\n\n self.embeddings.embed(sentences)\n\n for sentence in sentences:\n word_embeddings = []\n for token in sentence.tokens:\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)\n\n mean_embedding = torch.mean(word_embeddings, 0)\n\n sentence.set_embedding(self.name, mean_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass DocumentPoolEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n fine_tune_mode=\"linear\",\n pooling: str = \"mean\",\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param pooling: a string which can any value from ['mean', 'max', 'min']\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n self.__embedding_length = self.embeddings.embedding_length\n\n # optional fine-tuning on top of embedding layer\n self.fine_tune_mode = fine_tune_mode\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n self.embedding_flex = torch.nn.Linear(\n self.embedding_length, self.embedding_length, bias=False\n )\n self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n self.embedding_flex_nonlinear = torch.nn.ReLU(self.embedding_length)\n self.embedding_flex_nonlinear_map = torch.nn.Linear(\n self.embedding_length, self.embedding_length\n )\n\n self.__embedding_length: int = self.embeddings.embedding_length\n\n self.to(flair.device)\n\n self.pooling = pooling\n if self.pooling == \"mean\":\n self.pool_op = torch.mean\n elif pooling == \"max\":\n self.pool_op = torch.max\n elif pooling == \"min\":\n self.pool_op = torch.min\n else:\n raise ValueError(f\"Pooling operation for {self.mode!r} is not defined\")\n self.name: str = f\"document_{self.pooling}\"\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to every sentence in the given list of sentences. 
If embeddings are already added, updates\n only if embeddings are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if isinstance(sentences, Sentence):\n sentences = [sentences]\n\n self.embeddings.embed(sentences)\n\n for sentence in sentences:\n word_embeddings = []\n for token in sentence.tokens:\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)\n\n if self.fine_tune_mode in [\"nonlinear\", \"linear\"]:\n word_embeddings = self.embedding_flex(word_embeddings)\n\n if self.fine_tune_mode in [\"nonlinear\"]:\n word_embeddings = self.embedding_flex_nonlinear(word_embeddings)\n word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)\n\n if self.pooling == \"mean\":\n pooled_embedding = self.pool_op(word_embeddings, 0)\n else:\n pooled_embedding, _ = self.pool_op(word_embeddings, 0)\n\n sentence.set_embedding(self.name, pooled_embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n def extra_repr(self):\n return f\"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}\"\n\n\nclass DocumentRNNEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n rnn_type=\"GRU\",\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the rnn\n :param rnn_layers: the number of layers for the rnn\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the rnn or not\n :param reproject_words_dimension: output dimension of reprojecting token embeddings. 
If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n :param rnn_type: 'GRU' or 'LSTM'\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.rnn_type = rnn_type\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.static_embeddings = False\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n\n # bidirectional RNN on top of embedding layer\n if rnn_type == \"LSTM\":\n self.rnn = torch.nn.LSTM(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n else:\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n\n self.name = \"document_\" + self.rnn._get_name()\n\n # dropouts\n if locked_dropout > 0.0:\n self.dropout: torch.nn.Module = LockedDropout(locked_dropout)\n else:\n self.dropout = torch.nn.Dropout(dropout)\n\n self.use_word_dropout: bool = word_dropout > 0.0\n if self.use_word_dropout:\n self.word_dropout = WordDropout(word_dropout)\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n sentences.sort(key=lambda x: len(x), reverse=True)\n\n self.embeddings.embed(sentences)\n\n # first, sort sentences by number of tokens\n longest_token_sequence_in_batch: int = len(sentences[0])\n\n all_sentence_tensors = []\n lengths: List[int] = []\n\n # go through each sentence in batch\n for i, sentence in enumerate(sentences):\n\n lengths.append(len(sentence.tokens))\n\n word_embeddings = []\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n # PADDING: pad shorter sentences out\n for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):\n word_embeddings.append(\n torch.zeros(\n self.length_of_all_token_embeddings, dtype=torch.float\n ).unsqueeze(0)\n )\n\n word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)\n\n sentence_states = word_embeddings_tensor\n\n # ADD TO SENTENCE LIST: add the representation\n all_sentence_tensors.append(sentence_states.unsqueeze(1))\n\n # --------------------------------------------------------------------\n # GET REPRESENTATION FOR ENTIRE BATCH\n # --------------------------------------------------------------------\n sentence_tensor = torch.cat(all_sentence_tensors, 1)\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n # use word dropout if set\n if self.use_word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n sentence_tensor = self.dropout(sentence_tensor)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)\n\n self.rnn.flatten_parameters()\n\n rnn_out, hidden = self.rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)\n\n outputs = self.dropout(outputs)\n\n # --------------------------------------------------------------------\n # EXTRACT EMBEDDINGS FROM RNN\n # --------------------------------------------------------------------\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[length - 1, sentence_no]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[0, sentence_no]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\n@deprecated(\n version=\"0.4\",\n reason=\"The functionality of this class is moved to 'DocumentRNNEmbeddings'\",\n)\nclass DocumentLSTMEmbeddings(DocumentEmbeddings):\n def __init__(\n self,\n embeddings: List[TokenEmbeddings],\n hidden_size=128,\n rnn_layers=1,\n reproject_words: bool = True,\n reproject_words_dimension: int = None,\n bidirectional: bool = False,\n dropout: float = 0.5,\n word_dropout: float = 0.0,\n locked_dropout: float = 0.0,\n ):\n \"\"\"The constructor takes a list of embeddings to be combined.\n :param embeddings: a list of token embeddings\n :param hidden_size: the number of hidden states in the lstm\n :param rnn_layers: the number of layers for the lstm\n :param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear\n layer before putting them into the lstm or not\n :param 
reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output\n dimension as before will be taken.\n :param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not\n :param dropout: the dropout value to be used\n :param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used\n :param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used\n \"\"\"\n super().__init__()\n\n self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)\n\n self.reproject_words = reproject_words\n self.bidirectional = bidirectional\n\n self.length_of_all_token_embeddings: int = self.embeddings.embedding_length\n\n self.name = \"document_lstm\"\n self.static_embeddings = False\n\n self.__embedding_length: int = hidden_size\n if self.bidirectional:\n self.__embedding_length *= 4\n\n self.embeddings_dimension: int = self.length_of_all_token_embeddings\n if self.reproject_words and reproject_words_dimension is not None:\n self.embeddings_dimension = reproject_words_dimension\n\n # bidirectional LSTM on top of embedding layer\n self.word_reprojection_map = torch.nn.Linear(\n self.length_of_all_token_embeddings, self.embeddings_dimension\n )\n self.rnn = torch.nn.GRU(\n self.embeddings_dimension,\n hidden_size,\n num_layers=rnn_layers,\n bidirectional=self.bidirectional,\n )\n\n # dropouts\n if locked_dropout > 0.0:\n self.dropout: torch.nn.Module = LockedDropout(locked_dropout)\n else:\n self.dropout = torch.nn.Dropout(dropout)\n\n self.use_word_dropout: bool = word_dropout > 0.0\n if self.use_word_dropout:\n self.word_dropout = WordDropout(word_dropout)\n\n torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def embed(self, sentences: Union[List[Sentence], Sentence]):\n \"\"\"Add embeddings to all sentences in the given list of sentences. 
If embeddings are already added, update\n only if embeddings are non-static.\"\"\"\n\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n self.rnn.zero_grad()\n\n sentences.sort(key=lambda x: len(x), reverse=True)\n\n self.embeddings.embed(sentences)\n\n # first, sort sentences by number of tokens\n longest_token_sequence_in_batch: int = len(sentences[0])\n\n all_sentence_tensors = []\n lengths: List[int] = []\n\n # go through each sentence in batch\n for i, sentence in enumerate(sentences):\n\n lengths.append(len(sentence.tokens))\n\n word_embeddings = []\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n word_embeddings.append(token.get_embedding().unsqueeze(0))\n\n # PADDING: pad shorter sentences out\n for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):\n word_embeddings.append(\n torch.zeros(\n self.length_of_all_token_embeddings, dtype=torch.float\n ).unsqueeze(0)\n )\n\n word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)\n\n sentence_states = word_embeddings_tensor\n\n # ADD TO SENTENCE LIST: add the representation\n all_sentence_tensors.append(sentence_states.unsqueeze(1))\n\n # --------------------------------------------------------------------\n # GET REPRESENTATION FOR ENTIRE BATCH\n # --------------------------------------------------------------------\n sentence_tensor = torch.cat(all_sentence_tensors, 1)\n\n # --------------------------------------------------------------------\n # FF PART\n # --------------------------------------------------------------------\n # use word dropout if set\n if self.use_word_dropout:\n sentence_tensor = self.word_dropout(sentence_tensor)\n\n if self.reproject_words:\n sentence_tensor = self.word_reprojection_map(sentence_tensor)\n\n sentence_tensor = self.dropout(sentence_tensor)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)\n\n self.rnn.flatten_parameters()\n\n lstm_out, hidden = self.rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n\n outputs = self.dropout(outputs)\n\n # --------------------------------------------------------------------\n # EXTRACT EMBEDDINGS FROM LSTM\n # --------------------------------------------------------------------\n for sentence_no, length in enumerate(lengths):\n last_rep = outputs[length - 1, sentence_no]\n\n embedding = last_rep\n if self.bidirectional:\n first_rep = outputs[0, sentence_no]\n embedding = torch.cat([first_rep, last_rep], 0)\n\n sentence = sentences[sentence_no]\n sentence.set_embedding(self.name, embedding)\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n pass\n\n\nclass DocumentLMEmbeddings(DocumentEmbeddings):\n def __init__(self, flair_embeddings: List[FlairEmbeddings], detach: bool = True):\n super().__init__()\n\n self.embeddings = flair_embeddings\n self.name = \"document_lm\"\n\n self.static_embeddings = detach\n self.detach = detach\n\n self._embedding_length: int = sum(\n embedding.embedding_length for embedding in flair_embeddings\n )\n\n @property\n def embedding_length(self) -> int:\n return self._embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n for embedding in self.embeddings:\n embedding.embed(sentences)\n\n # iterate over sentences\n for sentence in sentences:\n\n # if its a forward LM, take last state\n if embedding.is_forward_lm:\n sentence.set_embedding(\n embedding.name,\n sentence[len(sentence) - 
1]._embeddings[embedding.name],\n )\n else:\n sentence.set_embedding(\n embedding.name, sentence[0]._embeddings[embedding.name]\n )\n\n return sentences\n\n\nclass NILCEmbeddings(WordEmbeddings):\n def __init__(self, embeddings: str, model: str = \"skip\", size: int = 100):\n \"\"\"\n Initializes portuguese classic word embeddings trained by NILC Lab (http://www.nilc.icmc.usp.br/embeddings).\n Constructor downloads required files if not there.\n :param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'\n :param model: one of: 'skip' or 'cbow'. This is not applicable to glove.\n :param size: one of: 50, 100, 300, 600 or 1000.\n \"\"\"\n\n base_path = \"http://143.107.183.175:22980/download.php?file=embeddings/\"\n\n cache_dir = Path(\"embeddings\") / embeddings.lower()\n\n # GLOVE embeddings\n if embeddings.lower() == \"glove\":\n cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{embeddings}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif embeddings.lower() in [\"fasttext\", \"wang2vec\", \"word2vec\"]:\n cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n embeddings = cached_path(\n f\"{base_path}{embeddings}/{model}_s{size}.zip\", cache_dir=cache_dir\n )\n\n elif not Path(embeddings).exists():\n raise ValueError(\n f'The given embeddings \"{embeddings}\" is not available or is not a valid path.'\n )\n\n self.name: str = str(embeddings)\n self.static_embeddings = True\n\n log.info(\"Reading embeddings from %s\" % embeddings)\n self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(\n open_inside_zip(str(embeddings), cache_dir=cache_dir)\n )\n\n self.__embedding_length: int = self.precomputed_word_embeddings.vector_size\n super(TokenEmbeddings, self).__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def __str__(self):\n return self.name\n\n\ndef replace_with_language_code(string: str):\n string = string.replace(\"arabic-\", \"ar-\")\n string = string.replace(\"basque-\", \"eu-\")\n string = string.replace(\"bulgarian-\", \"bg-\")\n string = string.replace(\"croatian-\", \"hr-\")\n string = string.replace(\"czech-\", \"cs-\")\n string = string.replace(\"danish-\", \"da-\")\n string = string.replace(\"dutch-\", \"nl-\")\n string = string.replace(\"farsi-\", \"fa-\")\n string = string.replace(\"persian-\", \"fa-\")\n string = string.replace(\"finnish-\", \"fi-\")\n string = string.replace(\"french-\", \"fr-\")\n string = string.replace(\"german-\", \"de-\")\n string = string.replace(\"hebrew-\", \"he-\")\n string = string.replace(\"hindi-\", \"hi-\")\n string = string.replace(\"indonesian-\", \"id-\")\n string = string.replace(\"italian-\", \"it-\")\n string = string.replace(\"japanese-\", \"ja-\")\n string = string.replace(\"norwegian-\", \"no\")\n string = string.replace(\"polish-\", \"pl-\")\n string = string.replace(\"portuguese-\", \"pt-\")\n string = string.replace(\"slovenian-\", \"sl-\")\n string = string.replace(\"spanish-\", \"es-\")\n string = string.replace(\"swedish-\", \"sv-\")\n return string\n" ]
[ [ "torch.mean", "torch.LongTensor", "torch.nn.Dropout", "torch.nn.LSTM", "torch.cat", "torch.zeros", "torch.nn.GRU", "torch.eye", "torch.nn.utils.rnn.pack_padded_sequence", "torch.tensor", "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mavismonica/pandas
[ "dbdc55c9d59f25589d58cc60247af193f06c3c66", "dbdc55c9d59f25589d58cc60247af193f06c3c66", "dbdc55c9d59f25589d58cc60247af193f06c3c66", "dbdc55c9d59f25589d58cc60247af193f06c3c66" ]
[ "pandas/tests/indexing/test_indexing.py", "pandas/tests/frame/methods/test_drop_duplicates.py", "asv_bench/benchmarks/algorithms.py", "pandas/tests/io/json/test_ujson.py" ]
[ "\"\"\" test fancy indexing & misc \"\"\"\n\nfrom datetime import datetime\nimport re\nimport weakref\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.common import is_float_dtype, is_integer_dtype\n\nimport pandas as pd\nfrom pandas import DataFrame, Index, NaT, Series\nimport pandas._testing as tm\nfrom pandas.core.indexing import maybe_numeric_slice, non_reducing_slice\nfrom pandas.tests.indexing.common import _mklbl\n\nfrom .test_floats import gen_obj\n\n\ndef getitem(x):\n return x\n\n\ndef setitem(x):\n return x\n\n\ndef loc(x):\n return x.loc\n\n\ndef iloc(x):\n return x.iloc\n\n\n# ------------------------------------------------------------------------\n# Indexing test cases\n\n\nclass TestFancy:\n \"\"\" pure get/set item & fancy indexing \"\"\"\n\n def test_setitem_ndarray_1d(self):\n # GH5508\n\n # len of indexer vs length of the 1d ndarray\n df = DataFrame(index=Index(np.arange(1, 11)))\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\n df[\"bar\"] = np.zeros(10, dtype=complex)\n\n # invalid\n msg = \"Must have equal len keys and value when setting with an iterable\"\n with pytest.raises(ValueError, match=msg):\n df.loc[df.index[2:5], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\n\n # valid\n df.loc[df.index[2:6], \"bar\"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])\n\n result = df.loc[df.index[2:6], \"bar\"]\n expected = Series(\n [2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name=\"bar\"\n )\n tm.assert_series_equal(result, expected)\n\n # dtype getting changed?\n df = DataFrame(index=Index(np.arange(1, 11)))\n df[\"foo\"] = np.zeros(10, dtype=np.float64)\n df[\"bar\"] = np.zeros(10, dtype=complex)\n\n msg = \"Must have equal len keys and value when setting with an iterable\"\n with pytest.raises(ValueError, match=msg):\n df[2:5] = np.arange(1, 4) * 1j\n\n @pytest.mark.parametrize(\"idxr\", [getitem, loc, iloc])\n def test_getitem_ndarray_3d(self, index, frame_or_series, idxr):\n # GH 25567\n obj = gen_obj(frame_or_series, index)\n idxr = idxr(obj)\n nd3 = np.random.randint(5, size=(2, 2, 2))\n\n msg = \"|\".join(\n [\n r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)\",\n \"Cannot index with multidimensional key\",\n r\"Wrong number of dimensions. 
values.ndim != ndim \\[3 != 1\\]\",\n \"Index data must be 1-dimensional\",\n \"positional indexers are out-of-bounds\",\n \"Indexing a MultiIndex with a multidimensional key is not implemented\",\n ]\n )\n\n potential_errors = (IndexError, ValueError, NotImplementedError)\n with pytest.raises(potential_errors, match=msg):\n with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):\n idxr[nd3]\n\n @pytest.mark.parametrize(\"indexer\", [setitem, loc, iloc])\n def test_setitem_ndarray_3d(self, index, frame_or_series, indexer):\n # GH 25567\n obj = gen_obj(frame_or_series, index)\n idxr = indexer(obj)\n nd3 = np.random.randint(5, size=(2, 2, 2))\n\n if indexer.__name__ == \"iloc\":\n err = ValueError\n msg = f\"Cannot set values with ndim > {obj.ndim}\"\n elif (\n isinstance(index, pd.IntervalIndex)\n and indexer.__name__ == \"setitem\"\n and obj.ndim == 1\n ):\n err = AttributeError\n msg = (\n \"'pandas._libs.interval.IntervalTree' object has no attribute 'get_loc'\"\n )\n else:\n err = ValueError\n msg = r\"Buffer has wrong number of dimensions \\(expected 1, got 3\\)|\"\n\n with pytest.raises(err, match=msg):\n idxr[nd3] = 0\n\n def test_inf_upcast(self):\n # GH 16957\n # We should be able to use np.inf as a key\n # np.inf should cause an index to convert to float\n\n # Test with np.inf in rows\n df = DataFrame(columns=[0])\n df.loc[1] = 1\n df.loc[2] = 2\n df.loc[np.inf] = 3\n\n # make sure we can look up the value\n assert df.loc[np.inf, 0] == 3\n\n result = df.index\n expected = pd.Float64Index([1, 2, np.inf])\n tm.assert_index_equal(result, expected)\n\n # Test with np.inf in columns\n df = DataFrame()\n df.loc[0, 0] = 1\n df.loc[1, 1] = 2\n df.loc[0, np.inf] = 3\n\n result = df.columns\n expected = pd.Float64Index([0, 1, np.inf])\n tm.assert_index_equal(result, expected)\n\n def test_setitem_dtype_upcast(self):\n\n # GH3216\n df = DataFrame([{\"a\": 1}, {\"a\": 3, \"b\": 2}])\n df[\"c\"] = np.nan\n assert df[\"c\"].dtype == np.float64\n\n df.loc[0, \"c\"] = \"foo\"\n expected = DataFrame(\n [{\"a\": 1, \"b\": np.nan, \"c\": \"foo\"}, {\"a\": 3, \"b\": 2, \"c\": np.nan}]\n )\n tm.assert_frame_equal(df, expected)\n\n # GH10280\n df = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3),\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n for val in [3.14, \"wxyz\"]:\n left = df.copy()\n left.loc[\"a\", \"bar\"] = val\n right = DataFrame(\n [[0, val, 2], [3, 4, 5]],\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n tm.assert_frame_equal(left, right)\n assert is_integer_dtype(left[\"foo\"])\n assert is_integer_dtype(left[\"baz\"])\n\n left = DataFrame(\n np.arange(6, dtype=\"int64\").reshape(2, 3) / 10.0,\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n left.loc[\"a\", \"bar\"] = \"wxyz\"\n\n right = DataFrame(\n [[0, \"wxyz\", 0.2], [0.3, 0.4, 0.5]],\n index=list(\"ab\"),\n columns=[\"foo\", \"bar\", \"baz\"],\n )\n\n tm.assert_frame_equal(left, right)\n assert is_float_dtype(left[\"foo\"])\n assert is_float_dtype(left[\"baz\"])\n\n def test_dups_fancy_indexing(self):\n\n # GH 3455\n\n df = tm.makeCustomDataframe(10, 3)\n df.columns = [\"a\", \"a\", \"b\"]\n result = df[[\"b\", \"a\"]].columns\n expected = Index([\"b\", \"a\", \"a\"])\n tm.assert_index_equal(result, expected)\n\n # across dtypes\n df = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", \"bar\"]], columns=list(\"aaaaaaa\"))\n df.head()\n str(df)\n result = DataFrame([[1, 2, 1.0, 2.0, 3.0, \"foo\", \"bar\"]])\n result.columns = list(\"aaaaaaa\")\n\n # 
TODO(wesm): unused?\n df_v = df.iloc[:, 4] # noqa\n res_v = result.iloc[:, 4] # noqa\n\n tm.assert_frame_equal(df, result)\n\n # GH 3561, dups not in selected order\n df = DataFrame(\n {\"test\": [5, 7, 9, 11], \"test1\": [4.0, 5, 6, 7], \"other\": list(\"abcd\")},\n index=[\"A\", \"A\", \"B\", \"C\"],\n )\n rows = [\"C\", \"B\"]\n expected = DataFrame(\n {\"test\": [11, 9], \"test1\": [7.0, 6], \"other\": [\"d\", \"c\"]}, index=rows\n )\n result = df.loc[rows]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[Index(rows)]\n tm.assert_frame_equal(result, expected)\n\n rows = [\"C\", \"B\", \"E\"]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[rows]\n\n # see GH5553, make sure we use the right indexer\n rows = [\"F\", \"G\", \"H\", \"C\", \"B\", \"E\"]\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[rows]\n\n # List containing only missing label\n dfnu = DataFrame(np.random.randn(5, 3), index=list(\"AABCD\"))\n with pytest.raises(\n KeyError,\n match=re.escape(\n \"\\\"None of [Index(['E'], dtype='object')] are in the [index]\\\"\"\n ),\n ):\n dfnu.loc[[\"E\"]]\n\n # ToDo: check_index_type can be True after GH 11497\n\n # GH 4619; duplicate indexer with missing label\n df = DataFrame({\"A\": [0, 1, 2]})\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[0, 8, 0]]\n\n df = DataFrame({\"A\": list(\"abc\")})\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[0, 8, 0]]\n\n # non unique with non unique selector\n df = DataFrame({\"test\": [5, 7, 9, 11]}, index=[\"A\", \"A\", \"B\", \"C\"])\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[[\"A\", \"A\", \"E\"]]\n\n def test_dups_fancy_indexing2(self):\n # GH 5835\n # dups on index and missing values\n df = DataFrame(np.random.randn(5, 5), columns=[\"A\", \"B\", \"B\", \"B\", \"A\"])\n\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n df.loc[:, [\"A\", \"B\", \"C\"]]\n\n # GH 6504, multi-axis indexing\n df = DataFrame(\n np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=[\"a\", \"b\"]\n )\n\n expected = df.iloc[0:6]\n result = df.loc[[1, 2]]\n tm.assert_frame_equal(result, expected)\n\n expected = df\n result = df.loc[:, [\"a\", \"b\"]]\n tm.assert_frame_equal(result, expected)\n\n expected = df.iloc[0:6, :]\n result = df.loc[[1, 2], [\"a\", \"b\"]]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"case\", [getitem, loc])\n def test_duplicate_int_indexing(self, case):\n # GH 17347\n s = Series(range(3), index=[1, 1, 3])\n expected = s[1]\n result = case(s)[[1]]\n tm.assert_series_equal(result, expected)\n\n def test_indexing_mixed_frame_bug(self):\n\n # GH3492\n df = DataFrame(\n {\"a\": {1: \"aaa\", 2: \"bbb\", 3: \"ccc\"}, \"b\": {1: 111, 2: 222, 3: 333}}\n )\n\n # this works, new column is created correctly\n df[\"test\"] = df[\"a\"].apply(lambda x: \"_\" if x == \"aaa\" else x)\n\n # this does not work, ie column test is not changed\n idx = df[\"test\"] == \"_\"\n temp = df.loc[idx, \"a\"].apply(lambda x: \"-----\" if x == \"aaa\" else x)\n df.loc[idx, \"test\"] = temp\n assert df.iloc[0, 2] == \"-----\"\n\n def test_multitype_list_index_access(self):\n # GH 10610\n df = DataFrame(np.random.random((10, 5)), columns=[\"a\"] + [20, 21, 22, 23])\n\n with pytest.raises(KeyError, match=re.escape(\"'[-8, 26] not in index'\")):\n df[[22, 26, -8]]\n assert df[21].shape[0] == df.shape[0]\n\n def test_set_index_nan(self):\n\n # GH 3586\n df = 
DataFrame(\n {\n \"PRuid\": {\n 17: \"nonQC\",\n 18: \"nonQC\",\n 19: \"nonQC\",\n 20: \"10\",\n 21: \"11\",\n 22: \"12\",\n 23: \"13\",\n 24: \"24\",\n 25: \"35\",\n 26: \"46\",\n 27: \"47\",\n 28: \"48\",\n 29: \"59\",\n 30: \"10\",\n },\n \"QC\": {\n 17: 0.0,\n 18: 0.0,\n 19: 0.0,\n 20: np.nan,\n 21: np.nan,\n 22: np.nan,\n 23: np.nan,\n 24: 1.0,\n 25: np.nan,\n 26: np.nan,\n 27: np.nan,\n 28: np.nan,\n 29: np.nan,\n 30: np.nan,\n },\n \"data\": {\n 17: 7.9544899999999998,\n 18: 8.0142609999999994,\n 19: 7.8591520000000008,\n 20: 0.86140349999999999,\n 21: 0.87853110000000001,\n 22: 0.8427041999999999,\n 23: 0.78587700000000005,\n 24: 0.73062459999999996,\n 25: 0.81668560000000001,\n 26: 0.81927080000000008,\n 27: 0.80705009999999999,\n 28: 0.81440240000000008,\n 29: 0.80140849999999997,\n 30: 0.81307740000000006,\n },\n \"year\": {\n 17: 2006,\n 18: 2007,\n 19: 2008,\n 20: 1985,\n 21: 1985,\n 22: 1985,\n 23: 1985,\n 24: 1985,\n 25: 1985,\n 26: 1985,\n 27: 1985,\n 28: 1985,\n 29: 1985,\n 30: 1986,\n },\n }\n ).reset_index()\n\n result = (\n df.set_index([\"year\", \"PRuid\", \"QC\"])\n .reset_index()\n .reindex(columns=df.columns)\n )\n tm.assert_frame_equal(result, df)\n\n def test_multi_assign(self):\n\n # GH 3626, an assignment of a sub-df to a df\n df = DataFrame(\n {\n \"FC\": [\"a\", \"b\", \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": list(range(6)),\n \"col2\": list(range(6, 12)),\n }\n )\n df.iloc[1, 0] = np.nan\n df2 = df.copy()\n\n mask = ~df2.FC.isna()\n cols = [\"col1\", \"col2\"]\n\n dft = df2 * 2\n dft.iloc[3, 3] = np.nan\n\n expected = DataFrame(\n {\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": Series([0, 1, 4, 6, 8, 10]),\n \"col2\": [12, 7, 16, np.nan, 20, 22],\n }\n )\n\n # frame on rhs\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, expected)\n\n df2.loc[mask, cols] = dft.loc[mask, cols]\n tm.assert_frame_equal(df2, expected)\n\n # with an ndarray on rhs\n # coerces to float64 because values has float64 dtype\n # GH 14001\n expected = DataFrame(\n {\n \"FC\": [\"a\", np.nan, \"a\", \"b\", \"a\", \"b\"],\n \"PF\": [0, 0, 0, 0, 1, 1],\n \"col1\": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],\n \"col2\": [12, 7, 16, np.nan, 20, 22],\n }\n )\n df2 = df.copy()\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n df2.loc[mask, cols] = dft.loc[mask, cols].values\n tm.assert_frame_equal(df2, expected)\n\n # broadcasting on the rhs is required\n df = DataFrame(\n {\n \"A\": [1, 2, 0, 0, 0],\n \"B\": [0, 0, 0, 10, 11],\n \"C\": [0, 0, 0, 10, 11],\n \"D\": [3, 4, 5, 6, 7],\n }\n )\n\n expected = df.copy()\n mask = expected[\"A\"] == 0\n for col in [\"A\", \"B\"]:\n expected.loc[mask, col] = df[\"D\"]\n\n df.loc[df[\"A\"] == 0, [\"A\", \"B\"]] = df[\"D\"]\n tm.assert_frame_equal(df, expected)\n\n def test_setitem_list(self):\n\n # GH 6043\n # iloc with a list\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = [1, 2, 3]\n df.iloc[1, 0] = [1, 2]\n\n result = DataFrame(index=[0, 1], columns=[0])\n result.iloc[1, 0] = [1, 2]\n\n tm.assert_frame_equal(result, df)\n\n # iloc with an object\n class TO:\n def __init__(self, value):\n self.value = value\n\n def __str__(self) -> str:\n return f\"[{self.value}]\"\n\n __repr__ = __str__\n\n def __eq__(self, other) -> bool:\n return self.value == other.value\n\n def view(self):\n return self\n\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = TO(1)\n df.iloc[1, 0] = TO(2)\n\n result = 
DataFrame(index=[0, 1], columns=[0])\n result.iloc[1, 0] = TO(2)\n\n tm.assert_frame_equal(result, df)\n\n # remains object dtype even after setting it back\n df = DataFrame(index=[0, 1], columns=[0])\n df.iloc[1, 0] = TO(1)\n df.iloc[1, 0] = np.nan\n result = DataFrame(index=[0, 1], columns=[0])\n\n tm.assert_frame_equal(result, df)\n\n def test_string_slice(self):\n # GH 14424\n # string indexing against datetimelike with object\n # dtype should properly raises KeyError\n df = DataFrame([1], Index([pd.Timestamp(\"2011-01-01\")], dtype=object))\n assert df.index._is_all_dates\n with pytest.raises(KeyError, match=\"'2011'\"):\n df[\"2011\"]\n\n with pytest.raises(KeyError, match=\"'2011'\"):\n with tm.assert_produces_warning(FutureWarning):\n # This does an is_all_dates check\n df.loc[\"2011\", 0]\n\n df = DataFrame()\n assert not df.index._is_all_dates\n with pytest.raises(KeyError, match=\"'2011'\"):\n df[\"2011\"]\n\n with pytest.raises(KeyError, match=\"'2011'\"):\n df.loc[\"2011\", 0]\n\n def test_astype_assignment(self):\n\n # GH4312 (iloc)\n df_orig = DataFrame(\n [[\"1\", \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)\n expected = DataFrame(\n [[1, 2, \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n # GH5702 (loc)\n df = df_orig.copy()\n df.loc[:, \"A\"] = df.loc[:, \"A\"].astype(np.int64)\n expected = DataFrame(\n [[1, \"2\", \"3\", \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n df = df_orig.copy()\n df.loc[:, [\"B\", \"C\"]] = df.loc[:, [\"B\", \"C\"]].astype(np.int64)\n expected = DataFrame(\n [[\"1\", 2, 3, \".4\", 5, 6.0, \"foo\"]], columns=list(\"ABCDEFG\")\n )\n tm.assert_frame_equal(df, expected)\n\n # full replacements / no nans\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\n df.iloc[:, 0] = df[\"A\"].astype(np.int64)\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame({\"A\": [1.0, 2.0, 3.0, 4.0]})\n df.loc[:, \"A\"] = df[\"A\"].astype(np.int64)\n expected = DataFrame({\"A\": [1, 2, 3, 4]})\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\"indexer\", [getitem, loc])\n def test_index_type_coercion(self, indexer):\n\n # GH 11836\n # if we have an index type and set it with something that looks\n # to numpy like the same, but is actually, not\n # (e.g. 
setting with a float or string '0')\n # then we need to coerce to object\n\n # integer indexes\n for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:\n\n assert s.index.is_integer()\n\n s2 = s.copy()\n indexer(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert indexer(s2)[0.1] == 0\n\n s2 = s.copy()\n indexer(s2)[0.0] = 0\n exp = s.index\n if 0 not in s:\n exp = Index(s.index.tolist() + [0])\n tm.assert_index_equal(s2.index, exp)\n\n s2 = s.copy()\n indexer(s2)[\"0\"] = 0\n assert s2.index.is_object()\n\n for s in [Series(range(5), index=np.arange(5.0))]:\n\n assert s.index.is_floating()\n\n s2 = s.copy()\n indexer(s2)[0.1] = 0\n assert s2.index.is_floating()\n assert indexer(s2)[0.1] == 0\n\n s2 = s.copy()\n indexer(s2)[0.0] = 0\n tm.assert_index_equal(s2.index, s.index)\n\n s2 = s.copy()\n indexer(s2)[\"0\"] = 0\n assert s2.index.is_object()\n\n\nclass TestMisc:\n def test_float_index_to_mixed(self):\n df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})\n df[\"a\"] = 10\n tm.assert_frame_equal(\n DataFrame({0.0: df[0.0], 1.0: df[1.0], \"a\": [10] * 10}), df\n )\n\n def test_float_index_non_scalar_assignment(self):\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\n df.loc[df.index[:2]] = 1\n expected = DataFrame({\"a\": [1, 1, 3], \"b\": [1, 1, 5]}, index=df.index)\n tm.assert_frame_equal(expected, df)\n\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]}, index=[1.0, 2.0, 3.0])\n df2 = df.copy()\n df.loc[df.index] = df.loc[df.index]\n tm.assert_frame_equal(df, df2)\n\n def test_float_index_at_iat(self):\n s = Series([1, 2, 3], index=[0.1, 0.2, 0.3])\n for el, item in s.items():\n assert s.at[el] == item\n for i in range(len(s)):\n assert s.iat[i] == i + 1\n\n def test_rhs_alignment(self):\n # GH8258, tests that both rows & columns are aligned to what is\n # assigned to. 
covers both uniform data-type & multi-type cases\n def run_tests(df, rhs, right_loc, right_iloc):\n # label, index, slice\n lbl_one, idx_one, slice_one = list(\"bcd\"), [1, 2, 3], slice(1, 4)\n lbl_two, idx_two, slice_two = [\"joe\", \"jolie\"], [1, 2], slice(1, 3)\n\n left = df.copy()\n left.loc[lbl_one, lbl_two] = rhs\n tm.assert_frame_equal(left, right_loc)\n\n left = df.copy()\n left.iloc[idx_one, idx_two] = rhs\n tm.assert_frame_equal(left, right_iloc)\n\n left = df.copy()\n left.iloc[slice_one, slice_two] = rhs\n tm.assert_frame_equal(left, right_iloc)\n\n xs = np.arange(20).reshape(5, 4)\n cols = [\"jim\", \"joe\", \"jolie\", \"joline\"]\n df = DataFrame(xs, columns=cols, index=list(\"abcde\"), dtype=\"int64\")\n\n # right hand side; permute the indices and multiplpy by -2\n rhs = -2 * df.iloc[3:0:-1, 2:0:-1]\n\n # expected `right` result; just multiply by -2\n right_iloc = df.copy()\n right_iloc[\"joe\"] = [1, 14, 10, 6, 17]\n right_iloc[\"jolie\"] = [2, 13, 9, 5, 18]\n right_iloc.iloc[1:4, 1:3] *= -2\n right_loc = df.copy()\n right_loc.iloc[1:4, 1:3] *= -2\n\n # run tests with uniform dtypes\n run_tests(df, rhs, right_loc, right_iloc)\n\n # make frames multi-type & re-run tests\n for frame in [df, rhs, right_loc, right_iloc]:\n frame[\"joe\"] = frame[\"joe\"].astype(\"float64\")\n frame[\"jolie\"] = frame[\"jolie\"].map(\"@{}\".format)\n right_iloc[\"joe\"] = [1.0, \"@-28\", \"@-20\", \"@-12\", 17.0]\n right_iloc[\"jolie\"] = [\"@2\", -26.0, -18.0, -10.0, \"@18\"]\n run_tests(df, rhs, right_loc, right_iloc)\n\n def test_str_label_slicing_with_negative_step(self):\n SLC = pd.IndexSlice\n\n def assert_slices_equivalent(l_slc, i_slc):\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n if not idx.is_integer:\n # For integer indices, .loc and plain getitem are position-based.\n tm.assert_series_equal(s[l_slc], s.iloc[i_slc])\n tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])\n\n for idx in [_mklbl(\"A\", 20), np.arange(20) + 100, np.linspace(100, 150, 20)]:\n idx = Index(idx)\n s = Series(np.arange(20), index=idx)\n assert_slices_equivalent(SLC[idx[9] :: -1], SLC[9::-1])\n assert_slices_equivalent(SLC[: idx[9] : -1], SLC[:8:-1])\n assert_slices_equivalent(SLC[idx[13] : idx[9] : -1], SLC[13:8:-1])\n assert_slices_equivalent(SLC[idx[9] : idx[13] : -1], SLC[:0])\n\n def test_slice_with_zero_step_raises(self):\n s = Series(np.arange(20), index=_mklbl(\"A\", 20))\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\n s[::0]\n with pytest.raises(ValueError, match=\"slice step cannot be zero\"):\n s.loc[::0]\n\n def test_indexing_assignment_dict_already_exists(self):\n df = DataFrame({\"x\": [1, 2, 6], \"y\": [2, 2, 8], \"z\": [-5, 0, 5]}).set_index(\"z\")\n expected = df.copy()\n rhs = {\"x\": 9, \"y\": 99}\n df.loc[5] = rhs\n expected.loc[5] = [9, 99]\n tm.assert_frame_equal(df, expected)\n\n def test_indexing_dtypes_on_empty(self):\n # Check that .iloc returns correct dtypes GH9983\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [\"b\", \"b2\", \"b3\"]})\n df2 = df.iloc[[], :]\n\n assert df2.loc[:, \"a\"].dtype == np.int64\n tm.assert_series_equal(df2.loc[:, \"a\"], df2.iloc[:, 0])\n\n @pytest.mark.parametrize(\"size\", [5, 999999, 1000000])\n def test_range_in_series_indexing(self, size):\n # range can cause an indexing error\n # GH 11652\n s = Series(index=range(size), dtype=np.float64)\n s.loc[range(1)] = 42\n tm.assert_series_equal(s.loc[range(1)], Series(42.0, index=[0]))\n\n s.loc[range(2)] = 43\n tm.assert_series_equal(s.loc[range(2)], Series(43.0, index=[0, 
1]))\n\n @pytest.mark.parametrize(\n \"slc\",\n [\n pd.IndexSlice[:, :],\n pd.IndexSlice[:, 1],\n pd.IndexSlice[1, :],\n pd.IndexSlice[[1], [1]],\n pd.IndexSlice[1, [1]],\n pd.IndexSlice[[1], 1],\n pd.IndexSlice[1],\n pd.IndexSlice[1, 1],\n slice(None, None, None),\n [0, 1],\n np.array([0, 1]),\n Series([0, 1]),\n ],\n )\n def test_non_reducing_slice(self, slc):\n df = DataFrame([[0, 1], [2, 3]])\n\n tslice_ = non_reducing_slice(slc)\n assert isinstance(df.loc[tslice_], DataFrame)\n\n def test_list_slice(self):\n # like dataframe getitem\n slices = [[\"A\"], Series([\"A\"]), np.array([\"A\"])]\n df = DataFrame({\"A\": [1, 2], \"B\": [3, 4]}, index=[\"A\", \"B\"])\n expected = pd.IndexSlice[:, [\"A\"]]\n for subset in slices:\n result = non_reducing_slice(subset)\n tm.assert_frame_equal(df.loc[result], df.loc[expected])\n\n def test_maybe_numeric_slice(self):\n df = DataFrame({\"A\": [1, 2], \"B\": [\"c\", \"d\"], \"C\": [True, False]})\n result = maybe_numeric_slice(df, slice_=None)\n expected = pd.IndexSlice[:, [\"A\"]]\n assert result == expected\n\n result = maybe_numeric_slice(df, None, include_bool=True)\n expected = pd.IndexSlice[:, [\"A\", \"C\"]]\n assert all(result[1] == expected[1])\n result = maybe_numeric_slice(df, [1])\n expected = [1]\n assert result == expected\n\n def test_partial_boolean_frame_indexing(self):\n # GH 17170\n df = DataFrame(\n np.arange(9.0).reshape(3, 3), index=list(\"abc\"), columns=list(\"ABC\")\n )\n index_df = DataFrame(1, index=list(\"ab\"), columns=list(\"AB\"))\n result = df[index_df.notnull()]\n expected = DataFrame(\n np.array([[0.0, 1.0, np.nan], [3.0, 4.0, np.nan], [np.nan] * 3]),\n index=list(\"abc\"),\n columns=list(\"ABC\"),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_no_reference_cycle(self):\n df = DataFrame({\"a\": [0, 1], \"b\": [2, 3]})\n for name in (\"loc\", \"iloc\", \"at\", \"iat\"):\n getattr(df, name)\n wr = weakref.ref(df)\n del df\n assert wr() is None\n\n def test_label_indexing_on_nan(self):\n # GH 32431\n df = Series([1, \"{1,2}\", 1, None])\n vc = df.value_counts(dropna=False)\n result1 = vc.loc[np.nan]\n result2 = vc[np.nan]\n\n expected = 1\n assert result1 == expected\n assert result2 == expected\n\n\nclass TestSeriesNoneCoercion:\n EXPECTED_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n # For datetime series, we should coerce to NaT.\n (\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n ),\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_setitem(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_loc_setitem(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series.loc[0] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_setitem_and_series(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series[start_series == 
start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n @pytest.mark.parametrize(\"start_data,expected_result\", EXPECTED_RESULTS)\n def test_coercion_with_loc_and_series(self, start_data, expected_result):\n start_series = Series(start_data)\n start_series.loc[start_series == start_series[0]] = None\n\n expected_series = Series(expected_result)\n tm.assert_series_equal(start_series, expected_series)\n\n\nclass TestDataframeNoneCoercion:\n EXPECTED_SINGLE_ROW_RESULTS = [\n # For numeric series, we should coerce to NaN.\n ([1, 2, 3], [np.nan, 2, 3]),\n ([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),\n # For datetime series, we should coerce to NaT.\n (\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n ),\n # For objects, we should preserve the None value.\n ([\"foo\", \"bar\", \"baz\"], [None, \"bar\", \"baz\"]),\n ]\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_coercion_with_loc(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe.loc[0, [\"foo\"]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_coercion_with_setitem_and_dataframe(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n @pytest.mark.parametrize(\"expected\", EXPECTED_SINGLE_ROW_RESULTS)\n def test_none_coercion_loc_and_dataframe(self, expected):\n start_data, expected_result = expected\n\n start_dataframe = DataFrame({\"foo\": start_data})\n start_dataframe.loc[start_dataframe[\"foo\"] == start_dataframe[\"foo\"][0]] = None\n\n expected_dataframe = DataFrame({\"foo\": expected_result})\n tm.assert_frame_equal(start_dataframe, expected_dataframe)\n\n def test_none_coercion_mixed_dtypes(self):\n start_dataframe = DataFrame(\n {\n \"a\": [1, 2, 3],\n \"b\": [1.0, 2.0, 3.0],\n \"c\": [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],\n \"d\": [\"a\", \"b\", \"c\"],\n }\n )\n start_dataframe.iloc[0] = None\n\n exp = DataFrame(\n {\n \"a\": [np.nan, 2, 3],\n \"b\": [np.nan, 2.0, 3.0],\n \"c\": [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],\n \"d\": [None, \"b\", \"c\"],\n }\n )\n tm.assert_frame_equal(start_dataframe, exp)\n\n\ndef test_extension_array_cross_section():\n # A cross-section of a homogeneous EA should be an EA\n df = DataFrame(\n {\n \"A\": pd.core.arrays.integer_array([1, 2]),\n \"B\": pd.core.arrays.integer_array([3, 4]),\n },\n index=[\"a\", \"b\"],\n )\n expected = Series(pd.core.arrays.integer_array([1, 3]), index=[\"A\", \"B\"], name=\"a\")\n result = df.loc[\"a\"]\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_extension_array_cross_section_converts():\n # all numeric columns -> numeric series\n df = DataFrame(\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([1, 2])}, index=[\"a\", \"b\"]\n )\n result = df.loc[\"a\"]\n expected = Series([1, 1], dtype=\"Int64\", index=[\"A\", \"B\"], name=\"a\")\n tm.assert_series_equal(result, 
expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n # mixed columns -> object series\n df = DataFrame(\n {\"A\": pd.array([1, 2], dtype=\"Int64\"), \"B\": np.array([\"a\", \"b\"])},\n index=[\"a\", \"b\"],\n )\n result = df.loc[\"a\"]\n expected = Series([1, \"a\"], dtype=object, index=[\"A\", \"B\"], name=\"a\")\n tm.assert_series_equal(result, expected)\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_setitem_with_bool_mask_and_values_matching_n_trues_in_length():\n # GH 30567\n ser = Series([None] * 10)\n mask = [False] * 3 + [True] * 5 + [False] * 2\n ser[mask] = range(5)\n result = ser\n expected = Series([None] * 3 + list(range(5)) + [None] * 2).astype(\"object\")\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_labels_inside_loc_matched_in_error_message():\n # GH34272\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n error_message_regex = \"missing_0.*missing_1.*missing_2\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"b\", \"missing_0\", \"c\", \"missing_1\", \"missing_2\"]]\n\n\ndef test_many_missing_labels_inside_loc_error_message_limited():\n # GH34272\n n = 10000\n missing_labels = [f\"missing_{label}\" for label in range(n)]\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n # regex checks labels between 4 and 9995 are replaced with ellipses\n error_message_regex = \"missing_4.*\\\\.\\\\.\\\\..*missing_9995\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"c\"] + missing_labels]\n\n\ndef test_long_text_missing_labels_inside_loc_error_message_limited():\n # GH34272\n s = Series({\"a\": 1, \"b\": 2, \"c\": 3})\n missing_labels = [f\"long_missing_label_text_{i}\" * 5 for i in range(3)]\n # regex checks for very long labels there are new lines between each\n error_message_regex = \"long_missing_label_text_0.*\\\\\\\\n.*long_missing_label_text_1\"\n with pytest.raises(KeyError, match=error_message_regex):\n s.loc[[\"a\", \"c\"] + missing_labels]\n\n\ndef test_setitem_categorical():\n # https://github.com/pandas-dev/pandas/issues/35369\n df = DataFrame({\"h\": Series(list(\"mn\")).astype(\"category\")})\n df.h = df.h.cat.reorder_categories([\"n\", \"m\"])\n expected = DataFrame(\n {\"h\": pd.Categorical([\"m\", \"n\"]).reorder_categories([\"n\", \"m\"])}\n )\n tm.assert_frame_equal(df, expected)\n", "from datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, NaT\nimport pandas._testing as tm\n\n\[email protected](\"subset\", [\"a\", [\"a\"], [\"a\", \"B\"]])\ndef test_drop_duplicates_with_misspelled_column_name(subset):\n # GH 19730\n df = DataFrame({\"A\": [0, 0, 1], \"B\": [0, 0, 1], \"C\": [0, 0, 1]})\n msg = re.escape(\"Index(['a'], dtype='object')\")\n\n with pytest.raises(KeyError, match=msg):\n df.drop_duplicates(subset)\n\n\ndef test_drop_duplicates():\n df = DataFrame(\n {\n \"AAA\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"bar\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1, 1, 2, 2, 2, 2, 1, 2],\n \"D\": range(8),\n }\n )\n # single column\n result = df.drop_duplicates(\"AAA\")\n expected = df[:2]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"AAA\", keep=\"last\")\n expected = df.loc[[6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"AAA\", keep=False)\n expected = df.loc[[]]\n tm.assert_frame_equal(result, expected)\n assert len(result) == 0\n\n # multi 
column\n expected = df.loc[[0, 1, 2, 3]]\n result = df.drop_duplicates(np.array([\"AAA\", \"B\"]))\n tm.assert_frame_equal(result, expected)\n result = df.drop_duplicates([\"AAA\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates((\"AAA\", \"B\"), keep=\"last\")\n expected = df.loc[[0, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates((\"AAA\", \"B\"), keep=False)\n expected = df.loc[[0]]\n tm.assert_frame_equal(result, expected)\n\n # consider everything\n df2 = df.loc[:, [\"AAA\", \"B\", \"C\"]]\n\n result = df2.drop_duplicates()\n # in this case only\n expected = df2.drop_duplicates([\"AAA\", \"B\"])\n tm.assert_frame_equal(result, expected)\n\n result = df2.drop_duplicates(keep=\"last\")\n expected = df2.drop_duplicates([\"AAA\", \"B\"], keep=\"last\")\n tm.assert_frame_equal(result, expected)\n\n result = df2.drop_duplicates(keep=False)\n expected = df2.drop_duplicates([\"AAA\", \"B\"], keep=False)\n tm.assert_frame_equal(result, expected)\n\n # integers\n result = df.drop_duplicates(\"C\")\n expected = df.iloc[[0, 2]]\n tm.assert_frame_equal(result, expected)\n result = df.drop_duplicates(\"C\", keep=\"last\")\n expected = df.iloc[[-2, -1]]\n tm.assert_frame_equal(result, expected)\n\n df[\"E\"] = df[\"C\"].astype(\"int8\")\n result = df.drop_duplicates(\"E\")\n expected = df.iloc[[0, 2]]\n tm.assert_frame_equal(result, expected)\n result = df.drop_duplicates(\"E\", keep=\"last\")\n expected = df.iloc[[-2, -1]]\n tm.assert_frame_equal(result, expected)\n\n # GH 11376\n df = DataFrame({\"x\": [7, 6, 3, 3, 4, 8, 0], \"y\": [0, 6, 5, 5, 9, 1, 2]})\n expected = df.loc[df.index != 3]\n tm.assert_frame_equal(df.drop_duplicates(), expected)\n\n df = DataFrame([[1, 0], [0, 2]])\n tm.assert_frame_equal(df.drop_duplicates(), df)\n\n df = DataFrame([[-2, 0], [0, -4]])\n tm.assert_frame_equal(df.drop_duplicates(), df)\n\n x = np.iinfo(np.int64).max / 3 * 2\n df = DataFrame([[-x, x], [0, x + 4]])\n tm.assert_frame_equal(df.drop_duplicates(), df)\n\n df = DataFrame([[-x, x], [x, x + 4]])\n tm.assert_frame_equal(df.drop_duplicates(), df)\n\n # GH 11864\n df = DataFrame([i] * 9 for i in range(16))\n df = df.append([[1] + [0] * 8], ignore_index=True)\n\n for keep in [\"first\", \"last\", False]:\n assert df.duplicated(keep=keep).sum() == 0\n\n\ndef test_drop_duplicates_with_duplicate_column_names():\n # GH17836\n df = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=[\"a\", \"a\", \"b\"])\n\n result0 = df.drop_duplicates()\n tm.assert_frame_equal(result0, df)\n\n result1 = df.drop_duplicates(\"a\")\n expected1 = df[:2]\n tm.assert_frame_equal(result1, expected1)\n\n\ndef test_drop_duplicates_for_take_all():\n df = DataFrame(\n {\n \"AAA\": [\"foo\", \"bar\", \"baz\", \"bar\", \"foo\", \"bar\", \"qux\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1, 1, 2, 2, 2, 2, 1, 2],\n \"D\": range(8),\n }\n )\n # single column\n result = df.drop_duplicates(\"AAA\")\n expected = df.iloc[[0, 1, 2, 6]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"AAA\", keep=\"last\")\n expected = df.iloc[[2, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"AAA\", keep=False)\n expected = df.iloc[[2, 6]]\n tm.assert_frame_equal(result, expected)\n\n # multiple columns\n result = df.drop_duplicates([\"AAA\", \"B\"])\n expected = df.iloc[[0, 1, 2, 3, 4, 6]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"AAA\", 
\"B\"], keep=\"last\")\n expected = df.iloc[[0, 1, 2, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"AAA\", \"B\"], keep=False)\n expected = df.iloc[[0, 1, 2, 6]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_drop_duplicates_tuple():\n df = DataFrame(\n {\n (\"AA\", \"AB\"): [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"bar\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1, 1, 2, 2, 2, 2, 1, 2],\n \"D\": range(8),\n }\n )\n # single column\n result = df.drop_duplicates((\"AA\", \"AB\"))\n expected = df[:2]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates((\"AA\", \"AB\"), keep=\"last\")\n expected = df.loc[[6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates((\"AA\", \"AB\"), keep=False)\n expected = df.loc[[]] # empty df\n assert len(result) == 0\n tm.assert_frame_equal(result, expected)\n\n # multi column\n expected = df.loc[[0, 1, 2, 3]]\n result = df.drop_duplicates(((\"AA\", \"AB\"), \"B\"))\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"df\",\n [\n DataFrame(),\n DataFrame(columns=[]),\n DataFrame(columns=[\"A\", \"B\", \"C\"]),\n DataFrame(index=[]),\n DataFrame(index=[\"A\", \"B\", \"C\"]),\n ],\n)\ndef test_drop_duplicates_empty(df):\n # GH 20516\n result = df.drop_duplicates()\n tm.assert_frame_equal(result, df)\n\n result = df.copy()\n result.drop_duplicates(inplace=True)\n tm.assert_frame_equal(result, df)\n\n\ndef test_drop_duplicates_NA():\n # none\n df = DataFrame(\n {\n \"A\": [None, None, \"foo\", \"bar\", \"foo\", \"bar\", \"bar\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],\n \"D\": range(8),\n }\n )\n # single column\n result = df.drop_duplicates(\"A\")\n expected = df.loc[[0, 2, 3]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"A\", keep=\"last\")\n expected = df.loc[[1, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"A\", keep=False)\n expected = df.loc[[]] # empty df\n tm.assert_frame_equal(result, expected)\n assert len(result) == 0\n\n # multi column\n result = df.drop_duplicates([\"A\", \"B\"])\n expected = df.loc[[0, 2, 3, 6]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"A\", \"B\"], keep=\"last\")\n expected = df.loc[[1, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"A\", \"B\"], keep=False)\n expected = df.loc[[6]]\n tm.assert_frame_equal(result, expected)\n\n # nan\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"bar\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],\n \"D\": range(8),\n }\n )\n # single column\n result = df.drop_duplicates(\"C\")\n expected = df[:2]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"C\", keep=\"last\")\n expected = df.loc[[3, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"C\", keep=False)\n expected = df.loc[[]] # empty df\n tm.assert_frame_equal(result, expected)\n assert len(result) == 0\n\n # multi column\n result = df.drop_duplicates([\"C\", \"B\"])\n expected = df.loc[[0, 1, 2, 4]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"C\", \"B\"], keep=\"last\")\n 
expected = df.loc[[1, 3, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates([\"C\", \"B\"], keep=False)\n expected = df.loc[[1]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_drop_duplicates_NA_for_take_all():\n # none\n df = DataFrame(\n {\n \"A\": [None, None, \"foo\", \"bar\", \"foo\", \"baz\", \"bar\", \"qux\"],\n \"C\": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0],\n }\n )\n\n # single column\n result = df.drop_duplicates(\"A\")\n expected = df.iloc[[0, 2, 3, 5, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"A\", keep=\"last\")\n expected = df.iloc[[1, 4, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"A\", keep=False)\n expected = df.iloc[[5, 7]]\n tm.assert_frame_equal(result, expected)\n\n # nan\n\n # single column\n result = df.drop_duplicates(\"C\")\n expected = df.iloc[[0, 1, 5, 6]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"C\", keep=\"last\")\n expected = df.iloc[[3, 5, 6, 7]]\n tm.assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(\"C\", keep=False)\n expected = df.iloc[[5, 6]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_drop_duplicates_inplace():\n orig = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"bar\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": [1, 1, 2, 2, 2, 2, 1, 2],\n \"D\": range(8),\n }\n )\n # single column\n df = orig.copy()\n return_value = df.drop_duplicates(\"A\", inplace=True)\n expected = orig[:2]\n result = df\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df = orig.copy()\n return_value = df.drop_duplicates(\"A\", keep=\"last\", inplace=True)\n expected = orig.loc[[6, 7]]\n result = df\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df = orig.copy()\n return_value = df.drop_duplicates(\"A\", keep=False, inplace=True)\n expected = orig.loc[[]]\n result = df\n tm.assert_frame_equal(result, expected)\n assert len(df) == 0\n assert return_value is None\n\n # multi column\n df = orig.copy()\n return_value = df.drop_duplicates([\"A\", \"B\"], inplace=True)\n expected = orig.loc[[0, 1, 2, 3]]\n result = df\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df = orig.copy()\n return_value = df.drop_duplicates([\"A\", \"B\"], keep=\"last\", inplace=True)\n expected = orig.loc[[0, 5, 6, 7]]\n result = df\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df = orig.copy()\n return_value = df.drop_duplicates([\"A\", \"B\"], keep=False, inplace=True)\n expected = orig.loc[[0]]\n result = df\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n # consider everything\n orig2 = orig.loc[:, [\"A\", \"B\", \"C\"]].copy()\n\n df2 = orig2.copy()\n return_value = df2.drop_duplicates(inplace=True)\n # in this case only\n expected = orig2.drop_duplicates([\"A\", \"B\"])\n result = df2\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df2 = orig2.copy()\n return_value = df2.drop_duplicates(keep=\"last\", inplace=True)\n expected = orig2.drop_duplicates([\"A\", \"B\"], keep=\"last\")\n result = df2\n tm.assert_frame_equal(result, expected)\n assert return_value is None\n\n df2 = orig2.copy()\n return_value = df2.drop_duplicates(keep=False, inplace=True)\n expected = orig2.drop_duplicates([\"A\", \"B\"], keep=False)\n result = df2\n tm.assert_frame_equal(result, 
expected)\n assert return_value is None\n\n\[email protected](\"inplace\", [True, False])\[email protected](\n \"origin_dict, output_dict, ignore_index, output_index\",\n [\n ({\"A\": [2, 2, 3]}, {\"A\": [2, 3]}, True, [0, 1]),\n ({\"A\": [2, 2, 3]}, {\"A\": [2, 3]}, False, [0, 2]),\n ({\"A\": [2, 2, 3], \"B\": [2, 2, 4]}, {\"A\": [2, 3], \"B\": [2, 4]}, True, [0, 1]),\n ({\"A\": [2, 2, 3], \"B\": [2, 2, 4]}, {\"A\": [2, 3], \"B\": [2, 4]}, False, [0, 2]),\n ],\n)\ndef test_drop_duplicates_ignore_index(\n inplace, origin_dict, output_dict, ignore_index, output_index\n):\n # GH 30114\n df = DataFrame(origin_dict)\n expected = DataFrame(output_dict, index=output_index)\n\n if inplace:\n result_df = df.copy()\n result_df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)\n else:\n result_df = df.drop_duplicates(ignore_index=ignore_index, inplace=inplace)\n\n tm.assert_frame_equal(result_df, expected)\n tm.assert_frame_equal(df, DataFrame(origin_dict))\n\n\ndef test_drop_duplicates_null_in_object_column(nulls_fixture):\n # https://github.com/pandas-dev/pandas/issues/32992\n df = DataFrame([[1, nulls_fixture], [2, \"a\"]], dtype=object)\n result = df.drop_duplicates()\n tm.assert_frame_equal(result, df)\n\n\[email protected](\"keep\", [\"first\", \"last\", False])\ndef test_drop_duplicates_series_vs_dataframe(keep):\n # GH#14192\n df = DataFrame(\n {\n \"a\": [1, 1, 1, \"one\", \"one\"],\n \"b\": [2, 2, np.nan, np.nan, np.nan],\n \"c\": [3, 3, np.nan, np.nan, \"three\"],\n \"d\": [1, 2, 3, 4, 4],\n \"e\": [\n datetime(2015, 1, 1),\n datetime(2015, 1, 1),\n datetime(2015, 2, 1),\n NaT,\n NaT,\n ],\n }\n )\n for column in df.columns:\n dropped_frame = df[[column]].drop_duplicates(keep=keep)\n dropped_series = df[column].drop_duplicates(keep=keep)\n tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())\n", "from importlib import import_module\n\nimport numpy as np\n\nfrom pandas._libs import lib\n\nimport pandas as pd\nfrom pandas.core.algorithms import make_duplicates_of_left_unique_in_right\n\nfrom .pandas_vb_common import tm\n\nfor imp in [\"pandas.util\", \"pandas.tools.hashing\"]:\n try:\n hashing = import_module(imp)\n break\n except (ImportError, TypeError, ValueError):\n pass\n\n\nclass MaybeConvertObjects:\n def setup(self):\n N = 10 ** 5\n\n data = list(range(N))\n data[0] = pd.NaT\n data = np.array(data)\n self.data = data\n\n def time_maybe_convert_objects(self):\n lib.maybe_convert_objects(self.data)\n\n\nclass Factorize:\n\n params = [\n [True, False],\n [True, False],\n [\n \"int\",\n \"uint\",\n \"float\",\n \"string\",\n \"datetime64[ns]\",\n \"datetime64[ns, tz]\",\n \"Int64\",\n \"boolean\",\n ],\n ]\n param_names = [\"unique\", \"sort\", \"dtype\"]\n\n def setup(self, unique, sort, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.random.randn(N)),\n \"string\": tm.makeStringIndex(N),\n \"datetime64[ns]\": pd.date_range(\"2011-01-01\", freq=\"H\", periods=N),\n \"datetime64[ns, tz]\": pd.date_range(\n \"2011-01-01\", freq=\"H\", periods=N, tz=\"Asia/Tokyo\"\n ),\n \"Int64\": pd.array(np.arange(N), dtype=\"Int64\"),\n \"boolean\": pd.array(np.random.randint(0, 2, N), dtype=\"boolean\"),\n }[dtype]\n if not unique:\n data = data.repeat(5)\n self.data = data\n\n def time_factorize(self, unique, sort, dtype):\n pd.factorize(self.data, sort=sort)\n\n\nclass Duplicated:\n\n params = [\n [True, False],\n [\"first\", \"last\", False],\n [\"int\", \"uint\", \"float\", 
\"string\", \"datetime64[ns]\", \"datetime64[ns, tz]\"],\n ]\n param_names = [\"unique\", \"keep\", \"dtype\"]\n\n def setup(self, unique, keep, dtype):\n N = 10 ** 5\n data = {\n \"int\": pd.Int64Index(np.arange(N)),\n \"uint\": pd.UInt64Index(np.arange(N)),\n \"float\": pd.Float64Index(np.random.randn(N)),\n \"string\": tm.makeStringIndex(N),\n \"datetime64[ns]\": pd.date_range(\"2011-01-01\", freq=\"H\", periods=N),\n \"datetime64[ns, tz]\": pd.date_range(\n \"2011-01-01\", freq=\"H\", periods=N, tz=\"Asia/Tokyo\"\n ),\n }[dtype]\n if not unique:\n data = data.repeat(5)\n self.idx = data\n # cache is_unique\n self.idx.is_unique\n\n def time_duplicated(self, unique, keep, dtype):\n self.idx.duplicated(keep=keep)\n\n\nclass Hashing:\n def setup_cache(self):\n N = 10 ** 5\n\n df = pd.DataFrame(\n {\n \"strings\": pd.Series(\n tm.makeStringIndex(10000).take(np.random.randint(0, 10000, size=N))\n ),\n \"floats\": np.random.randn(N),\n \"ints\": np.arange(N),\n \"dates\": pd.date_range(\"20110101\", freq=\"s\", periods=N),\n \"timedeltas\": pd.timedelta_range(\"1 day\", freq=\"s\", periods=N),\n }\n )\n df[\"categories\"] = df[\"strings\"].astype(\"category\")\n df.iloc[10:20] = np.nan\n return df\n\n def time_frame(self, df):\n hashing.hash_pandas_object(df)\n\n def time_series_int(self, df):\n hashing.hash_pandas_object(df[\"ints\"])\n\n def time_series_string(self, df):\n hashing.hash_pandas_object(df[\"strings\"])\n\n def time_series_float(self, df):\n hashing.hash_pandas_object(df[\"floats\"])\n\n def time_series_categorical(self, df):\n hashing.hash_pandas_object(df[\"categories\"])\n\n def time_series_timedeltas(self, df):\n hashing.hash_pandas_object(df[\"timedeltas\"])\n\n def time_series_dates(self, df):\n hashing.hash_pandas_object(df[\"dates\"])\n\n\nclass Quantile:\n params = [\n [0, 0.5, 1],\n [\"linear\", \"nearest\", \"lower\", \"higher\", \"midpoint\"],\n [\"float\", \"int\", \"uint\"],\n ]\n param_names = [\"quantile\", \"interpolation\", \"dtype\"]\n\n def setup(self, quantile, interpolation, dtype):\n N = 10 ** 5\n data = {\n \"int\": np.arange(N),\n \"uint\": np.arange(N).astype(np.uint64),\n \"float\": np.random.randn(N),\n }\n self.idx = pd.Series(data[dtype].repeat(5))\n\n def time_quantile(self, quantile, interpolation, dtype):\n self.idx.quantile(quantile, interpolation=interpolation)\n\n\nclass SortIntegerArray:\n params = [10 ** 3, 10 ** 5]\n\n def setup(self, N):\n data = np.arange(N, dtype=float)\n data[40] = np.nan\n self.array = pd.array(data, dtype=\"Int64\")\n\n def time_argsort(self, N):\n self.array.argsort()\n\n\nclass RemoveDuplicates:\n def setup(self):\n N = 10 ** 5\n na = np.arange(int(N / 2))\n self.left = np.concatenate([na[: int(N / 4)], na[: int(N / 4)]])\n self.right = np.concatenate([na, na])\n\n def time_make_duplicates_of_left_unique_in_right(self):\n make_duplicates_of_left_unique_in_right(self.left, self.right)\n\n\nfrom .pandas_vb_common import setup # noqa: F401 isort:skip\n", "import calendar\nimport datetime\nimport decimal\nimport json\nimport locale\nimport math\nimport re\nimport sys\nimport time\n\nimport dateutil\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas._libs.json as ujson\nfrom pandas._libs.tslib import Timestamp\nfrom pandas.compat import IS64, is_platform_windows\n\nfrom pandas import DataFrame, DatetimeIndex, Index, NaT, Series, Timedelta, date_range\nimport pandas._testing as tm\n\n\ndef _clean_dict(d):\n \"\"\"\n Sanitize dictionary for JSON by converting all keys to strings.\n\n Parameters\n 
----------\n d : dict\n The dictionary to convert.\n\n Returns\n -------\n cleaned_dict : dict\n \"\"\"\n return {str(k): v for k, v in d.items()}\n\n\[email protected](\n params=[None, \"split\", \"records\", \"values\", \"index\"] # Column indexed by default.\n)\ndef orient(request):\n return request.param\n\n\[email protected](params=[None, True])\ndef numpy(request):\n return request.param\n\n\ndef get_int32_compat_dtype(numpy, orient):\n # See GH#32527\n dtype = np.int64\n if not ((numpy is None or orient == \"index\") or (numpy is True and orient is None)):\n if is_platform_windows():\n dtype = np.int32\n else:\n dtype = np.intp\n\n return dtype\n\n\nclass TestUltraJSONTests:\n @pytest.mark.skipif(not IS64, reason=\"not compliant on 32-bit, xref #15865\")\n def test_encode_decimal(self):\n sut = decimal.Decimal(\"1337.1337\")\n encoded = ujson.encode(sut, double_precision=15)\n decoded = ujson.decode(encoded)\n assert decoded == 1337.1337\n\n sut = decimal.Decimal(\"0.95\")\n encoded = ujson.encode(sut, double_precision=1)\n assert encoded == \"1.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 1.0\n\n sut = decimal.Decimal(\"0.94\")\n encoded = ujson.encode(sut, double_precision=1)\n assert encoded == \"0.9\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 0.9\n\n sut = decimal.Decimal(\"1.95\")\n encoded = ujson.encode(sut, double_precision=1)\n assert encoded == \"2.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 2.0\n\n sut = decimal.Decimal(\"-1.95\")\n encoded = ujson.encode(sut, double_precision=1)\n assert encoded == \"-2.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == -2.0\n\n sut = decimal.Decimal(\"0.995\")\n encoded = ujson.encode(sut, double_precision=2)\n assert encoded == \"1.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 1.0\n\n sut = decimal.Decimal(\"0.9995\")\n encoded = ujson.encode(sut, double_precision=3)\n assert encoded == \"1.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 1.0\n\n sut = decimal.Decimal(\"0.99999999999999944\")\n encoded = ujson.encode(sut, double_precision=15)\n assert encoded == \"1.0\"\n\n decoded = ujson.decode(encoded)\n assert decoded == 1.0\n\n @pytest.mark.parametrize(\"ensure_ascii\", [True, False])\n def test_encode_string_conversion(self, ensure_ascii):\n string_input = \"A string \\\\ / \\b \\f \\n \\r \\t </script> &\"\n not_html_encoded = '\"A string \\\\\\\\ \\\\/ \\\\b \\\\f \\\\n \\\\r \\\\t <\\\\/script> &\"'\n html_encoded = (\n '\"A string \\\\\\\\ \\\\/ \\\\b \\\\f \\\\n \\\\r \\\\t \\\\u003c\\\\/script\\\\u003e \\\\u0026\"'\n )\n\n def helper(expected_output, **encode_kwargs):\n output = ujson.encode(\n string_input, ensure_ascii=ensure_ascii, **encode_kwargs\n )\n\n assert output == expected_output\n assert string_input == json.loads(output)\n assert string_input == ujson.decode(output)\n\n # Default behavior assumes encode_html_chars=False.\n helper(not_html_encoded)\n\n # Make sure explicit encode_html_chars=False works.\n helper(not_html_encoded, encode_html_chars=False)\n\n # Make sure explicit encode_html_chars=True does the encoding.\n helper(html_encoded, encode_html_chars=True)\n\n @pytest.mark.parametrize(\n \"long_number\", [-4342969734183514, -12345678901234.56789012, -528656961.4399388]\n )\n def test_double_long_numbers(self, long_number):\n sut = {\"a\": long_number}\n encoded = ujson.encode(sut, double_precision=15)\n\n decoded = ujson.decode(encoded)\n assert sut == decoded\n\n def test_encode_non_c_locale(self):\n lc_category = 
locale.LC_NUMERIC\n\n # We just need one of these locales to work.\n for new_locale in (\"it_IT.UTF-8\", \"Italian_Italy\"):\n if tm.can_set_locale(new_locale, lc_category):\n with tm.set_locale(new_locale, lc_category):\n assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60\n assert ujson.loads(\"4.78\", precise_float=True) == 4.78\n break\n\n def test_decimal_decode_test_precise(self):\n sut = {\"a\": 4.56}\n encoded = ujson.encode(sut)\n decoded = ujson.decode(encoded, precise_float=True)\n assert sut == decoded\n\n def test_encode_double_tiny_exponential(self):\n num = 1e-40\n assert num == ujson.decode(ujson.encode(num))\n num = 1e-100\n assert num == ujson.decode(ujson.encode(num))\n num = -1e-45\n assert num == ujson.decode(ujson.encode(num))\n num = -1e-145\n assert np.allclose(num, ujson.decode(ujson.encode(num)))\n\n @pytest.mark.parametrize(\"unicode_key\", [\"key1\", \"بن\"])\n def test_encode_dict_with_unicode_keys(self, unicode_key):\n unicode_dict = {unicode_key: \"value1\"}\n assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))\n\n @pytest.mark.parametrize(\n \"double_input\", [math.pi, -math.pi] # Should work with negatives too.\n )\n def test_encode_double_conversion(self, double_input):\n output = ujson.encode(double_input)\n assert round(double_input, 5) == round(json.loads(output), 5)\n assert round(double_input, 5) == round(ujson.decode(output), 5)\n\n def test_encode_with_decimal(self):\n decimal_input = 1.0\n output = ujson.encode(decimal_input)\n\n assert output == \"1.0\"\n\n def test_encode_array_of_nested_arrays(self):\n nested_input = [[[[]]]] * 20\n output = ujson.encode(nested_input)\n\n assert nested_input == json.loads(output)\n assert nested_input == ujson.decode(output)\n\n nested_input = np.array(nested_input)\n tm.assert_numpy_array_equal(\n nested_input, ujson.decode(output, numpy=True, dtype=nested_input.dtype)\n )\n\n def test_encode_array_of_doubles(self):\n doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10\n output = ujson.encode(doubles_input)\n\n assert doubles_input == json.loads(output)\n assert doubles_input == ujson.decode(output)\n\n tm.assert_numpy_array_equal(\n np.array(doubles_input), ujson.decode(output, numpy=True)\n )\n\n def test_double_precision(self):\n double_input = 30.012345678901234\n output = ujson.encode(double_input, double_precision=15)\n\n assert double_input == json.loads(output)\n assert double_input == ujson.decode(output)\n\n for double_precision in (3, 9):\n output = ujson.encode(double_input, double_precision=double_precision)\n rounded_input = round(double_input, double_precision)\n\n assert rounded_input == json.loads(output)\n assert rounded_input == ujson.decode(output)\n\n @pytest.mark.parametrize(\"invalid_val\", [20, -1, \"9\", None])\n def test_invalid_double_precision(self, invalid_val):\n double_input = 30.12345678901234567890\n expected_exception = ValueError if isinstance(invalid_val, int) else TypeError\n\n with pytest.raises(expected_exception):\n ujson.encode(double_input, double_precision=invalid_val)\n\n def test_encode_string_conversion2(self):\n string_input = \"A string \\\\ / \\b \\f \\n \\r \\t\"\n output = ujson.encode(string_input)\n\n assert string_input == json.loads(output)\n assert string_input == ujson.decode(output)\n assert output == '\"A string \\\\\\\\ \\\\/ \\\\b \\\\f \\\\n \\\\r \\\\t\"'\n\n @pytest.mark.parametrize(\n \"unicode_input\",\n [\"Räksmörgås اسامة بن محمد بن عوض بن لادن\", \"\\xe6\\x97\\xa5\\xd1\\x88\"],\n )\n def 
test_encode_unicode_conversion(self, unicode_input):\n enc = ujson.encode(unicode_input)\n dec = ujson.decode(enc)\n\n assert enc == json.dumps(unicode_input)\n assert dec == json.loads(enc)\n\n def test_encode_control_escaping(self):\n escaped_input = \"\\x19\"\n enc = ujson.encode(escaped_input)\n dec = ujson.decode(enc)\n\n assert escaped_input == dec\n assert enc == json.dumps(escaped_input)\n\n def test_encode_unicode_surrogate_pair(self):\n surrogate_input = \"\\xf0\\x90\\x8d\\x86\"\n enc = ujson.encode(surrogate_input)\n dec = ujson.decode(enc)\n\n assert enc == json.dumps(surrogate_input)\n assert dec == json.loads(enc)\n\n def test_encode_unicode_4bytes_utf8(self):\n four_bytes_input = \"\\xf0\\x91\\x80\\xb0TRAILINGNORMAL\"\n enc = ujson.encode(four_bytes_input)\n dec = ujson.decode(enc)\n\n assert enc == json.dumps(four_bytes_input)\n assert dec == json.loads(enc)\n\n def test_encode_unicode_4bytes_utf8highest(self):\n four_bytes_input = \"\\xf3\\xbf\\xbf\\xbfTRAILINGNORMAL\"\n enc = ujson.encode(four_bytes_input)\n\n dec = ujson.decode(enc)\n\n assert enc == json.dumps(four_bytes_input)\n assert dec == json.loads(enc)\n\n def test_encode_array_in_array(self):\n arr_in_arr_input = [[[[]]]]\n output = ujson.encode(arr_in_arr_input)\n\n assert arr_in_arr_input == json.loads(output)\n assert output == json.dumps(arr_in_arr_input)\n assert arr_in_arr_input == ujson.decode(output)\n\n tm.assert_numpy_array_equal(\n np.array(arr_in_arr_input), ujson.decode(output, numpy=True)\n )\n\n @pytest.mark.parametrize(\n \"num_input\",\n [\n 31337,\n -31337, # Negative number.\n -9223372036854775808, # Large negative number.\n ],\n )\n def test_encode_num_conversion(self, num_input):\n output = ujson.encode(num_input)\n assert num_input == json.loads(output)\n assert output == json.dumps(num_input)\n assert num_input == ujson.decode(output)\n\n def test_encode_list_conversion(self):\n list_input = [1, 2, 3, 4]\n output = ujson.encode(list_input)\n\n assert list_input == json.loads(output)\n assert list_input == ujson.decode(output)\n\n tm.assert_numpy_array_equal(\n np.array(list_input), ujson.decode(output, numpy=True)\n )\n\n def test_encode_dict_conversion(self):\n dict_input = {\"k1\": 1, \"k2\": 2, \"k3\": 3, \"k4\": 4}\n output = ujson.encode(dict_input)\n\n assert dict_input == json.loads(output)\n assert dict_input == ujson.decode(output)\n\n @pytest.mark.parametrize(\"builtin_value\", [None, True, False])\n def test_encode_builtin_values_conversion(self, builtin_value):\n output = ujson.encode(builtin_value)\n assert builtin_value == json.loads(output)\n assert output == json.dumps(builtin_value)\n assert builtin_value == ujson.decode(output)\n\n def test_encode_datetime_conversion(self):\n datetime_input = datetime.datetime.fromtimestamp(time.time())\n output = ujson.encode(datetime_input, date_unit=\"s\")\n expected = calendar.timegm(datetime_input.utctimetuple())\n\n assert int(expected) == json.loads(output)\n assert int(expected) == ujson.decode(output)\n\n def test_encode_date_conversion(self):\n date_input = datetime.date.fromtimestamp(time.time())\n output = ujson.encode(date_input, date_unit=\"s\")\n\n tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)\n expected = calendar.timegm(tup)\n\n assert int(expected) == json.loads(output)\n assert int(expected) == ujson.decode(output)\n\n @pytest.mark.parametrize(\n \"test\",\n [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)],\n )\n def test_encode_time_conversion_basic(self, test):\n output 
= ujson.encode(test)\n expected = f'\"{test.isoformat()}\"'\n assert expected == output\n\n def test_encode_time_conversion_pytz(self):\n # see gh-11473: to_json segfaults with timezone-aware datetimes\n test = datetime.time(10, 12, 15, 343243, pytz.utc)\n output = ujson.encode(test)\n expected = f'\"{test.isoformat()}\"'\n assert expected == output\n\n def test_encode_time_conversion_dateutil(self):\n # see gh-11473: to_json segfaults with timezone-aware datetimes\n test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())\n output = ujson.encode(test)\n expected = f'\"{test.isoformat()}\"'\n assert expected == output\n\n @pytest.mark.parametrize(\n \"decoded_input\", [NaT, np.datetime64(\"NaT\"), np.nan, np.inf, -np.inf]\n )\n def test_encode_as_null(self, decoded_input):\n assert ujson.encode(decoded_input) == \"null\", \"Expected null\"\n\n def test_datetime_units(self):\n val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)\n stamp = Timestamp(val)\n\n roundtrip = ujson.decode(ujson.encode(val, date_unit=\"s\"))\n assert roundtrip == stamp.value // 10 ** 9\n\n roundtrip = ujson.decode(ujson.encode(val, date_unit=\"ms\"))\n assert roundtrip == stamp.value // 10 ** 6\n\n roundtrip = ujson.decode(ujson.encode(val, date_unit=\"us\"))\n assert roundtrip == stamp.value // 10 ** 3\n\n roundtrip = ujson.decode(ujson.encode(val, date_unit=\"ns\"))\n assert roundtrip == stamp.value\n\n msg = \"Invalid value 'foo' for option 'date_unit'\"\n with pytest.raises(ValueError, match=msg):\n ujson.encode(val, date_unit=\"foo\")\n\n def test_encode_to_utf8(self):\n unencoded = \"\\xe6\\x97\\xa5\\xd1\\x88\"\n\n enc = ujson.encode(unencoded, ensure_ascii=False)\n dec = ujson.decode(enc)\n\n assert enc == json.dumps(unencoded, ensure_ascii=False)\n assert dec == json.loads(enc)\n\n def test_decode_from_unicode(self):\n unicode_input = '{\"obj\": 31337}'\n\n dec1 = ujson.decode(unicode_input)\n dec2 = ujson.decode(str(unicode_input))\n\n assert dec1 == dec2\n\n def test_encode_recursion_max(self):\n # 8 is the max recursion depth\n\n class O2:\n member = 0\n pass\n\n class O1:\n member = 0\n pass\n\n decoded_input = O1()\n decoded_input.member = O2()\n decoded_input.member.member = decoded_input\n\n with pytest.raises(OverflowError):\n ujson.encode(decoded_input)\n\n def test_decode_jibberish(self):\n jibberish = \"fdsa sda v9sa fdsa\"\n\n with pytest.raises(ValueError):\n ujson.decode(jibberish)\n\n @pytest.mark.parametrize(\n \"broken_json\",\n [\n \"[\", # Broken array start.\n \"{\", # Broken object start.\n \"]\", # Broken array end.\n \"}\", # Broken object end.\n ],\n )\n def test_decode_broken_json(self, broken_json):\n with pytest.raises(ValueError):\n ujson.decode(broken_json)\n\n @pytest.mark.parametrize(\"too_big_char\", [\"[\", \"{\"])\n def test_decode_depth_too_big(self, too_big_char):\n with pytest.raises(ValueError):\n ujson.decode(too_big_char * (1024 * 1024))\n\n @pytest.mark.parametrize(\n \"bad_string\",\n [\n '\"TESTING', # Unterminated.\n '\"TESTING\\\\\"', # Unterminated escape.\n \"tru\", # Broken True.\n \"fa\", # Broken False.\n \"n\", # Broken None.\n ],\n )\n def test_decode_bad_string(self, bad_string):\n with pytest.raises(ValueError):\n ujson.decode(bad_string)\n\n @pytest.mark.parametrize(\"broken_json\", ['{{1337:\"\"}}', '{{\"key\":\"}', \"[[[true\"])\n def test_decode_broken_json_leak(self, broken_json):\n for _ in range(1000):\n with pytest.raises(ValueError):\n ujson.decode(broken_json)\n\n @pytest.mark.parametrize(\n \"invalid_dict\",\n [\n \"{{{{31337}}}}\", # 
No key.\n '{{{{\"key\":}}}}', # No value.\n '{{{{\"key\"}}}}', # No colon or value.\n ],\n )\n def test_decode_invalid_dict(self, invalid_dict):\n with pytest.raises(ValueError):\n ujson.decode(invalid_dict)\n\n @pytest.mark.parametrize(\n \"numeric_int_as_str\", [\"31337\", \"-31337\"] # Should work with negatives.\n )\n def test_decode_numeric_int(self, numeric_int_as_str):\n assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)\n\n def test_encode_null_character(self):\n wrapped_input = \"31337 \\x00 1337\"\n output = ujson.encode(wrapped_input)\n\n assert wrapped_input == json.loads(output)\n assert output == json.dumps(wrapped_input)\n assert wrapped_input == ujson.decode(output)\n\n alone_input = \"\\x00\"\n output = ujson.encode(alone_input)\n\n assert alone_input == json.loads(output)\n assert output == json.dumps(alone_input)\n assert alone_input == ujson.decode(output)\n assert '\" \\\\u0000\\\\r\\\\n \"' == ujson.dumps(\" \\u0000\\r\\n \")\n\n def test_decode_null_character(self):\n wrapped_input = '\"31337 \\\\u0000 31337\"'\n assert ujson.decode(wrapped_input) == json.loads(wrapped_input)\n\n def test_encode_list_long_conversion(self):\n long_input = [\n 9223372036854775807,\n 9223372036854775807,\n 9223372036854775807,\n 9223372036854775807,\n 9223372036854775807,\n 9223372036854775807,\n ]\n output = ujson.encode(long_input)\n\n assert long_input == json.loads(output)\n assert long_input == ujson.decode(output)\n\n tm.assert_numpy_array_equal(\n np.array(long_input), ujson.decode(output, numpy=True, dtype=np.int64)\n )\n\n def test_encode_long_conversion(self):\n long_input = 9223372036854775807\n output = ujson.encode(long_input)\n\n assert long_input == json.loads(output)\n assert output == json.dumps(long_input)\n assert long_input == ujson.decode(output)\n\n @pytest.mark.parametrize(\"bigNum\", [sys.maxsize + 1, -(sys.maxsize + 2)])\n @pytest.mark.xfail(not IS64, reason=\"GH-35288\")\n def test_dumps_ints_larger_than_maxsize(self, bigNum):\n # GH34395\n bigNum = sys.maxsize + 1\n encoding = ujson.encode(bigNum)\n assert str(bigNum) == encoding\n\n # GH20599\n with pytest.raises(ValueError):\n assert ujson.loads(encoding) == bigNum\n\n @pytest.mark.parametrize(\n \"int_exp\", [\"1337E40\", \"1.337E40\", \"1337E+9\", \"1.337e+40\", \"1.337E-4\"]\n )\n def test_decode_numeric_int_exp(self, int_exp):\n assert ujson.decode(int_exp) == json.loads(int_exp)\n\n def test_loads_non_str_bytes_raises(self):\n msg = \"Expected 'str' or 'bytes'\"\n with pytest.raises(TypeError, match=msg):\n ujson.loads(None)\n\n @pytest.mark.parametrize(\"val\", [3590016419, 2 ** 31, 2 ** 32, (2 ** 32) - 1])\n def test_decode_number_with_32bit_sign_bit(self, val):\n # Test that numbers that fit within 32 bits but would have the\n # sign bit set (2**31 <= x < 2**32) are decoded properly.\n doc = f'{{\"id\": {val}}}'\n assert ujson.decode(doc)[\"id\"] == val\n\n def test_encode_big_escape(self):\n # Make sure no Exception is raised.\n for _ in range(10):\n base = \"\\u00e5\".encode()\n escape_input = base * 1024 * 1024 * 2\n ujson.encode(escape_input)\n\n def test_decode_big_escape(self):\n # Make sure no Exception is raised.\n for _ in range(10):\n base = \"\\u00e5\".encode()\n quote = b'\"'\n\n escape_input = quote + (base * 1024 * 1024 * 2) + quote\n ujson.decode(escape_input)\n\n def test_to_dict(self):\n d = {\"key\": 31337}\n\n class DictTest:\n def toDict(self):\n return d\n\n o = DictTest()\n output = ujson.encode(o)\n\n dec = ujson.decode(output)\n assert dec == d\n\n def 
test_default_handler(self):\n class _TestObject:\n def __init__(self, val):\n self.val = val\n\n @property\n def recursive_attr(self):\n return _TestObject(\"recursive_attr\")\n\n def __str__(self) -> str:\n return str(self.val)\n\n msg = \"Maximum recursion level reached\"\n with pytest.raises(OverflowError, match=msg):\n ujson.encode(_TestObject(\"foo\"))\n assert '\"foo\"' == ujson.encode(_TestObject(\"foo\"), default_handler=str)\n\n def my_handler(_):\n return \"foobar\"\n\n assert '\"foobar\"' == ujson.encode(\n _TestObject(\"foo\"), default_handler=my_handler\n )\n\n def my_handler_raises(_):\n raise TypeError(\"I raise for anything\")\n\n with pytest.raises(TypeError, match=\"I raise for anything\"):\n ujson.encode(_TestObject(\"foo\"), default_handler=my_handler_raises)\n\n def my_int_handler(_):\n return 42\n\n assert (\n ujson.decode(\n ujson.encode(_TestObject(\"foo\"), default_handler=my_int_handler)\n )\n == 42\n )\n\n def my_obj_handler(_):\n return datetime.datetime(2013, 2, 3)\n\n assert ujson.decode(\n ujson.encode(datetime.datetime(2013, 2, 3))\n ) == ujson.decode(\n ujson.encode(_TestObject(\"foo\"), default_handler=my_obj_handler)\n )\n\n obj_list = [_TestObject(\"foo\"), _TestObject(\"bar\")]\n assert json.loads(json.dumps(obj_list, default=str)) == ujson.decode(\n ujson.encode(obj_list, default_handler=str)\n )\n\n\nclass TestNumpyJSONTests:\n @pytest.mark.parametrize(\"bool_input\", [True, False])\n def test_bool(self, bool_input):\n b = bool(bool_input)\n assert ujson.decode(ujson.encode(b)) == b\n\n def test_bool_array(self):\n bool_array = np.array(\n [True, False, True, True, False, True, False, False], dtype=bool\n )\n output = np.array(ujson.decode(ujson.encode(bool_array)), dtype=bool)\n tm.assert_numpy_array_equal(bool_array, output)\n\n def test_int(self, any_int_dtype):\n klass = np.dtype(any_int_dtype).type\n num = klass(1)\n\n assert klass(ujson.decode(ujson.encode(num))) == num\n\n def test_int_array(self, any_int_dtype):\n arr = np.arange(100, dtype=int)\n arr_input = arr.astype(any_int_dtype)\n\n arr_output = np.array(\n ujson.decode(ujson.encode(arr_input)), dtype=any_int_dtype\n )\n tm.assert_numpy_array_equal(arr_input, arr_output)\n\n def test_int_max(self, any_int_dtype):\n if any_int_dtype in (\"int64\", \"uint64\") and not IS64:\n pytest.skip(\"Cannot test 64-bit integer on 32-bit platform\")\n\n klass = np.dtype(any_int_dtype).type\n\n # uint64 max will always overflow,\n # as it's encoded to signed.\n if any_int_dtype == \"uint64\":\n num = np.iinfo(\"int64\").max\n else:\n num = np.iinfo(any_int_dtype).max\n\n assert klass(ujson.decode(ujson.encode(num))) == num\n\n def test_float(self, float_dtype):\n klass = np.dtype(float_dtype).type\n num = klass(256.2013)\n\n assert klass(ujson.decode(ujson.encode(num))) == num\n\n def test_float_array(self, float_dtype):\n arr = np.arange(12.5, 185.72, 1.7322, dtype=float)\n float_input = arr.astype(float_dtype)\n\n float_output = np.array(\n ujson.decode(ujson.encode(float_input, double_precision=15)),\n dtype=float_dtype,\n )\n tm.assert_almost_equal(float_input, float_output)\n\n def test_float_max(self, float_dtype):\n klass = np.dtype(float_dtype).type\n num = klass(np.finfo(float_dtype).max / 10)\n\n tm.assert_almost_equal(\n klass(ujson.decode(ujson.encode(num, double_precision=15))), num\n )\n\n def test_array_basic(self):\n arr = np.arange(96)\n arr = arr.reshape((2, 2, 2, 2, 3, 2))\n\n tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)\n 
tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)\n\n @pytest.mark.parametrize(\"shape\", [(10, 10), (5, 5, 4), (100, 1)])\n def test_array_reshaped(self, shape):\n arr = np.arange(100)\n arr = arr.reshape(shape)\n\n tm.assert_numpy_array_equal(np.array(ujson.decode(ujson.encode(arr))), arr)\n tm.assert_numpy_array_equal(ujson.decode(ujson.encode(arr), numpy=True), arr)\n\n def test_array_list(self):\n arr_list = [\n \"a\",\n list(),\n dict(),\n dict(),\n list(),\n 42,\n 97.8,\n [\"a\", \"b\"],\n {\"key\": \"val\"},\n ]\n arr = np.array(arr_list, dtype=object)\n result = np.array(ujson.decode(ujson.encode(arr)), dtype=object)\n tm.assert_numpy_array_equal(result, arr)\n\n def test_array_float(self):\n dtype = np.float32\n\n arr = np.arange(100.202, 200.202, 1, dtype=dtype)\n arr = arr.reshape((5, 5, 4))\n\n arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)\n tm.assert_almost_equal(arr, arr_out)\n\n arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)\n tm.assert_almost_equal(arr, arr_out)\n\n def test_0d_array(self):\n # gh-18878\n msg = re.escape(\"array(1) (0d array) is not JSON serializable at the moment\")\n with pytest.raises(TypeError, match=msg):\n ujson.encode(np.array(1))\n\n @pytest.mark.parametrize(\n \"bad_input,exc_type,kwargs\",\n [\n ([{}, []], ValueError, {}),\n ([42, None], TypeError, {}),\n ([[\"a\"], 42], ValueError, {}),\n ([42, {}, \"a\"], TypeError, {}),\n ([42, [\"a\"], 42], ValueError, {}),\n ([\"a\", \"b\", [], \"c\"], ValueError, {}),\n ([{\"a\": \"b\"}], ValueError, dict(labelled=True)),\n ({\"a\": {\"b\": {\"c\": 42}}}, ValueError, dict(labelled=True)),\n ([{\"a\": 42, \"b\": 23}, {\"c\": 17}], ValueError, dict(labelled=True)),\n ],\n )\n def test_array_numpy_except(self, bad_input, exc_type, kwargs):\n with pytest.raises(exc_type):\n ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)\n\n def test_array_numpy_labelled(self):\n labelled_input = {\"a\": []}\n output = ujson.loads(ujson.dumps(labelled_input), numpy=True, labelled=True)\n assert (np.empty((1, 0)) == output[0]).all()\n assert (np.array([\"a\"]) == output[1]).all()\n assert output[2] is None\n\n labelled_input = [{\"a\": 42}]\n output = ujson.loads(ujson.dumps(labelled_input), numpy=True, labelled=True)\n assert (np.array([\"a\"]) == output[2]).all()\n assert (np.array([42]) == output[0]).all()\n assert output[1] is None\n\n # see gh-10837: write out the dump explicitly\n # so there is no dependency on iteration order\n input_dumps = '[{\"a\": 42, \"b\":31}, {\"a\": 24, \"c\": 99}, {\"a\": 2.4, \"b\": 78}]'\n output = ujson.loads(input_dumps, numpy=True, labelled=True)\n expected_vals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))\n assert (expected_vals == output[0]).all()\n assert output[1] is None\n assert (np.array([\"a\", \"b\"]) == output[2]).all()\n\n input_dumps = (\n '{\"1\": {\"a\": 42, \"b\":31}, \"2\": {\"a\": 24, \"c\": 99}, '\n '\"3\": {\"a\": 2.4, \"b\": 78}}'\n )\n output = ujson.loads(input_dumps, numpy=True, labelled=True)\n expected_vals = np.array([42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))\n assert (expected_vals == output[0]).all()\n assert (np.array([\"1\", \"2\", \"3\"]) == output[1]).all()\n assert (np.array([\"a\", \"b\"]) == output[2]).all()\n\n\nclass TestPandasJSONTests:\n def test_dataframe(self, orient, numpy):\n if orient == \"records\" and numpy:\n pytest.skip(\"Not idiomatic pandas\")\n\n dtype = get_int32_compat_dtype(numpy, orient)\n\n df = DataFrame(\n [[1, 2, 3], [4, 5, 
6]],\n index=[\"a\", \"b\"],\n columns=[\"x\", \"y\", \"z\"],\n dtype=dtype,\n )\n encode_kwargs = {} if orient is None else dict(orient=orient)\n decode_kwargs = {} if numpy is None else dict(numpy=numpy)\n assert (df.dtypes == dtype).all()\n\n output = ujson.decode(ujson.encode(df, **encode_kwargs), **decode_kwargs)\n assert (df.dtypes == dtype).all()\n\n # Ensure proper DataFrame initialization.\n if orient == \"split\":\n dec = _clean_dict(output)\n output = DataFrame(**dec)\n else:\n output = DataFrame(output)\n\n # Corrections to enable DataFrame comparison.\n if orient == \"values\":\n df.columns = [0, 1, 2]\n df.index = [0, 1]\n elif orient == \"records\":\n df.index = [0, 1]\n elif orient == \"index\":\n df = df.transpose()\n\n assert (df.dtypes == dtype).all()\n tm.assert_frame_equal(output, df)\n\n def test_dataframe_nested(self, orient):\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6]], index=[\"a\", \"b\"], columns=[\"x\", \"y\", \"z\"]\n )\n\n nested = {\"df1\": df, \"df2\": df.copy()}\n kwargs = {} if orient is None else dict(orient=orient)\n\n exp = {\n \"df1\": ujson.decode(ujson.encode(df, **kwargs)),\n \"df2\": ujson.decode(ujson.encode(df, **kwargs)),\n }\n assert ujson.decode(ujson.encode(nested, **kwargs)) == exp\n\n def test_dataframe_numpy_labelled(self, orient):\n if orient in (\"split\", \"values\"):\n pytest.skip(\"Incompatible with labelled=True\")\n\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=[\"a\", \"b\"],\n columns=[\"x\", \"y\", \"z\"],\n dtype=int,\n )\n kwargs = {} if orient is None else dict(orient=orient)\n\n output = DataFrame(\n *ujson.decode(ujson.encode(df, **kwargs), numpy=True, labelled=True)\n )\n\n if orient is None:\n df = df.T\n elif orient == \"records\":\n df.index = [0, 1]\n\n tm.assert_frame_equal(output, df)\n\n def test_series(self, orient, numpy):\n dtype = get_int32_compat_dtype(numpy, orient)\n s = Series(\n [10, 20, 30, 40, 50, 60],\n name=\"series\",\n index=[6, 7, 8, 9, 10, 15],\n dtype=dtype,\n ).sort_values()\n assert s.dtype == dtype\n\n encode_kwargs = {} if orient is None else dict(orient=orient)\n decode_kwargs = {} if numpy is None else dict(numpy=numpy)\n\n output = ujson.decode(ujson.encode(s, **encode_kwargs), **decode_kwargs)\n assert s.dtype == dtype\n\n if orient == \"split\":\n dec = _clean_dict(output)\n output = Series(**dec)\n else:\n output = Series(output)\n\n if orient in (None, \"index\"):\n s.name = None\n output = output.sort_values()\n s.index = [\"6\", \"7\", \"8\", \"9\", \"10\", \"15\"]\n elif orient in (\"records\", \"values\"):\n s.name = None\n s.index = [0, 1, 2, 3, 4, 5]\n\n assert s.dtype == dtype\n tm.assert_series_equal(output, s)\n\n def test_series_nested(self, orient):\n s = Series(\n [10, 20, 30, 40, 50, 60], name=\"series\", index=[6, 7, 8, 9, 10, 15]\n ).sort_values()\n nested = {\"s1\": s, \"s2\": s.copy()}\n kwargs = {} if orient is None else dict(orient=orient)\n\n exp = {\n \"s1\": ujson.decode(ujson.encode(s, **kwargs)),\n \"s2\": ujson.decode(ujson.encode(s, **kwargs)),\n }\n assert ujson.decode(ujson.encode(nested, **kwargs)) == exp\n\n def test_index(self):\n i = Index([23, 45, 18, 98, 43, 11], name=\"index\")\n\n # Column indexed.\n output = Index(ujson.decode(ujson.encode(i)), name=\"index\")\n tm.assert_index_equal(i, output)\n\n output = Index(ujson.decode(ujson.encode(i), numpy=True), name=\"index\")\n tm.assert_index_equal(i, output)\n\n dec = _clean_dict(ujson.decode(ujson.encode(i, orient=\"split\")))\n output = Index(**dec)\n\n tm.assert_index_equal(i, output)\n assert 
i.name == output.name\n\n dec = _clean_dict(ujson.decode(ujson.encode(i, orient=\"split\"), numpy=True))\n output = Index(**dec)\n\n tm.assert_index_equal(i, output)\n assert i.name == output.name\n\n output = Index(ujson.decode(ujson.encode(i, orient=\"values\")), name=\"index\")\n tm.assert_index_equal(i, output)\n\n output = Index(\n ujson.decode(ujson.encode(i, orient=\"values\"), numpy=True), name=\"index\"\n )\n tm.assert_index_equal(i, output)\n\n output = Index(ujson.decode(ujson.encode(i, orient=\"records\")), name=\"index\")\n tm.assert_index_equal(i, output)\n\n output = Index(\n ujson.decode(ujson.encode(i, orient=\"records\"), numpy=True), name=\"index\"\n )\n tm.assert_index_equal(i, output)\n\n output = Index(ujson.decode(ujson.encode(i, orient=\"index\")), name=\"index\")\n tm.assert_index_equal(i, output)\n\n output = Index(\n ujson.decode(ujson.encode(i, orient=\"index\"), numpy=True), name=\"index\"\n )\n tm.assert_index_equal(i, output)\n\n def test_datetime_index(self):\n date_unit = \"ns\"\n\n # freq doesnt round-trip\n rng = DatetimeIndex(list(date_range(\"1/1/2000\", periods=20)), freq=None)\n encoded = ujson.encode(rng, date_unit=date_unit)\n\n decoded = DatetimeIndex(np.array(ujson.decode(encoded)))\n tm.assert_index_equal(rng, decoded)\n\n ts = Series(np.random.randn(len(rng)), index=rng)\n decoded = Series(ujson.decode(ujson.encode(ts, date_unit=date_unit)))\n\n idx_values = decoded.index.values.astype(np.int64)\n decoded.index = DatetimeIndex(idx_values)\n tm.assert_series_equal(ts, decoded)\n\n @pytest.mark.parametrize(\n \"invalid_arr\",\n [\n \"[31337,]\", # Trailing comma.\n \"[,31337]\", # Leading comma.\n \"[]]\", # Unmatched bracket.\n \"[,]\", # Only comma.\n ],\n )\n def test_decode_invalid_array(self, invalid_arr):\n with pytest.raises(ValueError):\n ujson.decode(invalid_arr)\n\n @pytest.mark.parametrize(\"arr\", [[], [31337]])\n def test_decode_array(self, arr):\n assert arr == ujson.decode(str(arr))\n\n @pytest.mark.parametrize(\"extreme_num\", [9223372036854775807, -9223372036854775808])\n def test_decode_extreme_numbers(self, extreme_num):\n assert extreme_num == ujson.decode(str(extreme_num))\n\n @pytest.mark.parametrize(\n \"too_extreme_num\", [\"9223372036854775808\", \"-90223372036854775809\"]\n )\n def test_decode_too_extreme_numbers(self, too_extreme_num):\n with pytest.raises(ValueError):\n ujson.decode(too_extreme_num)\n\n def test_decode_with_trailing_whitespaces(self):\n assert {} == ujson.decode(\"{}\\n\\t \")\n\n def test_decode_with_trailing_non_whitespaces(self):\n with pytest.raises(ValueError):\n ujson.decode(\"{}\\n\\t a\")\n\n def test_decode_array_with_big_int(self):\n with pytest.raises(ValueError):\n ujson.loads(\"[18446098363113800555]\")\n\n @pytest.mark.parametrize(\n \"float_number\",\n [\n 1.1234567893,\n 1.234567893,\n 1.34567893,\n 1.4567893,\n 1.567893,\n 1.67893,\n 1.7893,\n 1.893,\n 1.3,\n ],\n )\n @pytest.mark.parametrize(\"sign\", [-1, 1])\n def test_decode_floating_point(self, sign, float_number):\n float_number *= sign\n tm.assert_almost_equal(float_number, ujson.loads(str(float_number)), rtol=1e-15)\n\n def test_encode_big_set(self):\n s = set()\n\n for x in range(0, 100000):\n s.add(x)\n\n # Make sure no Exception is raised.\n ujson.encode(s)\n\n def test_encode_empty_set(self):\n assert \"[]\" == ujson.encode(set())\n\n def test_encode_set(self):\n s = {1, 2, 3, 4, 5, 6, 7, 8, 9}\n enc = ujson.encode(s)\n dec = ujson.decode(enc)\n\n for v in dec:\n assert v in s\n\n @pytest.mark.parametrize(\n \"td\",\n [\n 
Timedelta(days=366),\n Timedelta(days=-1),\n Timedelta(hours=13, minutes=5, seconds=5),\n Timedelta(hours=13, minutes=20, seconds=30),\n Timedelta(days=-1, nanoseconds=5),\n Timedelta(nanoseconds=1),\n Timedelta(microseconds=1, nanoseconds=1),\n Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),\n Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),\n ],\n )\n def test_encode_timedelta_iso(self, td):\n # GH 28256\n result = ujson.encode(td, iso_dates=True)\n expected = f'\"{td.isoformat()}\"'\n\n assert result == expected\n" ]
[ [ "pandas.Series", "numpy.linspace", "pandas.DataFrame", "pandas.tests.indexing.common._mklbl", "numpy.random.randn", "pandas._testing.assert_frame_equal", "numpy.random.randint", "pandas.core.indexing.maybe_numeric_slice", "numpy.arange", "pandas.Index", "pandas._testing.makeCustomDataframe", "pandas.core.arrays.integer_array", "pandas.core.dtypes.common.is_float_dtype", "pandas._testing.assert_series_equal", "numpy.zeros", "pandas._testing.assert_index_equal", "pandas.core.dtypes.common.is_integer_dtype", "pandas._testing.assert_produces_warning", "pandas.Categorical", "pandas.array", "pandas.Float64Index", "pandas.core.indexing.non_reducing_slice", "numpy.random.rand", "numpy.array", "numpy.random.random", "pandas.Timestamp" ], [ "numpy.array", "pandas._testing.assert_frame_equal", "numpy.iinfo", "pandas.DataFrame" ], [ "pandas.timedelta_range", "numpy.arange", "pandas.factorize", "pandas.array", "numpy.concatenate", "numpy.random.randn", "pandas._libs.lib.maybe_convert_objects", "pandas.date_range", "numpy.array", "pandas.core.algorithms.make_duplicates_of_left_unique_in_right", "numpy.random.randint" ], [ "pandas._testing.assert_almost_equal", "pandas.Series", "pandas.DataFrame", "numpy.dtype", "pandas._libs.json.loads", "numpy.iinfo", "pandas._testing.assert_frame_equal", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "pandas.Index", "pandas.DatetimeIndex", "numpy.finfo", "pandas._libs.tslib.Timestamp", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._libs.json.encode", "pandas._libs.json.dumps", "pandas.compat.is_platform_windows", "pandas.Timedelta", "pandas._testing.can_set_locale", "pandas.date_range", "numpy.array", "pandas._libs.json.decode", "numpy.datetime64", "pandas._testing.set_locale", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jinhan814/PyTorch-GAN-Study
[ "c63ed1bbcbc663d3267671d8ded4ed13c766b738" ]
[ "PGGAN/hyun_experiments.py" ]
[ "import torch\n## scratch file for experiments\n\n# x = torch.randint(10,size=(1,4,2,2))\n# print(x)\n# print(x.size())\n\n# factor =2\n# s = x.size()\n# x = x.view(-1, s[1], s[2], 1, s[3], 1) # (-1, 4, 2, 1, 2, 1)\n# print(x.size())\n# # print(x)\n# x = x.expand(-1, s[1], s[2], factor, s[3], factor) # (-1, 4,2,2,2,2)\n# print(x.size())\n# # print(x)\n# x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)\n# # x = x.view(-1, s[1], s[2] * factor, s[3] * factor)\n# print(x.size())\n# # print(x)\n\n# x = torch.rand(,4,2,2)\n# subGroupSize = 4\n\n# size = x.size()\n# subGroupSize = min(size[0], subGroupSize)\n# if size[0] % subGroupSize != 0:\n# subGroupSize = size[0]\n# G = int(size[0] / subGroupSize)\n\n# print(subGroupSize,G)\n# print(x)\n# if subGroupSize > 1:\n# y = x.view(-1, subGroupSize, size[1], size[2], size[3])\n# print(y)\n# y = torch.var(y, 1)\n# print(y)\n# y = torch.sqrt(y + 1e-8)\n# print(y)\n# y = y.view(G, -1)\n# print(y)\n# y = torch.mean(y, 1).view(G, 1)\n# print(y)\n# y = y.expand(G, size[2]*size[3]).view((G, 1, 1, size[2], size[3]))\n# print(y)\n# y = y.expand(G, subGroupSize, -1, -1, -1)\n# print(y)\n# y = y.contiguous().view((-1, 1, size[2], size[3]))\n# else:\n# y = torch.zeros(x.size(0), 1, x.size(2), x.size(3), device=x.device)\n#\n\nimport torchvision\n\n# HWC tensor of random integers; cast to float so bilinear Resize can interpolate it\nx = torch.randint(10, size=(8, 8, 3)).float()\n# torch.transpose swaps exactly two dims (it does not take a permutation tuple);\n# swapping dims 0 and 2 puts channels first (H and W are both 8, so their order does not matter here)\nx = torch.transpose(x, 0, 2)\nprint(x.size())\nx = torchvision.transforms.Resize((4, 4))(x)  # resizes the trailing (H, W) dims to 4x4\nx = torch.transpose(x, 0, 2)  # back to channels-last (HWC)\nprint(x.size())" ]
[ [ "torch.transpose", "torch.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nxdao2000/probability
[ "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700", "33d2bc1cb0e7b6284579ea7f3692b9d056e0d700" ]
[ "tensorflow_probability/python/positive_semidefinite_kernels/positive_semidefinite_kernel.py", "tensorflow_probability/python/internal/backend/numpy/nn.py", "tensorflow_probability/python/internal/auto_batching/numpy_backend_test.py", "tensorflow_probability/python/glm/fisher_scoring.py", "tensorflow_probability/python/distributions/gaussian_process_test.py", "tensorflow_probability/python/bijectors/iterated_sigmoid_centered_test.py", "tensorflow_probability/python/distributions/onehot_categorical.py", "tensorflow_probability/python/distributions/onehot_categorical_test.py", "tensorflow_probability/python/internal/auto_batching/tf_backend.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"PositiveSemidefiniteKernel base.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport contextlib\nimport functools\nimport operator\nimport six\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.positive_semidefinite_kernels.internal import util\n\n\n__all__ = [\n 'PositiveSemidefiniteKernel',\n]\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass PositiveSemidefiniteKernel(tf.Module):\n \"\"\"Abstract base class for positive semi-definite kernel functions.\n\n #### Background\n\n For any set `S`, a real- (or complex-valued) function `k` on the Cartesian\n product `S x S` is called positive semi-definite if we have\n\n ```none\n sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in\n the reals (or the complex plane). '*' denotes the complex conjugate, in the\n complex case.\n\n Some examples:\n - `S` is R, and `k(s, t) = (s - a) (t - b)`, where a, b are in R. This\n corresponds to a linear kernel.\n - `S` is R^+ U {0}, and `k(s, t) = min(s, t)`. This corresponds to a kernel\n for a Wiener process.\n - `S` is the set of strings over an alphabet `A = {c1, ... cC}`, and\n `k(s, t)` is defined via some similarity metric over strings.\n\n We model positive semi-definite functions (*kernels*, in common machine\n learning parlance) as classes with 3 primary public methods: `apply`,\n `matrix`, and `tensor`.\n\n `apply` computes the value of the kernel function at a pair of (batches of)\n input locations. It is the more \"low-level\" operation: `matrix` and `tensor`\n are implemented in terms of `apply`.\n\n `matrix` computes the value of the kernel *pairwise* on two (batches of)\n lists of input examples. When the two collections are the same the result is\n called the Gram (or Gramian) matrix\n (https://en.wikipedia.org/wiki/Gramian_matrix).\n\n `tensor` generalizes `matrix`, taking rank `k1` and `k2` collections of\n input examples to a rank `k1 + k2` collection of kernel values.\n\n #### Kernel Parameter Shape Semantics\n\n PositiveSemidefiniteKernel implementations support batching of kernel\n parameters and broadcasting of these parameters across batches of inputs. This\n allows, for example, creating a single kernel object which acts like a\n collection of kernels with different parameters. This might be useful for,\n e.g., for exploring multiple random initializations in parallel during a\n kernel parameter optimization procedure.\n\n The interaction between kernel parameter shapes and input shapes (see below)\n is somewhat subtle. The semantics are designed to make the most common use\n cases easy, while not ruling out more intricate control. 
The overarching\n principle is that kernel parameter batch shapes must be broadcastable with\n input batch shapes (see below). Examples are provided in the method-level\n documentation.\n\n #### Input Shape Semantics\n\n PositiveSemidefiniteKernel methods each support a notion of batching inputs;\n see the method-level documentation for full details; here we describe the\n overall semantics of input shapes. Inputs to PositiveSemidefiniteKernel\n methods partition into 3 pieces:\n\n ```none\n [b1, ..., bB, e1, ..., eE, f1, ..., fF]\n '----------' '---------' '---------'\n | | '-- Feature dimensions\n | '-- Example dimensions\n '-- Batch dimensions\n ```\n\n - Feature dimensions correspond to the space over which the kernel is defined;\n in typical applications inputs are vectors and this part of the shape is\n rank-1. For example, if our kernel is defined over R^2 x R^2, each input is\n a 2-D vector (a rank-1 tensor of shape `[2,]`) so that\n `F = 1, [f1, ..., fF] = [2]`. If we defined a kernel over DxD matrices, its\n domain would be R^(DxD) x R^(DxD), we would have `F = 2` and\n `[f1, ..., fF] = [D, D]`. Feature shapes of inputs should be the same, but\n no exception will be raised unless they are broadcast-incompatible.\n - Batch dimensions describe collections of inputs which in some sense have\n nothing to do with each other, but may be coupled to batches of kernel\n parameters. It's required that batch dimensions of inputs broadcast with\n each other, and with the kernel's overall batch shape.\n - Example dimensions are shape elements which represent a collection of inputs\n that in some sense \"go together\" (whereas batches are \"independent\"). The\n exact semantics are different for the `apply`, `matrix` and `tensor` methods\n (see method-level doc strings for more details). `apply` combines examples\n together pairwise, much like the python built-in `zip`. `matrix` combines\n examples pairwise for *all* pairs of elements from two rank-1 input\n collections (lists), ie, it applies the kernel to all elements in the\n cross-product of two lists of examples. `tensor` further generalizes\n `matrix` to higher rank collections of inputs. Only `matrix` strictly\n requires example dimensions to be present (and to be exactly rank 1),\n although the typical usage of `apply` (eg, building a matrix diagonal) will\n also have `example_ndims` 1.\n\n ##### Examples\n\n ```python\n import tensorflow_probability as tfp\n\n # Suppose `SomeKernel` acts on vectors (rank-1 tensors), ie number of\n # feature dimensions is 1.\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n scalar_kernel.apply(x, y).shape\n # ==> [5]\n\n scalar_kernel.matrix(x, y).shape\n # ==> [5, 5]\n ```\n\n Now we can consider a kernel with batched parameters:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n\n batch_kernel.apply(x, y).shape\n # ==> Error! 
[2] and [5] can't broadcast.\n # We could solve this by telling `apply` to treat the 5 as an example dim:\n\n batch_kernel.apply(x, y, example_ndims=1).shape\n # ==> [2, 5]\n\n # Note that example_ndims is implicitly 1 for a call to `matrix`, so the\n # following just works:\n batch_kernel.matrix(x, y).shape\n # ==> [2, 5, 5]\n ```\n\n \"\"\"\n\n def __init__(self, feature_ndims, dtype=None, name=None):\n \"\"\"Construct a PositiveSemidefiniteKernel (subclass) instance.\n\n Args:\n feature_ndims: Python `integer` indicating the number of dims (the rank)\n of the feature space this kernel acts on.\n dtype: `DType` on which this kernel operates.\n name: Python `str` name prefixed to Ops created by this class. Default:\n subclass name.\n\n Raises:\n ValueError: if `feature_ndims` is not an integer greater than 0\n Inputs to PositiveSemidefiniteKernel methods partition into 3 pieces:\n\n ```none\n [b1, ..., bB, e1, ..., eE, f1, ..., fF]\n '----------' '---------' '---------'\n | | '-- Feature dimensions\n | '-- Example dimensions\n '-- Batch dimensions\n ```\n\n The `feature_ndims` argument declares how many of the right-most shape\n dimensions belong to the feature dimensions. This enables us to predict\n which shape dimensions will be 'reduced' away during kernel computation.\n \"\"\"\n if not (isinstance(feature_ndims, int) and feature_ndims > 0):\n raise ValueError(\n '`feature_ndims` must be a Python `integer` greater than zero. ' +\n 'Got: {}'.format(feature_ndims))\n self._feature_ndims = feature_ndims\n self._dtype = dtype\n if not name or name[-1] != '/': # `name` is not a name scope\n name = tf.name_scope(name or type(self).__name__).name\n self._name = name\n\n @property\n def feature_ndims(self):\n \"\"\"The number of feature dimensions.\n\n Kernel functions generally act on pairs of inputs from some space like\n\n ```none\n R^(d1 x ... x dD)\n ```\n\n or, in words: rank-`D` real-valued tensors of shape `[d1, ..., dD]`. Inputs\n can be vectors in some `R^N`, but are not restricted to be. Indeed, one\n might consider kernels over matrices, tensors, or even more general spaces,\n like strings or graphs.\n\n Returns:\n The number of feature dimensions (feature rank) of this kernel.\n \"\"\"\n return self._feature_ndims\n\n @property\n def dtype(self):\n \"\"\"DType over which the kernel operates.\"\"\"\n return self._dtype\n\n @property\n def name(self):\n \"\"\"Name prepended to all ops created by this class.\"\"\"\n return self._name\n\n @property\n def batch_shape(self):\n \"\"\"The batch_shape property of a PositiveSemidefiniteKernel.\n\n This property describes the fully broadcast shape of all kernel parameters.\n For example, consider an ExponentiatedQuadratic kernel, which is\n parameterized by an amplitude and length_scale:\n\n ```none\n exp_quad(x, x') := amplitude * exp(||x - x'||**2 / length_scale**2)\n ```\n\n The batch_shape of such a kernel is derived from broadcasting the shapes of\n `amplitude` and `length_scale`. 
E.g., if their shapes were\n\n ```python\n amplitude.shape = [2, 1, 1]\n length_scale.shape = [1, 4, 3]\n ```\n\n then `exp_quad`'s batch_shape would be `[2, 4, 3]`.\n\n Note that this property defers to the private _batch_shape method, which\n concrete implementation sub-classes are obliged to provide.\n\n Returns:\n `TensorShape` instance describing the fully broadcast shape of all\n kernel parameters.\n \"\"\"\n return self._batch_shape()\n\n def batch_shape_tensor(self):\n \"\"\"The batch_shape property of a PositiveSemidefiniteKernel as a `Tensor`.\n\n Returns:\n `Tensor` which evaluates to a vector of integers which are the\n fully-broadcast shapes of the kernel parameters.\n \"\"\"\n with tf.name_scope(self._name):\n if self.batch_shape.is_fully_defined():\n return tf.convert_to_tensor(\n self.batch_shape.as_list(), dtype=tf.int32, name='batch_shape')\n with tf.name_scope('batch_shape_tensor'):\n return self._batch_shape_tensor()\n\n @contextlib.contextmanager\n def _name_scope(self, name=None, values=None):\n \"\"\"Helper function to standardize op scope.\"\"\"\n with tf.name_scope(self.name):\n values = [] if values is None else values\n with tf.name_scope(name) as scope:\n yield scope\n\n def apply(self, x1, x2, example_ndims=0):\n \"\"\"Apply the kernel function pairs of inputs.\n\n Args:\n x1: `Tensor` input to the kernel, of shape `B1 + E1 + F`, where `B1` and\n `E1` may be empty (ie, no batch/example dims, resp.) and `F` (the\n feature shape) must have rank equal to the kernel's `feature_ndims`\n property. Batch shape must broadcast with the batch shape of `x2` and\n with the kernel's batch shape. Example shape must broadcast with example\n shape of `x2`. `x1` and `x2` must have the same *number* of example dims\n (ie, same rank).\n x2: `Tensor` input to the kernel, of shape `B2 + E2 + F`, where `B2` and\n `E2` may be empty (ie, no batch/example dims, resp.) and `F` (the\n feature shape) must have rank equal to the kernel's `feature_ndims`\n property. Batch shape must broadcast with the batch shape of `x2` and\n with the kernel's batch shape. Example shape must broadcast with example\n shape of `x2`. `x1` and `x2` must have the same *number* of example\n example_ndims: A python integer, the number of example dims in the inputs.\n In essence, this parameter controls how broadcasting of the kernel's\n batch shape with input batch shapes works. The kernel batch shape will\n be broadcast against everything to the left of the combined example and\n feature dimensions in the input shapes.\n\n Returns:\n `Tensor` containing the results of applying the kernel function to inputs\n `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then the\n shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + broadcast(E1, E2)`.\n\n Given an index set `S`, a kernel function is mathematically defined as a\n real- or complex-valued function on `S` satisfying the\n positive semi-definiteness constraint:\n\n ```none\n sum_i sum_j (c[i]*) c[j] k(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in `S` and\n `{c[1], ..., c[N]}` in the reals (or the complex plane). '*' is the complex\n conjugate, in the complex case.\n\n This method most closely resembles the function described in the\n mathematical definition of a kernel. 
Given a PositiveSemidefiniteKernel `k`\n with scalar parameters and inputs `x` and `y` in `S`, `apply(x, y)` yields a\n single scalar value.\n\n #### Examples\n\n ```python\n import tensorflow_probability as tfp\n\n # Suppose `SomeKernel` acts on vectors (rank-1 tensors)\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # `x` and `y` are batches of five 3-D vectors:\n x = np.ones([5, 3], np.float32)\n y = np.ones([5, 3], np.float32)\n scalar_kernel.apply(x, y).shape\n # ==> [5]\n ```\n\n The above output is the result of vectorized computation of the five values\n\n ```none\n [k(x[0], y[0]), k(x[1], y[1]), ..., k(x[4], y[4])]\n ```\n\n Now we can consider a kernel with batched parameters:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.apply(x, y).shape\n # ==> Error! [2] and [5] can't broadcast.\n ```\n\n The parameter batch shape of `[2]` and the input batch shape of `[5]` can't\n be broadcast together. We can fix this in either of two ways:\n\n 1. Give the parameter a shape of `[2, 1]` which will correctly\n broadcast with `[5]` to yield `[2, 5]`:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n param=[[.2], [.5]])\n batch_kernel.batch_shape\n # ==> [2, 1]\n batch_kernel.apply(x, y).shape\n # ==> [2, 5]\n ```\n\n 2. By specifying `example_ndims`, which tells the kernel to treat the `5`\n in the input shape as part of the \"example shape\", and \"pushing\" the\n kernel batch shape to the left:\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[.2, .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.apply(x, y, example_ndims=1).shape\n # ==> [2, 5]\n\n \"\"\"\n with self._name_scope(self._name, values=[x1, x2]):\n x1 = tf.convert_to_tensor(x1, name='x1')\n x2 = tf.convert_to_tensor(x2, name='x2')\n\n should_expand_dims = (example_ndims == 0)\n\n if should_expand_dims:\n example_ndims += 1\n x1 = tf.expand_dims(x1, -(self.feature_ndims + 1))\n x2 = tf.expand_dims(x2, -(self.feature_ndims + 1))\n\n result = self._apply(x1, x2, example_ndims=example_ndims)\n\n if should_expand_dims:\n result = tf.squeeze(result, axis=-1)\n\n return result\n\n def _apply(self, x1, x2, example_ndims=1):\n \"\"\"Apply the kernel function to a pair of (batches of) inputs.\n\n Subclasses must implement this method. It will always be called with\n example_ndims >= 1. Implementations should take care to respect\n example_ndims, by padding parameters on the right with 1's example_ndims\n times. See tests and existing subclasses for examples.\n\n Args:\n x1: `Tensor` input to the first positional parameter of the kernel, of\n shape `B1 + E1 + F`, where `B1` may be empty (ie, no batch dims, resp.),\n `E1` is a shape of rank at least 1, and `F` (the feature shape) must\n have rank equal to the kernel's `feature_ndims` property. Batch shape\n must broadcast with the batch shape of `x2` and with the kernel's batch\n shape. Example shape must broadcast with example shape of `x2` (They\n don't strictly need to be equal, e.g., when `apply` is called from\n `matrix`, `x1` and `x2` each have 1's in opposing positions in their\n example shapes). 
`x1` and `x2` must have the same *number* of example\n dims (ie, same rank).\n x2: `Tensor` input to the second positional parameter of the kernel,\n shape `B2 + E2 + F`, where `B2` may be empty (ie, no batch dims, resp.),\n `E2` is a shape of rank at least 1, and `F` (the feature shape) must\n have rank equal to the kernel's `feature_ndims` property. Batch shape\n must broadcast with the batch shape of `x1` and with the kernel's batch\n shape. Example shape must broadcast with example shape of `x1` (They\n don't strictly need to be equal, e.g., when `apply` is called from\n `matrix`, `x1` and `x2` each have 1's in opposing positions in their\n example shapes). `x1` and `x2` must have the same *number* of example\n dims (ie, same rank).\n example_ndims: A python integer greater than or equal to 1, the number of\n example dims in the inputs. In essence, this parameter controls how\n broadcasting of the kernel's batch shape with input batch shapes works.\n The kernel batch shape will be broadcast against everything to the left\n of the combined example and feature dimensions in the input shapes.\n\n Returns:\n `Tensor` containing the results of applying the kernel function to inputs\n `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then the\n shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + broadcast(E1, E2)`.\n \"\"\"\n raise NotImplementedError(\n 'Subclasses must provide `_apply` implementation.')\n\n def matrix(self, x1, x2):\n \"\"\"Construct (batched) matrices from (batches of) collections of inputs.\n\n Args:\n x1: `Tensor` input to the first positional parameter of the kernel, of\n shape `B1 + [e1] + F`, where `B1` may be empty (ie, no batch dims,\n resp.), `e1` is a single integer (ie, `x1` has example ndims exactly 1),\n and `F` (the feature shape) must have rank equal to the kernel's\n `feature_ndims` property. Batch shape must broadcast with the batch\n shape of `x2` and with the kernel's batch shape.\n x2: `Tensor` input to the second positional parameter of the kernel,\n shape `B2 + [e2] + F`, where `B2` may be empty (ie, no batch dims,\n resp.), `e2` is a single integer (ie, `x2` has example ndims exactly 1),\n and `F` (the feature shape) must have rank equal to the kernel's\n `feature_ndims` property. Batch shape must broadcast with the batch\n shape of `x1` and with the kernel's batch shape.\n\n Returns:\n `Tensor` containing the matrix (possibly batched) of kernel applications\n to pairs from inputs `x1` and `x2`. If the kernel parameters' batch shape\n is `Bk` then the shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + [e1, e2]` (note this differs from `apply`: the\n example dimensions are concatenated, whereas in `apply` the example dims\n are broadcast together).\n\n Given inputs `x1` and `x2` of shapes\n\n ```none\n [b1, ..., bB, e1, f1, ..., fF]\n ```\n\n and\n\n ```none\n [c1, ..., cC, e2, f1, ..., fF]\n ```\n\n This method computes the batch of `e1 x e2` matrices resulting from applying\n the kernel function to all pairs of inputs from `x1` and `x2`. The shape\n of the batch of matrices is the result of broadcasting the batch shapes of\n `x1`, `x2`, and the kernel parameters (see examples below). As such, it's\n required that these shapes all be broadcast compatible. 
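Concretely, each entry of the returned matrix agrees with a pointwise `apply` on
the corresponding pair of inputs. A minimal sketch (assuming the
`ExponentiatedQuadratic` kernel and eager execution):

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

kernel = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(
    amplitude=1., length_scale=.75)  # scalar parameters => batch_shape []

x = np.random.uniform(-1., 1., size=[5, 3]).astype(np.float32)
y = np.random.uniform(-1., 1., size=[4, 3]).astype(np.float32)

mat = kernel.matrix(x, y)        # shape [5, 4]
pair = kernel.apply(x[2], y[3])  # scalar: k(x[2], y[3])
# mat[2, 3] and pair agree up to floating-point error.
diff = tf.abs(mat[2, 3] - pair)
```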
However, the kernel\n parameter batch shapes need not broadcast against the 'example shapes' (`e1`\n and `e2` above).\n\n When the two inputs are the (batches of) identical collections, the\n resulting matrix is the so-called Gram (or Gramian) matrix\n (https://en.wikipedia.org/wiki/Gramian_matrix).\n\n #### Examples\n\n First, consider a kernel with a single scalar parameter.\n\n ```python\n import tensorflow_probability as tfp\n\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # Our inputs are two lists of 3-D vectors\n x = np.ones([5, 3], np.float32)\n y = np.ones([4, 3], np.float32)\n scalar_kernel.matrix(x, y).shape\n # ==> [5, 4]\n ```\n\n The result comes from applying the kernel to the entries in `x` and `y`\n pairwise, across all pairs:\n\n ```none\n | k(x[0], y[0]) k(x[0], y[1]) ... k(x[0], y[3]) |\n | k(x[1], y[0]) k(x[1], y[1]) ... k(x[1], y[3]) |\n | ... ... ... |\n | k(x[4], y[0]) k(x[4], y[1]) ... k(x[4], y[3]) |\n ```\n\n Now consider a kernel with batched parameters with the same inputs\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n\n batch_kernel.matrix(x, y).shape\n # ==> [2, 5, 4]\n ```\n\n This results in a batch of 2 matrices, one computed from the kernel with\n `param = 1.` and the other with `param = .5`.\n\n We also support batching of the inputs. First, let's look at that with\n the scalar kernel again.\n\n ```python\n # Batch of 10 lists of 5 vectors of dimension 3\n x = np.ones([10, 5, 3], np.float32)\n\n # Batch of 10 lists of 4 vectors of dimension 3\n y = np.ones([10, 4, 3], np.float32)\n\n scalar_kernel.matrix(x, y).shape\n # ==> [10, 5, 4]\n ```\n\n The result is a batch of 10 matrices built from the batch of 10 lists of\n input vectors. These batch shapes have to be broadcastable. The following\n will *not* work:\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([20, 4, 3], np.float32)\n scalar_kernel.matrix(x, y).shape\n # ==> Error! [10] and [20] can't broadcast.\n ```\n\n Now let's consider batches of inputs in conjunction with batches of kernel\n parameters. We require that the input batch shapes be broadcastable with\n the kernel parameter batch shapes, otherwise we get an error:\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([10, 4, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.matrix(x, y).shape\n # ==> Error! 
[2] and [10] can't broadcast.\n ```\n\n The fix is to make the kernel parameter shape broadcastable with `[10]` (or\n reshape the inputs to be broadcastable!):\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([10, 4, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[[1.], [.5]])\n batch_kernel.batch_shape\n # ==> [2, 1]\n batch_kernel.matrix(x, y).shape\n # ==> [2, 10, 5, 4]\n\n # Or, make the inputs broadcastable:\n x = np.ones([10, 1, 5, 3], np.float32)\n y = np.ones([10, 1, 4, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.matrix(x, y).shape\n # ==> [10, 2, 5, 4]\n ```\n\n Here, we have the result of applying the kernel, with 2 different\n parameters, to each of a batch of 10 pairs of input lists.\n\n \"\"\"\n with self._name_scope(self._name, values=[x1, x2]):\n x1 = tf.convert_to_tensor(x1, name='x1')\n x2 = tf.convert_to_tensor(x2, name='x2')\n\n return self.tensor(x1, x2, x1_example_ndims=1, x2_example_ndims=1)\n\n def tensor(self, x1, x2, x1_example_ndims, x2_example_ndims):\n \"\"\"Construct (batched) tensors from (batches of) collections of inputs.\n\n Args:\n x1: `Tensor` input to the first positional parameter of the kernel, of\n shape `B1 + E1 + F`, where `B1` and `E1` arbitrary shapes which may be\n empty (ie, no batch/example dims, resp.), and `F` (the feature shape)\n must have rank equal to the kernel's `feature_ndims` property. Batch\n shape must broadcast with the batch shape of `x2` and with the kernel's\n batch shape.\n x2: `Tensor` input to the second positional parameter of the kernel,\n shape `B2 + E2 + F`, where `B2` and `E2` arbitrary shapes which may be\n empty (ie, no batch/example dims, resp.), and `F` (the feature shape)\n must have rank equal to the kernel's `feature_ndims` property. Batch\n shape must broadcast with the batch shape of `x1` and with the kernel's\n batch shape.\n x1_example_ndims: A python integer greater than or equal to 0, the number\n of example dims in the first input. This affects both the alignment of\n batch shapes and the shape of the final output of the function.\n Everything left of the feature shape and the example dims in `x1` is\n considered \"batch shape\", and must broadcast as specified above.\n x2_example_ndims: A python integer greater than or equal to 0, the number\n of example dims in the second input. This affects both the alignment of\n batch shapes and the shape of the final output of the function.\n Everything left of the feature shape and the example dims in `x1` is\n considered \"batch shape\", and must broadcast as specified above.\n\n Returns:\n `Tensor` containing (possibly batched) kernel applications to pairs from\n inputs `x1` and `x2`. If the kernel parameters' batch shape is `Bk` then\n the shape of the `Tensor` resulting from this method call is\n `broadcast(Bk, B1, B2) + E1 + E2`. Note this differs from `apply`: the\n example dimensions are concatenated, whereas in `apply` the example dims\n are broadcast together. 
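For contrast, a compact shape comparison of the three methods (a sketch, assuming
scalar kernel parameters):

```python
import numpy as np
import tensorflow_probability as tfp

kernel = tfp.positive_semidefinite_kernels.ExponentiatedQuadratic(
    amplitude=1., length_scale=1.)

x = np.ones([5, 3], np.float32)  # 5 examples of 3-D features
y = np.ones([5, 3], np.float32)

kernel.apply(x, y).shape   # ==> [5]    example dims broadcast together
kernel.matrix(x, y).shape  # ==> [5, 5] example dims concatenated
kernel.tensor(x, y, x1_example_ndims=1, x2_example_ndims=1).shape
# ==> [5, 5]  same as `matrix` when both inputs have example ndims 1
```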
It also differs from `matrix`: the example shapes\n are arbitrary here, and the result accrues a rank equal to the sum of the\n ranks of the input example shapes.\n\n #### Examples\n\n First, consider a kernel with a single scalar parameter.\n\n ```python\n import tensorflow_probability as tfp\n\n scalar_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=.5)\n scalar_kernel.batch_shape\n # ==> []\n\n # Our inputs are two rank-2 collections of 3-D vectors\n x = np.ones([5, 6, 3], np.float32)\n y = np.ones([7, 8, 3], np.float32)\n scalar_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [5, 6, 7, 8]\n\n # Empty example shapes work too!\n x = np.ones([3], np.float32)\n y = np.ones([5, 3], np.float32)\n scalar_kernel.tensor(x, y, x1_example_ndims=0, x2_example_ndims=1).shape\n # ==> [5]\n ```\n\n The result comes from applying the kernel to the entries in `x` and `y`\n pairwise, across all pairs:\n\n ```none\n | k(x[0], y[0]) k(x[0], y[1]) ... k(x[0], y[3]) |\n | k(x[1], y[0]) k(x[1], y[1]) ... k(x[1], y[3]) |\n | ... ... ... |\n | k(x[4], y[0]) k(x[4], y[1]) ... k(x[4], y[3]) |\n ```\n\n Now consider a kernel with batched parameters.\n\n ```python\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(param=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n\n # Inputs are two rank-2 collections of 3-D vectors\n x = np.ones([5, 6, 3], np.float32)\n y = np.ones([7, 8, 3], np.float32)\n scalar_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [2, 5, 6, 7, 8]\n ```\n\n We also support batching of the inputs. First, let's look at that with\n the scalar kernel again.\n\n ```python\n # Batch of 10 lists of 5x6 collections of dimension 3\n x = np.ones([10, 5, 6, 3], np.float32)\n\n # Batch of 10 lists of 7x8 collections of dimension 3\n y = np.ones([10, 7, 8, 3], np.float32)\n\n scalar_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [10, 5, 6, 7, 8]\n ```\n\n The result is a batch of 10 tensors built from the batch of 10 rank-2\n collections of input vectors. The batch shapes have to be broadcastable.\n The following will *not* work:\n\n ```python\n x = np.ones([10, 5, 3], np.float32)\n y = np.ones([20, 4, 3], np.float32)\n scalar_kernel.tensor(x, y, x1_example_ndims=1, x2_example_ndims=1).shape\n # ==> Error! [10] and [20] can't broadcast.\n ```\n\n Now let's consider batches of inputs in conjunction with batches of kernel\n parameters. We require that the input batch shapes be broadcastable with\n the kernel parameter batch shapes, otherwise we get an error:\n\n ```python\n x = np.ones([10, 5, 6, 3], np.float32)\n y = np.ones([10, 7, 8, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> Error! 
[2] and [10] can't broadcast.\n ```\n\n The fix is to make the kernel parameter shape broadcastable with `[10]` (or\n reshape the inputs to be broadcastable!):\n\n ```python\n x = np.ones([10, 5, 6, 3], np.float32)\n y = np.ones([10, 7, 8, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[[1.], [.5]])\n batch_kernel.batch_shape\n # ==> [2, 1]\n batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [2, 10, 5, 6, 7, 8]\n\n # Or, make the inputs broadcastable:\n x = np.ones([10, 1, 5, 6, 3], np.float32)\n y = np.ones([10, 1, 7, 8, 3], np.float32)\n\n batch_kernel = tfp.positive_semidefinite_kernels.SomeKernel(\n params=[1., .5])\n batch_kernel.batch_shape\n # ==> [2]\n batch_kernel.tensor(x, y, x1_example_ndims=2, x2_example_ndims=2).shape\n # ==> [10, 2, 5, 6, 7, 8]\n ```\n\n \"\"\"\n with self._name_scope(self._name, values=[x1, x2]):\n x1 = tf.convert_to_tensor(x1, name='x1')\n x2 = tf.convert_to_tensor(x2, name='x2')\n\n x1 = util.pad_shape_with_ones(\n x1,\n ndims=x2_example_ndims,\n start=-(self.feature_ndims + 1))\n\n x2 = util.pad_shape_with_ones(\n x2,\n ndims=x1_example_ndims,\n start=-(self.feature_ndims + 1 + x2_example_ndims))\n\n return self.apply(\n x1, x2, example_ndims=(x1_example_ndims + x2_example_ndims))\n\n def _batch_shape(self):\n raise NotImplementedError('Subclasses must provide batch_shape property.')\n\n def _batch_shape_tensor(self):\n raise NotImplementedError(\n 'Subclasses must provide batch_shape_tensor implementation')\n\n def __add__(self, k):\n if not isinstance(k, PositiveSemidefiniteKernel):\n raise ValueError(\n \"Can't add non-kernel (of type '%s') to kernel\" % type(k))\n return _SumKernel([self, k])\n\n def __iadd__(self, k):\n return self.__add__(k)\n\n def __mul__(self, k):\n if not isinstance(k, PositiveSemidefiniteKernel):\n raise ValueError(\n \"Can't multiply by non-kernel (of type '%s') to kernel\" % type(k))\n return _ProductKernel([self, k])\n\n def __imul__(self, k):\n return self.__mul__(k)\n\n def __str__(self):\n return ('tfp.positive_semidefinite_kernels.{type_name}('\n '\"{self_name}\"'\n '{maybe_batch_shape}'\n ', feature_ndims={feature_ndims}'\n ', dtype={dtype})'.format(\n type_name=type(self).__name__,\n self_name=self.name,\n maybe_batch_shape=(', batch_shape={}'.format(self.batch_shape)\n if self.batch_shape.ndims is not None\n else ''),\n feature_ndims=self.feature_ndims,\n dtype=None if self.dtype is None else self.dtype.name))\n\n def __repr__(self):\n return ('<tfp.positive_semidefinite_kernels.{type_name} '\n '\\'{self_name}\\''\n ' batch_shape={batch_shape}'\n ' feature_ndims={feature_ndims}'\n ' dtype={dtype}>'.format(\n type_name=type(self).__name__,\n self_name=self.name,\n batch_shape=self.batch_shape,\n feature_ndims=self.feature_ndims,\n dtype=None if self.dtype is None else self.dtype.name))\n\n\ndef _flatten_summand_list(kernels):\n \"\"\"Flatten a list of kernels which may contain _SumKernel instances.\n\n Args:\n kernels: Python list of `PositiveSemidefiniteKernel` instances\n\n Returns:\n Python list containing the elements of kernels, with any _SumKernel\n instances replaced by their `kernels` property contents.\n \"\"\"\n flattened = []\n for k in kernels:\n if isinstance(k, _SumKernel):\n flattened += k.kernels\n else:\n flattened.append(k)\n return flattened\n\n\ndef _flatten_multiplicand_list(kernels):\n \"\"\"Flatten a list of kernels which may contain _ProductKernel instances.\n\n Args:\n kernels: Python list of `PositiveSemidefiniteKernel` 
instances\n\n Returns:\n Python list containing the elements of kernels, with any _ProductKernel\n instances replaced by their `kernels` property contents.\n \"\"\"\n flattened = []\n for k in kernels:\n if isinstance(k, _ProductKernel):\n flattened += k.kernels\n else:\n flattened.append(k)\n return flattened\n\n\nclass _SumKernel(PositiveSemidefiniteKernel):\n \"\"\"Kernel class representing summation over a list of kernels.\n\n Mathematically this class represents the pointwise sum of several kernels.\n Given two kernels, `k1` and `k2`, and `kp = _SumKernel([k1, k2])`, we have\n\n ```none\n kp.apply(x, y) = k1(x, y) + k2(x, y)\n ```\n\n for any `x`, `y` in the feature space (this presumes that the constituent\n kernels all act on the same feature space).\n\n That the sum is positive semi-definite follows simply from the definition of\n positive semi-definiteness of functions. If we have\n\n ```none\n sum_i sum_j (c[i]*) c[j] k1(x[i], x[j]) >= 0\n ```\n and\n\n ```none\n sum_i sum_j (c[i]*) c[j] k2(x[i], x[j]) >= 0\n ```\n\n for any finite collections `{x[1], ..., x[N]}` in S and `{c[1], ..., c[N]}` in\n the reals (or the complex plane), then we clearly also have the same for the\n sum of `k1` and `k2`.\n \"\"\"\n\n def __init__(self, kernels, name=None):\n \"\"\"Create a kernel which is the sum of `kernels`.\n\n The input list is 'flattened' in the sense that any entries which are also\n of type `_SumKernel` will have their list of kernels appended to this\n instance's list of kernels. This will reduce the stack depth when actually\n evaluating the sum over kernel applications.\n\n Args:\n kernels: Python `list` of `PositiveSemidefiniteKernel` instances.\n name: Python `str` name prefixed to Ops created by this class.\n Raises:\n ValueError: `kernels` is an empty list, or `kernels` don't all have the\n same `feature_ndims`.\n \"\"\"\n if not kernels:\n raise ValueError(\"Can't create _SumKernel over empty list.\")\n if len(set([k.feature_ndims for k in kernels])) > 1:\n raise ValueError(\n \"Can't sum kernels with different feature_ndims. 
Got:\\n%s\" %\n str([k.feature_ndims for k in kernels]))\n self._kernels = _flatten_summand_list(kernels)\n if name is None:\n name = 'SumKernel'\n # We have ensured the list is non-empty and all feature_ndims are the same.\n super(_SumKernel, self).__init__(\n feature_ndims=kernels[0].feature_ndims,\n dtype=util.maybe_get_common_dtype(\n [None if k.dtype is None else k for k in kernels]),\n name=name)\n\n @property\n def kernels(self):\n \"\"\"The list of kernels this _SumKernel sums over.\"\"\"\n return self._kernels\n\n def _apply(self, x1, x2, example_ndims=0):\n return sum([k.apply(x1, x2, example_ndims) for k in self.kernels])\n\n def _batch_shape(self):\n return functools.reduce(tf.broadcast_static_shape,\n [k.batch_shape for k in self.kernels])\n\n def _batch_shape_tensor(self):\n return functools.reduce(tf.broadcast_dynamic_shape,\n [k.batch_shape_tensor() for k in self.kernels])\n\n\nclass _ProductKernel(PositiveSemidefiniteKernel):\n \"\"\"Kernel class representing the product over a list of kernels.\n\n Mathematically this class represents the pointwise product of several kernels.\n Given two kernels, `k1` and `k2`, and `kp = _ProductKernel([k1, k2])`, we have\n\n ```none\n kp.apply(x, y) = k1(x, y) * k2(x, y)\n ```\n\n for any x, y in the feature space (this presumes that the constituent kernels\n all act on the same feature space).\n\n The fact that this product is still positive semi-definite can be shown in a\n variety of ways, many deep and all fascinating, but follows readily from the\n [Schur product theorem](https://en.wikipedia.org/wiki/Schur_product_theorem),\n which states that the Hadamard (element-wise) product of two PSD matrices is\n also PSD.\n \"\"\"\n\n def __init__(self, kernels, name=None):\n \"\"\"Create a kernel which is the product of `kernels`.\n\n The input list is 'flattened' in the sense that any entries which are also\n of type `_ProductKernel` will have their list of kernels appended to this\n instance's list of kernels. This will reduce the stack depth when actually\n evaluating the product over kernel applications.\n\n Args:\n kernels: Python `list` of `PositiveSemidefiniteKernel` instances.\n name: Python `str` name prefixed to Ops created by this class.\n Raises:\n ValueError: `kernels` is an empty list, or `kernels` don't all have the\n same `feature_ndims`.\n \"\"\"\n if not kernels:\n raise ValueError(\"Can't create _ProductKernel over empty list.\")\n if len(set([k.feature_ndims for k in kernels])) > 1:\n raise ValueError(\n \"Can't multiply kernels with different feature_ndims. 
Got:\\n%s\" %\n str([k.feature_ndims for k in kernels]))\n self._kernels = _flatten_multiplicand_list(kernels)\n if name is None:\n name = 'ProductKernel'\n # We have ensured the list is non-empty and all feature_ndims are the same.\n super(_ProductKernel, self).__init__(\n feature_ndims=kernels[0].feature_ndims,\n dtype=util.maybe_get_common_dtype(\n [None if k.dtype is None else k for k in kernels]),\n name=name)\n\n @property\n def kernels(self):\n \"\"\"The list of kernels this _ProductKernel multiplies over.\"\"\"\n return self._kernels\n\n def _apply(self, x1, x2, example_ndims=0):\n return functools.reduce(\n operator.mul,\n [k.apply(x1, x2, example_ndims) for k in self.kernels])\n\n def _batch_shape(self):\n return functools.reduce(tf.broadcast_static_shape,\n [k.batch_shape for k in self.kernels])\n\n def _batch_shape_tensor(self):\n return functools.reduce(tf.broadcast_dynamic_shape,\n [k.batch_shape_tensor() for k in self.kernels])\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Numpy implementations of TensorFlow functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal.backend.numpy import numpy_array\n\nfrom tensorflow_probability.python.internal.backend.numpy.internal import utils\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import l2_normalize\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import log_softmax\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import reduce_logsumexp\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import softmax\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import softplus\nfrom tensorflow_probability.python.internal.backend.numpy.numpy_math import top_k\n\n\n__all__ = [\n 'l2_normalize',\n 'log_softmax',\n 'relu',\n 'softmax',\n 'softplus',\n 'sigmoid_cross_entropy_with_logits',\n 'sparse_softmax_cross_entropy_with_logits',\n 'top_k',\n]\n\n\ndef _sigmoid_cross_entropy_with_logits( # pylint: disable=invalid-name,unused-argument\n _sentinel=None,\n labels=None,\n logits=None,\n name=None):\n return (np.maximum(logits, 0)\n - logits * labels + np.log1p(np.exp(-np.abs(logits))))\n\n\ndef _sparse_softmax_cross_entropy_with_logits( # pylint: disable=invalid-name,unused-argument\n _sentinel=None,\n labels=None,\n logits=None,\n name=None):\n \"\"\"Softmax cross entropy with logits.\"\"\"\n labels_shape = labels.shape\n num_classes = logits.shape[-1]\n logits = np.reshape(logits, [-1, num_classes])\n labels = np.reshape(labels, [-1])\n\n labels = numpy_array.one_hot(labels, num_classes)\n\n cost = -np.sum(\n labels * (logits - reduce_logsumexp(logits, axis=-1, keepdims=True)),\n axis=-1)\n cost = 
np.reshape(cost, labels_shape)\n return cost\n\n\n# --- Begin Public Functions --------------------------------------------------\n\nl2_normalize = utils.copy_docstring(\n tf.nn.l2_normalize,\n l2_normalize)\n\n\nrelu = utils.copy_docstring(\n tf.nn.relu,\n lambda features, name=None: np.max(features, 0))\n\n\nsoftplus = utils.copy_docstring(\n tf.nn.softplus,\n lambda features, name=None: np.log(1 + np.exp(features)))\n\n\nsigmoid_cross_entropy_with_logits = utils.copy_docstring(\n tf.nn.sigmoid_cross_entropy_with_logits,\n _sigmoid_cross_entropy_with_logits)\n\n\nsparse_softmax_cross_entropy_with_logits = utils.copy_docstring(\n tf.nn.sparse_softmax_cross_entropy_with_logits,\n _sparse_softmax_cross_entropy_with_logits)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for implementations of batched variables.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport hypothesis as hp\nfrom hypothesis import strategies as hps\nfrom hypothesis.extra import numpy as hpnp\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal.auto_batching import backend_test_lib as backend_test\nfrom tensorflow_probability.python.internal.auto_batching import instructions as inst\nfrom tensorflow_probability.python.internal.auto_batching import numpy_backend\n\nNP_BACKEND = numpy_backend.NumpyBackend()\n\n\ndef var_init(max_stack_depth, initial_value):\n type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:])\n var = NP_BACKEND.create_variable(\n None, inst.VariableAllocation.FULL, type_,\n max_stack_depth, batch_size=initial_value.shape[0])\n return var.update(\n initial_value, NP_BACKEND.full_mask(initial_value.shape[0]))\n\n\n# A TF test case for self.assertAllEqual, but doesn't use TF so doesn't care\n# about Eager vs Graph mode.\nclass NumpyVariableTest(tf.test.TestCase, backend_test.VariableTestCase):\n\n def testNumpySmoke(self):\n \"\"\"Test the property on specific example, without relying on Hypothesis.\"\"\"\n init = (12, np.random.randn(3, 2, 2).astype(np.float32))\n ops = [('pop', [False, False, True]),\n ('push', [True, False, True]),\n ('update', np.ones((3, 2, 2), dtype=np.float32),\n [True, True, False]),\n ('pop', [True, False, True])]\n self.check_same_results(init, ops, var_init)\n\n @hp.given(hps.data())\n @hp.settings(\n deadline=None,\n max_examples=100)\n def testNumpyVariableRandomOps(self, data):\n # Hypothesis strategy:\n # Generate a random max stack depth and value shape\n # Deduce the batch size from the value shape\n # Make a random dtype\n # Generate a random initial value of that dtype and shape\n # Generate ops, some of which write random values of that dtype and shape\n max_stack_depth = data.draw(hps.integers(min_value=1, max_value=1000))\n value_shape = 
data.draw(hpnp.array_shapes(min_dims=1))\n batch_size = value_shape[0]\n dtype = data.draw(hpnp.scalar_dtypes())\n masks = hpnp.arrays(dtype=np.bool, shape=[batch_size])\n values = hpnp.arrays(dtype, value_shape)\n init_val = data.draw(values)\n ops = data.draw(\n hps.lists(\n hps.one_of(\n hps.tuples(hps.just('update'), values, masks),\n hps.tuples(hps.just('push'), masks),\n hps.tuples(hps.just('pop'), masks), # preserve line break\n hps.tuples(hps.just('read')))))\n self.check_same_results((max_stack_depth, init_val), ops, var_init)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Generalized Linear Model Fisher Scoring.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static\n\n\n__all__ = [\n 'fit',\n 'fit_one_step',\n 'convergence_criteria_small_relative_norm_weights_change',\n]\n\n\ndef fit(\n model_matrix,\n response,\n model,\n model_coefficients_start=None,\n predicted_linear_response_start=None,\n l2_regularizer=None,\n dispersion=None,\n offset=None,\n convergence_criteria_fn=None,\n learning_rate=None,\n fast_unsafe_numerics=True,\n maximum_iterations=None,\n name=None):\n \"\"\"Runs multiple Fisher scoring steps.\n\n Args:\n model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row\n represents a sample's features.\n response: (Batch of) vector-shaped `Tensor` where each element represents a\n sample's observed response (to the corresponding row of features). Must\n have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance which implicitly\n characterizes a negative log-likelihood loss by specifying the\n distribuion's `mean`, `gradient_mean`, and `variance`.\n model_coefficients_start: Optional (batch of) vector-shaped `Tensor`\n representing the initial model coefficients, one for each column in\n `model_matrix`. 
Must have same `dtype` as `model_matrix`.\n Default value: Zeros.\n predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`\n matching `response`; represents `offset` shifted initial linear\n predictions based on `model_coefficients_start`.\n Default value: `offset` if `model_coefficients is None`, and\n `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`\n otherwise.\n l2_regularizer: Optional scalar `Tensor` representing L2 regularization\n penalty, i.e.,\n `loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.\n Default value: `None` (i.e., no L2 regularization).\n dispersion: Optional (batch of) `Tensor` representing `response` dispersion,\n i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.\n Must broadcast with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n offset: Optional `Tensor` representing constant shift applied to\n `predicted_linear_response`. Must broadcast to `response`.\n Default value: `None` (i.e., `tf.zeros_like(response)`).\n convergence_criteria_fn: Python `callable` taking:\n `is_converged_previous`, `iter_`, `model_coefficients_previous`,\n `predicted_linear_response_previous`, `model_coefficients_next`,\n `predicted_linear_response_next`, `response`, `model`, `dispersion` and\n returning a `bool` `Tensor` indicating that Fisher scoring has converged.\n See `convergence_criteria_small_relative_norm_weights_change` as an\n example function.\n Default value: `None` (i.e.,\n `convergence_criteria_small_relative_norm_weights_change`).\n learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative\n progress. Typically only needed if optimization diverges, should be no\n larger than `1` and typically very close to `1`.\n Default value: `None` (i.e., `1`).\n fast_unsafe_numerics: Optional Python `bool` indicating if faster, less\n numerically accurate methods can be employed for computing the weighted\n least-squares solution.\n Default value: `True` (i.e., \"fast but possibly diminished accuracy\").\n maximum_iterations: Optional maximum number of iterations of Fisher scoring\n to run; \"and-ed\" with result of `convergence_criteria_fn`.\n Default value: `None` (i.e., `infinity`).\n name: Python `str` used as name prefix to ops created by this function.\n Default value: `\"fit\"`.\n\n Returns:\n model_coefficients: (Batch of) vector-shaped `Tensor`; represents the\n fitted model coefficients, one for each column in `model_matrix`.\n predicted_linear_response: `response`-shaped `Tensor` representing linear\n predictions based on new `model_coefficients`, i.e.,\n `tf.linalg.matvec(model_matrix, model_coefficients) + offset`.\n is_converged: `bool` `Tensor` indicating that the returned\n `model_coefficients` met the `convergence_criteria_fn` criteria within the\n `maximum_iterations` limit.\n iter_: `int32` `Tensor` indicating the number of iterations taken.\n\n #### Example\n\n ```python\n from __future__ import print_function\n import numpy as np\n import tensorflow as tf\n import tensorflow_probability as tfp\n tfd = tfp.distributions\n\n def make_dataset(n, d, link, scale=1., dtype=np.float32):\n model_coefficients = tfd.Uniform(\n low=np.array(-1, dtype),\n high=np.array(1, dtype)).sample(d, seed=42)\n radius = np.sqrt(2.)\n model_coefficients *= radius / tf.linalg.norm(model_coefficients)\n model_matrix = tfd.Normal(\n loc=np.array(0, dtype),\n scale=np.array(1, dtype)).sample([n, d], seed=43)\n scale = tf.convert_to_tensor(scale, dtype)\n linear_response 
= tf.tensordot(\n model_matrix, model_coefficients, axes=[[1], [0]])\n if link == 'linear':\n response = tfd.Normal(loc=linear_response, scale=scale).sample(seed=44)\n elif link == 'probit':\n response = tf.cast(\n tfd.Normal(loc=linear_response, scale=scale).sample(seed=44) > 0,\n dtype)\n elif link == 'logit':\n response = tfd.Bernoulli(logits=linear_response).sample(seed=44)\n else:\n raise ValueError('unrecognized true link: {}'.format(link))\n return model_matrix, response, model_coefficients\n\n X, Y, w_true = make_dataset(n=int(1e6), d=100, link='probit')\n\n w, linear_response, is_converged, num_iter = tfp.glm.fit(\n model_matrix=X,\n response=Y,\n model=tfp.glm.BernoulliNormalCDF())\n log_likelihood = tfp.glm.BernoulliNormalCDF().log_prob(Y, linear_response)\n\n with tf.Session() as sess:\n [w_, linear_response_, is_converged_, num_iter_, Y_, w_true_,\n log_likelihood_] = sess.run([\n w, linear_response, is_converged, num_iter, Y, w_true,\n log_likelihood])\n\n print('is_converged: ', is_converged_)\n print(' num_iter: ', num_iter_)\n print(' accuracy: ', np.mean((linear_response_ > 0.) == Y_))\n print(' deviance: ', 2. * np.mean(log_likelihood_))\n print('||w0-w1||_2 / (1+||w0||_2): ', (np.linalg.norm(w_true_ - w_, ord=2) /\n (1. + np.linalg.norm(w_true_, ord=2))))\n\n # ==>\n # is_converged: True\n # num_iter: 6\n # accuracy: 0.804382\n # deviance: -0.820746600628\n # ||w0-w1||_2 / (1+||w0||_2): 0.00619245105309\n ```\n\n \"\"\"\n graph_deps = [model_matrix, response, model_coefficients_start,\n predicted_linear_response_start, dispersion, offset,\n learning_rate, maximum_iterations]\n with tf.compat.v1.name_scope(name, 'fit', graph_deps):\n [\n model_matrix,\n response,\n model_coefficients_start,\n predicted_linear_response_start,\n offset,\n ] = prepare_args(\n model_matrix,\n response,\n model_coefficients_start,\n predicted_linear_response_start,\n offset)\n if convergence_criteria_fn is None:\n convergence_criteria_fn = (\n convergence_criteria_small_relative_norm_weights_change())\n\n def _body(\n is_converged_previous,\n iter_,\n model_coefficients_previous,\n predicted_linear_response_previous):\n \"\"\"`tf.while_loop` body.\"\"\"\n model_coefficients_next, predicted_linear_response_next = fit_one_step(\n model_matrix,\n response,\n model,\n model_coefficients_previous,\n predicted_linear_response_previous,\n l2_regularizer,\n dispersion,\n offset,\n learning_rate,\n fast_unsafe_numerics)\n is_converged_next = convergence_criteria_fn(\n is_converged_previous=is_converged_previous,\n iter_=iter_,\n model_coefficients_previous=model_coefficients_previous,\n predicted_linear_response_previous=predicted_linear_response_previous,\n model_coefficients_next=model_coefficients_next,\n predicted_linear_response_next=predicted_linear_response_next,\n response=response,\n model=model,\n dispersion=dispersion)\n return [\n is_converged_next,\n iter_ + 1,\n model_coefficients_next,\n predicted_linear_response_next,\n ]\n\n # while not converged:\n # fit_one_step\n [\n is_converged,\n iter_,\n model_coefficients,\n predicted_linear_response,\n ] = tf.while_loop(\n cond=lambda is_converged, *args: tf.logical_not(is_converged),\n body=_body,\n loop_vars=[\n tf.zeros([], np.bool), # is_converged\n tf.zeros([], np.int32), # iter_\n model_coefficients_start,\n predicted_linear_response_start,\n ],\n maximum_iterations=maximum_iterations)\n\n return [\n model_coefficients,\n predicted_linear_response,\n is_converged,\n iter_\n ]\n\n\ndef fit_one_step(\n model_matrix,\n response,\n model,\n 
model_coefficients_start=None,\n predicted_linear_response_start=None,\n l2_regularizer=None,\n dispersion=None,\n offset=None,\n learning_rate=None,\n fast_unsafe_numerics=True,\n name=None):\n \"\"\"Runs one step of Fisher scoring.\n\n Args:\n model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row\n represents a sample's features.\n response: (Batch of) vector-shaped `Tensor` where each element represents a\n sample's observed response (to the corresponding row of features). Must\n have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance used to construct the\n negative log-likelihood loss, gradient, and expected Hessian (i.e., the\n Fisher information matrix).\n model_coefficients_start: Optional (batch of) vector-shaped `Tensor`\n representing the initial model coefficients, one for each column in\n `model_matrix`. Must have same `dtype` as `model_matrix`.\n Default value: Zeros.\n predicted_linear_response_start: Optional `Tensor` with `shape`, `dtype`\n matching `response`; represents `offset` shifted initial linear\n predictions based on `model_coefficients_start`.\n Default value: `offset` if `model_coefficients is None`, and\n `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`\n otherwise.\n l2_regularizer: Optional scalar `Tensor` representing L2 regularization\n penalty, i.e.,\n `loss(w) = sum{-log p(y[i]|x[i],w) : i=1..n} + l2_regularizer ||w||_2^2`.\n Default value: `None` (i.e., no L2 regularization).\n dispersion: Optional (batch of) `Tensor` representing `response` dispersion,\n i.e., as in, `p(y|theta) := exp((y theta - A(theta)) / dispersion)`.\n Must broadcast with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n offset: Optional `Tensor` representing constant shift applied to\n `predicted_linear_response`. Must broadcast to `response`.\n Default value: `None` (i.e., `tf.zeros_like(response)`).\n learning_rate: Optional (batch of) scalar `Tensor` used to dampen iterative\n progress. 
Typically only needed if optimization diverges, should be no\n larger than `1` and typically very close to `1`.\n Default value: `None` (i.e., `1`).\n fast_unsafe_numerics: Optional Python `bool` indicating if solve should be\n based on Cholesky or QR decomposition.\n Default value: `True` (i.e., \"prefer speed via Cholesky decomposition\").\n name: Python `str` used as name prefix to ops created by this function.\n Default value: `\"fit_one_step\"`.\n\n Returns:\n model_coefficients: (Batch of) vector-shaped `Tensor`; represents the\n next estimate of the model coefficients, one for each column in\n `model_matrix`.\n predicted_linear_response: `response`-shaped `Tensor` representing linear\n predictions based on new `model_coefficients`, i.e.,\n `tf.linalg.matvec(model_matrix, model_coefficients_next) + offset`.\n \"\"\"\n graph_deps = [model_matrix, response, model_coefficients_start,\n predicted_linear_response_start, dispersion, learning_rate]\n with tf.compat.v1.name_scope(name, 'fit_one_step', graph_deps):\n\n [\n model_matrix,\n response,\n model_coefficients_start,\n predicted_linear_response_start,\n offset,\n ] = prepare_args(\n model_matrix,\n response,\n model_coefficients_start,\n predicted_linear_response_start,\n offset)\n\n # Compute: mean, grad(mean, predicted_linear_response_start), and variance.\n mean, variance, grad_mean = model(predicted_linear_response_start)\n\n # If either `grad_mean` or `variance is non-finite or zero, then we'll\n # replace it with a value such that the row is zeroed out. Although this\n # procedure may seem circuitous, it is necessary to ensure this algorithm is\n # itself differentiable.\n is_valid = (\n tf.math.is_finite(grad_mean) & tf.not_equal(grad_mean, 0.)\n & tf.math.is_finite(variance) & (variance > 0.))\n\n def mask_if_invalid(x, mask):\n mask = tf.fill(\n tf.shape(input=x), value=np.array(mask, x.dtype.as_numpy_dtype))\n return tf.compat.v1.where(is_valid, x, mask)\n\n # Run one step of iteratively reweighted least-squares.\n # Compute \"`z`\", the adjusted predicted linear response.\n # z = predicted_linear_response_start\n # + learning_rate * (response - mean) / grad_mean\n z = (response - mean) / mask_if_invalid(grad_mean, 1.)\n # TODO(jvdillon): Rather than use learning rate, we should consider using\n # backtracking line search.\n if learning_rate is not None:\n z *= learning_rate[..., tf.newaxis]\n z += predicted_linear_response_start\n if offset is not None:\n z -= offset\n\n # Compute \"`w`\", the per-sample weight.\n if dispersion is not None:\n # For convenience, we'll now scale the variance by the dispersion factor.\n variance *= dispersion\n w = (\n mask_if_invalid(grad_mean, 0.) *\n tf.math.rsqrt(mask_if_invalid(variance, np.inf)))\n\n a = model_matrix * w[..., tf.newaxis]\n b = z * w\n # Solve `min{ || A @ model_coefficients - b ||_2**2 : model_coefficients }`\n # where `@` denotes `matmul`.\n\n if l2_regularizer is None:\n l2_regularizer = np.array(0, a.dtype.as_numpy_dtype)\n else:\n l2_regularizer_ = distribution_util.maybe_get_static_value(\n l2_regularizer, a.dtype.as_numpy_dtype)\n if l2_regularizer_ is not None:\n l2_regularizer = l2_regularizer_\n\n def _embed_l2_regularization():\n \"\"\"Adds synthetic observations to implement L2 regularization.\"\"\"\n # `tf.matrix_solve_ls` does not respect the `l2_regularization` argument\n # when `fast_unsafe_numerics` is `False`. 
This function adds synthetic\n # observations to the data to implement the regularization instead.\n # Adding observations `sqrt(l2_regularizer) * I` is mathematically\n # equivalent to adding the term\n # `-l2_regularizer ||coefficients||_2**2` to the log-likelihood.\n num_model_coefficients = num_cols(model_matrix)\n batch_shape = tf.shape(input=model_matrix)[:-2]\n eye = tf.eye(\n num_model_coefficients, batch_shape=batch_shape, dtype=a.dtype)\n a_ = tf.concat([a, tf.sqrt(l2_regularizer) * eye], axis=-2)\n b_ = distribution_util.pad(\n b, count=num_model_coefficients, axis=-1, back=True)\n # Return l2_regularizer=0 since its now embedded.\n l2_regularizer_ = np.array(0, a.dtype.as_numpy_dtype)\n return a_, b_, l2_regularizer_\n\n a, b, l2_regularizer = prefer_static.cond(\n prefer_static.reduce_all([not(fast_unsafe_numerics),\n l2_regularizer > 0.]),\n _embed_l2_regularization,\n lambda: (a, b, l2_regularizer))\n\n model_coefficients_next = tf.linalg.lstsq(\n a,\n b[..., tf.newaxis],\n fast=fast_unsafe_numerics,\n l2_regularizer=l2_regularizer,\n name='model_coefficients_next')\n model_coefficients_next = model_coefficients_next[..., 0]\n\n # TODO(b/79122261): The approach used in `matrix_solve_ls` could be made\n # faster by avoiding explicitly forming Q and instead keeping the\n # factorization in 'implicit' form with stacked (rescaled) Householder\n # vectors underneath the 'R' and then applying the (accumulated)\n # reflectors in the appropriate order to apply Q'. However, we don't\n # presently do this because we lack core TF functionality. For reference,\n # the vanilla QR approach is:\n # q, r = tf.linalg.qr(a)\n # c = tf.matmul(q, b, adjoint_a=True)\n # model_coefficients_next = tf.matrix_triangular_solve(\n # r, c, lower=False, name='model_coefficients_next')\n\n predicted_linear_response_next = calculate_linear_predictor(\n model_matrix,\n model_coefficients_next,\n offset,\n name='predicted_linear_response_next')\n\n return model_coefficients_next, predicted_linear_response_next\n\n\ndef convergence_criteria_small_relative_norm_weights_change(\n tolerance=1e-5,\n norm_order=2):\n \"\"\"Returns Python `callable` which indicates fitting procedure has converged.\n\n Writing old, new `model_coefficients` as `w0`, `w1`, this function\n defines convergence as,\n\n ```python\n relative_euclidean_norm = (tf.norm(w0 - w1, ord=2, axis=-1) /\n (1. + tf.norm(w0, ord=2, axis=-1)))\n reduce_all(relative_euclidean_norm < tolerance)\n ```\n\n where `tf.norm(x, ord=2)` denotes the [Euclidean norm](\n https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `x`.\n\n Args:\n tolerance: `float`-like `Tensor` indicating convergence, i.e., when\n max relative Euclidean norm weights difference < tolerance`.\n Default value: `1e-5`.\n norm_order: Order of the norm. Default value: `2` (i.e., \"Euclidean norm\".)\n\n Returns:\n convergence_criteria_fn: Python `callable` which returns `bool` `Tensor`\n indicated fitting procedure has converged. 
(See inner function\n specification for argument signature.)\n Default value: `1e-5`.\n \"\"\"\n def convergence_criteria_fn(\n is_converged_previous, # pylint: disable=unused-argument\n iter_,\n model_coefficients_previous,\n predicted_linear_response_previous, # pylint: disable=unused-argument\n model_coefficients_next,\n predicted_linear_response_next, # pylint: disable=unused-argument\n response, # pylint: disable=unused-argument\n model, # pylint: disable=unused-argument\n dispersion): # pylint: disable=unused-argument\n \"\"\"Returns `bool` `Tensor` indicating if fitting procedure has converged.\n\n Args:\n is_converged_previous: \"old\" convergence results.\n iter_: Iteration number.\n model_coefficients_previous: \"old\" `model_coefficients`.\n predicted_linear_response_previous: \"old\" `predicted_linear_response`.\n model_coefficients_next: \"new\" `model_coefficients`.\n predicted_linear_response_next: \"new: `predicted_linear_response`.\n response: (Batch of) vector-shaped `Tensor` where each element represents\n a sample's observed response (to the corresponding row of features).\n Must have same `dtype` as `model_matrix`.\n model: `tfp.glm.ExponentialFamily`-like instance used to construct the\n negative log-likelihood loss, gradient, and expected Hessian (i.e., the\n Fisher information matrix).\n dispersion: `Tensor` representing `response` dispersion, i.e., as in:\n `p(y|theta) := exp((y theta - A(theta)) / dispersion)`. Must broadcast\n with rows of `model_matrix`.\n Default value: `None` (i.e., \"no dispersion\").\n\n Returns:\n is_converged: `bool` `Tensor`.\n \"\"\"\n relative_euclidean_norm = (\n tf.norm(\n tensor=model_coefficients_previous - model_coefficients_next,\n ord=norm_order,\n axis=-1) /\n (1. +\n tf.norm(tensor=model_coefficients_previous, ord=norm_order, axis=-1)))\n return (iter_ > 0) & tf.reduce_all(\n input_tensor=relative_euclidean_norm < tolerance)\n\n return convergence_criteria_fn\n\n\ndef prepare_args(model_matrix,\n response,\n model_coefficients,\n predicted_linear_response,\n offset,\n name=None):\n \"\"\"Helper to `fit` which sanitizes input args.\n\n Args:\n model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row\n represents a sample's features.\n response: (Batch of) vector-shaped `Tensor` where each element represents a\n sample's observed response (to the corresponding row of features). Must\n have same `dtype` as `model_matrix`.\n model_coefficients: Optional (batch of) vector-shaped `Tensor` representing\n the model coefficients, one for each column in `model_matrix`. 
Must have\n same `dtype` as `model_matrix`.\n Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`.\n predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching\n `response`; represents `offset` shifted initial linear predictions based\n on current `model_coefficients`.\n Default value: `offset` if `model_coefficients is None`, and\n `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset`\n otherwise.\n offset: Optional `Tensor` with `shape`, `dtype` matching `response`;\n represents constant shift applied to `predicted_linear_response`.\n Default value: `None` (i.e., `tf.zeros_like(response)`).\n name: Python `str` used as name prefix to ops created by this function.\n Default value: `\"prepare_args\"`.\n\n Returns:\n model_matrix: A `Tensor` with `shape`, `dtype` and values of the\n `model_matrix` argument.\n response: A `Tensor` with `shape`, `dtype` and values of the\n `response` argument.\n model_coefficients_start: A `Tensor` with `shape`, `dtype` and\n values of the `model_coefficients_start` argument if specified.\n A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix`\n containing the default starting point otherwise.\n predicted_linear_response: A `Tensor` with `shape`, `dtype` and\n values of the `predicted_linear_response` argument if specified.\n A `Tensor` with `shape`, `dtype` matching `response` containing the\n default value otherwise.\n offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument\n if specified or `None` otherwise.\n \"\"\"\n graph_deps = [model_matrix, response, model_coefficients,\n predicted_linear_response, offset]\n with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps):\n dtype = dtype_util.common_dtype(graph_deps, np.float32)\n\n model_matrix = tf.convert_to_tensor(\n value=model_matrix, dtype=dtype, name='model_matrix')\n\n if offset is not None:\n offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset')\n\n response = tf.convert_to_tensor(\n value=response, dtype=dtype, name='response')\n\n use_default_model_coefficients = model_coefficients is None\n if use_default_model_coefficients:\n # User did not supply model coefficients; assume they're all zero.\n batch_shape = tf.shape(input=model_matrix)[:-2]\n num_columns = tf.shape(input=model_matrix)[-1]\n model_coefficients = tf.zeros(\n shape=tf.concat([batch_shape, [num_columns]], axis=0),\n dtype=dtype, name='model_coefficients')\n else:\n # User did supply model coefficients; convert to Tensor in case it's\n # numpy or literal.\n model_coefficients = tf.convert_to_tensor(\n value=model_coefficients, dtype=dtype, name='model_coefficients')\n\n if predicted_linear_response is None:\n if use_default_model_coefficients:\n # Since we're using zeros for model_coefficients, we know the predicted\n # linear response will also be all zeros.\n if offset is None:\n predicted_linear_response = tf.zeros_like(\n response, dtype, name='predicted_linear_response')\n else:\n predicted_linear_response = tf.broadcast_to(\n offset,\n tf.shape(input=response),\n name='predicted_linear_response')\n else:\n # We were given model_coefficients but not the predicted linear\n # response.\n predicted_linear_response = calculate_linear_predictor(\n model_matrix, model_coefficients, offset)\n else:\n predicted_linear_response = tf.convert_to_tensor(\n value=predicted_linear_response,\n dtype=dtype,\n name='predicted_linear_response')\n\n return [\n model_matrix,\n response,\n model_coefficients,\n 
predicted_linear_response,\n offset,\n ]\n\n\ndef calculate_linear_predictor(model_matrix, model_coefficients, offset=None,\n name=None):\n \"\"\"Computes `model_matrix @ model_coefficients + offset`.\"\"\"\n with tf.compat.v1.name_scope(name, 'calculate_linear_predictor',\n [model_matrix, model_coefficients, offset]):\n predicted_linear_response = tf.linalg.matvec(model_matrix,\n model_coefficients)\n if offset is not None:\n predicted_linear_response += offset\n return predicted_linear_response\n\n\ndef num_cols(x):\n \"\"\"Returns number of cols in a given `Tensor`.\"\"\"\n if tf.compat.dimension_value(x.shape[-1]) is not None:\n return tf.compat.dimension_value(x.shape[-1])\n return tf.shape(input=x)[-1]\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python import distributions as tfd\nfrom tensorflow_probability.python import positive_semidefinite_kernels as psd_kernels\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top\n\n\nclass _GaussianProcessTest(object):\n\n def testShapes(self):\n # 5x5 grid of index points in R^2 and flatten to 25x2\n index_points = np.linspace(-4., 4., 5, dtype=np.float32)\n index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)\n index_points = np.reshape(index_points, [-1, 2])\n # ==> shape = [25, 2]\n\n # Kernel with batch_shape [2, 4, 3, 1]\n amplitude = np.array([1., 2.], np.float32).reshape([2, 1, 1, 1])\n length_scale = np.array([1., 2., 3., 4.], np.float32).reshape([1, 4, 1, 1])\n observation_noise_variance = np.array(\n [1e-5, 1e-6, 1e-5], np.float32).reshape([1, 1, 3, 1])\n batched_index_points = np.stack([index_points]*6)\n # ==> shape = [6, 25, 2]\n if not self.is_static:\n amplitude = tf1.placeholder_with_default(amplitude, shape=None)\n length_scale = tf1.placeholder_with_default(length_scale, shape=None)\n batched_index_points = tf1.placeholder_with_default(\n batched_index_points, shape=None)\n kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)\n gp = tfd.GaussianProcess(\n kernel,\n batched_index_points,\n observation_noise_variance=observation_noise_variance,\n jitter=1e-5)\n\n batch_shape = [2, 4, 3, 6]\n event_shape = [25]\n sample_shape = [5, 3]\n\n samples = gp.sample(sample_shape)\n\n if self.is_static or tf.executing_eagerly():\n self.assertAllEqual(gp.batch_shape_tensor(), batch_shape)\n self.assertAllEqual(gp.event_shape_tensor(), event_shape)\n self.assertAllEqual(samples.shape,\n sample_shape + batch_shape + event_shape)\n self.assertAllEqual(gp.batch_shape, batch_shape)\n 
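      # The expected batch shape [2, 4, 3, 6] is the broadcast of the kernel
      # parameter batch shapes ([2, 1, 1, 1] and [1, 4, 1, 1]), the
      # observation noise batch shape [1, 1, 3, 1], and the index-points
      # batch shape [6].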
self.assertAllEqual(gp.event_shape, event_shape)\n self.assertAllEqual(samples.shape,\n sample_shape + batch_shape + event_shape)\n self.assertAllEqual(gp.mean().shape, batch_shape + event_shape)\n self.assertAllEqual(gp.variance().shape, batch_shape + event_shape)\n else:\n self.assertAllEqual(self.evaluate(gp.batch_shape_tensor()), batch_shape)\n self.assertAllEqual(self.evaluate(gp.event_shape_tensor()), event_shape)\n self.assertAllEqual(\n self.evaluate(samples).shape,\n sample_shape + batch_shape + event_shape)\n self.assertIsNone(tensorshape_util.rank(samples.shape))\n self.assertIsNone(tensorshape_util.rank(gp.batch_shape))\n self.assertEqual(tensorshape_util.rank(gp.event_shape), 1)\n self.assertIsNone(\n tf.compat.dimension_value(tensorshape_util.dims(gp.event_shape)[0]))\n self.assertAllEqual(\n self.evaluate(tf.shape(input=gp.mean())), batch_shape + event_shape)\n self.assertAllEqual(self.evaluate(\n tf.shape(input=gp.variance())), batch_shape + event_shape)\n\n def testVarianceAndCovarianceMatrix(self):\n amp = np.float64(.5)\n len_scale = np.float64(.2)\n jitter = np.float64(1e-4)\n observation_noise_variance = np.float64(3e-3)\n\n kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)\n\n index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)\n\n gp = tfd.GaussianProcess(\n kernel,\n index_points,\n observation_noise_variance=observation_noise_variance,\n jitter=jitter)\n\n def _kernel_fn(x, y):\n return amp ** 2 * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))\n\n expected_covariance = (\n _kernel_fn(np.expand_dims(index_points, 0),\n np.expand_dims(index_points, 1)) +\n observation_noise_variance * np.eye(10))\n\n self.assertAllClose(expected_covariance,\n self.evaluate(gp.covariance()))\n self.assertAllClose(np.diag(expected_covariance),\n self.evaluate(gp.variance()))\n\n def testMean(self):\n mean_fn = lambda x: x[:, 0]**2\n kernel = psd_kernels.ExponentiatedQuadratic()\n index_points = np.expand_dims(np.random.uniform(-1., 1., 10), -1)\n gp = tfd.GaussianProcess(kernel, index_points, mean_fn=mean_fn)\n expected_mean = mean_fn(index_points)\n self.assertAllClose(expected_mean,\n self.evaluate(gp.mean()))\n\n def testCopy(self):\n # 5 random index points in R^2\n index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)\n # 10 random index points in R^2\n index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)\n\n # ==> shape = [6, 25, 2]\n if not self.is_static:\n index_points_1 = tf1.placeholder_with_default(index_points_1, shape=None)\n index_points_2 = tf1.placeholder_with_default(index_points_2, shape=None)\n\n mean_fn = lambda x: np.array([0.], np.float32)\n kernel_1 = psd_kernels.ExponentiatedQuadratic()\n kernel_2 = psd_kernels.ExpSinSquared()\n\n gp1 = tfd.GaussianProcess(kernel_1, index_points_1, mean_fn, jitter=1e-5)\n gp2 = gp1.copy(index_points=index_points_2,\n kernel=kernel_2)\n\n event_shape_1 = [5]\n event_shape_2 = [10]\n\n self.assertEqual(gp1.mean_fn, gp2.mean_fn)\n self.assertIsInstance(gp1.kernel, psd_kernels.ExponentiatedQuadratic)\n self.assertIsInstance(gp2.kernel, psd_kernels.ExpSinSquared)\n\n if self.is_static or tf.executing_eagerly():\n self.assertAllEqual(gp1.batch_shape, gp2.batch_shape)\n self.assertAllEqual(gp1.event_shape, event_shape_1)\n self.assertAllEqual(gp2.event_shape, event_shape_2)\n self.assertAllEqual(gp1.index_points, index_points_1)\n self.assertAllEqual(gp2.index_points, index_points_2)\n self.assertAllEqual(\n tf.get_static_value(gp1.jitter), 
tf.get_static_value(gp2.jitter))\n else:\n self.assertAllEqual(\n self.evaluate(gp1.batch_shape_tensor()),\n self.evaluate(gp2.batch_shape_tensor()))\n self.assertAllEqual(\n self.evaluate(gp1.event_shape_tensor()), event_shape_1)\n self.assertAllEqual(\n self.evaluate(gp2.event_shape_tensor()), event_shape_2)\n self.assertEqual(self.evaluate(gp1.jitter), self.evaluate(gp2.jitter))\n self.assertAllEqual(self.evaluate(gp1.index_points), index_points_1)\n self.assertAllEqual(self.evaluate(gp2.index_points), index_points_2)\n\n def testLateBindingIndexPoints(self):\n amp = np.float64(.5)\n len_scale = np.float64(.2)\n kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)\n mean_fn = lambda x: x[:, 0]**2\n jitter = np.float64(1e-4)\n observation_noise_variance = np.float64(3e-3)\n\n gp = tfd.GaussianProcess(\n kernel=kernel,\n mean_fn=mean_fn,\n observation_noise_variance=observation_noise_variance,\n jitter=jitter)\n\n index_points = np.random.uniform(-1., 1., [10, 1])\n\n expected_mean = mean_fn(index_points)\n self.assertAllClose(expected_mean,\n self.evaluate(gp.mean(index_points=index_points)))\n\n def _kernel_fn(x, y):\n return amp ** 2 * np.exp(-.5 * (np.squeeze((x - y)**2)) / (len_scale**2))\n\n expected_covariance = (\n _kernel_fn(np.expand_dims(index_points, -3),\n np.expand_dims(index_points, -2)) +\n observation_noise_variance * np.eye(10))\n\n self.assertAllClose(expected_covariance,\n self.evaluate(gp.covariance(index_points=index_points)))\n self.assertAllClose(np.diag(expected_covariance),\n self.evaluate(gp.variance(index_points=index_points)))\n self.assertAllClose(np.sqrt(np.diag(expected_covariance)),\n self.evaluate(gp.stddev(index_points=index_points)))\n\n # Calling mean with no index_points should raise an Error\n with self.assertRaises(ValueError):\n gp.mean()\n\n def testMarginalHasCorrectTypes(self):\n gp = tfd.GaussianProcess(kernel=psd_kernels.ExponentiatedQuadratic())\n\n self.assertIsInstance(\n gp.get_marginal_distribution(\n index_points=np.ones([1, 1], dtype=np.float32)),\n tfd.Normal)\n\n self.assertIsInstance(\n gp.get_marginal_distribution(\n index_points=np.ones([10, 1], dtype=np.float32)),\n tfd.MultivariateNormalLinearOperator)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass GaussianProcessStaticTest(_GaussianProcessTest, tf.test.TestCase):\n is_static = True\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass GaussianProcessDynamicTest(_GaussianProcessTest, tf.test.TestCase):\n is_static = False\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for IteratedSigmoidCenteredBijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nfrom 
tensorflow_probability.python import bijectors as tfb\n\nfrom tensorflow_probability.python.bijectors import bijector_test_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass _IteratedSigmoidCenteredBijectorTest(object):\n \"\"\"Tests correctness of Stick breaking transformation.\"\"\"\n\n def testBijectorVector(self):\n iterated_sigmoid = tfb.IteratedSigmoidCentered()\n self.assertStartsWith(iterated_sigmoid.name, \"iterated_sigmoid\")\n x = self.dtype([[0., 0., 0.], -np.log([1 / 3., 1 / 2., 1.])])\n y = self.dtype([[0.25, 0.25, 0.25, 0.25], [0.5, 0.25, 0.125, 0.125]])\n self.assertAllClose(y, self.evaluate(iterated_sigmoid.forward(x)))\n self.assertAllClose(x, self.evaluate(iterated_sigmoid.inverse(y)))\n self.assertAllClose(\n -np.sum(np.log(y), axis=1),\n self.evaluate(\n iterated_sigmoid.inverse_log_det_jacobian(y, event_ndims=1)),\n atol=0.,\n rtol=1e-7)\n self.assertAllClose(\n self.evaluate(\n -iterated_sigmoid.inverse_log_det_jacobian(y, event_ndims=1)),\n self.evaluate(\n iterated_sigmoid.forward_log_det_jacobian(x, event_ndims=1)),\n atol=0.,\n rtol=1e-7)\n\n def testBijectorUnknownShape(self):\n iterated_sigmoid = tfb.IteratedSigmoidCentered()\n self.assertStartsWith(iterated_sigmoid.name, \"iterated_sigmoid\")\n x_ = self.dtype([[0., 0., 0.], -np.log([1 / 3., 1 / 2., 1.])])\n y_ = self.dtype([[0.25, 0.25, 0.25, 0.25], [0.5, 0.25, 0.125, 0.125]])\n x = tf1.placeholder_with_default(x_, shape=[2, None])\n y = tf1.placeholder_with_default(y_, shape=[2, None])\n self.assertAllClose(y_, self.evaluate(iterated_sigmoid.forward(x)))\n self.assertAllClose(x_, self.evaluate(iterated_sigmoid.inverse(y)))\n self.assertAllClose(\n -np.sum(np.log(y_), axis=1),\n self.evaluate(\n iterated_sigmoid.inverse_log_det_jacobian(y, event_ndims=1)),\n atol=0.,\n rtol=1e-7)\n self.assertAllClose(\n -self.evaluate(\n iterated_sigmoid.inverse_log_det_jacobian(y, event_ndims=1)),\n self.evaluate(\n iterated_sigmoid.forward_log_det_jacobian(x, event_ndims=1)),\n atol=0.,\n rtol=1e-7)\n\n def testShapeGetters(self):\n x = tf.TensorShape([4])\n y = tf.TensorShape([5])\n bijector = tfb.IteratedSigmoidCentered(validate_args=True)\n self.assertAllEqual(y, bijector.forward_event_shape(x))\n self.assertAllEqual(\n tensorshape_util.as_list(y),\n self.evaluate(\n bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))\n self.assertAllEqual(x, bijector.inverse_event_shape(y))\n self.assertAllEqual(\n tensorshape_util.as_list(x),\n self.evaluate(\n bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))\n\n def testBijectiveAndFinite(self):\n iterated_sigmoid = tfb.IteratedSigmoidCentered()\n\n # Grid of points in [-30, 30] x [-30, 30].\n x = np.mgrid[-30:30:0.5, -30:30:0.5].reshape(2, -1).T # pylint: disable=invalid-slice-index\n # Make y values on the simplex with a wide range.\n y_0 = np.ones(x.shape[0], dtype=self.dtype)\n y_1 = self.dtype(1e-3 * np.random.rand(x.shape[0]))\n y_2 = self.dtype(1e1 * np.random.rand(x.shape[0]))\n y = np.array([y_0, y_1, y_2])\n y /= y.sum(axis=0)\n y = y.T\n bijector_test_util.assert_bijective_and_finite(\n iterated_sigmoid, x, y, eval_func=self.evaluate, event_ndims=1)\n\n def testJacobianConsistent(self):\n with tf.GradientTape(persistent=True) as g:\n x = tf.constant((60 * np.random.rand(10) - 30).reshape(5, 2))\n g.watch(x)\n bijector = tfb.IteratedSigmoidCentered()\n y = 
bijector.forward(x)\n jacobian_matrix = g.batch_jacobian(y, x, experimental_use_pfor=False)\n # In our case, y[-1] is determined by all the other y, so we can drop it\n # for the jacobian calculation.\n jacobian_matrix = jacobian_matrix[..., :-1, :]\n self.assertAllClose(\n tf.linalg.slogdet(jacobian_matrix)[1],\n bijector.forward_log_det_jacobian(x, event_ndims=1),\n atol=0.,\n rtol=1e-7)\n\n\nclass IteratedSigmoidCenteredBijectorTestFloat32(\n tf.test.TestCase,\n _IteratedSigmoidCenteredBijectorTest):\n dtype = np.float32\n\n\nclass IteratedSigmoidCenteredBijectorTestFloat64(\n tf.test.TestCase,\n _IteratedSigmoidCenteredBijectorTest):\n dtype = np.float64\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The OneHotCategorical distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\nclass OneHotCategorical(distribution.Distribution):\n \"\"\"OneHotCategorical distribution.\n\n The categorical distribution is parameterized by the log-probabilities\n of a set of classes. The difference between OneHotCategorical and Categorical\n distributions is that OneHotCategorical is a discrete distribution over\n one-hot bit vectors whereas Categorical is a discrete distribution over\n positive integers. OneHotCategorical is equivalent to Categorical except\n Categorical has event_dim=() while OneHotCategorical has event_dim=K, where\n K is the number of classes.\n\n This class provides methods to create indexed batches of OneHotCategorical\n distributions. If the provided `logits` or `probs` is rank 2 or higher, for\n every fixed set of leading dimensions, the last dimension represents one\n single OneHotCategorical distribution. When calling distribution\n functions (e.g. `dist.prob(x)`), `logits` and `x` are broadcast to the\n same shape (if possible). 
In all cases, the last dimension of `logits,x`\n represents single OneHotCategorical distributions.\n\n #### Examples\n\n Creates a 3-class distribution, with the 2nd class, the most likely to be\n drawn from.\n\n ```python\n p = [0.1, 0.5, 0.4]\n dist = OneHotCategorical(probs=p)\n ```\n\n Creates a 3-class distribution, with the 2nd class the most likely to be\n drawn from, using logits.\n\n ```python\n logits = [-2, 2, 0]\n dist = OneHotCategorical(logits=logits)\n ```\n\n Creates a 3-class distribution, with the 3rd class is most likely to be drawn.\n\n ```python\n # counts is a scalar.\n p = [0.1, 0.4, 0.5]\n dist = OneHotCategorical(probs=p)\n dist.prob([0,1,0]) # Shape []\n\n # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match.\n samples = [[0,1,0], [1,0,0]]\n dist.prob(samples) # Shape [2]\n ```\n\n \"\"\"\n\n def __init__(self,\n logits=None,\n probs=None,\n dtype=tf.int32,\n validate_args=False,\n allow_nan_stats=True,\n name='OneHotCategorical'):\n \"\"\"Initialize OneHotCategorical distributions using class log-probabilities.\n\n Args:\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a\n set of Categorical distributions. The first `N - 1` dimensions index\n into a batch of independent distributions and the last dimension\n represents a vector of logits for each class. Only one of `logits` or\n `probs` should be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set\n of Categorical distributions. The first `N - 1` dimensions index into a\n batch of independent distributions and the last dimension represents a\n vector of probabilities for each class. Only one of `logits` or `probs`\n should be passed in.\n dtype: The type of the event samples (default: int32).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. 
When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n self._logits, self._probs = distribution_util.get_logits_and_probs(\n name=name, logits=logits, probs=probs, validate_args=validate_args,\n multidimensional=True)\n\n logits_shape_static = tensorshape_util.with_rank_at_least(\n self._logits.shape, 1)\n if tensorshape_util.rank(logits_shape_static) is not None:\n self._batch_rank = tf.convert_to_tensor(\n tensorshape_util.rank(logits_shape_static) - 1,\n dtype=tf.int32,\n name='batch_rank')\n else:\n with tf.name_scope('batch_rank'):\n self._batch_rank = tf.rank(self._logits) - 1\n\n with tf.name_scope('event_size'):\n self._event_size = tf.shape(self._logits)[-1]\n\n super(OneHotCategorical, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.NOT_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._logits, self._probs],\n name=name)\n\n @classmethod\n def _params_event_ndims(cls):\n return dict(logits=1, probs=1)\n\n @property\n def event_size(self):\n \"\"\"Scalar `int32` tensor: the number of classes.\"\"\"\n return self._event_size\n\n @property\n def logits(self):\n \"\"\"Input argument `logits`.\"\"\"\n return self._logits\n\n @property\n def probs(self):\n \"\"\"Input argument `probs`.\"\"\"\n return self._probs\n\n def _batch_shape_tensor(self):\n return tf.shape(self.logits)[:-1]\n\n def _batch_shape(self):\n return self.logits.shape[:-1]\n\n def _event_shape_tensor(self):\n return tf.shape(self.logits)[-1:]\n\n def _event_shape(self):\n return tensorshape_util.with_rank_at_least(self.logits.shape, 1)[-1:]\n\n def _sample_n(self, n, seed=None):\n sample_shape = tf.concat([[n], tf.shape(self.logits)], 0)\n logits = self.logits\n if tensorshape_util.rank(logits.shape) == 2:\n logits_2d = logits\n else:\n logits_2d = tf.reshape(logits, [-1, self.event_size])\n samples = tf.random.categorical(logits_2d, n, seed=seed)\n samples = tf.transpose(a=samples)\n samples = tf.one_hot(samples, self.event_size, dtype=self.dtype)\n ret = tf.reshape(samples, sample_shape)\n return ret\n\n def _log_prob(self, x):\n x = tf.cast(x, self.logits.dtype)\n x = self._assert_valid_sample(x)\n # broadcast logits or x if need be.\n logits = self.logits\n if (not tensorshape_util.is_fully_defined(x.shape) or\n not tensorshape_util.is_fully_defined(logits.shape) or\n x.shape != logits.shape):\n logits = tf.ones_like(x, dtype=logits.dtype) * logits\n x = tf.ones_like(logits, dtype=x.dtype) * x\n\n logits_shape = tf.shape(tf.reduce_sum(logits, axis=-1))\n logits_2d = tf.reshape(logits, [-1, self.event_size])\n x_2d = tf.reshape(x, [-1, self.event_size])\n ret = -tf.nn.softmax_cross_entropy_with_logits(\n labels=tf.stop_gradient(x_2d),\n logits=logits_2d)\n # Reshape back to user-supplied batch and sample dims prior to 2D reshape.\n ret = tf.reshape(ret, logits_shape)\n return ret\n\n def _entropy(self):\n return -tf.reduce_sum(\n tf.math.log_softmax(self.logits) * self.probs, axis=-1)\n\n def _mean(self):\n return self.probs\n\n def _mode(self):\n ret = tf.argmax(self.logits, axis=self._batch_rank)\n ret = tf.one_hot(ret, self.event_size, dtype=self.dtype)\n tensorshape_util.set_shape(ret, self.logits.shape)\n return ret\n\n def _covariance(self):\n p = self.probs\n ret = -tf.matmul(p[..., None], p[..., None, :])\n return 
tf.linalg.set_diag(ret, self._variance())\n\n def _variance(self):\n return self.probs * (1. - self.probs)\n\n def logits_parameter(self, name=None):\n \"\"\"Logits vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n with self._name_and_control_scope(name or 'logits_parameter'):\n if self.logits is None:\n return tf.math.log(self.probs)\n return tf.identity(self.logits)\n\n def probs_parameter(self, name=None):\n \"\"\"Probs vec computed from non-`None` input arg (`probs` or `logits`).\"\"\"\n with self._name_and_control_scope(name or 'probs_parameter'):\n if self.logits is None:\n return tf.identity(self.probs)\n return tf.math.softmax(self.logits)\n\n def _assert_valid_sample(self, x):\n if not self.validate_args:\n return x\n return distribution_util.with_dependencies([\n assert_util.assert_non_positive(x),\n assert_util.assert_near(\n tf.zeros([], dtype=self.logits.dtype),\n tf.reduce_logsumexp(x, axis=[-1])),\n ], x)\n\n\n@kullback_leibler.RegisterKL(OneHotCategorical, OneHotCategorical)\ndef _kl_categorical_categorical(a, b, name=None):\n \"\"\"Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.\n\n Args:\n a: instance of a OneHotCategorical distribution object.\n b: instance of a OneHotCategorical distribution object.\n name: Python `str` name to use for created operations.\n Default value: `None` (i.e., `'kl_categorical_categorical'`).\n\n Returns:\n Batchwise KL(a || b)\n \"\"\"\n with tf.name_scope(name or 'kl_categorical_categorical'):\n # sum(p ln(p / q))\n a_logits = a.logits_parameter()\n b_logits = b.logits_parameter()\n return tf.reduce_sum(\n (tf.math.softmax(a_logits) *\n (tf.math.log_softmax(a_logits) - tf.math.log_softmax(b_logits))),\n axis=-1)\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for OneHotCategorical distribution.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow_probability.python.internal import test_util as tfp_test_util\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\ntfd = tfp.distributions\n\n\ndef make_onehot_categorical(batch_shape, num_classes, dtype=tf.int32):\n logits = tf.random.uniform(\n list(batch_shape) + [num_classes], -10, 10, dtype=tf.float32) - 50.\n return tfd.OneHotCategorical(logits, dtype=dtype)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass OneHotCategoricalTest(tf.test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def testP(self):\n p = [0.2, 0.8]\n dist = tfd.OneHotCategorical(probs=p)\n self.assertAllClose(p, self.evaluate(dist.probs))\n 
self.assertAllEqual([2], dist.logits.shape)\n\n def testLogits(self):\n p = np.array([0.2, 0.8], dtype=np.float32)\n logits = np.log(p) - 50.\n dist = tfd.OneHotCategorical(logits=logits)\n self.assertAllEqual([2], dist.probs.shape)\n self.assertAllEqual([2], dist.logits.shape)\n self.assertAllClose(self.evaluate(dist.probs), p)\n self.assertAllClose(self.evaluate(dist.logits), logits)\n\n def testShapes(self):\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_onehot_categorical(batch_shape, 10)\n self.assertAllEqual(batch_shape,\n tensorshape_util.as_list(dist.batch_shape))\n self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))\n self.assertAllEqual([10], tensorshape_util.as_list(dist.event_shape))\n self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))\n # event_shape is available as a constant because the shape is\n # known at graph build time.\n self.assertEqual(10, dist.event_shape)\n\n for batch_shape in ([], [1], [2, 3, 4]):\n dist = make_onehot_categorical(batch_shape, tf.constant(\n 10, dtype=tf.int32))\n self.assertAllEqual(\n len(batch_shape), tensorshape_util.rank(dist.batch_shape))\n self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))\n self.assertAllEqual([10], tensorshape_util.as_list(dist.event_shape))\n self.assertEqual(10, self.evaluate(dist.event_shape_tensor()))\n\n def testDtype(self):\n dist = make_onehot_categorical([], 5, dtype=tf.int32)\n self.assertEqual(dist.dtype, tf.int32)\n self.assertEqual(dist.dtype, dist.sample(5).dtype)\n self.assertEqual(dist.dtype, dist.mode().dtype)\n dist = make_onehot_categorical([], 5, dtype=tf.int64)\n self.assertEqual(dist.dtype, tf.int64)\n self.assertEqual(dist.dtype, dist.sample(5).dtype)\n self.assertEqual(dist.dtype, dist.mode().dtype)\n self.assertEqual(dist.probs.dtype, tf.float32)\n self.assertEqual(dist.logits.dtype, tf.float32)\n self.assertEqual(dist.logits.dtype, dist.entropy().dtype)\n self.assertEqual(dist.logits.dtype, dist.prob(\n np.array([1]+[0]*4, dtype=np.int64)).dtype)\n self.assertEqual(dist.logits.dtype, dist.log_prob(\n np.array([1]+[0]*4, dtype=np.int64)).dtype)\n\n def testUnknownShape(self):\n logits = tf1.placeholder_with_default(\n input=[[-1000.0, 1000.0], [1000.0, -1000.0]], shape=None)\n dist = tfd.OneHotCategorical(logits)\n sample = dist.sample()\n # Batch entry 0 will sample class 1, batch entry 1 will sample class 0.\n sample_value_batch = self.evaluate(sample)\n self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)\n\n def testEntropyNoBatch(self):\n logits = np.log([0.2, 0.8]) - 50.\n dist = tfd.OneHotCategorical(logits)\n self.assertAllClose(\n self.evaluate(dist.entropy()), -(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))\n\n def testEntropyWithBatch(self):\n logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.\n dist = tfd.OneHotCategorical(logits)\n self.assertAllClose(\n self.evaluate(dist.entropy()), [\n -(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),\n -(0.6 * np.log(0.6) + 0.4 * np.log(0.4))\n ])\n\n def testPmf(self):\n # check that probability of samples correspond to their class probabilities\n logits = self._rng.random_sample(size=(8, 2, 10))\n prob = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)\n dist = tfd.OneHotCategorical(logits=logits)\n np_sample = self.evaluate(dist.sample())\n np_prob = self.evaluate(dist.prob(np_sample))\n expected_prob = prob[np_sample.astype(np.bool)]\n self.assertAllClose(expected_prob, np_prob.flatten())\n\n def testSample(self):\n probs = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = 
tfd.OneHotCategorical(tf.math.log(probs) - 50.)\n n = 100\n samples = dist.sample(n, seed=tfp_test_util.test_seed())\n self.assertEqual(samples.dtype, tf.int32)\n sample_values = self.evaluate(samples)\n self.assertAllEqual([n, 1, 2, 2], sample_values.shape)\n self.assertFalse(np.any(sample_values < 0))\n self.assertFalse(np.any(sample_values > 1))\n\n def testSampleWithSampleShape(self):\n probs = [[[0.2, 0.8], [0.4, 0.6]]]\n dist = tfd.OneHotCategorical(tf.math.log(probs) - 50.)\n samples = dist.sample((100, 100), seed=tfp_test_util.test_seed())\n prob = dist.prob(samples)\n prob_val = self.evaluate(prob)\n self.assertAllClose(\n [0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)\n self.assertAllClose(\n [0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()], atol=1e-2)\n\n def testCategoricalCategoricalKL(self):\n def np_softmax(logits):\n exp_logits = np.exp(logits)\n return exp_logits / exp_logits.sum(axis=-1, keepdims=True)\n\n for categories in [2, 10]:\n for batch_size in [1, 2]:\n p_logits = self._rng.random_sample((batch_size, categories))\n q_logits = self._rng.random_sample((batch_size, categories))\n p = tfd.OneHotCategorical(logits=p_logits)\n q = tfd.OneHotCategorical(logits=q_logits)\n prob_p = np_softmax(p_logits)\n prob_q = np_softmax(q_logits)\n kl_expected = np.sum(\n prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)\n\n kl_actual = tfd.kl_divergence(p, q)\n kl_same = tfd.kl_divergence(p, p)\n x = p.sample(int(2e4), seed=tfp_test_util.test_seed())\n x = tf.cast(x, dtype=tf.float32)\n # Compute empirical KL(p||q).\n kl_sample = tf.reduce_mean(\n input_tensor=p.log_prob(x) - q.log_prob(x), axis=0)\n\n [kl_sample_, kl_actual_,\n kl_same_] = self.evaluate([kl_sample, kl_actual, kl_same])\n self.assertEqual(kl_actual.shape, (batch_size,))\n self.assertAllClose(kl_same_, np.zeros_like(kl_expected))\n self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)\n self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)\n\n def testSampleUnbiasedNonScalarBatch(self):\n logits = self._rng.rand(4, 3, 2).astype(np.float32)\n dist = tfd.OneHotCategorical(logits=logits)\n n = int(3e3)\n x = dist.sample(n, seed=tfp_test_util.test_seed())\n x = tf.cast(x, dtype=tf.float32)\n sample_mean = tf.reduce_mean(input_tensor=x, axis=0)\n x_centered = tf.transpose(a=x - sample_mean, perm=[1, 2, 3, 0])\n sample_covariance = tf.matmul(x_centered, x_centered, adjoint_b=True) / n\n [\n sample_mean_,\n sample_covariance_,\n actual_mean_,\n actual_covariance_,\n ] = self.evaluate([\n sample_mean,\n sample_covariance,\n dist.mean(),\n dist.covariance(),\n ])\n self.assertAllEqual([4, 3, 2], sample_mean.shape)\n self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)\n self.assertAllEqual([4, 3, 2, 2], sample_covariance.shape)\n self.assertAllClose(\n actual_covariance_, sample_covariance_, atol=0., rtol=0.10)\n\n def testSampleUnbiasedScalarBatch(self):\n logits = self._rng.rand(3).astype(np.float32)\n dist = tfd.OneHotCategorical(logits=logits)\n n = int(1e4)\n x = dist.sample(n, seed=tfp_test_util.test_seed())\n x = tf.cast(x, dtype=tf.float32)\n sample_mean = tf.reduce_mean(input_tensor=x, axis=0) # elementwise mean\n x_centered = x - sample_mean\n sample_covariance = tf.matmul(x_centered, x_centered, adjoint_a=True) / n\n [\n sample_mean_,\n sample_covariance_,\n actual_mean_,\n actual_covariance_,\n ] = self.evaluate([\n sample_mean,\n sample_covariance,\n dist.probs,\n dist.covariance(),\n ])\n self.assertAllEqual([3], sample_mean.shape)\n 
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)\n self.assertAllEqual([3, 3], sample_covariance.shape)\n self.assertAllClose(\n actual_covariance_, sample_covariance_, atol=0., rtol=0.1)\n\n def testParamTensorFromLogits(self):\n x = tf.constant([-1., 0.5, 1.])\n d = tfd.OneHotCategorical(logits=x, validate_args=True)\n self.assertAllClose(\n *self.evaluate([x, d.logits_parameter()]),\n atol=0, rtol=1e-4)\n self.assertAllClose(\n *self.evaluate([tf.math.softmax(x),\n d.probs_parameter()]),\n atol=0,\n rtol=1e-4)\n\n def testParamTensorFromProbs(self):\n x = tf.constant([0.1, 0.5, 0.4])\n d = tfd.OneHotCategorical(probs=x, validate_args=True)\n self.assertAllClose(\n *self.evaluate([tf.math.log(x), d.logits_parameter()]),\n atol=0, rtol=1e-4)\n self.assertAllClose(\n *self.evaluate([x, d.probs_parameter()]),\n atol=0, rtol=1e-4)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"TensorFlow (graph) backend for auto-batching VM.\n\nImplements VM variable stack and registers backed by TF `Tensor`s.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\n\n# Dependency imports\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_probability.python.internal.auto_batching import instructions\nfrom tensorflow_probability.python.internal.auto_batching import xla\nfrom tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import\n\n__all__ = ['TensorFlowBackend']\n\n\[email protected]\ndef _control_flow_v2():\n enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = True\n try:\n yield\n finally:\n control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old\n\n\ndef _generalized_where(mask, value, old_value):\n \"\"\"Version of tf.compat.v1.where that broadcasts `value` to `old_value`.\"\"\"\n mask = tf.convert_to_tensor(value=mask, name='mask')\n mask.shape.assert_has_rank(1)\n value = tf.convert_to_tensor(value=value, name='value', dtype=old_value.dtype)\n if (not value.shape.is_fully_defined() or\n not old_value.shape.is_fully_defined() or\n value.shape != old_value.shape):\n # We force broadcast value w/ current, e.g. for program constants.\n if old_value.dtype == tf.bool:\n value |= tf.zeros_like(old_value)\n else:\n value += tf.zeros_like(old_value)\n new_value = tf.compat.v1.where(mask, value, old_value, name='new_value')\n # TODO(b/78655271): Do we need 'new_val.set_shape(old_value.shape)'?\n return new_value\n\n\nclass RegisterTensorFlowVariable(collections.namedtuple(\n 'RegisterTensorFlowVariable', ['value'])):\n \"\"\"A register-only variable.\n\n Efficiently stores and updates values whose lifetime does not cross function\n calls (and therefore does not require a stack). 
This is different from\n `TemporaryVariable` because it supports crossing basic block boundaries. A\n `RegisterTensorFlowVariable` therefore needs to store its content persistently\n across the `while_loop` in `execute`, and to handle divergence (and\n re-convergence) of logical threads.\n \"\"\"\n\n def update(self, value, mask):\n \"\"\"Update with `value` at `mask`, propagate other positions.\"\"\"\n if isinstance(self.value, tuple):\n # Support fast path for Eager mode initialization. Initializing with a\n # well-formed value is only necessary in graph mode, where the value\n # Tensor needs to be part of while-carried state. In Eager, however, it\n # does not, so the variable may just carry its type information as a\n # Python tuple.\n batch_size, dtype, event_shape = self.value\n value = tf.convert_to_tensor(value=value, dtype=dtype)\n new_value = tf.broadcast_to(value, shape=[batch_size] + list(event_shape))\n else:\n new_value = _generalized_where(mask, value, self.value)\n return type(self)(new_value)\n\n def push(self, mask):\n del mask\n return self\n\n def read(self):\n if isinstance(self.value, tuple):\n raise ValueError(\n 'Accessing uninitialized variable {}'.format(self._name()))\n return self.value\n\n def pop(self, mask):\n del mask\n return self\n\n def ensure_initialized(self):\n if isinstance(self.value, tuple):\n return self.update(False, None)\n return self\n\n\nclass Stack(collections.namedtuple('Stack', ['stack', 'stack_index'])):\n \"\"\"Immutable, internal container for a fixed size stack.\n\n The implementation is backed by a `Tensor` each for the stack and the\n (batched) stack pointer.\n\n As a namedtuple, it can be directly passed through TF's nest library for\n flattening and restructuring as an element passed to e.g. a TF while loop.\n \"\"\"\n\n def _safety_checks(self):\n \"\"\"Put in runtime asserts of stack bounds? Overridden by UnsafeStack.\"\"\"\n return True\n\n def pop(self, mask, name=None):\n \"\"\"Pops each indicated batch member, returns the new top of the stack.\n\n Does not mutate `self`.\n\n Args:\n mask: Boolean `Tensor` of shape `[batch_size]`. 
The stack frames at `True`\n indices of `mask` are regressed; the others are unchanged.\n name: Optional name for this op.\n\n Returns:\n new_stack: A new stack whose frames have been regressed where indicated\n by `mask`.\n read: The batch of values at the newly-current stack frame.\n \"\"\"\n with tf.compat.v2.name_scope(name or 'Stack.pop'):\n mask = tf.convert_to_tensor(value=mask, name='mask')\n new_stack_index = self.stack_index - tf.cast(mask, self.stack_index.dtype)\n if self._safety_checks():\n with tf.control_dependencies(\n [tf.compat.v1.assert_greater_equal(\n new_stack_index, tf.constant(0, new_stack_index.dtype))]):\n new_stack_index = tf.identity(new_stack_index)\n new_stack_index.set_shape(self.stack_index.shape)\n # self.stack: [max_stack_depth * batch_size, ...]\n # self.stack_index: [batch_size]\n # returned: [batch_size, ...]\n batch_size = (\n tf.compat.dimension_value(self.stack_index.shape[0]) or\n tf.shape(input=self.stack_index, out_type=self.stack_index.dtype)[0])\n # Note that stack depth and batch are in a single dimension, stack major.\n gather_indices = (\n new_stack_index * batch_size + tf.range(\n batch_size, dtype=new_stack_index.dtype))\n read_value = tf.gather(self.stack, gather_indices)\n read_value.set_shape(\n self.stack_index.shape.concatenate(self.stack.shape[1:]))\n return type(self)(self.stack, new_stack_index), read_value\n\n def push(self, value, mask, name=None):\n \"\"\"Pushes `value` onto the stack, advances frame of batch members in `mask`.\n\n In this impl, we update each thread's top-of-stack (regardless of `mask`) to\n the corresponding `value`, then advance the stack pointers of only those\n threads indicated by `mask`.\n\n Args:\n value: `Tensor` having the shape of a single batch of the variable.\n mask: Boolean `Tensor` of shape `[batch_size]`. Threads at `True` indices\n of `mask` have their stack frames advanced; the others remain.\n name: Optional name for this op.\n\n Returns:\n stack: Updated stack. 
Does not mutate `self`.\n asserted_value: A assertion-bound snapshot of the input `value`,\n assertions used to catch stack overflows.\n \"\"\"\n with tf.compat.v2.name_scope(name or 'Stack.push'):\n value = tf.convert_to_tensor(value=value, name='value')\n mask = tf.convert_to_tensor(value=mask, name='mask')\n # self.stack: [max_stack_depth * batch_size, ...]\n # self.stack_index: [batch_size]\n # value: [batch_size, ...]\n batch_size = (\n tf.compat.dimension_value(self.stack_index.shape[0]) or\n tf.shape(input=self.stack_index)[0])\n max_stack_depth = (tf.compat.dimension_value(self.stack.shape[0]) or\n tf.shape(input=self.stack)[0]) // batch_size\n max_stack_depth_tensor = tf.convert_to_tensor(value=max_stack_depth)\n tiled_value = tf.tile(\n input=value[tf.newaxis, ...],\n multiples=tf.concat(\n [[max_stack_depth_tensor],\n tf.ones(tf.rank(value), dtype=max_stack_depth_tensor.dtype)],\n axis=0))\n update_stack_mask = tf.one_hot(\n self.stack_index,\n depth=max_stack_depth,\n axis=0, # Stack depth x batch are both in outermost dim, stack major.\n on_value=True,\n off_value=False,\n dtype=tf.bool)\n new_stack = tf.compat.v1.where(\n tf.reshape(update_stack_mask, [-1]),\n tf.reshape(tiled_value, tf.shape(input=self.stack)), self.stack)\n new_stack.set_shape(self.stack.shape)\n new_stack_index = self.stack_index + tf.cast(mask, self.stack_index.dtype)\n new_stack_index.set_shape(self.stack_index.shape)\n if self._safety_checks():\n with tf.control_dependencies(\n [tf.compat.v1.assert_less(\n new_stack_index, tf.cast(\n max_stack_depth_tensor, new_stack_index.dtype))]):\n value = tf.identity(value)\n new_stack_index = tf.identity(new_stack_index)\n return type(self)(new_stack, new_stack_index), value\n\n\nclass UnsafeStack(Stack):\n \"\"\"Stack with runtime assertions disabled.\"\"\"\n\n def _safety_checks(self):\n return False\n\n\ndef _create_stack(max_stack_depth, value, safety_checks=True, name=None):\n \"\"\"Creates a new Stack instance.\n\n Args:\n max_stack_depth: A scalar int `Tensor` indicating the depth of stack we\n should pre-allocate.\n value: A batched `Tensor` giving the shape of a batch of values in a\n single stack frame.\n safety_checks: Python `bool` indicating whether we must use runtime\n assertions to detect stack overflow/underflow.\n name: Optional name for this op.\n\n Returns:\n stack: An initialized Stack object.\n \"\"\"\n with tf.compat.v2.name_scope(name or 'Stack.initialize'):\n value = tf.convert_to_tensor(value=value, name='value')\n batch_size = _get_leftmost_dim_size(value)\n # Home the stack index in the same memory space as the value. The\n # convention on GPU is that int32 are in host memory and int64 are in device\n # memory.\n stack_index_dtype = tf.int64 if value.dtype != tf.int32 else tf.int32\n stack_index = tf.zeros(\n [batch_size], dtype=stack_index_dtype, name='stack_index')\n stack = tf.zeros(\n shape=tf.concat([[max_stack_depth * batch_size],\n tf.shape(input=value)[1:]],\n axis=0),\n dtype=value.dtype,\n name='stack')\n stack_class = Stack if safety_checks else UnsafeStack\n return stack_class(stack, stack_index)\n\n\nclass FullTensorFlowVariable(\n collections.namedtuple('FullTensorFlowVariable', ['current', 'stack'])):\n \"\"\"An immutable register + stack backed by batched TF `Tensor`s.\n\n All state-changing methods return new Variable instances.\n\n The register is used to make reads from and writes to the top of the stack\n cheaper than they would be otherwise, i.e. 
save slice updates.\n\n As a namedtuple, the variable can be passed through the TF nest library as\n part of the structure handed to/returned from the body of a while_loop, or\n even a Session.run call.\n \"\"\"\n\n def _name(self):\n \"\"\"The variable's name. Overridden by `NamedVariable` in create_variable.\"\"\"\n return 'Variable'\n\n def read(self, name=None):\n \"\"\"Returns the batch of top values.\"\"\"\n with tf.compat.v2.name_scope(name or '{}.read'.format(self._name())):\n return tf.identity(self.current)\n\n def update(self, value, mask, name=None):\n \"\"\"Updates the variable at the indicated places.\n\n Args:\n value: Array of shape `[batch_size, ...]` of data to update with.\n Indices in the first dimension corresponding to `False`\n entries in `mask` are ignored.\n mask: Boolean array of shape `[batch_size]`. The values at `True`\n indices of `mask` are updated; the others remain.\n name: Optional name for this op.\n\n Returns:\n var: Updated variable. Does not mutate `self`.\n \"\"\"\n with tf.compat.v2.name_scope(name or '{}.update'.format(self._name())):\n new_value = _generalized_where(mask, value, self.current)\n return type(self)(new_value, self.stack)\n\n def push(self, mask, name=None):\n \"\"\"Pushes each indicated batch member, making room for a new write.\n\n The new top value is the same as the old top value (this is a\n \"duplicating push\").\n\n Args:\n mask: Boolean array of shape `[batch_size]`. The values at `True`\n indices of `mask` are updated; the others remain.\n name: Optional name for this op.\n\n Returns:\n var: Updated variable. Does not mutate `self`.\n \"\"\"\n with tf.compat.v2.name_scope(name or '{}.push'.format(self._name())):\n new_stack, asserted_value = self.stack.push(self.current, mask)\n return type(self)(asserted_value, new_stack)\n\n def pop(self, mask, name=None):\n \"\"\"Pops each indicated batch member, restoring a previous write.\n\n Args:\n mask: Boolean `Tensor` of shape `[batch_size]`. The values at `True`\n indices of `mask` are updated; the others are unchanged.\n name: Optional name for this op.\n\n Returns:\n var: Updated variable. Does not mutate `self`.\n \"\"\"\n with tf.compat.v2.name_scope(name or '{}.pop'.format(self._name())):\n mask = tf.convert_to_tensor(value=mask, name='mask')\n new_stack, stack_value = self.stack.pop(mask)\n new_value = tf.compat.v1.where(\n mask, stack_value, self.current, name='new_value')\n return type(self)(new_value, new_stack)\n\n\nclass TensorFlowBackend(object):\n \"\"\"Implements the TF backend ops for a PC auto-batching VM.\"\"\"\n\n def __init__(self,\n safety_checks=True,\n while_parallel_iterations=10,\n while_maximum_iterations=None,\n basic_block_xla_device=None):\n \"\"\"Construct a new backend instance.\n\n Args:\n safety_checks: Python `bool` indicating whether we should use runtime\n assertions to detect stack overflow/underflow.\n while_parallel_iterations: Python `int`, the argument to pass along to\n `tf.while_loop(..., parallel_iterations=while_parallel_iterations)`\n while_maximum_iterations: Python `int` or None, the argument to pass along\n to `tf.while_loop(..., maximum_iterations=while_maximum_iterations)`\n basic_block_xla_device: Python `str` indicating the device to which basic\n blocks should be targeted (i.e. 
'CPU:0' or 'GPU:0'); if not None.\n \"\"\"\n self._safety_checks = safety_checks\n self._while_parallel_iterations = while_parallel_iterations\n self._while_maximum_iterations = while_maximum_iterations\n self._basic_block_xla_device = basic_block_xla_device\n\n @property\n def variable_class(self):\n return (instructions.NullVariable,\n instructions.TemporaryVariable,\n RegisterTensorFlowVariable,\n FullTensorFlowVariable)\n\n def type_of(self, t, dtype_hint=None):\n \"\"\"Returns the `instructions.Type` of `t`.\n\n Args:\n t: `tf.Tensor` or a Python or numpy constant.\n dtype_hint: dtype to prefer, if `t` is a constant.\n\n Returns:\n vm_type: `instructions.TensorType` describing `t`.\n \"\"\"\n if tf.executing_eagerly():\n new_t = tf.convert_to_tensor(value=t, dtype=dtype_hint)\n else:\n with tf.Graph().as_default(): # Use a scratch graph.\n new_t = tf.convert_to_tensor(value=t, dtype=dtype_hint)\n dtype = new_t.dtype.base_dtype.as_numpy_dtype\n shape = None if new_t.shape.ndims is None else tuple(new_t.shape.as_list())\n return instructions.TensorType(dtype, shape)\n\n def run_on_dummies(self, primitive_callable, input_types):\n \"\"\"Runs the given `primitive_callable` with dummy input.\n\n This is useful for examining the outputs for the purpose of type inference.\n\n Args:\n primitive_callable: A python callable.\n input_types: `list` of `instructions.Type` type of each argument to the\n callable. Note that the contained `TensorType` objects must match the\n dimensions with which the primitive is to be invoked at runtime, even\n though type inference conventionally does not store the batch dimension\n in the `TensorType`s.\n\n Returns:\n outputs: pattern of backend-specific objects whose types may be\n analyzed by the caller with `type_of`.\n \"\"\"\n with tf.compat.v2.name_scope('VM.run_on_dummies'):\n # We cannot use a temporary graph in eager mode because user code may\n # close over eager tensors, causing `RuntimeError: Attempting to capture\n # an EagerTensor without building a function.`\n # In graph mode, capturing user Tensors has also been a problem, because\n # TF doesn't like the inputs of an op being in different graphs.\n # Status quo is unfortunate because it involves running the computation\n # in the primop to determine its shape behavior, instead of just invoking\n # shape inference.\n # There may be a solution involving FuncGraph; see b/118896442.\n def mk_placeholder(vt):\n return tf.ones(vt.shape, dtype=vt.dtype)\n phs = [\n instructions.pattern_map(\n mk_placeholder, vtype.tensors, leaf_type=instructions.TensorType)\n for vtype in input_types]\n return primitive_callable(*phs)\n\n def merge_dtypes(self, dt1, dt2):\n \"\"\"Merges two dtypes, returning a compatible dtype.\n\n In practice, TF implementation asserts that the two dtypes are identical.\n\n Args:\n dt1: A numpy dtype, or None.\n dt2: A numpy dtype, or None.\n\n Returns:\n dtype: The common numpy dtype.\n\n Raises:\n ValueError: If dt1 and dt2 are not equal and both are non-`None`.\n \"\"\"\n if dt1 == dt2 or None in (dt1, dt2):\n return dt1 or dt2\n raise ValueError('Mismatched dtypes {} vs {}'.format(dt1, dt2))\n\n def merge_shapes(self, s1, s2):\n \"\"\"Merges two shapes, returning a broadcasted shape.\n\n Args:\n s1: A `list` of Python `int` or None.\n s2: A `list` of Python `int` or None.\n\n Returns:\n shape: A `list` of Python `int` or None.\n\n Raises:\n ValueError: If `s1` and `s2` are not broadcast compatible.\n \"\"\"\n new_shp = tf.broadcast_static_shape(\n tf.TensorShape(s1), 
tf.TensorShape(s2))\n return None if new_shp.ndims is None else tuple(new_shp.as_list())\n\n def assert_matching_dtype(self, expected_dtype, value, message=''):\n \"\"\"Asserts that the dtype of `value` matches `expected_dtype`.\n\n Args:\n expected_dtype: A numpy dtype\n value: `Tensor` or convertible.\n message: Optional diagnostic message.\n\n Raises:\n ValueError: If dtype does not match.\n \"\"\"\n with tf.compat.v2.name_scope('VM.assert_matching_dtype'):\n value = tf.convert_to_tensor(\n value=value, name='value', dtype=expected_dtype)\n if value.dtype.base_dtype.as_numpy_dtype != expected_dtype:\n raise ValueError('Mismatched dtype: expected {} found {}. {}'.format(\n expected_dtype, value.dtype.base_dtype.as_numpy_dtype, message))\n\n def batch_size(self, value, name=None):\n \"\"\"Returns the first (batch) dimension of `value`.\"\"\"\n with tf.compat.v2.name_scope(name or 'VM.batch_size'):\n value = tf.convert_to_tensor(value=value, name='value')\n return _get_leftmost_dim_size(value)\n\n def static_value(self, t):\n \"\"\"Gets the eager/immediate value of `t`, or `None` if `t` is a Tensor.\"\"\"\n if tf.executing_eagerly():\n return t.numpy()\n return None\n\n def fill(self, value, size, dtype, shape, name=None):\n \"\"\"Fill a fresh batched Tensor of the given shape and dtype with `value`.\n\n Args:\n value: Scalar to fill with.\n size: Scalar `int` `Tensor` specifying the number of VM threads.\n dtype: `tf.DType` of the zeros to be returned.\n shape: Rank 1 `int` `Tensor`, the per-thread value shape.\n name: Optional name for the op.\n\n Returns:\n result: `Tensor` of `dtype` `value`s with shape `[size, *shape]`\n \"\"\"\n with tf.compat.v2.name_scope(name or 'VM.fill'):\n size = tf.convert_to_tensor(value=size, name='size')\n shape = tf.convert_to_tensor(value=shape, name='shape', dtype=size.dtype)\n return tf.fill(tf.concat([[size], shape], axis=0),\n value=tf.cast(value, dtype=dtype))\n\n def create_variable(self, name, alloc, type_, max_stack_depth, batch_size):\n \"\"\"Returns an intialized Variable.\n\n Args:\n name: Name for the variable.\n alloc: `VariableAllocation` for the variable.\n type_: `instructions.TensorType` describing the sub-batch shape and dtype\n of the variable being created.\n max_stack_depth: Scalar `int` `Tensor`, the maximum stack depth allocated.\n batch_size: Scalar `int` `Tensor`, the number of parallel threads being\n executed.\n\n Returns:\n var: A new, initialized Variable object.\n \"\"\"\n if alloc is instructions.VariableAllocation.NULL:\n return instructions.NullVariable()\n elif alloc is instructions.VariableAllocation.TEMPORARY:\n return instructions.TemporaryVariable.empty()\n else:\n name = 'Variable' if name is None else 'VM.var_{}'.format(name)\n dtype, event_shape = type_\n\n with tf.compat.v2.name_scope('{}.initialize'.format(name)):\n if (alloc is instructions.VariableAllocation.REGISTER and\n tf.executing_eagerly()):\n # Don't need to construct the empty value in Eager mode, because there\n # is no tf.while_loop whose loop-carried state it would need to be.\n # This is a substantial optimization for stackless mode, because that\n # initializes variables on every function call, rather than just once.\n value = (batch_size, dtype, event_shape)\n else:\n value = self.fill(0, batch_size, dtype, event_shape)\n\n if alloc is instructions.VariableAllocation.REGISTER:\n klass = RegisterTensorFlowVariable\n extra = []\n else:\n klass = FullTensorFlowVariable\n extra = [_create_stack(max_stack_depth, value, self._safety_checks)]\n\n class 
NamedVariable(klass):\n \"\"\"Captures `name` to yield improved downstream TF op names.\"\"\"\n\n def _name(self):\n return name\n\n return NamedVariable(value, *extra)\n\n def full_mask(self, size, name=None):\n \"\"\"Returns an all-True mask `Tensor` with shape `[size]`.\"\"\"\n with tf.compat.v2.name_scope(name or 'VM.full_mask'):\n size = tf.convert_to_tensor(value=size, name='size')\n return tf.ones(size, dtype=tf.bool)\n\n def broadcast_to_shape_of(self, val, target, name=None):\n \"\"\"Broadcasts val to the shape of target.\n\n Attempts to match the dtype of `broadcast_val` to the dtype of `target`, if\n `val` is not a `Tensor` and `target` has a dtype.\n\n Args:\n val: The value to be broadcast. Must be broadcast-compatible with\n `target`.\n target: `Tensor` whose shape we will broadcast `val` to match.\n name: Optional name for the op.\n\n Returns:\n broadcast_val: A `Tensor` with shape matching `val + target`. Provided\n that `val`'s dimension sizes are all smaller or equal to `target`'s, the\n returned value will be the shape of `target`.\n \"\"\"\n # TODO(b/78594182): This is a compatibility shim, required because\n # `tf.compat.v1.where` does not support broadcasting of its value operands.\n with tf.compat.v2.name_scope(name or 'VM.broadcast_to_shape_of'):\n dtype = getattr(target, 'dtype', getattr(val, 'dtype', None))\n target = tf.convert_to_tensor(value=target, name='target', dtype=dtype)\n val = tf.convert_to_tensor(value=val, name='val', dtype=target.dtype)\n if val.dtype == tf.bool:\n return val | tf.zeros_like(target, dtype=val.dtype)\n return val + tf.zeros_like(target, dtype=val.dtype)\n\n def cond(self, pred, true_fn, false_fn, name=None):\n \"\"\"Implements a conditional operation for the backend.\n\n Args:\n pred: A boolean scalar `Tensor` indicating the condition.\n true_fn: A callable accepting and returning nests of `Tensor`s having\n the same structure as `state`, to be executed when `pred` is True.\n false_fn: A callable accepting and returning nests of `Tensor`s having\n the same structure as `state`, to be executed when `pred` is False.\n name: Optional name for the op.\n\n Returns:\n state: Output state, matching nest structure of input argument `state`.\n \"\"\"\n with tf.compat.v2.name_scope(name or 'VM.cond'):\n with _control_flow_v2():\n return tf.cond(pred=pred, true_fn=true_fn, false_fn=false_fn)\n\n def prepare_for_cond(self, state):\n \"\"\"Backend hook for preparing Tensors for `cond`.\n\n The TensorFlow backend uses this hook to apply `tf.convert_to_tensor` before\n entering the cond tree generated by `virtual_machine._staged_apply`. One\n could do this inside `cond`, but when this API element was defined there\n seemed to be a performance reason (for Eager mode) to do it once per cond\n tree rather than once per cond.\n\n Args:\n state: A state to be prepared for use in conditionals.\n\n Returns:\n state: The prepared state.\n \"\"\"\n if tf.executing_eagerly():\n # Eager doesn't need to pre-wrap the cond-carried state at all. 
Also, in\n # Eager, lazy initialization for register variables means that the state\n # may not always be correct to convert to a Tensor.\n return state\n with tf.compat.v2.name_scope('VM.prepare_for_cond'):\n state_flat = [tf.convert_to_tensor(value=x)\n for x in tf.compat.v2.nest.flatten(state)]\n return tf.compat.v2.nest.pack_sequence_as(state, state_flat)\n\n def where(self, condition, x, y, name=None):\n \"\"\"Implements a where selector for the TF backend.\n\n Attempts to match the dtypes of the value operands, if they are not yet both\n `Tensor`s.\n\n Args:\n condition: A boolean `Tensor`, either a vector having length\n `(x + y).shape[0]` or matching the full shape of `x + y`.\n x: `Tensor` of values to take when `condition` is `True`. Shape must match\n that of `y`.\n y: `Tensor` of values to take when `condition` is `False`. Shape must\n match that of `x`.\n name: Optional name for the op.\n\n Returns:\n masked: A broadcast-shaped `Tensor` where elements corresponding to `True`\n values of `condition` come from `x`, and others come from `y`.\n \"\"\"\n with tf.compat.v2.name_scope(name or 'VM.where'):\n condition = tf.convert_to_tensor(value=condition, name='condition')\n dtype = getattr(x, 'dtype', getattr(y, 'dtype', None))\n x = tf.convert_to_tensor(value=x, name='x', dtype=dtype)\n y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype)\n return tf.compat.v1.where(condition, x, y)\n\n def reduce_min(self, t, name=None):\n \"\"\"Implements reduce_min for TF backend.\"\"\"\n with tf.compat.v2.name_scope('VM.reduce_min'):\n return tf.reduce_min(input_tensor=t, name=name)\n\n def while_loop(self, cond, body, loop_vars, name=None):\n \"\"\"Implements while loops for TF backend.\"\"\"\n with tf.compat.v2.name_scope('VM.while_loop'):\n if tf.executing_eagerly():\n # The reg. variable optimization (see create_variable) may change loop\n # structure across iterations, which now triggers an exception for eager\n # tf.while_loop.\n while cond(*loop_vars):\n loop_vars = body(*loop_vars)\n return loop_vars\n with _control_flow_v2():\n return tf.while_loop(\n cond=cond,\n body=body,\n loop_vars=loop_vars,\n back_prop=False,\n name=name,\n parallel_iterations=self._while_parallel_iterations,\n maximum_iterations=self._while_maximum_iterations)\n\n def switch_case(self, branch_selector, branch_callables, name=None):\n \"\"\"Implements a switch (branch_selector) { case ... } construct.\"\"\"\n with tf.compat.v2.name_scope('VM.switch_case'):\n with _control_flow_v2():\n return tf.switch_case(branch_selector, branch_callables, name=name)\n\n def equal(self, t1, t2, name=None):\n \"\"\"Implements equality comparison for TF backend.\"\"\"\n with tf.compat.v2.name_scope('VM.equal'):\n return tf.equal(t1, t2, name=name)\n\n def not_equal(self, t1, t2, name=None):\n \"\"\"Implements inequality comparison for TF backend.\"\"\"\n with tf.compat.v2.name_scope('VM.not_equal'):\n return tf.not_equal(t1, t2, name=name)\n\n def any(self, t, name=None):\n with tf.compat.v2.name_scope(name or 'VM.any'):\n return tf.reduce_any(input_tensor=t)\n\n def wrap_straightline_callable(self, f):\n \"\"\"Method exists solely to be stubbed, i.e. 
for defun + XLA compile.\"\"\"\n if self._basic_block_xla_device is None:\n return f\n\n @tf.function\n def _f(*args):\n with tf.device(self._basic_block_xla_device):\n return xla.compile_nested_output(\n f, tf.xla.experimental.compile)(*args)\n\n def _ensure_regvars_initialized(t):\n if isinstance(t, RegisterTensorFlowVariable):\n return t.ensure_initialized()\n return t\n\n def _init_f(env_dict, *args):\n \"\"\"A RegisterTensorFlowVariable-initializing wrapper of `_f`.\"\"\"\n # We ensure RegisterTensorFlowVariable instances have a Tensor value when\n # using XLA and/or defun. Otherwise, we will trigger cache misses on the\n # tfe.defun or get issues around \"Cannot convert object of type [dtype] to\n # a Tensor\" (XLA). This corresponds with the optimization in\n # `create_variable` conditioned on Eager & VariableAllocation.REGISTER.\n env_dict = dict({k: instructions.pattern_map(\n _ensure_regvars_initialized, v, leaf_type=RegisterTensorFlowVariable)\n for k, v in six.iteritems(env_dict)})\n return _f(env_dict, *args)\n\n return _init_f\n\n\ndef _get_leftmost_dim_size(x, name=None):\n \"\"\"Returns the size of the left most dimension, statically if possible.\"\"\"\n with tf.compat.v2.name_scope(name or 'get_leftmost_dim_size'):\n x = tf.convert_to_tensor(value=x, name='x')\n if x.shape.ndims is None:\n # If tf.shape(x) is scalar, the [:1] will produce the empty list, whose\n # reduce_prod is 1 as desired. Otherwise, the [:1] will select the first\n # dimension, and reduce_prod will not alter it.\n return tf.reduce_prod(input_tensor=tf.shape(input=x)[:1])\n if x.shape.ndims == 0:\n return 1\n leftmost = tf.compat.dimension_value(x.shape[0])\n return leftmost if leftmost is not None else tf.shape(input=x)[0]\n" ]
[ [ "tensorflow.compat.v2.convert_to_tensor", "tensorflow.compat.v2.expand_dims", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.squeeze" ], [ "numpy.maximum", "numpy.abs", "numpy.reshape", "numpy.max", "numpy.exp" ], [ "numpy.random.randn", "tensorflow.test.main", "numpy.ones" ], [ "tensorflow.convert_to_tensor", "tensorflow.compat.v1.where", "tensorflow.norm", "tensorflow.math.is_finite", "tensorflow.not_equal", "tensorflow.concat", "tensorflow.shape", "tensorflow.zeros", "tensorflow.compat.dimension_value", "tensorflow.eye", "tensorflow.linalg.lstsq", "tensorflow.zeros_like", "tensorflow.linalg.matvec", "tensorflow.logical_not", "tensorflow.sqrt", "numpy.array", "tensorflow.reduce_all", "tensorflow.compat.v1.name_scope" ], [ "numpy.diag", "numpy.expand_dims", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "numpy.linspace", "tensorflow.compat.v2.get_static_value", "numpy.reshape", "numpy.eye", "numpy.squeeze", "numpy.stack", "numpy.ones", "numpy.float64", "numpy.random.uniform", "numpy.array", "numpy.meshgrid", "tensorflow.compat.v1.placeholder_with_default" ], [ "numpy.log", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.GradientTape", "numpy.ones", "tensorflow.compat.v2.linalg.slogdet", "numpy.random.rand", "tensorflow.compat.v2.TensorShape", "numpy.array", "tensorflow.compat.v1.placeholder_with_default" ], [ "tensorflow.compat.v2.transpose", "tensorflow.compat.v2.rank", "tensorflow.compat.v2.shape", "tensorflow.compat.v2.math.softmax", "tensorflow.compat.v2.identity", "tensorflow.compat.v2.math.log_softmax", "tensorflow.compat.v2.reduce_sum", "tensorflow.compat.v2.argmax", "tensorflow.compat.v2.name_scope", "tensorflow.compat.v2.random.categorical", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.math.log", "tensorflow.compat.v2.one_hot", "tensorflow.compat.v2.matmul", "tensorflow.compat.v2.reduce_logsumexp", "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.stop_gradient" ], [ "numpy.log", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.transpose", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.math.softmax", "numpy.zeros_like", "numpy.any", "tensorflow.compat.v2.matmul", "tensorflow.compat.v2.math.log", "numpy.exp", "numpy.array", "tensorflow.compat.v1.placeholder_with_default", "numpy.random.RandomState" ], [ "tensorflow.convert_to_tensor", "tensorflow.cond", "tensorflow.device", "tensorflow.concat", "tensorflow.zeros", "tensorflow.equal", "tensorflow.cast", "tensorflow.rank", "tensorflow.Graph", "tensorflow.while_loop", "tensorflow.compat.v2.name_scope", "tensorflow.gather", "tensorflow.compat.v1.where", "tensorflow.TensorShape", "tensorflow.executing_eagerly", "tensorflow.switch_case", "tensorflow.shape", "tensorflow.reduce_any", "tensorflow.identity", "tensorflow.compat.dimension_value", "tensorflow.zeros_like", "tensorflow.compat.v2.nest.pack_sequence_as", "tensorflow.one_hot", "tensorflow.compat.v2.nest.flatten", "tensorflow.not_equal", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.ones", "tensorflow.reduce_min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
PeterDeWeirdt/sgrna_modeler
[ "5c6cf0330cda35acf67d7e5f58d0b2ae29bf026e" ]
[ "sgrna_modeler/models.py" ]
[ "from sgrna_modeler import features as fe\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import ensemble\nfrom tensorflow import keras as k\nimport pandas as pd\nimport os\nfrom joblib import load\nimport sgrna_modeler.enzymes as en\n\ndef curr_path():\n return os.path.dirname(__file__)\n\ndef get_deepcpf1_weights():\n path = os.path.join(curr_path(), 'data/saved_models/Seq_deepCpf1_weights_tf.h5')\n return path\n\ndef get_enpam_gb():\n path = os.path.join(curr_path(), 'data/saved_models/enPAM_GB.joblib')\n return path\n\ndef build_kim2018(input_shape=(34, 4)):\n \"\"\"\n Build a convolutional neural network\n\n From:\n Kim, Hui Kwon, et al. \"Deep learning improves prediction of CRISPR–Cpf1 guide RNA activity.\" \\\n Nature biotechnology 36.3 (2018): 239.\n\n :param input_shape: guide length by nts (4)\n :type input_shape: tuple\n :return: CNN architecture\n :rtype: keras Model object\n \"\"\"\n \"\"\"Build a Convolutional neural network model from Kim 2018\n\n Parmeters\n ---------\n input_shape: tuple, optional (default (34, 4)\n shape of the first layer of the model\n\n Returns\n -------\n model: keras model object\n \"\"\"\n Input_SEQ = k.layers.Input(shape=input_shape)\n C1 = k.layers.Convolution1D(80, 5, activation='relu')(Input_SEQ)\n P1 = k.layers.AveragePooling1D(2)(C1)\n F = k.layers.Flatten()(P1)\n DO1 = k.layers.Dropout(0.3)(F)\n D1 = k.layers.Dense(80, activation='relu')(DO1)\n DO2 = k.layers.Dropout(0.3)(D1)\n D2 = k.layers.Dense(40, activation='relu')(DO2)\n DO3 = k.layers.Dropout(0.3)(D2)\n D3 = k.layers.Dense(40, activation='relu')(DO3)\n DO4 = k.layers.Dropout(0.3)(D3)\n Output = k.layers.Dense(1, activation='linear')(DO4)\n model = k.models.Model(inputs = Input_SEQ, outputs = Output)\n return model\n\nclass KerasSgrnaModel(object):\n \"\"\"This class is for creating, training, and predicting guide activity with a Keras model\n\n :param random_state: set random state in train/test split for reproducibility\n :type random_stat: int\n :param val_frac: amount of data to use for early stopping\n :type val_frac: float\n :param base_arc: base architecture to build neural network, defaults to build_kim2018\n :type base_arc: function, which takes an input shape and returns a keras model\n\n :Example:\n\n >>> from sgrna_modeler import datasets as da\n >>> from sgrna_modeler import models as sg\n >>> train_data = da.load_kim_2018_train()\n >>> train_model = sg.KerasSgrnaModel()\n >>> train_model.fit(train_data)\n >>> test_data = da.load_kim_2018_test()\n >>> test_predictions = train_model.predict(test_data)\n \"\"\"\n def __init__(self, random_state = 7, val_frac = 0.1, base_arc = None):\n \"\"\"Constructor\n \"\"\"\n self.base_name = 'Keras_CNN'\n self.val_frac = val_frac\n self.random_state = random_state\n if base_arc is None:\n self.base_arc = build_kim2018\n else:\n self.base_arc = base_arc\n self.train_dataset = None\n self.enzyme = None\n self.model = None\n self.model_history = None\n self.train_name = None\n\n def load_weights(self, weights, enzyme, name):\n \"\"\"Load previously trained weights\n\n :param enzyme: cas9 or cas12a\n :type enyme: dict\n :param weights: filepath to weights\n :type weights: str\n :param name: name of the model\n :type name:str\n \"\"\"\n if weights is None:\n weights = get_deepcpf1_weights()\n self.train_name = 'Seq-DeepCpf1'\n self.enzyme = en.cas12a\n else:\n self.train_name = name\n self.enzyme = enzyme\n model = self.base_arc(input_shape = (self.enzyme['context_length'],4))\n model.load_weights(weights)\n self.model 
= model\n return self\n\n def fit(self, train_dataset):\n \"\"\" Fit a model to the training data\n\n :param train_dataset: training data\n :type train_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: self\n \"\"\"\n self.train_dataset = train_dataset\n self.train_name = train_dataset.name\n self.enzyme = train_dataset.enzyme\n train_val_x, y = train_dataset.get_xy()\n encoded_train_val_x = fe.encode_seqs(train_val_x)\n train_x, val_x, train_y, val_y = train_test_split(encoded_train_val_x, y, test_size=self.val_frac,\n random_state=self.random_state)\n model = self.base_arc(input_shape = (self.enzyme['context_length'],4))\n model.compile(optimizer='RMSprop',loss='mse',metrics=['mae'])\n self.model_history = model.fit(train_x, train_y, epochs = 200,\n validation_data = (val_x, val_y),\n callbacks = [k.callbacks.EarlyStopping(patience=20,restore_best_weights=True),\n k.callbacks.History()],\n verbose = 0)\n self.model = model\n return self\n\n def predict(self, test_dataset):\n \"\"\"Predict activity of test data\n\n :param test_dataset: testing data\n :type test_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: dataframe of predictions and other meta information\n :rtype: pandas dataframe\n \"\"\"\n x, y = test_dataset.get_xy()\n encoded_x = fe.encode_seqs(x)\n predictions = self.model.predict(encoded_x)\n out_data = pd.DataFrame({'kmer': x, 'y': y})\n if test_dataset.group_column:\n out_data['group'] = test_dataset.data[test_dataset.group_column]\n else:\n out_data['group'] = ''\n out_data['prediction'] = predictions\n out_data['model'] = self.base_name\n out_data['training_data'] = self.train_name\n out_data['test_data'] = test_dataset.name\n return out_data\n\n def predict_seqs(self, seqs):\n \"\"\" Predict from sequences\n\n :param seqs: sequences to predict\n :return: numeric vector of predcitions\n \"\"\"\n featurized_x = fe.encode_seqs(seqs)\n predictions = self.model.predict(featurized_x).flatten()\n return predictions\n\nclass SklearnSgrnaModel(object):\n \"\"\"scikit-learn gradient boosting for modeling sgRNA activity\n\n :param random_state: set random state in train/test split for reproducibility\n :type random_state: int\n :param val_frac: amount of data to use for early stopping\n :type val_frac: float\n :param model: base model\n :type model: sklearn GradientBoostingRegressor\n :param features: features to model\n :type features: list\n\n :Example:\n >>> from sgrna_modeler import datasets as da\n >>> from sgrna_modeler import models as sg\n >>> train_model = sg.SklearnSgrnaModel()\n >>> rs2_data = da.load_doench_2016()\n >>> train_model.fit(rs2_data)\n \"\"\"\n def __init__(self, random_state = 7, val_frac = 0.1, model = None, features = None):\n \"\"\"Constructor\n \"\"\"\n self.base_name = 'Sklearn_GB'\n self.val_frac = val_frac\n self.random_state = random_state\n if model is None:\n # Gradient boosted model\n self.model = ensemble.GradientBoostingRegressor(n_iter_no_change=20,\n validation_fraction = self.val_frac,\n random_state=self.random_state)\n else:\n self.model = model\n if features is None:\n # Default features for RuleSet2\n self.features = ['Pos. Ind. 1mer', 'Pos. Ind. 2mer', 'Pos. Dep. 1mer', 'Pos. Dep. 
2mer', 'GC content', 'Tm']\n else:\n self.features = features\n self.enzyme = None\n self.train_dataset = None\n self.train_name = None\n\n def load_model(self, model, enzyme, name):\n \"\"\"Load previously trained model\n\n :param enzyme: cas9 or cas12a\n :type enyme: dict\n :param model: filepath to trained model\n :type model: str (*.joblib)\n :param name: name of the model\n :type name:str\n \"\"\"\n self.enzyme = enzyme\n self.model = load(model)\n self.train_name = name\n return self\n\n def fit(self, train_dataset):\n \"\"\" Fit a model to the training data\n\n :param train_dataset: training data\n :type train_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: self\n \"\"\"\n self.train_name = train_dataset.name\n self.enzyme = train_dataset.enzyme\n train_val_x, y = train_dataset.get_xy()\n featurized_train_val_x = fe.featurize_guides(train_val_x, features=self.features,\n guide_start = self.enzyme['guide_start'],\n guide_length = self.enzyme['guide_length'])\n self.model.fit(featurized_train_val_x, y)\n return self\n\n def predict(self, test_dataset):\n \"\"\"Predict activity of test data\n\n :param test_dataset: testing data\n :type test_dataset: :class:`sgrna_modeler.datasets.ActivityData`\n :return: dataframe of predictions and other meta information\n :rtype: pandas dataframe\n \"\"\"\n x, y = test_dataset.get_xy()\n featurized_x = fe.featurize_guides(x, features=self.features,\n guide_start=test_dataset.enzyme['guide_start'],\n guide_length=test_dataset.enzyme['guide_length'])\n predictions = self.model.predict(featurized_x)\n out_data = pd.DataFrame({'kmer': x, 'y': y})\n if test_dataset.group_column:\n out_data['group'] = test_dataset.data[test_dataset.group_column]\n else:\n out_data['group'] = ''\n out_data['prediction'] = predictions\n out_data['model'] = self.base_name\n out_data['training_data'] = self.train_name\n out_data['test_data'] = test_dataset.name\n return out_data\n\n def predict_seqs(self, seqs):\n \"\"\" Predict from sequences\n\n :param seqs: sequences to predict\n :return: numeric vector of predcitions\n \"\"\"\n featurized_x = fe.featurize_guides(seqs, features=self.features,\n guide_start=self.enzyme['guide_start'],\n guide_length=self.enzyme['guide_length'])\n predictions = self.model.predict(featurized_x)\n return predictions\n\n" ]
[ [ "tensorflow.keras.layers.AveragePooling1D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.ensemble.GradientBoostingRegressor", "tensorflow.keras.layers.Convolution1D", "tensorflow.keras.callbacks.History", "tensorflow.keras.layers.Dropout", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
kagemeka/competitive-programming
[ "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171", "c70fe481bcd518f507b885fc9234691d8ce63171" ]
[ "src/atcoder/abc212/g/sol_8.py", "src/atcoder/abc016/d/sol_1.py", "src/atcoder/abc216/f/sol_3.py", "src/atcoder/dp/q/sol_4.py", "src/atcoder/abc226/e/sol_0.py", "src/atcoder/typical_algorithm/e/sol_6.py", "src/atcoder/abc215/d/sol_4.py", "src/atcoder/abc014/c/sol_1.py", "src/atcoder/abc218/e/sol_0.py", "src/atcoder/abc073/b/sol_0.py", "src/atcoder/typical_algorithm/e/sol_5.py", "src/atcoder/abc213/c/sol_0.py" ]
[ "import typing\nimport numpy as np\nimport numba as nb\n\n\n\n\[email protected]\ndef find_divisors(\n n: int,\n) -> np.array:\n i = np.arange(int(n ** .5))\n i += 1\n i = i[n % i == 0]\n i = np.hstack((i, n // i))\n return np.unique(i)\n\n\n\[email protected]\ndef gpf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n if s[i] == i: s[i::i] = i\n return s\n\n\[email protected]\ndef lpf(\n n: int = 1 << 20,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0\n while i * i < n - 1:\n i += 1\n if s[i] != i: continue\n j = np.arange(i, n, i)\n s[j][s[j] == j] = i\n return s\n\n\[email protected]\ndef sieve_of_eratosthenes(\n n: int = 1 << 20,\n) -> np.array:\n return gpf(n) == np.arange(n)\n\n\n\[email protected]\ndef prime_numbers(\n n: int = 1 << 20,\n) -> np.array:\n s = sieve_of_eratosthenes(n)\n return np.flatnonzero(s)\n\n\n\[email protected]\ndef euler_totient(\n n: int,\n prime_numbers: np.array,\n) -> int:\n c = n\n for p in prime_numbers:\n if p * p > n: break\n if n % p: continue\n c = c // p * (p - 1)\n while not n % p: n //= p\n if n > 1:\n c = c // n * (n - 1)\n return c\n\n\[email protected](\n (nb.i8, ),\n cache=True,\n)\ndef solve(\n p: int,\n) -> typing.NoReturn:\n n = p - 1\n divs = find_divisors(n) \n pn = prime_numbers(1 << 20)\n mod = 998244353\n c = 1\n for d in divs:\n e = euler_totient(d, pn)\n e %= mod\n d %= mod\n c += e * d % mod \n c %= mod\n print(c)\n\n\ndef main() -> typing.NoReturn:\n p = int(input())\n solve(p)\n\n\nmain()", "import typing\nimport sys \nimport numpy as np \nimport numba as nb \n\n\[email protected] \ndef cross(x0: int, y0: int, x1: int, y1: int) -> int:\n return x0 * y1 - x1 * y0\n\n\n\[email protected]((nb.i8, ) * 4 + (nb.i8[:, :], ), cache=True)\ndef solve(\n x0: int,\n y0: int,\n x1: int,\n y1: int,\n xy: np.ndarray,\n) -> typing.NoReturn:\n n = len(xy)\n xy = np.vstack((xy, xy[:1]))\n x, y = xy[:, 0], xy[:, 1]\n x2, y2 = x[:-1], y[:-1]\n x3, y3 = x[1:], y[1:]\n p0 = cross(x1 - x0, y1 - y0, x2 - x0, y2 - y0) \n p0 *= cross(x1 - x0, y1 - y0, x3 - x0, y3 - y0)\n p1 = cross(x0 - x2, y0 - y2, x3 - x2, y3 - y2)\n p1 *= cross(x1 - x2, y1 - y2, x3 - x2, y3 - y2)\n cnt = 1 + np.count_nonzero((p0 < 0) & (p1 < 0)) // 2\n print(cnt)\n \n\n\ndef main() -> typing.NoReturn:\n x0, y0, x1, y1 = map(int, input().split())\n n = int(input())\n xy = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2)\n solve(x0, y0, x1, y1, xy)\n\n\nmain()", "import typing \nimport sys \nimport numpy as np \nimport numba as nb\n\n\n\[email protected](\n (nb.i8[:], nb.i8[:]),\n cache=True,\n)\ndef solve(\n a: np.array,\n b: np.array,\n) -> typing.NoReturn:\n mod = 998_244_353\n m = 1 << 13\n n = a.size\n idx = np.argsort(a)\n a, b = a[idx], b[idx]\n \n dp = np.zeros(m, dtype=np.int64)\n s = 0 \n dp[0] = 1\n for i in range(n):\n x, y = a[i], b[i]\n s += dp[:max(x - y + 1, 0)].sum()\n dp[:y - 1:-1] += dp[-y - 1::-1]\n dp %= mod\n print(s % mod)\n\n\ndef main() -> typing.NoReturn:\n n = int(input())\n a, b = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(2, n)\n solve(a, b)\n\n\nmain()\n\n", "import typing \nimport sys \nimport numpy as np\n\n\n\ndef set_val(\n a: np.array,\n i: int,\n x: int,\n) -> typing.NoReturn:\n while i < a.size:\n a[i] = max(a[i], x)\n i += i & -i\n\n\ndef get_mx(\n a: np.array,\n i: int,\n) -> int:\n mx = 0 \n while i > 0:\n mx = max(mx, a[i])\n i -= i & -i\n return mx\n\n\n\ndef solve(\n n: int,\n h: np.array,\n a: np.array,\n) -> 
typing.NoReturn:\n fw = np.zeros(\n n + 1,\n dtype=np.int64,\n )\n mx = 0 \n for i in range(n):\n v = get_mx(fw, h[i] - 1)\n set_val(fw, h[i], v + a[i])\n print(get_mx(fw, n))\n\n\n\ndef main() -> typing.NoReturn:\n n = int(input())\n h = np.array(\n sys.stdin.readline()\n .split(),\n dtype=np.int64,\n )\n a = np.array(\n sys.stdin.readline()\n .split(),\n dtype=np.int64,\n )\n solve(n, h, a)\n\n\n\nOJ = 'ONLINE_JUDGE'\nif sys.argv[-1] == OJ:\n from numba import njit, i8\n from numba.pycc import CC\n cc = CC('my_module')\n fn = solve\n sig = (i8, i8[:], i8[:])\n get_mx = njit(get_mx)\n set_val = njit(set_val)\n cc.export(\n fn.__name__,\n sig,\n )(fn)\n cc.compile()\n exit(0)\n\n\nfrom my_module import solve\nmain()", "import typing \nimport sys \nimport numpy as np \nimport numba as nb \n\n\n\n\[email protected]\ndef csgraph_to_directed(g: np.ndarray) -> np.ndarray:\n m = len(g)\n g = np.vstack((g, g))\n g[m:, :2] = g[m:, 1::-1]\n return g\n\n\[email protected]\ndef sort_csgraph(n: int, g: np.ndarray) -> typing.Tuple[(np.ndarray, ) * 3]:\n idx = g[:, 0] << 30 | g[:, 1]\n sort_idx = np.argsort(idx, kind='mergesort')\n g = g[sort_idx]\n original_idx = np.arange(len(g))[sort_idx]\n edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))\n return g, edge_idx, original_idx\n\n\n\[email protected]\ndef connected_components_dfs(n: int, g: np.ndarray):\n g = csgraph_to_directed(g)\n g, edge_idx, _ = sort_csgraph(n, g)\n label = np.full(n, -1, np.int64)\n l = 0\n for i in range(n):\n if label[i] != -1: continue\n label[i] = l\n st = [i]\n while st:\n u = st.pop()\n for v in g[edge_idx[u]:edge_idx[u + 1], 1]:\n if label[v] != -1: continue\n label[v] = l\n st.append(v)\n l += 1\n return label\n\n\n\[email protected]((nb.i8, nb.i8[:, :]), cache=True)\ndef solve(n: int, uv: np.ndarray) -> typing.NoReturn:\n m = len(uv)\n if n != m:\n print(0)\n return \n mod = 998_244_353\n edge_cnt = np.zeros(n, np.int64)\n for i in range(m):\n u, v = uv[i]\n edge_cnt[u] += 1\n edge_cnt[v] += 1\n label = connected_components_dfs(n, uv)\n\n k = label.max() + 1 \n edge_cnt2 = np.zeros(k, np.int64)\n for i in range(n):\n edge_cnt2[label[i]] += edge_cnt[i]\n \n b = np.bincount(label)\n if not np.all(b * 2 == edge_cnt2):\n print(0)\n return \n res = 1 \n for _ in range(label.max() + 1):\n res = res * 2 % mod\n print(res)\n \n\ndef main() -> typing.NoReturn:\n n, m = map(int, input().split())\n uv = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(m, 2) - 1\n solve(n, uv)\n\n\nmain()", "import typing \nimport sys \nimport numpy as np\nimport numba as nb \n\n\n\[email protected](\n (nb.i8[:, :], ),\n cache=True,\n)\ndef shortest_dist_floyd_warshall(\n g: np.ndarray,\n) -> np.ndarray:\n n = len(g)\n assert g.shape == (n, n)\n dist = g.copy()\n for i in range(n): dist[i, i] = 0\n for k in range(n):\n for i in range(n):\n for j in range(n):\n dist[i, j] = min(\n dist[i, j],\n dist[i, k] + dist[k, j],\n )\n return dist \n\n\[email protected](\n (nb.i8[:, :], nb.i8),\n cache=True,\n)\ndef csgraph_to_dense(\n csgraph: np.ndarray,\n n: int,\n) -> np.ndarray:\n m = len(csgraph)\n assert csgraph.shape == (m, 3)\n inf = 1 << 60\n g = np.full((n, n), inf, np.int64)\n for i in range(m):\n u, v, w = csgraph[i]\n g[u, v] = min(g[u, v], w)\n return g \n\n\[email protected](\n (nb.i8, nb.i8[:, :]),\n cache=True,\n)\ndef solve(\n n: int,\n uvc: np.ndarray,\n) -> typing.NoReturn:\n g = csgraph_to_dense(uvc, n)\n s = shortest_dist_floyd_warshall(g).sum()\n print(s)\n\n\ndef main() -> typing.NoReturn:\n n, m = map(int, 
input().split())\n uvc = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(m, 3)\n solve(n, uvc)\n\n\nmain()", "import typing \nimport sys\nimport numpy as np \nimport numba as nb\n\n\n\[email protected]\ndef sieve_of_eratosthenes(\n n: int,\n) -> np.array:\n return gpf(n) == np.arange(n)\n\n\[email protected]\ndef gpf(\n n: int,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0 \n while i * i < n - 1:\n i += 1\n if s[i] == i: s[i::i] = i\n return s\n\n\[email protected]\ndef lpf(\n n: int,\n) -> np.array:\n s = np.arange(n)\n s[:2] = -1\n i = 0 \n while i * i < n - 1:\n i += 1\n if s[i] != i: continue\n j = np.arange(i, n, i)\n s[j[s[j] == j]] = i\n return s\n\n\n\[email protected]\ndef prime_numbers(\n n: int=1 << 20,\n) -> np.array:\n return np.flatnonzero(\n sieve_of_eratosthenes(n),\n )\n\n\n\[email protected]\ndef prime_factorize(\n n: int,\n pn: np.array,\n) -> np.array:\n p, c = [], []\n for i in pn:\n if i * i > n: break\n if n % i: continue\n p.append(i)\n c.append(0)\n while n % i == 0:\n n //= i\n c[-1] += 1\n if n > 1: \n p.append(n)\n c.append(1)\n return np.vstack((\n np.array(p),\n np.array(c),\n )).T\n\n\[email protected]\ndef prime_factorize_factorial(\n n: int,\n pn: np.array,\n) -> np.array:\n prime, cnt = [], []\n idx = np.full(n + 1, -1, dtype=np.int32)\n for i in range(n + 1):\n for p, c in prime_factorize(i, pn):\n i = idx[p]\n if i != -1:\n cnt[i] += c\n continue\n idx[p] = len(prime)\n prime.append(p)\n cnt.append(c)\n return np.vstack((\n np.array(prime),\n np.array(cnt),\n )).T \n\n\n\[email protected](\n (nb.i8[:], nb.i8),\n cache=True,\n)\ndef solve(\n a: np.array,\n m: int,\n) -> typing.NoReturn:\n n = a.size\n pn = prime_numbers(1 << 20)\n\n p = np.zeros(1 << 20, dtype=np.bool8)\n for i in range(n):\n x = a[i]\n res = prime_factorize(x, pn)\n for j in res.T[0]: p[j] = True\n \n s = np.ones(1 + m, dtype=np.bool8)\n s[0] = False\n for i in range(1 + m):\n if not p[i] or not s[i]: continue\n s[i::i] = False\n print(s.sum())\n for i in range(1 + m):\n if s[i]: print(i)\n\n\ndef main() -> typing.NoReturn:\n n, m = map(int, input().split())\n a = np.array(\n sys.stdin.readline().split(),\n dtype=np.int64,\n )\n solve(a, m)\n\n\nmain()", "import typing \nimport sys \nimport numpy as np \nimport numba as nb \n\n\n\ndef solve(ab: np.ndarray) -> typing.NoReturn:\n m = 1 << 20\n c = np.zeros(m, np.int64)\n a, b = ab.T \n np.add.at(c, a, 1)\n np.add.at(c, b + 1, -1)\n np.cumsum(c, out=c)\n print(c.max())\n\n\ndef main() -> typing.NoReturn:\n n = int(input())\n ab = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2)\n solve(ab)\n\n\nmain()", "import typing \nimport sys\nimport numpy as np \nimport numba as nb \n\n\n\n\[email protected]\ndef uf_build(\n n: int,\n) -> np.ndarray:\n return np.full(n, -1, np.int64)\n\n\[email protected]\ndef uf_find(\n uf: np.ndarray,\n u: int,\n) -> int:\n if uf[u] < 0: return u\n uf[u] = uf_find(uf, uf[u])\n return uf[u]\n\n\[email protected]\ndef uf_unite(\n uf: np.ndarray,\n u: int,\n v: int,\n) -> typing.NoReturn:\n u = uf_find(uf, u)\n v = uf_find(uf, v)\n if u == v: return \n if uf[u] > uf[v]: u, v = v, u\n uf[u] += uf[v]\n uf[v] = u\n\n\n\[email protected](\n (nb.i8, nb.i8[:, :]),\n cache=True,\n)\ndef mst_kruskal(\n n: int,\n csgraph: np.ndarray,\n) -> np.ndarray:\n m = len(csgraph)\n assert csgraph.shape == (m, 3)\n sort_idx = np.argsort(csgraph[:, 2], kind='mergesort')\n csgraph = csgraph[sort_idx]\n uf = uf_build(n)\n\n added_edge_indices = np.zeros(m, np.int64)\n idx_to_add = 0 \n 
def add_edge(i):\n nonlocal idx_to_add\n added_edge_indices[idx_to_add] = i\n idx_to_add += 1\n\n for i in range(m):\n u, v, _ = csgraph[i]\n if uf_find(uf, u) == uf_find(uf, v): continue\n uf_unite(uf, u, v)\n add_edge(i)\n \n return added_edge_indices[:idx_to_add]\n\n # return csgraph[added_edge_indices[:idx_to_add]]\n\n\[email protected](\n (nb.i8, nb.i8[:, :]),\n cache=True,\n)\ndef solve(\n n: int,\n abc: np.ndarray,\n) -> typing.NoReturn:\n # mst = mst_kruskal(n, abc)\n # print(abc[:, 2].sum() - mst[:, 2].sum())\n sort_idx = np.argsort(abc[:, 2], kind='mergesort')\n abc = abc[sort_idx]\n edge_indices = mst_kruskal(n, abc)\n g = abc.copy()\n g[edge_indices, 2] = 0\n # print(g)\n print(g[g[:, 2] >= 0][:, 2].sum())\n\n\ndef main() -> typing.NoReturn:\n n, m = map(int, input().split())\n abc = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(m, 3)\n abc[:, :2] -= 1\n solve(n, abc)\n\n\nmain()", "import typing \nimport sys \nimport numpy as np \n\n\ndef main() -> typing.NoReturn:\n n = int(input())\n l, r = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2).T\n\n print(np.sum(r - l + 1))\n\nmain()", "import typing \nimport sys \nimport numpy as np\nimport numba as nb \nimport heapq\n\n\n\[email protected](\n (nb.i8, nb.i8[:, :]),\n cache=True,\n)\ndef shortest_dist_floyd_warshall(\n n: int,\n g: np.ndarray,\n) -> np.ndarray:\n m = len(g)\n assert g.shape == (m, 3)\n inf = 1 << 60\n assert inf > g[:, 2].max() * n\n dist = np.full((n, n), inf, np.int64)\n for i in range(m):\n u, v, w = g[i]\n dist[u, v] = min(dist[u, v], w)\n for i in range(n): dist[i, i] = 0\n for k in range(n):\n for i in range(n):\n for j in range(n):\n dist[i, j] = min(\n dist[i, j],\n dist[i, k] + dist[k, j],\n )\n return dist \n\n\[email protected](\n (nb.i8, nb.i8[:, :]),\n cache=True,\n)\ndef solve(\n n: int,\n uvc: np.ndarray,\n) -> typing.NoReturn:\n s = shortest_dist_floyd_warshall(n, uvc).sum()\n print(s)\n\n\ndef main() -> typing.NoReturn:\n n, m = map(int, input().split())\n uvc = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(m, 3)\n solve(n, uvc)\n\n\nmain()", "import typing \nimport sys \nimport numpy as np\n\n\n\nimport numpy as np\nimport typing \n\n\n\nclass CompressArray():\n def retrieve(\n self,\n i: int,\n ) -> int:\n return self.__v[i]\n \n \n def __call__(\n self,\n a: np.array,\n ) -> np.array:\n v = np.unique(a)\n self.__v = v\n i = np.searchsorted(v, a)\n return i\n\n\ndef main():\n h, w, n = map(\n int, input().split(),\n )\n a, b = np.array(\n sys.stdin.read().split(),\n dtype=np.int64,\n ).reshape(n, 2).T \n \n compress = CompressArray()\n a = compress(a) + 1\n b = compress(b) + 1\n for a, b in zip(a, b):\n print(a, b)\n\n\nmain()\n" ]
[ [ "numpy.hstack", "numpy.flatnonzero", "numpy.arange", "numpy.unique" ], [ "numpy.count_nonzero", "numpy.vstack" ], [ "numpy.argsort", "numpy.zeros" ], [ "numpy.zeros" ], [ "numpy.arange", "numpy.full", "numpy.all", "numpy.bincount", "numpy.argsort", "numpy.zeros", "numpy.vstack" ], [ "numpy.full" ], [ "numpy.arange", "numpy.ones", "numpy.full", "numpy.array", "numpy.zeros" ], [ "numpy.add.at", "numpy.zeros", "numpy.cumsum" ], [ "numpy.argsort", "numpy.zeros", "numpy.full" ], [ "numpy.sum" ], [ "numpy.full" ], [ "numpy.searchsorted", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tobiasmaier/pytorch-lightning
[ "7f352cb69a8202e3f829419657597697ca5d99e2", "7f352cb69a8202e3f829419657597697ca5d99e2", "7f352cb69a8202e3f829419657597697ca5d99e2" ]
[ "pytorch_lightning/core/lightning.py", "pytorch_lightning/accelerators/dp_accelerator.py", "pytorch_lightning/trainer/connectors/checkpoint_connector.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"nn.Module with additional great features.\"\"\"\n\nimport collections\nimport copy\nimport inspect\nimport os\nimport re\nimport tempfile\nfrom abc import ABC\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom torch import ScriptModule, Tensor\nfrom torch.nn import Module\nfrom torch.optim.optimizer import Optimizer\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.core.grads import GradInformation\nfrom pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks\nfrom pytorch_lightning.core.memory import ModelSummary\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.utilities import rank_zero_warn, TPU_AVAILABLE\nfrom pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args\n\nif TPU_AVAILABLE:\n import torch_xla.core.xla_model as xm\n\n\nclass LightningModule(\n ABC,\n DeviceDtypeModuleMixin,\n GradInformation,\n ModelIO,\n ModelHooks,\n DataHooks,\n CheckpointHooks,\n Module,\n):\n # Below is for property support of JIT in PyTorch 1.7\n # since none of them is important when using JIT, we are going to ignore them.\n __jit_unused_properties__ = [\n \"datamodule\",\n \"example_input_array\",\n \"hparams\",\n \"hparams_initial\",\n \"on_gpu\",\n \"current_epoch\",\n \"global_step\",\n ] + DeviceDtypeModuleMixin.__jit_unused_properties__\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/\n # torch/nn/modules/module.py#L227)\n torch._C._log_api_usage_once(f\"lightning.module.{self.__class__.__name__}\")\n\n self.exp_save_path = None\n\n self.loaded_optimizer_states_dict = {}\n\n #: Pointer to the trainer object\n self.trainer = None\n\n #: Pointer to the logger object\n self.logger = None\n\n #: True if using dp\n self.use_dp = False\n\n #: True if using ddp\n self.use_ddp = False\n\n #: True if using ddp2\n self.use_ddp2 = False\n\n # True if on tpu\n self.use_tpu = False\n\n #: True if using amp\n self.use_amp = False\n\n #: The precision used\n self.precision = 32\n\n # optionally can be set by user\n self._example_input_array = None\n self._datamodule = None\n self._results: Optional[Result] = None\n self._current_fx_name = ''\n self._running_manual_backward = False\n self._current_hook_fx_name = None\n self._current_dataloader_idx = None\n self._automatic_optimization: bool = True\n\n def optimizers(self, use_pl_optimizer: bool = True) -> 
Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:\n if use_pl_optimizer:\n opts = list(self.trainer.lightning_optimizers.values())\n else:\n opts = self.trainer.optimizers\n\n # single optimizer\n if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):\n return opts[0]\n # multiple opts\n return opts\n\n @property\n def example_input_array(self) -> Any:\n return self._example_input_array\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch\"\"\"\n return self.trainer.current_epoch if self.trainer else 0\n\n @property\n def global_step(self) -> int:\n \"\"\"Total training batches seen across all epochs\"\"\"\n return self.trainer.global_step if self.trainer else 0\n\n @example_input_array.setter\n def example_input_array(self, example: Any) -> None:\n self._example_input_array = example\n\n @property\n def datamodule(self) -> Any:\n return self._datamodule\n\n @datamodule.setter\n def datamodule(self, datamodule: Any) -> None:\n self._datamodule = datamodule\n\n @property\n def on_gpu(self):\n \"\"\"\n True if your model is currently running on GPUs.\n Useful to set flags around the LightningModule for different CPU vs GPU behavior.\n \"\"\"\n return self.device.type == \"cuda\"\n\n @property\n def automatic_optimization(self) -> bool:\n \"\"\"\n If False you are responsible for calling .backward, .step, zero_grad.\n \"\"\"\n return self._automatic_optimization\n\n @automatic_optimization.setter\n def automatic_optimization(self, automatic_optimization: bool) -> None:\n self._automatic_optimization = automatic_optimization\n\n def print(self, *args, **kwargs) -> None:\n r\"\"\"\n Prints only from process 0. Use this in any distributed mode to log only once.\n\n Args:\n *args: The thing to print. Will be passed to Python's built-in print function.\n **kwargs: Will be passed to Python's built-in print function.\n\n Example:\n\n .. code-block:: python\n\n def forward(self, x):\n self.print(x, 'in forward')\n\n \"\"\"\n if self.trainer.is_global_zero:\n print(*args, **kwargs)\n\n def log(\n self,\n name: str,\n value: Any,\n prog_bar: bool = False,\n logger: bool = True,\n on_step: Optional[bool] = None,\n on_epoch: Optional[bool] = None,\n reduce_fx: Callable = torch.mean,\n tbptt_reduce_fx: Callable = torch.mean,\n tbptt_pad_token: int = 0,\n enable_graph: bool = False,\n sync_dist: bool = False,\n sync_dist_op: Union[Any, str] = 'mean',\n sync_dist_group: Optional[Any] = None,\n ):\n \"\"\"\n Log a key, value\n\n Example::\n\n self.log('train_loss', loss)\n\n The default behavior per hook is as follows\n\n .. csv-table:: ``*`` also applies to the test loop\n :header: \"LightningMoule Hook\", \"on_step\", \"on_epoch\", \"prog_bar\", \"logger\"\n :widths: 20, 10, 10, 10, 10\n\n \"training_step\", \"T\", \"F\", \"F\", \"T\"\n \"training_step_end\", \"T\", \"F\", \"F\", \"T\"\n \"training_epoch_end\", \"F\", \"T\", \"F\", \"T\"\n \"validation_step*\", \"F\", \"T\", \"F\", \"T\"\n \"validation_step_end*\", \"F\", \"T\", \"F\", \"T\"\n \"validation_epoch_end*\", \"F\", \"T\", \"F\", \"T\"\n\n Args:\n name: key name\n value: value name\n prog_bar: if True logs to the progress bar\n logger: if True logs to the logger\n on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step\n on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step\n reduce_fx: reduction function over step values for end of epoch. 
Torch.mean by default\n tbptt_reduce_fx: function to reduce on truncated back prop\n tbptt_pad_token: token to use for padding\n enable_graph: if True, will not auto detach the graph\n sync_dist: if True, reduces the metric across GPUs/TPUs\n sync_dist_op: the op to sync across GPUs/TPUs\n sync_dist_group: the ddp group\n \"\"\"\n if self._results is not None:\n # in any epoch end can't log step metrics (only epoch metric)\n if 'epoch_end' in self._current_fx_name and on_step:\n m = f'on_step=True cannot be used on {self._current_fx_name} method'\n raise MisconfigurationException(m)\n\n if 'epoch_end' in self._current_fx_name and on_epoch is False:\n m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'\n raise MisconfigurationException(m)\n\n # add log_dict\n # TODO: if logged twice fail with crash\n\n # set the default depending on the fx_name\n on_step = self.__auto_choose_log_on_step(on_step)\n on_epoch = self.__auto_choose_log_on_epoch(on_epoch)\n\n if self._current_hook_fx_name is not None:\n self.trainer.logger_connector.check_logging_in_callbacks(\n self._current_hook_fx_name,\n on_step=on_step,\n on_epoch=on_epoch\n )\n\n # make sure user doesn't introduce logic for multi-dataloaders\n if \"/dataloader_idx_\" in name:\n raise MisconfigurationException(\n f\"Logged key: {name} should not contain information about dataloader_idx.\")\n\n accelerator = self.trainer.accelerator_backend\n\n self._results.log(\n name,\n value,\n prog_bar,\n logger,\n on_step,\n on_epoch,\n reduce_fx,\n tbptt_reduce_fx,\n tbptt_pad_token,\n enable_graph,\n sync_dist,\n sync_dist_op,\n sync_dist_group,\n accelerator.sync_tensor,\n self._current_dataloader_idx,\n self.device,\n )\n\n def log_dict(\n self,\n dictionary: dict,\n prog_bar: bool = False,\n logger: bool = True,\n on_step: Optional[bool] = None,\n on_epoch: Optional[bool] = None,\n reduce_fx: Callable = torch.mean,\n tbptt_reduce_fx: Callable = torch.mean,\n tbptt_pad_token: int = 0,\n enable_graph: bool = False,\n sync_dist: bool = False,\n sync_dist_op: Union[Any, str] = 'mean',\n sync_dist_group: Optional[Any] = None,\n ):\n \"\"\"\n Log a dictonary of values at once\n\n Example::\n\n values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}\n self.log_dict(values)\n\n Args:\n dictionary: key value pairs (str, tensors)\n prog_bar: if True logs to the progress base\n logger: if True logs to the logger\n on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step\n on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step\n reduce_fx: reduction function over step values for end of epoch. 
Torch.mean by default\n tbptt_reduce_fx: function to reduce on truncated back prop\n tbptt_pad_token: token to use for padding\n enable_graph: if True, will not auto detach the graph\n sync_dist: if True, reduces the metric across GPUs/TPUs\n sync_dist_op: the op to sync across GPUs/TPUs\n sync_dist_group: the ddp group:\n \"\"\"\n for k, v in dictionary.items():\n self.log(\n name=k,\n value=v,\n prog_bar=prog_bar,\n logger=logger,\n on_step=on_step,\n on_epoch=on_epoch,\n reduce_fx=reduce_fx,\n enable_graph=enable_graph,\n sync_dist=sync_dist,\n sync_dist_group=sync_dist_group,\n sync_dist_op=sync_dist_op,\n tbptt_pad_token=tbptt_pad_token,\n tbptt_reduce_fx=tbptt_reduce_fx,\n )\n\n def write_prediction(self, name, value, filename='predictions.pt'):\n self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)\n\n def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):\n for k, v in predictions_dict.items():\n self.write_prediction(k, v, filename)\n\n def __auto_choose_log_on_step(self, on_step):\n if on_step is None:\n if self._current_fx_name in {'training_step', 'training_step_end'}:\n on_step = True\n elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',\n 'evaluation_epoch_end', 'training_epoch_end'}:\n on_step = False\n else:\n on_step = False\n\n return on_step\n\n def __auto_choose_log_on_epoch(self, on_epoch):\n if on_epoch is None:\n if self._current_fx_name in {'training_step', 'training_step_end'}:\n on_epoch = False\n elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',\n 'evaluation_epoch_end', 'training_epoch_end'}:\n on_epoch = True\n else:\n on_epoch = True\n\n return on_epoch\n\n def all_gather(self, tensor: Union[torch.Tensor], group: Optional[Any] = None, sync_grads: bool = False):\n r\"\"\"\n Allows users to call ``self.all_gather()`` from the LightningModule, thus making\n the ```all_gather``` operation accelerator agnostic.\n\n ```all_gather``` is a function provided by accelerators to gather a tensor from several\n distributed processes\n\n Args:\n tensor: tensor of shape (batch, ...)\n group: the process group to gather results from. Defaults to all processes (world)\n sync_grads: flag that allows users to synchronize gradients for all_gather op\n\n Return:\n A tensor of shape (world_size, batch, ...)\n \"\"\"\n return self.trainer.accelerator_backend.all_gather(tensor, group=group, sync_grads=sync_grads)\n\n def forward(self, *args, **kwargs):\n r\"\"\"\n Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define\n the operations you want to use for prediction (i.e.: on a server or as a feature extractor).\n\n Normally you'd call ``self()`` from your :meth:`training_step` method.\n This makes it easy to write a complex system for training with the outputs\n you'd want in a prediction setting.\n\n You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful\n when using the module outside Lightning in a production setting.\n\n Args:\n *args: Whatever you decide to pass into the forward method.\n **kwargs: Keyword arguments are also possible.\n\n Return:\n Predicted output\n\n Examples:\n .. 
code-block:: python\n\n # example if we were using this model as a feature extractor\n def forward(self, x):\n feature_maps = self.convnet(x)\n return feature_maps\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n feature_maps = self(x)\n logits = self.classifier(feature_maps)\n\n # ...\n return loss\n\n # splitting it this way allows model to be used a feature extractor\n model = MyModelAbove()\n\n inputs = server.get_request()\n results = model(inputs)\n server.write_results(results)\n\n # -------------\n # This is in stark contrast to torch.nn.Module where normally you would have this:\n def forward(self, batch):\n x, y = batch\n feature_maps = self.convnet(x)\n logits = self.classifier(feature_maps)\n return logits\n\n \"\"\"\n return super().forward(*args, **kwargs)\n\n def training_step(self, *args, **kwargs):\n r\"\"\"\n Here you compute and return the training loss and some additional metrics for e.g.\n the progress bar or logger.\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): Integer displaying index of this batch\n optimizer_idx (int): When using multiple optimizers, this argument will also be present.\n hiddens(:class:`~torch.Tensor`): Passed in if\n :paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.\n\n Return:\n Any of.\n\n - :class:`~torch.Tensor` - The loss tensor\n - `dict` - A dictionary. Can include any keys, but must include the key 'loss'\n - `None` - Training will skip to the next batch\n\n In this step you'd normally do the forward pass and calculate the loss for a batch.\n You can also do fancier things like multiple forward passes or something model specific.\n\n Example::\n\n def training_step(self, batch, batch_idx):\n x, y, z = batch\n out = self.encoder(x)\n loss = self.loss(out, x)\n return loss\n\n If you define multiple optimizers, this step will be called with an additional\n ``optimizer_idx`` parameter.\n\n .. code-block:: python\n\n # Multiple optimizers (e.g.: GANs)\n def training_step(self, batch, batch_idx, optimizer_idx):\n if optimizer_idx == 0:\n # do training_step with encoder\n if optimizer_idx == 1:\n # do training_step with decoder\n\n\n If you add truncated back propagation through time you will also get an additional\n argument with the hidden states of the previous step.\n\n .. code-block:: python\n\n # Truncated back-propagation through time\n def training_step(self, batch, batch_idx, hiddens):\n # hiddens are the hidden states from the previous truncated backprop step\n ...\n out, hiddens = self.lstm(data, hiddens)\n ...\n return {'loss': loss, 'hiddens': hiddens}\n\n Note:\n The loss value shown in the progress bar is smoothed (averaged) over the last values,\n so it differs from the actual loss returned in train/validation step.\n \"\"\"\n rank_zero_warn(\n \"`training_step` must be implemented to be used with the Lightning Trainer\"\n )\n\n def training_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when training with dp or ddp2 because :meth:`training_step`\n will operate on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code\n\n .. 
code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]\n training_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in `training_step` for each batch part.\n\n Return:\n Anything\n\n When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:\n\n .. code-block:: python\n\n def training_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n\n # softmax uses only a portion of the batch in the denomintaor\n loss = self.softmax(out)\n loss = nce_loss(loss)\n return loss\n\n If you wish to do something with all the parts of the batch, then use this method to do it:\n\n .. code-block:: python\n\n def training_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self.encoder(x)\n return {'pred': out}\n\n def training_step_end(self, training_step_outputs):\n gpu_0_pred = training_step_outputs[0]['pred']\n gpu_1_pred = training_step_outputs[1]['pred']\n gpu_n_pred = training_step_outputs[n]['pred']\n\n # this softmax now uses the full batch\n loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])\n return loss\n\n See Also:\n See the :ref:`multi_gpu` guide for more details.\n \"\"\"\n\n def training_epoch_end(self, outputs: List[Any]) -> None:\n \"\"\"\n Called at the end of the training epoch with the outputs of all training steps.\n Use this in case you need to do something with all the outputs for every training_step.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n train_outs = []\n for train_batch in train_data:\n out = training_step(train_batch)\n train_outs.append(out)\n training_epoch_end(train_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`training_step`, or if there are\n multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n Return:\n None\n\n Note:\n If this method is not overridden, this won't be called.\n\n Example::\n\n def training_epoch_end(self, training_step_outputs):\n # do something with all training_step outputs\n return result\n\n With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each training step for that dataloader.\n\n .. code-block:: python\n\n def training_epoch_end(self, training_step_outputs):\n for out in training_step_outputs:\n # do something here\n \"\"\"\n\n def validation_step(self, *args, **kwargs):\n r\"\"\"\n Operates on a single batch of data from the validation set.\n In this step you'd might generate examples or calculate anything of interest like accuracy.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n val_outs = []\n for val_batch in val_data:\n out = validation_step(val_batch)\n val_outs.append(out)\n validation_epoch_end(val_outs)\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): The index of this batch\n dataloader_idx (int): The index of the dataloader that produced this batch\n (only if multiple val dataloaders used)\n\n Return:\n Any of.\n\n - Any object or value\n - `None` - Validation will skip to the next batch\n\n .. 
code-block:: python\n\n # pseudocode of order\n out = validation_step()\n if defined('validation_step_end'):\n out = validation_step_end(out)\n out = validation_epoch_end(out)\n\n\n .. code-block:: python\n\n # if you have one val dataloader:\n def validation_step(self, batch, batch_idx)\n\n # if you have multiple val dataloaders:\n def validation_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. code-block:: python\n\n # CASE 1: A single validation dataset\n def validation_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n self.log_dict({'val_loss': loss, 'val_acc': val_acc})\n\n If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.\n\n .. code-block:: python\n\n # CASE 2: multiple validation dataloaders\n def validation_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to validate you don't need to implement this method.\n\n Note:\n When the :meth:`validation_step` is called, the model has been put in eval mode\n and PyTorch gradients have been disabled. At the end of validation,\n the model goes back to training mode and gradients are enabled.\n \"\"\"\n\n def validation_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when validating with dp or ddp2 because :meth:`validation_step`\n will operate on only part of the batch. However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]\n validation_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`validation_step`\n for each batch part.\n\n Return:\n None or anything\n\n .. code-block:: python\n\n # WITHOUT validation_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self.encoder(x)\n loss = self.softmax(out)\n loss = nce_loss(loss)\n self.log('val_loss', loss)\n\n # --------------\n # with validation_step_end to do softmax over the full batch\n def validation_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n return out\n\n def validation_step_end(self, val_step_outputs):\n for out in val_step_outputs:\n # do something with these\n\n See Also:\n See the :ref:`multi_gpu` guide for more details.\n \"\"\"\n\n def validation_epoch_end(self, outputs: List[Any]) -> None:\n \"\"\"\n Called at the end of the validation epoch with the outputs of all validation steps.\n\n .. 
code-block:: python\n\n # the pseudocode for these calls\n val_outs = []\n for val_batch in val_data:\n out = validation_step(val_batch)\n val_outs.append(out)\n validation_epoch_end(val_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`validation_step`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader.\n\n Return:\n None\n\n Note:\n If you didn't define a :meth:`validation_step`, this won't be called.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def validation_epoch_end(self, val_step_outputs):\n for out in val_step_outputs:\n # do something\n\n With multiple dataloaders, `outputs` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each validation step for that dataloader.\n\n .. code-block:: python\n\n def validation_epoch_end(self, outputs):\n for dataloader_output_result in outputs:\n dataloader_outs = dataloader_output_result.dataloader_i_outputs\n\n self.log('final_metric', final_value)\n \"\"\"\n\n def test_step(self, *args, **kwargs):\n r\"\"\"\n Operates on a single batch of data from the test set.\n In this step you'd normally generate examples or calculate anything of interest\n such as accuracy.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):\n The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.\n batch_idx (int): The index of this batch.\n dataloader_idx (int): The index of the dataloader that produced this batch\n (only if multiple test dataloaders used).\n\n Return:\n Any of.\n\n - Any object or value\n - `None` - Testing will skip to the next batch\n\n .. code-block:: python\n\n # if you have one test dataloader:\n def test_step(self, batch, batch_idx)\n\n # if you have multiple test dataloaders:\n def test_step(self, batch, batch_idx, dataloader_idx)\n\n Examples:\n .. code-block:: python\n\n # CASE 1: A single test dataset\n def test_step(self, batch, batch_idx):\n x, y = batch\n\n # implement your own\n out = self(x)\n loss = self.loss(out, y)\n\n # log 6 example images\n # or generated text... or whatever\n sample_imgs = x[:6]\n grid = torchvision.utils.make_grid(sample_imgs)\n self.logger.experiment.add_image('example_images', grid, 0)\n\n # calculate acc\n labels_hat = torch.argmax(out, dim=1)\n test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n\n # log the outputs!\n self.log_dict({'test_loss': loss, 'test_acc': test_acc})\n\n If you pass in multiple test dataloaders, :meth:`test_step` will have an additional\n argument.\n\n .. code-block:: python\n\n # CASE 2: multiple test dataloaders\n def test_step(self, batch, batch_idx, dataloader_idx):\n # dataloader_idx tells you which dataset this is.\n\n Note:\n If you don't need to test you don't need to implement this method.\n\n Note:\n When the :meth:`test_step` is called, the model has been put in eval mode and\n PyTorch gradients have been disabled. At the end of the test epoch, the model goes back\n to training mode and gradients are enabled.\n \"\"\"\n\n def test_step_end(self, *args, **kwargs):\n \"\"\"\n Use this when testing with dp or ddp2 because :meth:`test_step` will operate\n on only part of the batch. 
However, this is still optional\n and only needed for things like softmax or NCE loss.\n\n Note:\n If you later switch to ddp or some other mode, this will still be called\n so that you don't have to change your code.\n\n .. code-block:: python\n\n # pseudocode\n sub_batches = split_batches_for_dp(batch)\n batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]\n test_step_end(batch_parts_outputs)\n\n Args:\n batch_parts_outputs: What you return in :meth:`test_step` for each batch part.\n\n Return:\n None or anything\n\n .. code-block:: python\n\n # WITHOUT test_step_end\n # if used in DP or DDP2, this batch is 1/num_gpus large\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self(x)\n loss = self.softmax(out)\n self.log('test_loss', loss)\n\n # --------------\n # with test_step_end to do softmax over the full batch\n def test_step(self, batch, batch_idx):\n # batch is 1/num_gpus big\n x, y = batch\n\n out = self.encoder(x)\n return out\n\n def test_step_end(self, output_results):\n # this out is now the full size of the batch\n all_test_step_outs = output_results.out\n loss = nce_loss(all_test_step_outs)\n self.log('test_loss', loss)\n\n See Also:\n See the :ref:`multi_gpu` guide for more details.\n \"\"\"\n\n def test_epoch_end(\n self, outputs: List[Any]\n ) -> None:\n \"\"\"\n Called at the end of a test epoch with the output of all test steps.\n\n .. code-block:: python\n\n # the pseudocode for these calls\n test_outs = []\n for test_batch in test_data:\n out = test_step(test_batch)\n test_outs.append(out)\n test_epoch_end(test_outs)\n\n Args:\n outputs: List of outputs you defined in :meth:`test_step_end`, or if there\n are multiple dataloaders, a list containing a list of outputs for each dataloader\n\n Return:\n None\n\n Note:\n If you didn't define a :meth:`test_step`, this won't be called.\n\n Examples:\n With a single dataloader:\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n # do something with the outputs of all test batches\n all_test_preds = test_step_outputs.predictions\n\n some_result = calc_all_results(all_test_preds)\n self.log(some_result)\n\n With multiple dataloaders, `outputs` will be a list of lists. The outer list contains\n one entry per dataloader, while the inner list contains the individual outputs of\n each test step for that dataloader.\n\n .. code-block:: python\n\n def test_epoch_end(self, outputs):\n final_value = 0\n for dataloader_outputs in outputs:\n for test_step_out in dataloader_outputs:\n # do something\n final_value += test_step_out\n\n self.log('final_metric', final_value)\n \"\"\"\n\n def configure_optimizers(\n self,\n ):\n r\"\"\"\n Choose what optimizers and learning-rate schedulers to use in your optimization.\n Normally you'd need one. But in the case of GANs or similar you might have multiple.\n\n Return:\n Any of these 6 options.\n\n - Single optimizer.\n - List or Tuple - List of optimizers.\n - Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).\n - Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'\n key whose value is a single LR scheduler or lr_dict.\n - Tuple of dictionaries as described, with an optional 'frequency' key.\n - None - Fit will run without any optimizer.\n\n Note:\n The 'frequency' value is an int corresponding to the number of sequential batches\n optimized with the specific optimizer. 
It should be given to none or to all of the optimizers.\n There is a difference between passing multiple optimizers in a list,\n and passing multiple optimizers in dictionaries with a frequency of 1:\n In the former case, all optimizers will operate on the given batch in each optimization step.\n In the latter, only one optimizer will operate on the given batch at every step.\n\n The lr_dict is a dictionary which contains the scheduler and its associated configuration.\n The default configuration is shown below.\n\n .. code-block:: python\n\n {\n 'scheduler': lr_scheduler, # The LR scheduler instance (required)\n 'interval': 'epoch', # The unit of the scheduler's step size\n 'frequency': 1, # The frequency of the scheduler\n 'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler\n 'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor\n 'strict': True, # Whether to crash the training if `monitor` is not found\n 'name': None, # Custom name for LearningRateMonitor to use\n }\n\n Only the ``scheduler`` key is required, the rest will be set to the defaults above.\n\n Examples:\n .. code-block:: python\n\n # most cases\n def configure_optimizers(self):\n opt = Adam(self.parameters(), lr=1e-3)\n return opt\n\n # multiple optimizer case (e.g.: GAN)\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n return generator_opt, disriminator_opt\n\n # example with learning rate schedulers\n def configure_optimizers(self):\n generator_opt = Adam(self.model_gen.parameters(), lr=0.01)\n disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)\n discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)\n return [generator_opt, disriminator_opt], [discriminator_sched]\n\n # example with step-based learning rate schedulers\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),\n 'interval': 'step'} # called after each training step\n dis_sched = CosineAnnealing(discriminator_opt, T_max=10) # called every epoch\n return [gen_opt, dis_opt], [gen_sched, dis_sched]\n\n # example with optimizer frequencies\n # see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1\n # https://arxiv.org/abs/1704.00028\n def configure_optimizers(self):\n gen_opt = Adam(self.model_gen.parameters(), lr=0.01)\n dis_opt = Adam(self.model_disc.parameters(), lr=0.02)\n n_critic = 5\n return (\n {'optimizer': dis_opt, 'frequency': n_critic},\n {'optimizer': gen_opt, 'frequency': 1}\n )\n\n Note:\n\n Some things to know:\n\n - Lightning calls ``.backward()`` and ``.step()`` on each optimizer\n and learning rate scheduler as needed.\n\n - If you use 16-bit precision (``precision=16``), Lightning will automatically\n handle the optimizers for you.\n\n - If you use multiple optimizers, :meth:`training_step` will have an additional\n ``optimizer_idx`` parameter.\n\n - If you use LBFGS Lightning handles the closure function automatically for you.\n\n - If you use multiple optimizers, gradients will be calculated only\n for the parameters of current optimizer at each training step.\n\n - If you need to control how often those optimizers step or override the\n default ``.step()`` schedule, override the :meth:`optimizer_step` hook.\n\n - If you only want to call a learning rate scheduler every ``x`` step or epoch,\n or want to monitor a 
custom metric, you can specify these in a lr_dict:\n\n .. code-block:: python\n\n {\n 'scheduler': lr_scheduler,\n 'interval': 'step', # or 'epoch'\n 'monitor': 'val_f1',\n 'frequency': x,\n }\n\n \"\"\"\n rank_zero_warn(\n \"`configure_optimizers` must be implemented to be used with the Lightning Trainer\"\n )\n\n def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:\n \"\"\"\n Call this directly from your training_step when doing optimizations manually.\n By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you\n\n This function forwards all args to the .backward() call as well.\n\n .. tip:: In manual mode we still automatically clip grads if Trainer(gradient_clip_val=x) is set\n\n .. tip:: In manual mode we still automatically accumulate grad over batches if\n Trainer(accumulate_grad_batches=x) is set and you use `optimizer.step()`\n\n Example::\n\n def training_step(...):\n (opt_a, opt_b) = self.optimizers()\n loss = ...\n # automatically applies scaling, etc...\n self.manual_backward(loss, opt_a)\n opt_a.step()\n \"\"\"\n # make sure we're using manual opt\n self._verify_is_manual_optimization('manual_backward')\n\n # backward\n self._running_manual_backward = True\n self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)\n self._running_manual_backward = False\n\n def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:\n \"\"\"\n Override backward with your own implementation if you need to.\n\n Args:\n loss: Loss is already scaled by accumulated grads\n optimizer: Current optimizer being used\n optimizer_idx: Index of the current optimizer being used\n\n Called to perform backward step.\n Feel free to override as needed.\n The loss passed in has already been scaled for accumulated gradients if requested.\n\n Example::\n\n def backward(self, loss, optimizer, optimizer_idx):\n loss.backward()\n\n \"\"\"\n if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:\n loss.backward(*args, **kwargs)\n\n def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):\n \"\"\"\n Makes sure only the gradients of the current optimizer's parameters are calculated\n in the training step to prevent dangling gradients in multiple-optimizer setup.\n\n .. note:: Only called when using multiple optimizers\n\n Override for your own behavior\n\n Args:\n optimizer:\n optimizer_idx:\n \"\"\"\n for param in self.parameters():\n param.requires_grad = False\n\n for group in optimizer.param_groups:\n for param in group['params']:\n param.requires_grad = True\n\n def optimizer_step(\n self,\n epoch: int = None,\n batch_idx: int = None,\n optimizer: Optimizer = None,\n optimizer_idx: int = None,\n optimizer_closure: Optional[Callable] = None,\n on_tpu: bool = None,\n using_native_amp: bool = None,\n using_lbfgs: bool = None,\n ) -> None:\n r\"\"\"\n Override this method to adjust the default way the\n :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.\n By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example\n once per optimizer.\n\n .. tip:: With `Trainer(enable_pl_optimizer=True)`, you can user `optimizer.step()` directly and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.\n\n Warning:\n If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter\n to ``optimizer.step()`` function as shown in the examples. 
This ensures that\n ``train_step_and_backward_closure`` is called within\n :meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.\n\n Args:\n epoch: Current epoch\n batch_idx: Index of current batch\n optimizer: A PyTorch optimizer\n optimizer_idx: If you used multiple optimizers this indexes into that list.\n optimizer_closure: closure for all optimizers\n on_tpu: true if TPU backward is required\n using_native_amp: True if using native amp\n using_lbfgs: True if the matching optimizer is lbfgs\n\n Examples:\n .. code-block:: python\n\n # DEFAULT\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n optimizer.step(closure=optimizer_closure)\n\n # Alternating schedule for optimizer steps (i.e.: GANs)\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n # update generator opt every 2 steps\n if optimizer_idx == 0:\n if batch_idx % 2 == 0 :\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n # update discriminator opt every 4 steps\n if optimizer_idx == 1:\n if batch_idx % 4 == 0 :\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n # ...\n # add as many optimizers as you want\n\n\n Here's another example showing how to use this for more advanced things such as\n learning rate warm-up:\n\n .. code-block:: python\n\n # learning rate warm-up\n def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,\n optimizer_closure, on_tpu, using_native_amp, using_lbfgs):\n # warm up lr\n if self.trainer.global_step < 500:\n lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)\n for pg in optimizer.param_groups:\n pg['lr'] = lr_scale * self.learning_rate\n\n # update params\n optimizer.step(closure=optimizer_closure)\n optimizer.zero_grad()\n\n \"\"\"\n optimizer.step(closure=optimizer_closure)\n\n def optimizer_zero_grad(\n self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int\n ):\n optimizer.zero_grad()\n\n def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:\n r\"\"\"\n When using truncated backpropagation through time, each batch must be split along the\n time dimension. Lightning handles this by default, but for custom behavior override\n this function.\n\n Args:\n batch: Current batch\n split_size: The size of the split\n\n Return:\n List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated\n back propagation through time. The default implementation splits root level Tensors and\n Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.\n\n Examples:\n .. 
code-block:: python\n\n def tbptt_split_batch(self, batch, split_size):\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t:t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t:t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n Note:\n Called in the training loop after\n :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`\n if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.\n Each returned batch split is passed separately to :meth:`training_step`.\n\n \"\"\"\n time_dims = [\n len(x[0])\n for x in batch\n if isinstance(x, (torch.Tensor, collections.Sequence))\n ]\n assert len(time_dims) >= 1, \"Unable to determine batch time dimension\"\n assert all(\n x == time_dims[0] for x in time_dims\n ), \"Batch time dimension length is ambiguous\"\n\n splits = []\n for t in range(0, time_dims[0], split_size):\n batch_split = []\n for i, x in enumerate(batch):\n if isinstance(x, torch.Tensor):\n split_x = x[:, t: t + split_size]\n elif isinstance(x, collections.Sequence):\n split_x = [None] * len(x)\n for batch_idx in range(len(x)):\n split_x[batch_idx] = x[batch_idx][t: t + split_size]\n\n batch_split.append(split_x)\n\n splits.append(batch_split)\n\n return splits\n\n def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:\n model_summary = None\n\n if mode in ModelSummary.MODES:\n model_summary = ModelSummary(self, mode=mode)\n log.info(\"\\n\" + str(model_summary))\n elif mode is not None:\n raise MisconfigurationException(\n f\"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}\"\n )\n\n return model_summary\n\n def freeze(self) -> None:\n r\"\"\"\n Freeze all params for inference.\n\n Example:\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.freeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = False\n\n self.eval()\n\n def unfreeze(self) -> None:\n \"\"\"\n Unfreeze all parameters for training.\n\n .. code-block:: python\n\n model = MyLightningModule(...)\n model.unfreeze()\n\n \"\"\"\n for param in self.parameters():\n param.requires_grad = True\n\n self.train()\n\n def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:\n r\"\"\"\n Implement this to override the default items displayed in the progress bar.\n By default it includes the average loss value, split index of BPTT (if used)\n and the version of the experiment when using a logger.\n\n .. code-block::\n\n Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]\n\n Here is an example how to override the defaults:\n\n .. 
code-block:: python\n\n def get_progress_bar_dict(self):\n # don't show the version number\n items = super().get_progress_bar_dict()\n items.pop(\"v_num\", None)\n return items\n\n Return:\n Dictionary with the items to be displayed in the progress bar.\n \"\"\"\n # call .item() only once but store elements without graphs\n running_train_loss = self.trainer.train_loop.running_loss.mean()\n avg_training_loss = None\n if running_train_loss is not None:\n avg_training_loss = running_train_loss.cpu().item()\n elif self.trainer.train_loop.automatic_optimization:\n avg_training_loss = float('NaN')\n\n tqdm_dict = {}\n if avg_training_loss is not None:\n tqdm_dict[\"loss\"] = f\"{avg_training_loss:.3g}\"\n\n if self.trainer.truncated_bptt_steps is not None:\n tqdm_dict[\"split_idx\"] = self.trainer.split_idx\n\n if self.trainer.logger is not None and self.trainer.logger.version is not None:\n version = self.trainer.logger.version\n # show last 4 places of long version strings\n version = version[-4:] if isinstance(version, str) else version\n tqdm_dict[\"v_num\"] = version\n\n return tqdm_dict\n\n def _verify_is_manual_optimization(self, fn_name):\n if self.trainer.train_loop.automatic_optimization:\n raise MisconfigurationException(\n f'to use {fn_name}, please disable automatic optimization:'\n ' set model property `automatic_optimization` as False'\n )\n\n @classmethod\n def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:\n \"\"\"\n Collect all module arguments in the current constructor and all child constructors.\n The child constructors are all the ``__init__`` methods that reach the current class through\n (chained) ``super().__init__()`` calls.\n\n Args:\n frame: instance frame\n\n Returns:\n self_arguments: arguments dictionary of the first instance\n parents_arguments: arguments dictionary of the parent's instances\n \"\"\"\n if not frame:\n frame = inspect.currentframe()\n\n frame_args = collect_init_args(frame.f_back, [])\n self_arguments = frame_args[-1]\n\n # set hyper_parameters in child\n self_arguments = self_arguments\n parents_arguments = {}\n\n # add all arguments from parents\n for args in frame_args[:-1]:\n parents_arguments.update(args)\n return self_arguments, parents_arguments\n\n def save_hyperparameters(self, *args, frame=None) -> None:\n \"\"\"Save all model arguments.\n\n Args:\n args: single object of `dict`, `NameSpace` or `OmegaConf`\n or string names or argumenst from class `__init__`\n\n >>> from collections import OrderedDict\n >>> class ManuallyArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # manually assign arguments\n ... self.save_hyperparameters('arg1', 'arg3')\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = ManuallyArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg3\": 3.14\n\n >>> class AutomaticArgsModel(LightningModule):\n ... def __init__(self, arg1, arg2, arg3):\n ... super().__init__()\n ... # equivalent automatic\n ... self.save_hyperparameters()\n ... def forward(self, *args, **kwargs):\n ... ...\n >>> model = AutomaticArgsModel(1, 'abc', 3.14)\n >>> model.hparams\n \"arg1\": 1\n \"arg2\": abc\n \"arg3\": 3.14\n\n >>> class SingleArgModel(LightningModule):\n ... def __init__(self, params):\n ... super().__init__()\n ... # manually assign single argument\n ... self.save_hyperparameters(params)\n ... def forward(self, *args, **kwargs):\n ... 
...\n >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))\n >>> model.hparams\n \"p1\": 1\n \"p2\": abc\n \"p3\": 3.14\n \"\"\"\n if not frame:\n frame = inspect.currentframe().f_back\n init_args = get_init_args(frame)\n assert init_args, \"failed to inspect the self init\"\n if not args:\n # take all arguments\n hp = init_args\n self._hparams_name = \"kwargs\" if hp else None\n else:\n # take only listed arguments in `save_hparams`\n isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]\n if len(isx_non_str) == 1:\n hp = args[isx_non_str[0]]\n cand_names = [k for k, v in init_args.items() if v == hp]\n self._hparams_name = cand_names[0] if cand_names else None\n else:\n hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}\n self._hparams_name = \"kwargs\"\n\n # `hparams` are expected here\n if hp:\n self._set_hparams(hp)\n # make deep copy so there is not other runtime changes reflected\n self._hparams_initial = copy.deepcopy(self._hparams)\n\n def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:\n if isinstance(hp, Namespace):\n hp = vars(hp)\n if isinstance(hp, dict):\n hp = AttributeDict(hp)\n elif isinstance(hp, PRIMITIVE_TYPES):\n raise ValueError(f\"Primitives {PRIMITIVE_TYPES} are not allowed.\")\n elif not isinstance(hp, ALLOWED_CONFIG_TYPES):\n raise ValueError(f\"Unsupported config type of {type(hp)}.\")\n\n if isinstance(hp, dict) and isinstance(self.hparams, dict):\n self.hparams.update(hp)\n else:\n self._hparams = hp\n\n @torch.no_grad()\n def to_onnx(\n self,\n file_path: Union[str, Path],\n input_sample: Optional[Any] = None,\n **kwargs,\n ):\n \"\"\"\n Saves the model in ONNX format\n\n Args:\n file_path: The path of the file the onnx model should be saved to.\n input_sample: An input for tracing. Default: None (Use self.example_input_array)\n **kwargs: Will be passed to torch.onnx.export function.\n\n Example:\n >>> class SimpleModel(LightningModule):\n ... def __init__(self):\n ... super().__init__()\n ... self.l1 = torch.nn.Linear(in_features=64, out_features=4)\n ...\n ... def forward(self, x):\n ... return torch.relu(self.l1(x.view(x.size(0), -1)))\n\n >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:\n ... model = SimpleModel()\n ... input_sample = torch.randn((1, 64))\n ... model.to_onnx(tmpfile.name, input_sample, export_params=True)\n ... 
os.path.isfile(tmpfile.name)\n True\n \"\"\"\n mode = self.training\n\n if input_sample is None:\n if self.example_input_array is None:\n raise ValueError(\n \"Could not export to ONNX since neither `input_sample` nor\"\n \" `model.example_input_array` attribute is set.\"\n )\n input_sample = self.example_input_array\n\n input_sample = self.transfer_batch_to_device(input_sample)\n\n if \"example_outputs\" not in kwargs:\n self.eval()\n kwargs[\"example_outputs\"] = self(input_sample)\n\n torch.onnx.export(self, input_sample, file_path, **kwargs)\n self.train(mode)\n\n @torch.no_grad()\n def to_torchscript(\n self,\n file_path: Optional[Union[str, Path]] = None,\n method: Optional[str] = 'script',\n example_inputs: Optional[Any] = None,\n **kwargs,\n ) -> Union[ScriptModule, Dict[str, ScriptModule]]:\n \"\"\"\n By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.\n If you want to use tracing, please provided the argument `method='trace'` and make sure that either the\n example_inputs argument is provided, or the model has self.example_input_array set.\n If you would like to customize the modules that are scripted you should override this method.\n In case you want to return multiple modules, we recommend using a dictionary.\n\n Args:\n file_path: Path where to save the torchscript. Default: None (no file saved).\n method: Whether to use TorchScript's script or trace method. Default: 'script'\n example_inputs: An input to be used to do tracing when method is set to 'trace'.\n Default: None (Use self.example_input_array)\n **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or\n :func:`torch.jit.trace` function.\n\n Note:\n - Requires the implementation of the\n :meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.\n - The exported script will be set to evaluation mode.\n - It is recommended that you install the latest supported version of PyTorch\n to use this feature without limitations. See also the :mod:`torch.jit`\n documentation for supported features.\n\n Example:\n >>> class SimpleModel(LightningModule):\n ... def __init__(self):\n ... super().__init__()\n ... self.l1 = torch.nn.Linear(in_features=64, out_features=4)\n ...\n ... def forward(self, x):\n ... return torch.relu(self.l1(x.view(x.size(0), -1)))\n ...\n >>> model = SimpleModel()\n >>> torch.jit.save(model.to_torchscript(), \"model.pt\") # doctest: +SKIP\n >>> os.path.isfile(\"model.pt\") # doctest: +SKIP\n >>> torch.jit.save(model.to_torchscript(file_path=\"model_trace.pt\", method='trace', # doctest: +SKIP\n ... 
example_inputs=torch.randn(1, 64))) # doctest: +SKIP\n >>> os.path.isfile(\"model_trace.pt\") # doctest: +SKIP\n True\n\n Return:\n This LightningModule as a torchscript, regardless of whether file_path is\n defined or not.\n \"\"\"\n mode = self.training\n\n if method == 'script':\n torchscript_module = torch.jit.script(self.eval(), **kwargs)\n elif method == 'trace':\n # if no example inputs are provided, try to see if model has example_input_array set\n if example_inputs is None:\n if self.example_input_array is None:\n raise ValueError(\n 'Choosing method=`trace` requires either `example_inputs`'\n ' or `model.example_input_array` to be defined'\n )\n example_inputs = self.example_input_array\n\n # automatically send example inputs to the right device and use trace\n example_inputs = self.transfer_batch_to_device(example_inputs)\n torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)\n else:\n raise ValueError(\"The 'method' parameter only supports 'script' or 'trace',\"\n f\" but value given was: {method}\")\n\n self.train(mode)\n\n if file_path is not None:\n torch.jit.save(torchscript_module, file_path)\n\n return torchscript_module\n\n @property\n def hparams(self) -> Union[AttributeDict, dict, Namespace]:\n if not hasattr(self, \"_hparams\"):\n self._hparams = AttributeDict()\n return self._hparams\n\n @property\n def hparams_initial(self) -> AttributeDict:\n if not hasattr(self, \"_hparams_initial\"):\n return AttributeDict()\n # prevent any change\n return copy.deepcopy(self._hparams_initial)\n\n @hparams.setter\n def hparams(self, hp: Union[dict, Namespace, Any]):\n # TODO: remove this method in v1.3.0.\n rank_zero_warn(\n \"The setter for self.hparams in LightningModule is deprecated since v1.1.0 and will be\"\n \" removed in v1.3.0. 
Replace the assignment `self.hparams = hparams` with \"\n \" `self.save_hyperparameters()`.\",\n DeprecationWarning\n )\n hparams_assignment_name = self.__get_hparams_assignment_variable()\n self._hparams_name = hparams_assignment_name\n self._set_hparams(hp)\n # this resolves case when user does not uses `save_hyperparameters` and do hard assignement in init\n if not hasattr(self, \"_hparams_initial\"):\n self._hparams_initial = copy.deepcopy(self._hparams)\n\n def __get_hparams_assignment_variable(self):\n \"\"\"\n looks at the code of the class to figure out what the user named self.hparams\n this only happens when the user explicitly sets self.hparams\n \"\"\"\n try:\n class_code = inspect.getsource(self.__class__)\n lines = class_code.split(\"\\n\")\n for line in lines:\n line = re.sub(r\"\\s+\", \"\", line, flags=re.UNICODE)\n if \".hparams=\" in line:\n return line.split(\"=\")[1]\n except Exception:\n return \"hparams\"\n\n return None\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional, Union\n\nimport torch\nfrom torch import optim\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.accelerators.accelerator import Accelerator\nfrom pytorch_lightning.cluster_environments import ClusterEnvironment\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.core.step_result import Result\nfrom pytorch_lightning.distributed import LightningDistributed\nfrom pytorch_lightning.overrides.data_parallel import LightningDataParallel\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass DataParallelAccelerator(Accelerator):\n\n def __init__(self, trainer, cluster_environment: Optional[ClusterEnvironment] = None):\n \"\"\"\n Runs training using DP via manual start (not HPC cluster)\n\n Example::\n\n # default\n trainer = Trainer(accelerator=DataParallelAccelerator())\n\n \"\"\"\n super().__init__(trainer, cluster_environment)\n self.model_autocast_original_forward = None\n self.dist = LightningDistributed()\n self.nickname = 'dp'\n\n def setup(self, model):\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # put model on correct device\n model.cuda(self.trainer.root_gpu)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.setup_optimizers(model)\n\n # init torch data parallel\n model = self.__init_torch_data_parallel(model)\n\n # hack forward to do autocast for the user\n self.model_autocast_original_forward = model.forward\n\n # init half precision\n if self.trainer.amp_backend:\n model = self.__init_half_precision(model)\n\n self.trainer.model = model\n\n def __init_torch_data_parallel(self, model):\n # create list of device ids\n device_ids = self.trainer.data_parallel_device_ids\n if isinstance(device_ids, int):\n device_ids = list(range(device_ids))\n\n # set dp device\n torch.cuda.set_device(self.trainer.root_gpu)\n model = 
LightningDataParallel(model, device_ids=device_ids)\n return model\n\n def __init_half_precision(self, model):\n if self.trainer.amp_backend == AMPType.NATIVE:\n self.__init_native_amp(model)\n else:\n model = self.__init_nvidia_apex(model)\n return model\n\n def __init_native_amp(self, model):\n model.forward = torch.cuda.amp.autocast()(model.forward)\n\n def __init_nvidia_apex(self, model):\n # check for this bug (amp + dp + !01 doesn't work)\n # https://github.com/NVIDIA/apex/issues/227\n if self.trainer.amp_level == 'O2':\n raise MisconfigurationException(\n f'Amp level {self.trainer.amp_level} with DataParallel is not supported.'\n f' See this note from NVIDIA for more info: https://github.com/NVIDIA/apex/issues/227.'\n f' We recommend you switch to ddp if you want to use amp')\n else:\n model = self.trainer.precision_connector.connect(model)\n\n return model\n\n def teardown(self):\n # replace the original fwd function\n self.trainer.model.forward = self.model_autocast_original_forward\n self.barrier()\n\n def _step(self, args):\n if self.trainer.amp_backend == AMPType.NATIVE:\n with torch.cuda.amp.autocast():\n output = self.trainer.model(*args)\n else:\n output = self.trainer.model(*args)\n return output\n\n def training_step(self, args):\n return self._step(args)\n\n def validation_step(self, args):\n return self._step(args)\n\n def test_step(self, args):\n return self._step(args)\n\n def training_step_end(self, output):\n if isinstance(output, Result):\n output.dp_reduce()\n elif isinstance(output, torch.Tensor):\n output = output.mean()\n return output\n\n def validation_step_end(self, output):\n if isinstance(output, Result):\n output.dp_reduce()\n elif isinstance(output, torch.Tensor):\n output = output.mean()\n return output\n\n def test_step_end(self, output):\n if isinstance(output, Result):\n output.dp_reduce()\n elif isinstance(output, torch.Tensor):\n output = output.mean()\n return output\n\n def get_reference_model(self, model) -> LightningModule:\n if isinstance(model, LightningDataParallel):\n return model.module\n return model\n\n @property\n def require_distributed_sampler(self):\n return False\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport torch\n\nimport pytorch_lightning\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities import AMPType, APEX_AVAILABLE, OMEGACONF_AVAILABLE, rank_zero_info, rank_zero_warn\nfrom pytorch_lightning.utilities.cloud_io import atomic_save, get_filesystem\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities.upgrade_checkpoint import KEYS_MAPPING as DEPRECATED_CHECKPOINT_KEYS\n\nif APEX_AVAILABLE:\n from apex import amp\n\nif 
OMEGACONF_AVAILABLE:\n from omegaconf import Container\n\n\nclass CheckpointConnector:\n\n def __init__(self, trainer):\n self.trainer = trainer\n\n # used to validate checkpointing logic\n self.has_trained = False\n\n def restore_weights(self) -> None:\n \"\"\"\n Attempt to restore a checkpoint (e.g. weights) in this priority:\n 1. from HPC weights\n 2. from `resume_from_checkpoint` file\n 3. don't restore\n \"\"\"\n # clear cache before restore\n if self.trainer.on_gpu:\n torch.cuda.empty_cache()\n\n # 1. Attempt to restore states from HPC checkpoint\n dir_path_hpc = str(self.trainer.weights_save_path)\n max_suffix = self.max_ckpt_in_folder(dir_path_hpc, \"hpc_ckpt_\")\n if max_suffix is not None:\n checkpoint_path = f'{dir_path_hpc}/hpc_ckpt_{max_suffix}.ckpt'\n self.hpc_load(checkpoint_path, self.trainer.on_gpu)\n rank_zero_info(f'restored hpc model from: {checkpoint_path}')\n\n # 2. Attempt to restore states from `resume_from_checkpoint` file\n elif self.trainer.resume_from_checkpoint is not None:\n self.restore(self.trainer.resume_from_checkpoint, on_gpu=self.trainer.on_gpu)\n\n # wait for all to catch up\n self.trainer.accelerator_backend.barrier('TrainerIOMixin.restore_weights')\n\n # clear cache after restore\n if self.trainer.on_gpu:\n torch.cuda.empty_cache()\n\n def restore(self, checkpoint_path: str, on_gpu: bool) -> bool:\n \"\"\"\n Load model/training states from a 'PyTorch-Lightning checkpoint' file through file-read and state-restore.\n All restored states are listed in return value description of `dump_checkpoint`.\n \"\"\"\n # Try to read the checkpoint file at `checkpoint_path`. If not exist, do not restore checkpoint.\n fs = get_filesystem(checkpoint_path)\n if not fs.exists(checkpoint_path):\n rank_zero_warn(\"No checkpoint file exists at `resume_from_checkpoint`. Start from scratch\")\n return False\n\n # read a checkpoint dictionary object from the 'PyTorch-Lightning checkpoint' file at `checkpoint_path`\n checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)\n\n # acquire the model\n model = self.trainer.get_model()\n\n # restore model and datamodule state\n self.restore_model_state(model, checkpoint)\n\n if on_gpu:\n model.cuda(self.trainer.root_gpu)\n\n # restore training state\n self.restore_training_state(checkpoint)\n\n rank_zero_info(f\"Restored states from the checkpoint file at {checkpoint_path}\")\n return True\n\n def restore_model_state(self, model: LightningModule, checkpoint) -> None:\n \"\"\"\n Restore model states from a 'PyTorch-Lightning checkpoint' dictionary object\n \"\"\"\n\n # restore datamodule states\n if self.trainer.datamodule is not None:\n self.trainer.datamodule.on_load_checkpoint(checkpoint)\n\n # hook: give user access to checkpoint if needed.\n model.on_load_checkpoint(checkpoint)\n\n # restore model state_dict\n model.load_state_dict(checkpoint['state_dict'])\n\n def restore_training_state(self, checkpoint):\n \"\"\"\n Restore trainer state.\n Model will get its change to update\n :param checkpoint:\n :return:\n \"\"\"\n # validation\n if 'optimizer_states' not in checkpoint or 'lr_schedulers' not in checkpoint:\n raise KeyError(\n 'Trying to restore training state but checkpoint contains only the model.'\n ' This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.'\n )\n\n if any([key in checkpoint for key in DEPRECATED_CHECKPOINT_KEYS]):\n raise ValueError(\n \"The checkpoint you're attempting to load follows an\"\n \" outdated schema. 
You can upgrade to the current schema by running\"\n \" `python -m pytorch_lightning.utilities.upgrade_checkpoint --file model.ckpt`\"\n \" where `model.ckpt` is your checkpoint file.\"\n )\n\n # restore amp scaling\n if self.trainer.amp_backend == AMPType.NATIVE and 'native_amp_scaling_state' in checkpoint:\n self.trainer.scaler.load_state_dict(checkpoint['native_amp_scaling_state'])\n elif self.trainer.amp_backend == AMPType.APEX and 'amp_scaling_state' in checkpoint:\n amp.load_state_dict(checkpoint['amp_scaling_state'])\n\n # restore callback states\n self.trainer.on_load_checkpoint(checkpoint)\n\n self.trainer.global_step = checkpoint['global_step']\n self.trainer.current_epoch = checkpoint['epoch']\n\n # crash if max_epochs is lower then the current epoch from the checkpoint\n if self.trainer.current_epoch > self.trainer.max_epochs:\n m = f\"\"\"\n you restored a checkpoint with current_epoch={self.trainer.current_epoch}\n but the Trainer(max_epochs={self.trainer.max_epochs})\n \"\"\"\n raise MisconfigurationException(m)\n\n # Division deals with global step stepping once per accumulated batch\n # Inequality deals with different global step for odd vs even num_training_batches\n n_accum = 1 if self.trainer.accumulate_grad_batches is None else self.trainer.accumulate_grad_batches\n expected_steps = self.trainer.num_training_batches / n_accum\n if self.trainer.num_training_batches != 0 and self.trainer.global_step % expected_steps > 1:\n rank_zero_warn(\n \"You're resuming from a checkpoint that ended mid-epoch.\"\n \" Training will start from the beginning of the next epoch.\"\n \" This can cause unreliable results if further training is done,\"\n \" consider using an end of epoch checkpoint.\"\n )\n\n # restore the optimizers\n optimizer_states = checkpoint['optimizer_states']\n for optimizer, opt_state in zip(self.trainer.optimizers, optimizer_states):\n optimizer.load_state_dict(opt_state)\n\n # move optimizer to GPU 1 weight at a time\n # avoids OOM\n if self.trainer.root_gpu is not None:\n for state in optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda(self.trainer.root_gpu)\n\n # restore the lr schedulers\n lr_schedulers = checkpoint['lr_schedulers']\n for scheduler, lrs_state in zip(self.trainer.lr_schedulers, lr_schedulers):\n scheduler['scheduler'].load_state_dict(lrs_state)\n\n # ----------------------------------\n # PRIVATE OPS\n # ----------------------------------\n def hpc_save(self, folderpath: str, logger):\n # make sure the checkpoint folder exists\n folderpath = str(folderpath) # because the tests pass a path object\n fs = get_filesystem(folderpath)\n fs.makedirs(folderpath, exist_ok=True)\n\n # save logger to make sure we get all the metrics\n logger.save()\n\n max_suffix = self.max_ckpt_in_folder(folderpath)\n ckpt_number = (max_suffix if max_suffix is not None else 0) + 1\n\n fs.makedirs(folderpath, exist_ok=True)\n filepath = os.path.join(folderpath, f'hpc_ckpt_{ckpt_number}.ckpt')\n\n # give model a chance to do something on hpc_save\n model = self.trainer.get_model()\n checkpoint = self.dump_checkpoint()\n\n model.on_hpc_save(checkpoint)\n\n if self.trainer.accelerator_backend:\n checkpoint = self.trainer.accelerator_backend.on_save(checkpoint)\n\n # do the actual save\n # TODO: fix for anything with multiprocess DP, DDP, DDP2\n try:\n atomic_save(checkpoint, filepath)\n except AttributeError as err:\n if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:\n del 
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]\n rank_zero_warn(\n 'warning, `hyper_parameters` dropped from checkpoint.' f' An attribute is not picklable {err}'\n )\n atomic_save(checkpoint, filepath)\n\n return filepath\n\n def dump_checkpoint(self, weights_only: bool = False) -> dict:\n \"\"\"Creating a model checkpoint dictionary object from various component states.\n\n Args:\n weights_only: saving model weights only\n\n Return:\n structured dictionary: {\n 'epoch': training epoch\n 'global_step': training global step\n 'pytorch-lightning_version': PyTorch Lightning's version\n 'callbacks': \"callback specific state\"[] # if not weights_only\n 'optimizer_states': \"PT optim's state_dict\"[] # if not weights_only\n 'lr_schedulers': \"PT sched's state_dict\"[] # if not weights_only\n 'native_amp_scaling_state': PT amp's state_dict # if not weights_only and use native amp\n 'amp_scaling_state': Apex's state_dict # if not weights_only and use apex amp\n 'state_dict': Model's state_dict (e.g. network weights)\n CHECKPOINT_HYPER_PARAMS_NAME:\n CHECKPOINT_HYPER_PARAMS_KEY:\n CHECKPOINT_HYPER_PARAMS_TYPE:\n something_cool_i_want_to_save: anything you define through model.on_save_checkpoint\n LightningDataModule.__class__.__name__: pl DataModule's state\n }\n \"\"\"\n\n # dump epoch/global_step/pytorch-lightning_version\n current_epoch = self.trainer.current_epoch\n global_step = self.trainer.global_step\n has_reached_max_steps = self.trainer.max_steps and self.trainer.max_steps <= global_step\n\n global_step += 1\n if not has_reached_max_steps:\n current_epoch += 1\n\n checkpoint = {\n 'epoch': current_epoch,\n 'global_step': global_step,\n 'pytorch-lightning_version': pytorch_lightning.__version__,\n }\n\n if not weights_only:\n\n # dump callbacks\n callback_states = self.trainer.on_save_checkpoint()\n checkpoint['callbacks'] = callback_states\n\n optimizer_states = []\n for i, optimizer in enumerate(self.trainer.optimizers):\n # Rely on accelerator to dump optimizer state\n optimizer_state = self.trainer.accelerator_backend.optimizer_state(optimizer)\n optimizer_states.append(optimizer_state)\n\n checkpoint['optimizer_states'] = optimizer_states\n\n # dump lr schedulers\n lr_schedulers = []\n for scheduler in self.trainer.lr_schedulers:\n lr_schedulers.append(scheduler['scheduler'].state_dict())\n checkpoint['lr_schedulers'] = lr_schedulers\n\n # dump amp scaling\n if self.trainer.amp_backend == AMPType.NATIVE and not self.trainer.use_tpu and self.trainer.scaler is not None:\n checkpoint['native_amp_scaling_state'] = self.trainer.scaler.state_dict()\n elif self.trainer.amp_backend == AMPType.APEX:\n checkpoint['amp_scaling_state'] = amp.state_dict()\n\n # add the hyper_parameters and state_dict from the model\n model = self.trainer.get_model()\n\n # dump the module_arguments and state_dict from the model\n checkpoint['state_dict'] = model.state_dict()\n\n if model.hparams:\n if hasattr(model, '_hparams_name'):\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_NAME] = model._hparams_name\n # dump arguments\n if OMEGACONF_AVAILABLE and isinstance(model.hparams, Container):\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = model.hparams\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_TYPE] = type(model.hparams)\n else:\n checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = dict(model.hparams)\n\n # give the model a chance to dump a few things\n model.on_save_checkpoint(checkpoint)\n if self.trainer.datamodule is not None:\n 
self.trainer.datamodule.on_save_checkpoint(checkpoint)\n\n return checkpoint\n\n def hpc_load(self, checkpoint_path: str, on_gpu: bool):\n \"\"\"\n Load model/training states from a 'PyTorch-Lightning checkpoint' file for hpc.\n All restored states are listed in return value description of `dump_checkpoint`.\n \"\"\"\n\n # read a checkpoint dictionary object from the 'PyTorch-Lightning checkpoint' file at `checkpoint_path`\n checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)\n\n # acquire the model\n model = self.trainer.get_model()\n\n # restore model and datamodule state\n self.restore_model_state(model, checkpoint)\n\n if self.trainer.root_gpu is not None:\n model.cuda(self.trainer.root_gpu)\n\n # restore training state\n self.restore_training_state(checkpoint)\n\n # call hpc specific hook\n model.on_hpc_load(checkpoint)\n\n def max_ckpt_in_folder(self, dir_path: Union[str, Path], name_key: str = 'ckpt_') -> Optional[int]:\n \"\"\"List up files in `dir_path` with name_key, then yield maximum suffix number.\n\n Args:\n dir_path: path of directory which may contain files whose name include `name_key`\n\n Returns:\n None if no-corresponding-file else maximum suffix number\n \"\"\"\n\n # check directory existence\n fs = get_filesystem(dir_path)\n if not fs.exists(dir_path):\n return None\n\n # check corresponding file existence\n files = [os.path.basename(f[\"name\"]) for f in fs.listdir(dir_path)]\n files = [x for x in files if name_key in x]\n if len(files) == 0:\n return None\n\n # extract suffix number\n ckpt_vs = []\n for name in files:\n name = name.split(name_key)[-1]\n name = re.sub('[^0-9]', '', name)\n ckpt_vs.append(int(name))\n\n return max(ckpt_vs)\n\n def get_max_ckpt_path_from_folder(self, folder_path: Union[str, Path]) -> str:\n \"\"\"Get path of maximum-epoch checkpoint in the folder.\"\"\"\n\n max_suffix = self.max_ckpt_in_folder(folder_path)\n ckpt_number = max_suffix if max_suffix is not None else 0\n return f'{folder_path}/hpc_ckpt_{ckpt_number}.ckpt'\n\n def save_checkpoint(self, filepath, weights_only: bool = False):\n \"\"\"Save model/training states as a checkpoint file through state-dump and file-write.\n\n Args:\n filepath: write-target file's path\n weights_only: saving model weights only\n \"\"\"\n # dump states as a checkpoint dictionary object\n checkpoint = self.dump_checkpoint(weights_only)\n\n if self.trainer.is_global_zero:\n # write the checkpoint dictionary on the file\n if self.trainer.accelerator_backend:\n checkpoint = self.trainer.accelerator_backend.on_save(checkpoint)\n try:\n atomic_save(checkpoint, filepath)\n except AttributeError as err:\n if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:\n del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]\n rank_zero_warn(\n 'Warning, `hyper_parameters` dropped from checkpoint.' f' An attribute is not picklable {err}'\n )\n atomic_save(checkpoint, filepath)\n" ]
[ [ "torch._C._log_api_usage_once", "torch.onnx.export", "torch.jit.save", "torch.no_grad" ], [ "torch.cuda.set_device", "torch.cuda.amp.autocast" ], [ "torch.cuda.empty_cache" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HotMaps/renovation_effect
[ "5b1fb81102b3c6ee531b719d8136ed9a343c2598" ]
[ "cm/app/api_v1/my_calculation_module_directory/CM/__delete_if_tested__/CEDM/create_csv_results.py" ]
[ "\nimport numpy as np\nimport os\nimport time\nimport sys\n\npath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.\n abspath(__file__))))\nif path not in sys.path:\n sys.path.append(path)\n \nimport CM_intern.CEDM.modules.cyf.create_density_map as CDM\nimport CM_intern.CEDM.modules.Subfunctions as SF\nfrom CM_intern.common_modules.exportLayerDict import export_layer as expLyr\nimport CM_intern.common_modules.cliprasterlayer as CRL\n\nimport pickle\n\nTARGET_RESOLUTION = 100\n\ndef load_reference_raster_lyr(NUTS3_vector_path, strd_raster_path_full, outputpath, NUTS3_feat_id_LIST\n , MOST_RECENT_CUT=\"\"):\n \n datatype_int = 'uint32'\n #self.datatype_int16 = 'uint16'\n datatype = \"float32\"\n # common parameters\n noDataValue = 0\n \n #SaveLayerDict = {}\n # Get current extent -> Use the Population 1x1km raster as reference Layer\n key_field = \"NUTS_ID\" \n REFERENCE_RASTER_LAYER_COORD, Layer_is_uncut = CRL.create_reference_raster_layer_origin_extent_of_vctr_feat(strd_raster_path_full\n , NUTS3_vector_path, NUTS3_feat_id_LIST\n , Vctr_key_field=key_field)\n (REFERENCE_geotransform_obj, REFERENCE_RasterSize\n , REFERENCE_RESOLUTION, REFERENCE_extent) = REFERENCE_RASTER_LAYER_COORD\n \n REFERENCE_RasterResolution = REFERENCE_geotransform_obj[1]\n \n gto_hr = list(REFERENCE_geotransform_obj)\n gto_hr[1] = TARGET_RESOLUTION\n gto_hr[5] = -TARGET_RESOLUTION\n HighRes_gt_obj = tuple(gto_hr)\n \n SaveLayerDict = {}\n SaveLayerDict[\"Reference\"] = [\"%s/REFERENCE.tif\" % outputpath, REFERENCE_geotransform_obj\n , datatype_int\n , np.ones((REFERENCE_RasterSize), dtype=datatype_int) , noDataValue]\n \n \n # If data are the same as previous cut, then loading data can be done\n LOAD_DATA_PREVIOUS = False\n filename = MOST_RECENT_CUT\n if os.path.exists(MOST_RECENT_CUT):\n try:\n with open(MOST_RECENT_CUT, 'rb') as fobject:\n PREV_CUT = pickle.load(fobject)\n fobject.close()\n if PREV_CUT == REFERENCE_RASTER_LAYER_COORD:\n LOAD_DATA_PREVIOUS = True\n except Exception as e:\n print(\"Cannot import %s\"%MOST_RECENT_CUT)\n print(e)\n \n \n if LOAD_DATA_PREVIOUS != True:\n\n with open(filename, 'wb') as fobject:\n pickle.dump(REFERENCE_RASTER_LAYER_COORD, fobject, protocol=2)\n fobject.close()\n SaveLayerDict = expLyr(SaveLayerDict)\n \n return (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS, Layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize)\n\n\n\ndef main(main_path, path_in_raw, preproccessed_input_path, prj_path_output): \n st = time.time()\n \n data_type = \"uint8\"\n \n MOST_RECENT_CUT = main_path + prj_path_output + \"/MOST_RECENT_CUT.pk\" \n prepro_path = main_path + preproccessed_input_path\n org_data_path = main_path + path_in_raw\n p_ = org_data_path\n pi_ = org_data_path + \"/vector_input_data/\"\n NUTS3_vector_path = pi_ + \"/NUTS3.shp\"\n strd_raster_path_full = \"%s/%s\" %(org_data_path, \"Population.tif\")\n temp_path = \"/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output_2/\" + os.sep + \"Temp\"\n SoilSeal_path_full = \"%s/%s\" %(org_data_path, \"_____ESM100m_final.tif\")\n \n \n \n #p_ = \"/home/simulant/workspace/project/Hotmaps_DATA/heat_density_map/output/\"\n \n \n \n sd = \"\"\n print(os.path.exists(p_))\n print(os.path.exists(pi_))\n fn = []\n NUTS3_feat_id_LIST = range(12000)\n (REFERENCE_RasterResolution, HighRes_gt_obj, LOAD_DATA_PREVIOUS\n , Ref_layer_is_uncut, REFERENCE_geotransform_obj, REFERENCE_RasterSize) = \\\n load_reference_raster_lyr(NUTS3_vector_path,\n strd_raster_path_full, \n temp_path, 
NUTS3_feat_id_LIST\n , MOST_RECENT_CUT)\n \n \n for f_ in os.listdir(\"%s/%s\" %(p_, sd)):\n if f_.endswith(\".tif\"):\n fn.append(\"%s/%s/%s\" %(p_, sd, f_))\n print(f_)\n if \"g100_clc12_v18_5\" in f_.lower():\n data, geotransform_obj = CRL.clip_raster_layer(fn[-1]\n , REFERENCE_geotransform_obj\n , REFERENCE_RasterSize)\n data2 = np.zeros((data.shape),dtype=\"f4\")\n data3 = np.zeros_like(data2)\n data4 = np.ones_like(data2) * 10.0 # 1000 m2\n data2[data <= 21] = 10.0\n data3[data <= 6] = 10.0\n data3[data == 9] = 10.0\n data3[data == 10] = 10.0\n data3[data == 11] = 10.0\n data3[data == 20] = 10.0\n print(np.sum(data2))\n print(np.sum(data3))\n print(np.sum(data4))\n \n \n elif \"ESM100m_final\" in f_: \n data5, geotransform_obj = CRL.clip_raster_layer(fn[-1]\n , REFERENCE_geotransform_obj\n , REFERENCE_RasterSize)\n data5 *= 10.0/100.0 # in 1000 m2, data5 Einheit = %\n print(np.sum(data5))\n \n \n \n print(time.time() - st)\n ARR_NUTS_ID_NUMBER, geotransform_obj = SF.rrl(\"%s/%s_id_number.tif\" %(prepro_path, \"NUTS3\"), data_type=\"uint16\")\n print(time.time() - st)\n ARR_LAU2_ID_NUMBER, geotransform_obj = SF.rrl(\"%s/%s_id_number.tif\" %(prepro_path, \"LAU2\"), data_type=\"uint32\")\n print(time.time() - st)\n \n \n \n #num_fn = len(fn)\n num_fn = 4\n \n RES_Table_NUTS = np.zeros((np.max(ARR_NUTS_ID_NUMBER)+1, num_fn+1), \"f4\") \n RES_Table_LAU = np.zeros((np.max(ARR_LAU2_ID_NUMBER)+1, num_fn+1), \"f4\") \n RES_Table_NUTS[:,0] = np.arange(RES_Table_NUTS.shape[0])\n RES_Table_LAU[:,0] = np.arange(RES_Table_LAU.shape[0])\n \n header = [\"DI\"]\n #for i, f_ in enumerate(fn):\n for i in range(num_fn):\n #print(f_)\n \n if i == 0:\n data = data2.copy()\n fn = \"dauersiedlungsraum\"\n elif i == 1:\n data = data3.copy()\n fn = \"dauersiedlungsraum_eng\" \n elif i == 2:\n data = data4.copy()\n fn = \"flaeche\"\n else:\n data = data5.copy()\n fn = \"ESM100m_final\"\n print(fn)\n header.append(fn) \n print(np.sum(data))\n #header.append(f_.split(\"/\")[-1]) \n #data, geotransform_obj = SF.rrl(f_, data_type=data_type)\n \n TABLE_RESULTS_NUTS = CDM.CreateResultsTableperIndicator(data, ARR_NUTS_ID_NUMBER) \n print(time.time() - st)\n TABLE_RESULTS_LAU = CDM.CreateResultsTableperIndicator(data, ARR_LAU2_ID_NUMBER) \n del data\n print(time.time() - st)\n RES_Table_NUTS[:, i+1] = TABLE_RESULTS_NUTS[:,-1]\n RES_Table_LAU[:, i+1] = TABLE_RESULTS_LAU[:,-1]\n #break\n \n header = \",\".join(header)\n np.savetxt(\"%s/%s.csv\" %(prepro_path, \"__TABLE_RES_LAU2\"), np.round(RES_Table_LAU, 3), delimiter=\",\", header=header, comments=\"\")\n np.savetxt(\"%s/%s.csv\" %(prepro_path, \"__TABLE_RES_NUTS\"), np.round(RES_Table_NUTS, 3), delimiter=\",\", header=header, comments=\"\")\n \n print(\"DONE\")" ]
[ [ "numpy.ones_like", "numpy.arange", "numpy.ones", "numpy.round", "numpy.max", "numpy.zeros_like", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RiccardoNanni/bigbang
[ "70b9890fcd615ccb21a3685a9b33d79226e6fb36" ]
[ "bigbang/listserv.py" ]
[ "import datetime\nimport email\nimport email.parser\nimport glob\nimport mailbox\nimport os\nimport re\nimport subprocess\nimport time\nimport urllib\nimport warnings\nfrom email.header import Header\nfrom email.message import Message\nfrom email.mime.text import MIMEText\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport requests\nimport yaml\nfrom bs4 import BeautifulSoup\n\n\nclass ListservMessageWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservListWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservArchiveWarning(BaseException):\n \"\"\"Base class for Archive class specific exceptions\"\"\"\n\n pass\n\n\nclass ListservMessage:\n \"\"\"\n Parameters\n ----------\n body\n subject\n fromname\n fromaddr\n toname\n toaddr\n date\n contenttype\n messageid\n\n Methods\n -------\n from_url\n get_header_from_html\n get_body_from_html\n get_header_from_listserv_file\n get_body_from_listserv_file\n get_name\n get_addr\n get_date\n remove_unwanted_header_content\n to_dict\n to_mbox\n\n Example\n -------\n msg = ListservMessage.from_url(\n list_name=\"3GPP_TSG_CT_WG6\",\n url=url_message,\n fields=\"total\",\n )\n \"\"\"\n\n empty_header = {\n \"subject\": None,\n \"fromname\": None,\n \"fromaddr\": None,\n \"toname\": None,\n \"toaddr\": None,\n \"date\": None,\n \"contenttype\": None,\n }\n\n def __init__(\n self,\n body: str,\n subject: str,\n fromname: str,\n fromaddr: str,\n toname: str,\n toaddr: str,\n date: str,\n contenttype: str,\n messageid: Optional[str] = None,\n ):\n self.body = body\n self.subject = subject\n self.fromname = fromname\n self.fromaddr = fromaddr\n self.toname = toname\n self.toaddr = toaddr\n self.date = date\n self.contenttype = contenttype\n\n @classmethod\n def from_url(\n cls,\n list_name: str,\n url: str,\n fields: str = \"total\",\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservMessage\":\n \"\"\"\n Args:\n \"\"\"\n # TODO implement field selection, e.g. 
return only header, body, etc.\n if session is None:\n session = get_auth_session(url_login, **login)\n soup = get_website_content(url, session=session)\n if fields in [\"header\", \"total\"]:\n header = ListservMessage.get_header_from_html(soup)\n else:\n header = cls.empty_header\n if fields in [\"body\", \"total\"]:\n body = ListservMessage.get_body_from_html(list_name, url, soup)\n else:\n body = None\n return cls(body, **header)\n\n @classmethod\n def from_listserv_file(\n cls,\n list_name: str,\n file_path: str,\n header_start_line_nr: int,\n fields: str = \"total\",\n ) -> \"ListservMessage\":\n file = open(file_path, \"r\")\n fcontent = file.readlines()\n file.close()\n header_end_line_nr = cls.get_header_end_line_nr(\n fcontent, header_start_line_nr\n )\n if fields in [\"header\", \"total\"]:\n header = cls.get_header_from_listserv_file(\n fcontent, header_start_line_nr, header_end_line_nr\n )\n else:\n header = cls.empty_header\n if fields in [\"body\", \"total\"]:\n body = cls.get_body_from_listserv_file(\n fcontent, header_end_line_nr\n )\n else:\n body = None\n return cls(body, **header)\n\n @classmethod\n def get_header_end_line_nr(\n cls,\n content: List[str],\n header_start_line_nr: int,\n ) -> List[int]:\n \"\"\"\n The header ends with the first empty line encountered.\n\n Args:\n content: The content of one LISTSERV-file.\n \"\"\"\n for lnr, lcont in enumerate(content[header_start_line_nr:]):\n if len(lcont) <= 1:\n header_end_line_nr = header_start_line_nr + lnr\n break\n return header_end_line_nr\n\n @classmethod\n def get_header_from_listserv_file(\n cls,\n content: List[str],\n header_start_line_nr: int,\n header_end_line_nr: int,\n ) -> Dict[str, str]:\n \"\"\"\n Args:\n content:\n \"\"\"\n content = content[header_start_line_nr:header_end_line_nr]\n # collect important info from LISTSERV header\n header = {}\n for lnr in range(len(content)):\n line = content[lnr]\n # get header keyword and value\n if re.match(r\"\\S+:\\s+\\S+\", line):\n key = line.split(\":\")[0]\n value = line.replace(key + \":\", \"\").strip().rstrip(\"\\n\")\n # if not at the end of header\n if lnr < len(content) - 1:\n # if header-keyword value is split over two lines\n if not re.match(r\"\\S+:\\s+\\S+\", content[lnr + 1]):\n value += \" \" + content[lnr + 1].strip().rstrip(\"\\n\")\n header[key.lower()] = value\n\n header = cls.format_header_content(header)\n header = cls.remove_unwanted_header_content(header)\n return header\n\n @classmethod\n def get_body_from_listserv_file(\n cls,\n content: List[str],\n header_end_line_nr: int,\n ) -> str:\n \"\"\"\"\"\"\n found = False\n # find body 'position' in file\n for line_nr, line in enumerate(content[header_end_line_nr:]):\n if \"=\" * 73 in line:\n body_end_line_nr = line_nr + header_end_line_nr\n found = True\n break\n if not found:\n body_end_line_nr = -1\n # get body content\n body = content[header_end_line_nr:body_end_line_nr]\n # remove empty lines and join into one string\n body = (\"\").join([line for line in body if len(line) > 1])\n return body\n\n @classmethod\n def get_header_from_html(cls, soup: BeautifulSoup) -> Dict[str, str]:\n \"\"\"\"\"\"\n text = soup.find(\n \"b\",\n text=re.compile(r\"^\\bSubject\\b\"),\n ).parent.parent.parent.parent.text\n # collect important info from LISTSERV header\n header = {}\n for field in text.split(\"Parts/Attachments:\")[0].splitlines():\n if len(field) == 0:\n continue\n field_name = field.split(\":\")[0].strip()\n field_body = field.replace(field_name + \":\", \"\").strip()\n 
header[field_name.lower()] = field_body\n\n header = cls.format_header_content(header)\n header = cls.remove_unwanted_header_content(header)\n return header\n\n @staticmethod\n def get_body_from_html(\n list_name: str, url: str, soup: BeautifulSoup\n ) -> str:\n \"\"\"\"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n a_tags = soup.select(f'a[href*=\"A3=\"][href*=\"{list_name}\"]')\n href_plain_text = [\n tag.get(\"href\") for tag in a_tags if \"Fplain\" in tag.get(\"href\")\n ][0]\n body_soup = get_website_content(\n urllib.parse.urljoin(url_root, href_plain_text)\n )\n return body_soup.find(\"pre\").text\n\n @classmethod\n def format_header_content(cls, header: Dict[str, str]) -> Dict[str, str]:\n header[\"fromname\"] = cls.get_name(header[\"from\"]).strip()\n header[\"fromaddr\"] = cls.get_addr(header[\"from\"])\n header[\"toname\"] = cls.get_name(header[\"reply-to\"]).strip()\n header[\"toaddr\"] = cls.get_addr(header[\"reply-to\"])\n header[\"date\"] = cls.get_date(header[\"date\"])\n header[\"contenttype\"] = header[\"content-type\"]\n return header\n\n @classmethod\n def remove_unwanted_header_content(\n cls, header: Dict[str, str]\n ) -> Dict[str, str]:\n for key in list(header.keys()):\n if key not in list(cls.empty_header.keys()):\n del header[key]\n return header\n\n @staticmethod\n def get_name(line: str) -> str:\n # get string in between < and >\n email_of_sender = re.findall(r\"\\<(.*)\\>\", line)\n if email_of_sender:\n # remove email_of_sender from line\n name = line.replace(\"<\" + email_of_sender[0] + \">\", \"\")\n # remove special characters\n name = re.sub(r\"[^a-zA-Z0-9]+\", \" \", name)\n else:\n name = line\n return name\n\n @staticmethod\n def get_addr(line: str) -> str:\n # get string in between < and >\n email_of_sender = re.findall(r\"\\<(.*)\\>\", line)\n if email_of_sender:\n email_of_sender = email_of_sender[0]\n else:\n email_of_sender = None\n return email_of_sender\n\n @staticmethod\n def get_date(line: str) -> str:\n line = (\" \").join(line.split(\" \")[:-1]).lstrip()\n # convert format to local version of date and time\n date_time_obj = datetime.datetime.strptime(\n line, \"%a, %d %b %Y %H:%M:%S\"\n )\n return date_time_obj.strftime(\"%c\")\n\n @staticmethod\n def create_message_id(\n date: str,\n from_address: str,\n ) -> str:\n message_id = (\".\").join([date, from_address])\n # remove special characters\n message_id = re.sub(r\"[^a-zA-Z0-9]+\", \"\", message_id)\n return message_id\n\n def to_dict(self) -> Dict[str, str]:\n dic = {\n \"Body\": self.body,\n \"Subject\": self.subject,\n \"FromName\": self.fromname,\n \"FromAddr\": self.fromaddr,\n \"ToName\": self.toname,\n \"ToAddr\": self.toaddr,\n \"Date\": self.date,\n \"ContentType\": self.contenttype,\n }\n return dic\n\n def to_mbox(self, filepath: str, mode: str = \"w\"):\n \"\"\"\n Safe mail list to .mbox files.\n \"\"\"\n message_id = ListservMessage.create_message_id(\n self.date,\n self.fromaddr,\n )\n f = open(filepath, mode, encoding=\"utf-8\")\n f.write(\"\\n\")\n # check that header was selected\n if self.subject is not None:\n f.write(f\"From b'{self.fromaddr}' {self.date}\\n\")\n f.write(f\"Content-Type: {self.contenttype}\\n\")\n f.write(f\"MIME-Version: 1.0\\n\")\n f.write(f\"In-Reply-To: {self.toname} <b'{self.toaddr}'>\\n\")\n f.write(f\"From: {self.fromname} <b'{self.fromaddr}'>\\n\")\n f.write(f\"Subject: b'{self.subject}\\n\")\n f.write(f\"Message-ID: <{message_id}>'\\n\")\n f.write(f\"Date: {self.date}'\\n\")\n f.write(\"\\n\")\n # check that body was selected\n if 
self.body is not None:\n f.write(self.body)\n f.write(\"\\n\")\n f.close()\n\n\nclass ListservList:\n \"\"\"\n This class handles a single mailing list of a public archive in the\n LISTSERV 16.5 format.\n\n Parameters\n ----------\n name\n The of whom the list (e.g. 3GPP_COMMON_IMS_XFER, IEEESCO-DIFUSION, ...)\n source\n Contains the information of the location of the mailing list.\n It can be either an URL where the list or a path to the file(s).\n msgs\n List of ListservMessage objects\n\n Methods\n -------\n from_url\n from_messages\n from_listserv_files\n from_listserv_directories\n get_messages_from_url\n get_period_urls\n get_line_numbers_of_header_starts\n get_index_of_elements_in_selection\n to_dict\n to_pandas_dataframe\n to_mbox\n\n Example\n -------\n mlist = ListservList.from_url(\n \"3GPP_TSG_CT_WG6\",\n url=\"https://list.etsi.org/scripts/wa.exe?A0=3GPP_TSG_CT_WG6\",\n select={\n \"years\": (2020, 2021),\n \"months\": \"January\",\n \"weeks\": [1,5],\n \"fields\": \"header\",\n },\n )\n \"\"\"\n\n def __init__(\n self,\n name: str,\n source: Union[List[str], str],\n msgs: List[ListservMessage],\n ):\n self.name = name\n self.source = source\n self.messages = msgs\n\n def __len__(self) -> int:\n return len(self.messages)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __getitem__(self, index) -> ListservMessage:\n return self.messages[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url: str,\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url: URL to the LISTSERV list.\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n if session is None:\n session = get_auth_session(url_login, **login)\n if \"fields\" not in list(select.keys()):\n select[\"fields\"] = \"total\"\n msgs = cls.get_messages_from_url(name, url, select, session)\n return cls.from_messages(name, url, msgs)\n\n @classmethod\n def from_messages(\n cls,\n name: str,\n url: str,\n messages: List[Union[str, ListservMessage]],\n fields: str = \"total\",\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n messages: Can either be a list of URLs to specific LISTSERV messages\n or a list of `ListservMessage` objects.\n \"\"\"\n if not messages:\n # create empty ListservList for ListservArchive\n msgs = messages\n elif isinstance(messages[0], str):\n # create ListservList from message URLs\n if session is None:\n session = get_auth_session(url_login, **login)\n msgs = []\n for idx, url in enumerate(messages):\n msgs.append(\n ListservMessage.from_url(\n list_name=name,\n url=url,\n fields=fields,\n session=session,\n )\n )\n else:\n # create ListservList from list of ListservMessages\n msgs = messages\n return cls(name, url, msgs)\n\n @classmethod\n def from_listserv_directories(\n cls,\n name: str,\n directorypaths: List[str],\n filedsc: str,\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. 
'3GPP_TSG_SA_WG2_UPCON'.\n directorypaths: List of directory paths where LISTSERV formatted\n messages are.\n filedsc: A description of the relevant files, e.g. *.LOG?????\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n _filepaths = []\n # run through directories and collect all filepaths\n for directorypath in directorypaths:\n _filepaths.append(\n get_all_file_from_directory(directorypath, filedsc)\n )\n # flatten list of lists\n filepaths = [fp for li in _filepaths for fp in li]\n return cls.from_listserv_files(name, filepaths, select)\n\n @classmethod\n def from_listserv_files(\n cls,\n name: str,\n filepaths: List[str],\n select: Optional[dict] = None,\n ) -> \"ListservList\":\n \"\"\"\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n filepaths: List of file paths where LISTSERV formatted messages are.\n Such files can have a file extension of the form: *.LOG1405D\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msgs = []\n for filepath in filepaths:\n # TODO: implement selection filter\n file = open(filepath, \"r\")\n fcontent = file.readlines()\n # get positions of all Emails in file\n header_start_line_nrs = cls.get_line_numbers_of_header_starts(\n fcontent\n )\n file.close()\n # run through all messages in file\n for msg_nr in header_start_line_nrs:\n msgs.append(\n ListservMessage.from_listserv_file(\n name,\n filepath,\n msg_nr,\n select[\"fields\"],\n )\n )\n return cls(name, filepaths, msgs)\n\n @classmethod\n def get_messages_from_url(\n cls,\n name: str,\n url: str,\n select: Optional[dict] = None,\n session: Optional[dict] = None,\n ) -> List[ListservMessage]:\n \"\"\"\n Generator that yields all messages within a certain period\n (e.g. January 2021, Week 5).\n\n Args:\n name: Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'\n url: URL to the LISTSERV list.\n select: Selection criteria that can filter messages by:\n - content, i.e. header and/or body\n - period, i.e. written in a certain year, month, week-of-month\n session: AuthSession\n \"\"\"\n if select is None:\n select = {\"fields\": \"total\"}\n msgs = []\n # run through periods\n for period_url in ListservList.get_period_urls(url, select):\n # run through messages within period\n for msg_url in ListservList.get_messages_urls(name, period_url):\n msgs.append(\n ListservMessage.from_url(\n name,\n msg_url,\n select[\"fields\"],\n session=session,\n )\n )\n # wait between loading messages, for politeness\n time.sleep(1)\n return msgs\n\n @classmethod\n def get_period_urls(\n cls, url: str, select: Optional[dict] = None\n ) -> List[str]:\n \"\"\"\n All messages within a certain period\n (e.g. 
January 2021, Week 5).\n \"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n # create dictionary with key indicating period and values the url\n periods, urls_of_periods = cls.get_all_periods_and_their_urls(\n url_root, get_website_content(url)\n )\n\n if any(\n period in list(select.keys())\n for period in [\"years\", \"months\", \"weeks\"]\n ):\n for key, value in select.items():\n if key == \"years\":\n cond = lambda x: int(re.findall(r\"\\d{4}\", x)[0])\n elif key == \"months\":\n cond = lambda x: x.split(\" \")[0]\n elif key == \"weeks\":\n cond = lambda x: int(x.split(\" \")[-1])\n else:\n continue\n\n periodquants = [cond(period) for period in periods]\n\n indices = ListservList.get_index_of_elements_in_selection(\n periodquants,\n urls_of_periods,\n value,\n )\n\n periods = [periods[idx] for idx in indices]\n urls_of_periods = [urls_of_periods[idx] for idx in indices]\n return urls_of_periods\n\n @staticmethod\n def get_all_periods_and_their_urls(\n url_root: str,\n soup: BeautifulSoup,\n ) -> Tuple[List[str], List[str]]:\n periods = [list_tag.find(\"a\").text for list_tag in soup.find_all(\"li\")]\n urls_of_periods = [\n urllib.parse.urljoin(url_root, list_tag.find(\"a\").get(\"href\"))\n for list_tag in soup.find_all(\"li\")\n ]\n return periods, urls_of_periods\n\n @staticmethod\n def get_index_of_elements_in_selection(\n times: List[Union[int, str]],\n urls: List[str],\n filtr: Union[tuple, list, int, str],\n ) -> List[int]:\n \"\"\"\n Filter out messages that where in a specific period. Period here is a set\n containing units of year, month, and week-of-month which can have the following\n example elements:\n - years: (1992, 2010), [2000, 2008], 2021\n - months: [\"January\", \"July\"], \"November\"\n - weeks: (1, 4), [1, 5], 2\n\n Args:\n times: A list containing information of the period for each\n group of ListservMessage.\n urls: Corresponding URLs of each group of ListservMessage of which the\n period info is contained in `times`.\n filtr: Containing info on what should be filtered.\n\n Returns:\n Indices of to the elements in `times`/`ursl`.\n \"\"\"\n if isinstance(filtr, tuple):\n # filter year or week in range\n cond = lambda x: (np.min(filtr) <= x <= np.max(filtr))\n if isinstance(filtr, list):\n # filter in year, week, or month in list\n cond = lambda x: x in filtr\n if isinstance(filtr, int):\n # filter specific year or week\n cond = lambda x: x == filtr\n if isinstance(filtr, str):\n # filter specific month\n cond = lambda x: x == filtr\n return [idx for idx, time in enumerate(times) if cond(time)]\n\n @classmethod\n def get_messages_urls(cls, name: str, url: str) -> List[str]:\n \"\"\"\n Args:\n name: Name of the `ListservList`\n url: URL to group of messages that are within the same period.\n\n Returns:\n List to URLs from which`ListservMessage` can be initialized.\n \"\"\"\n url_root = (\"/\").join(url.split(\"/\")[:-2])\n soup = get_website_content(url)\n a_tags = soup.select(f'a[href*=\"A2=\"][href*=\"{name}\"]')\n if a_tags:\n a_tags = [\n urllib.parse.urljoin(url_root, url.get(\"href\"))\n for url in a_tags\n ]\n return a_tags\n\n @classmethod\n def get_line_numbers_of_header_starts(\n cls, content: List[str]\n ) -> List[int]:\n \"\"\"\n By definition LISTSERV logs seperate new messages by a row\n of 73 equal signs.\n\n Args:\n content: The content of one LISTSERV-file.\n\n Returns:\n List of line numbers where header starts\n \"\"\"\n return [\n line_nr for line_nr, line in enumerate(content) if \"=\" * 73 in line\n ]\n\n def to_dict(self) -> 
Dict[str, List[str]]:\n \"\"\"\n Place all message into a dictionary of the form:\n dic = {\n \"Subject\": [messages[0], ... , messages[n]],\n .\n .\n .\n \"ContentType\": [messages[0], ... , messages[n]]\n }\n \"\"\"\n # initialize dictionary\n dic = {}\n for key in list(self.messages[0].to_dict().keys()):\n dic[key] = []\n # run through messages\n for msg in self.messages:\n # run through message attributes\n for key, value in msg.to_dict().items():\n dic[key].append(value)\n return dic\n\n def to_pandas_dataframe(self) -> pd.DataFrame:\n return pd.DataFrame.from_dict(self.to_dict())\n\n def to_mbox(self, dir_out: str, filename: Optional[str] = None):\n \"\"\"\n Safe mail list to .mbox files.\n\n Args:\n \"\"\"\n if filename is None:\n filepath = f\"{dir_out}/{self.name}.mbox\"\n else:\n filepath = f\"{dir_out}/{filename}.mbox\"\n first = True\n for msg in self.messages:\n if first:\n msg.to_mbox(filepath, mode=\"w\")\n first = False\n else:\n msg.to_mbox(filepath, mode=\"a\")\n\n\nclass ListservArchive(object):\n \"\"\"\n This class handles a public mailing list archive that uses the\n LISTSERV 16.5 format.\n An archive is a list of ListservList elements.\n\n Parameters\n ----------\n name\n The of whom the archive is (e.g. 3GPP, IEEE, ...)\n url\n The URL where the archive lives\n lists\n A list containing the mailing lists as `ListservList` types\n\n Methods\n -------\n from_url\n from_mailing_lists\n get_lists\n get_sections\n to_dict\n to_pandas_dataframe\n to_mbox\n\n Example\n -------\n arch = ListservArchive.from_url(\n \"3GPP\",\n \"https://list.etsi.org/scripts/wa.exe?\",\n \"https://list.etsi.org/scripts/wa.exe?HOME\",\n select={\n \"years\": (2020, 2021),\n \"months\": \"January\",\n \"weeks\": [1,5],\n \"fields\": \"header\",\n },\n )\n \"\"\"\n\n def __init__(self, name: str, url: str, lists: List[ListservList]):\n self.name = name\n self.url = url\n self.lists = lists\n\n def __len__(self):\n return len(self.lists)\n\n def __iter__(self):\n return iter(self.lists)\n\n def __getitem__(self, index):\n return self.lists[index]\n\n @classmethod\n def from_url(\n cls,\n name: str,\n url_root: str,\n url_home: str,\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given URL.\n\n Args:\n name:\n url_root:\n url_home:\n select:\n \"\"\"\n session = get_auth_session(url_login, **login)\n lists = cls.get_lists_from_url(url_root, url_home, select, session)\n return cls.from_mailing_lists(name, url_root, lists, select)\n\n @classmethod\n def from_mailing_lists(\n cls,\n name: str,\n url_root: str,\n url_mailing_lists: Union[List[str], List[ListservList]],\n select: dict,\n url_login: str = \"https://list.etsi.org/scripts/wa.exe?LOGON\",\n login: Optional[Dict[str, str]] = {\"username\": None, \"password\": None},\n session: Optional[str] = None,\n ) -> \"ListservArchive\":\n \"\"\"\n Create ListservArchive from a given list of 'ListservList'.\n\n Args:\n name:\n url_root:\n url_mailing_lists:\n\n \"\"\"\n if isinstance(url_mailing_lists[0], str):\n if session is None:\n session = get_auth_session(url_login, **login)\n lists = []\n for idx, url in enumerate(url_mailing_lists):\n lists.append(\n ListservList.from_url(\n name=idx,\n url=url,\n select=select,\n session=session,\n )\n )\n else:\n lists = url_mailing_lists\n return cls(name, url_root, lists)\n\n @staticmethod\n def 
get_lists_from_url(\n url_root: str,\n url_home: str,\n select: dict,\n session: Optional[str] = None,\n ) -> List[ListservList]:\n \"\"\"\n Created dictionary of all lists in the archive.\n\n Args:\n\n Returns:\n archive_dict: the keys are the names of the lists and the value their url\n \"\"\"\n archive = []\n # run through archive sections\n for url in list(\n ListservArchive.get_sections(url_root, url_home).keys()\n )[:1]:\n soup = get_website_content(url)\n a_tags_in_section = soup.select(\n 'a[href*=\"A0=\"][onmouseover*=\"showDesc\"][onmouseout*=\"hideDesc\"]',\n )\n\n # run through archive lists in section\n for a_tag in a_tags_in_section:\n value = urllib.parse.urljoin(url_root, a_tag.get(\"href\"))\n key = value.split(\"A0=\")[-1]\n mlist = ListservList.from_url(\n name=key,\n url=value,\n select=select,\n session=session,\n )\n if len(mlist) != 0:\n archive.append(mlist)\n return archive\n\n def get_sections(url_root: str, url_home: str) -> int:\n \"\"\"\n Get different sections of archive. On the website they look like:\n [3GPP] [3GPP–AT1] [AT2–CONS] [CONS–EHEA] [EHEA–ERM_] ...\n\n Returns:\n If sections exist, it returns their urls and names. Otherwise it returns\n the url_home.\n \"\"\"\n soup = get_website_content(url_home)\n sections = soup.select(\n 'a[href*=\"INDEX=\"][href*=\"p=\"]',\n )\n archive_sections_dict = {}\n if sections:\n for sec in sections:\n key = urllib.parse.urljoin(url_root, sec.get(\"href\"))\n value = sec.text\n if value in [\"Next\", \"Previous\"]:\n continue\n archive_sections_dict[key] = value\n # TODO check that p=1 is included\n else:\n archive_sections_dict[url_home] = \"Home\"\n return archive_sections_dict\n\n def to_dict(self) -> Dict[str, List[str]]:\n \"\"\"\n Place all message in all lists into a dictionary of the form:\n dic = {\n \"Subject\": [messages[0], ... , messages[n]],\n .\n .\n .\n \"ListName\": [messages[0], ... 
, messages[n]]\n }\n \"\"\"\n # initialize dictionary\n dic = {}\n for key in list(self.lists[0].messages[0].to_dict().keys()):\n dic[key] = []\n dic[\"ListName\"] = []\n # run through lists\n for mlist in self.lists:\n # run through messages\n for msg in mlist.messages:\n # run through message attributes\n for key, value in msg.to_dict().items():\n dic[key].append(value)\n dic[\"ListName\"].append(mlist.name)\n return dic\n\n def to_pandas_dataframe(self) -> pd.DataFrame:\n return pd.DataFrame.from_dict(self.to_dict())\n\n def to_mbox(self, dir_out: str):\n \"\"\"\n Save Archive content to .mbox files\n \"\"\"\n for llist in self.lists:\n llist.to_mbox(dir_out)\n\n\ndef get_auth_session(\n url_login: str, username: str, password: str\n) -> requests.Session:\n \"\"\" Create AuthSession \"\"\"\n # ask user for login keys\n username, password = get_login_from_terminal(username, password)\n if username is None or password is None:\n # continue without authentication\n return None\n else:\n # Start the AuthSession\n session = requests.Session()\n # Create the payload\n payload = {\n \"LOGIN1\": \"\",\n \"Y\": username,\n \"p\": password,\n \"X\": \"\",\n }\n # Post the payload to the site to log in\n session.post(url_login, data=payload)\n return session\n\n\ndef get_login_from_terminal(\n username: Union[str, None],\n password: Union[str, None],\n file_auth: str = \"../config/authentication.yaml\",\n) -> Tuple[Union[str, None]]:\n \"\"\"\n Get login key from user during run time if 'username' and/or 'password' is 'None'.\n Return 'None' if no reply within 15 sec.\n \"\"\"\n if username is None or password is None:\n record = True\n else:\n record = False\n if username is None:\n username = ask_for_input(\"Enter your Email: \")\n if password is None:\n password = ask_for_input(\"Enter your Password: \")\n if record and isinstance(username, str) and isinstance(password, str):\n loginkey_to_file(username, password, file_auth)\n return username, password\n\n\ndef ask_for_input(request: str) -> Union[str, None]:\n timeout = 15\n end_time = time.time() + timeout\n while time.time() < end_time:\n reply = input(request)\n try:\n assert isinstance(reply, str)\n break\n except Exception:\n reply = None\n continue\n return reply\n\n\ndef loginkey_to_file(\n username: str,\n password: str,\n file_auth: str,\n) -> None:\n \"\"\" Safe login key to yaml \"\"\"\n file = open(file_auth, \"w\")\n file.write(f\"username: '{username}'\\n\")\n file.write(f\"password: '{password}'\")\n file.close()\n\n\ndef get_website_content(\n url: str,\n session: Optional[requests.Session] = None,\n) -> BeautifulSoup:\n \"\"\" Get HTML code from website \"\"\"\n # TODO: include option to change BeautifulSoup args\n if session is None:\n sauce = requests.get(url)\n assert sauce.status_code == 200\n soup = BeautifulSoup(sauce.content, \"lxml\")\n else:\n sauce = session.get(url)\n soup = BeautifulSoup(sauce.text, \"lxml\")\n return soup\n\n\ndef get_all_file_from_directory(directory: str, file_dsc: str) -> List[str]:\n \"\"\" Get paths of all files matching file_dsc in directory \"\"\"\n template = f\"{directory}{file_dsc}\"\n file_paths = glob.glob(template)\n return file_paths\n" ]
[ [ "numpy.max", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bhigy/discrete-repr
[ "3d4a4fc3833df3a1fa287c78c7402ce6df09abd4" ]
[ "metrics.py" ]
[ "from collections import Counter\nfrom itertools import groupby\nfrom math import log2\nimport numpy as np\n\n\ndef segments_start(array):\n return [i for i in range(len(array)) if i == 0 or array[i] != array[i-1]]\n\n\ndef split_sequences(array, start):\n end = start[1:] + [len(array)]\n return [array[s:e] for s, e in zip(start, end)]\n\n\ndef coverage_top_1(labels, codes):\n '''\n Computes the coverage of label segments by the most frequent co-occuring\n code.\n '''\n start = segments_start(labels)\n segments = split_sequences(codes, start)\n return [sorted(Counter(s).values())[-1] / len(s) for s in segments]\n\n\ndef compute_joint_probability(x, y):\n labels_x = np.unique(x)\n idx_x = {v: i for i, v in enumerate(labels_x)}\n labels_y = np.unique(y)\n idx_y = {v: i for i, v in enumerate(labels_y)}\n counts_xy = np.zeros([len(labels_x), len(labels_y)])\n for xi, yi in zip(x, y):\n counts_xy[idx_x[xi], idx_y[yi]] += 1\n return labels_x, labels_y, counts_xy / len(x)\n\n\ndef conditional_entropy(x, y):\n labels_x, labels_y, p_xy = compute_joint_probability(x, y)\n p_y = np.sum(p_xy, axis=0)\n h_x_y = 0\n for i_x in range(len(labels_x)):\n for i_y in range(len(labels_y)):\n if p_xy[i_x, i_y] > 0:\n h_x_y -= p_xy[i_x, i_y] * log2(p_xy[i_x, i_y] / p_y[i_y])\n return h_x_y\n\n\ndef count_repetitions(array):\n return [len(list(v)) for _, v in groupby(array)]\n" ]
[ [ "numpy.sum", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shivammalviya712/Real-Time-Trigger-Word-Detection
[ "7ad9144d31ef407f7326750633471dcb30cb5e46" ]
[ "code/realtime.py" ]
[ "\"\"\"Implement the model in real time.\"\"\"\n\n# Third party modules\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sounddevice as sd\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\n\nclass Realtime:\n \"\"\"Implement the modle in real time.\"\"\"\n def __init__(self, settings):\n \"\"\"Intiallise the attributes.\"\"\"\n self.Ty = settings.Ty\n self.Tx = settings.Tx\n self.Tnew = settings.Tnew\n self.n_freq = settings.n_freq\n self.fs = settings.fs\n self.duration = settings.duration\n self.threshold = settings.threshold\n self.new_x = None\n self.chime = AudioSegment.from_wav(\n './dataset/activate/chime/chime.wav')\n self.x = np.zeros((1, self.Tx, self.n_freq))\n self.new_audio = np.zeros(shape=(int(self.Tnew * self.fs), 2))\n\n sd.default.samplerate = self.fs\n sd.default.channels = 2\n\n \n def refresh_audio(self):\n \"\"\"It adds spectrogram of new audio \n to the x.\n \"\"\"\n self.new_audio = sd.rec(frames=int(self.Tnew * self.fs))\n sd.wait()\n self.new_x = self.spectrogram(self.new_audio).T\n self.x[0, :self.Tx-len(self.new_x)] = self.x[0, len(self.new_x):]\n self.x[0, self.Tx-len(self.new_x):] = self.new_x\n\n\n\n def spectrogram(self, sound, plotting=False):\n \"\"\"It generates the spectrogram \n of the sound given.\n \n # Arguments\n sound: ndarray\n The recorded sound.\n\n # Returns\n x: ndarray\n The spectrogram of the sound.\n \"\"\"\n nfft = 200\n noverlap = 120\n nchannels = sound.ndim\n if nchannels == 1:\n x, freqs, bins, im = plt.specgram(\n x=sound, NFFT=nfft, Fs=self.fs, noverlap=noverlap)\n elif nchannels == 2:\n x, freqs, bins, im = plt.specgram(\n x=sound[:, 0], NFFT=nfft, Fs=self.fs, noverlap=noverlap)\n else:\n print('The audio has more than 2 channels') \n \n if plotting==True:\n plt.show(block=False)\n plt.pause(0.001)\n\n return x\n\n\n def check_trigger(self, y):\n \"\"\"It checks if wake word is\n predicted or not. If the wake\n word is present then it produces\n a chime sound.\n \n # Arguments\n y: ndarray\n Prediction of our model for\n Realtime.x as the input.\n \"\"\"\n for i in range(self.Ty-1, -1, -1):\n if y[0, i] > self.threshold:\n play(self.chime)\n break " ]
[ [ "matplotlib.pyplot.specgram", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mfarthin/PyDMD
[ "ac2c800cfa9fb23ab110d2b2957b5681e2aa5055", "ac2c800cfa9fb23ab110d2b2957b5681e2aa5055" ]
[ "pydmd/hodmd.py", "pydmd/dmd.py" ]
[ "\"\"\"\nDerived module from dmdbase.py for higher order dmd.\n\nReference:\n- S. L Clainche, J. M. Vega, Higher Order Dynamic Mode Decomposition.\nJournal on Applied Dynamical Systems, 16(2), 882-925, 2017.\n\"\"\"\nimport numpy as np\n\nfrom .dmdbase import DMDBase\nfrom .utils import compute_tlsq\n\n\nclass HODMD(DMDBase):\n \"\"\"\n Higher Order Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means no truncation.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: argument to control the computation of DMD modes amplitudes. See\n :class:`DMDBase`. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param int d: the new order for spatial dimension of the input snapshots.\n Default is 1.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n :param reconstruction_method: Due to how HODMD is defined, we have several\n versions of the same snapshot. 
The parameter `reconstruction_method`\n allows changing how these versions are combined in `reconstructed_data`.\n If `'first'`, only the first version is selected (default behavior);\n if `'mean'` we take the mean of all the versions; if the parameter is an\n array of floats of size `d`, the return value is the weighted average\n of the versions.\n :type reconstruction_method: {'first', 'mean'} or array-like\n \"\"\"\n\n def __init__(self, svd_rank=0, tlsq_rank=0, exact=False, opt=False,\n rescale_mode=None, forward_backward=False, d=1, sorted_eigs=False,\n reconstruction_method='first'):\n super(HODMD, self).__init__(svd_rank=svd_rank, tlsq_rank=tlsq_rank,\n exact=exact, opt=opt, rescale_mode=rescale_mode,\n sorted_eigs=sorted_eigs)\n self._d = d\n\n if isinstance(reconstruction_method, list):\n if len(reconstruction_method) != d:\n raise ValueError('The length of the array of weights must be equal to d')\n elif isinstance(reconstruction_method, np.ndarray):\n if reconstruction_method.ndim > 1 or reconstruction_method.shape[0] != d:\n raise ValueError('The length of the array of weights must be equal to d')\n self._reconstruction_method = reconstruction_method\n\n @property\n def d(self):\n return self._d\n\n def reconstructions_of_timeindex(self, timeindex=None):\n rec = super(HODMD, self).reconstructed_data\n space_dim = rec.shape[0] // self.d\n time_instants = rec.shape[1] + self.d - 1\n\n # for each time instance, we take the mean of all its appearences.\n # each snapshot appears at most d times (for instance, the first and the\n # last appear only once).\n reconstructed_snapshots = np.full((time_instants, self.d, space_dim), np.nan, dtype=np.complex128)\n\n for time_slice_idx in range(rec.shape[1]):\n time_slice = rec[:, time_slice_idx]\n\n for i in range(self.d):\n mx = time_slice[space_dim * i : space_dim * (i + 1)]\n if not np.ma.is_masked(mx):\n reconstructed_snapshots[time_slice_idx + i, i] = mx\n\n if timeindex is None:\n return reconstructed_snapshots\n else:\n return reconstructed_snapshots[timeindex]\n\n @property\n def reconstructed_data(self):\n rec = self.reconstructions_of_timeindex()\n rec = np.ma.array(rec, mask=np.isnan(rec))\n\n if self._reconstruction_method == 'first':\n return rec[:,0].T\n elif self._reconstruction_method == 'mean':\n return np.mean(rec, axis=1).T\n elif (isinstance(self._reconstruction_method, list) or\n isinstance(self._reconstruction_method, np.ndarray)):\n return np.average(rec, axis=1, weights=self._reconstruction_method).T\n else:\n raise ValueError(\"The reconstruction method wasn't recognized: {}\"\n .format(self._reconstruction_method))\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Modes Decomposition to the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n snp, self._snapshots_shape = self._col_major_2darray(X)\n self._snapshots = np.concatenate(\n [\n snp[:, i:snp.shape[1] - self.d + i + 1]\n for i in range(self.d)\n ],\n axis=0)\n\n n_samples = self._snapshots.shape[1]\n X = self._snapshots[:, :-1]\n Y = self._snapshots[:, 1:]\n\n X, Y = compute_tlsq(X, Y, self.tlsq_rank)\n U, s, V = self.operator.compute_operator(X,Y)\n\n # Default timesteps\n self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n\n self._b = self._compute_amplitudes()\n\n return self\n", "\"\"\"\nDerived module from dmdbase.py for classic dmd.\n\"\"\"\n\n# --> Import standard python packages\nimport numpy as np\n\n# --> Import PyDMD base class 
for DMD.\nfrom .dmdbase import DMDBase\n\nfrom .dmdoperator import DMDOperator\nfrom .utils import compute_tlsq\n\nfrom scipy.linalg import pinv2\n\ndef pinv(x): return pinv2(x, rcond=10 * np.finfo(float).eps)\n\nclass DMD(DMDBase):\n \"\"\"\n Dynamic Mode Decomposition\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, the\n method uses the argument for the truncation; if float between 0 and 1,\n the rank is the number of the biggest singular values that are needed\n to reach the 'energy' specified by `svd_rank`; if -1, the method does\n not compute truncation.\n :type svd_rank: int or float\n :param int tlsq_rank: rank truncation computing Total Least Square. Default\n is 0, that means TLSQ is not applied.\n :param bool exact: flag to compute either exact DMD or projected DMD.\n Default is False.\n :param opt: argument to control the computation of DMD modes amplitudes. See\n :class:`DMDBase`. Default is False.\n :type opt: bool or int\n :param rescale_mode: Scale Atilde as shown in\n 10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its\n eigendecomposition. None means no rescaling, 'auto' means automatic\n rescaling using singular values, otherwise the scaling factors.\n :type rescale_mode: {'auto'} or None or numpy.ndarray\n :param bool forward_backward: If True, the low-rank operator is computed\n like in fbDMD (reference: https://arxiv.org/abs/1507.02264). Default is\n False.\n :param sorted_eigs: Sort eigenvalues (and modes/dynamics accordingly) by\n magnitude if `sorted_eigs='abs'`, by real part (and then by imaginary\n part to break ties) if `sorted_eigs='real'`. Default: False.\n :type sorted_eigs: {'real', 'abs'} or False\n \"\"\"\n\n def fit(self, X):\n \"\"\"\n Compute the Dynamic Modes Decomposition to the input data.\n\n :param X: the input snapshots.\n :type X: numpy.ndarray or iterable\n \"\"\"\n self._snapshots, self._snapshots_shape = self._col_major_2darray(X)\n\n n_samples = self._snapshots.shape[1]\n X = self._snapshots[:, :-1]\n Y = self._snapshots[:, 1:]\n\n X, Y = compute_tlsq(X, Y, self.tlsq_rank)\n self._svd_modes, _, _ = self.operator.compute_operator(X,Y)\n\n # Default timesteps\n self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}\n\n self._b = self._compute_amplitudes()\n\n return self\n\n def predict(self, X):\n \"\"\"Predict the output Y given the input X using the fitted DMD model.\n\n Parameters\n ----------\n X : numpy array\n Input data.\n\n Returns\n -------\n Y : numpy array\n Predicted output.\n\n \"\"\"\n return np.linalg.multi_dot([self.modes, np.diag(self.eigs),\n pinv(self.modes), X])\n" ]
[ [ "numpy.isnan", "numpy.full", "numpy.mean", "numpy.average", "numpy.ma.is_masked" ], [ "numpy.diag", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martahal/DeepLearning
[ "c3a70a117c2f3417832c7caecd3baf6cd9862ae2", "c3a70a117c2f3417832c7caecd3baf6cd9862ae2" ]
[ "GenerativeModelling/gen_autoencoder_routine.py", "GenerativeModelling/var_ae_routine.py" ]
[ "from GenerativeModelling.Autoencoder import Autoencoder\nfrom GenerativeModelling.Encoder import Encoder\nfrom GenerativeModelling.Decoder import Decoder\nfrom GenerativeModelling.verification_net import VerificationNet\nfrom SemiSupervisedLearning import visualisations\nfrom GenerativeModelling.Trainer import Trainer\nfrom GenerativeModelling.stacked_mnist import StackedMNISTData, DataMode\nfrom GenerativeModelling import utils\n\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pathlib\n\n\nclass Generative_AE_Routine:\n\n def __init__(\n self,\n data,\n autoencoder_learning_rate: float,\n autoencoder_loss_function: str,\n autoencoder_optimizer: str,\n autoencoder_epochs: int,\n\n\n latent_vector_size: int,\n batch_size: int,\n num_samples: int,\n save_path: str\n\n ):\n self.data = utils.get_data_to_tensors(data, batch_size)\n self.image_dimensions = (data.test_images.shape[-1], data.test_images.shape[-2], data.test_images.shape[-3])\n self.num_samples = num_samples\n self.batch_size = batch_size\n self.latent_vector_size = latent_vector_size\n\n\n self.encoder = Encoder(\n input_shape=self.image_dimensions,\n num_filters=16,\n last_conv_layer_dim= (16,10, 10), #(32, 4, 4),\n output_vector_size=latent_vector_size)\n\n self.decoder = Decoder(\n input_size=latent_vector_size,\n encoder_last_layer_dim=self.encoder.last_conv_layer_dim,\n hidden_filters=self.encoder.num_filters,\n output_size=self.image_dimensions)\n\n self.autoencoder = Autoencoder(self.encoder, self.decoder, self.image_dimensions)\n\n self.autoencoder_trainer = Trainer(\n batch_size=batch_size,\n lr=autoencoder_learning_rate,\n epochs=autoencoder_epochs,\n model=self.autoencoder,\n data=self.data,\n loss_function=autoencoder_loss_function,\n optimizer=autoencoder_optimizer,\n early_stop_count = 4,\n model_save_path=save_path,\n )\n\n def train_autoencoder(self):\n #self.autoencoder_trainer.load_best_model()\n self.autoencoder_trainer.do_autoencoder_train()\n self.plot_autoencoder_training(self.autoencoder_trainer)\n\n def reconstruct_test_data(self, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n #selecting a fixed sample of the test data we like to visualize\n visualisation_data = self.data[1]\n images, reconstructions, labels = utils.make_reconstructions(\n self.autoencoder,\n visualisation_data,\n num_images=25,\n batch_size=self.batch_size,\n image_dimensions=self.image_dimensions,\n title=f'AE_z_size:{self.latent_vector_size}_lr_{self.autoencoder_trainer.lr}_epochs:{self.autoencoder_trainer.epochs}'\n )\n # checking quality of reproduced images\n return images, reconstructions, labels\n\n def anomaly_detection(self, k, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n # Calculate reconstruction loss (MSE) for test data\n # plot the k most anomalous images\n images, reconstructions, losses = self.autoencoder_trainer.ae_detect_anomaly_by_loss()\n\n worst_indices = np.argsort(losses)[-1:-(k + 1):-1]\n 
print(\"Anomaly loss values:\", [losses[index] for index in worst_indices])\n anomalies = np.array([images[index] for index in worst_indices])\n visualisations.show_images_and_reconstructions(anomalies, f'AE_Anomalies_latent_size:{self.latent_vector_size}_lr_{self.autoencoder_trainer.lr}_epochs:{self.autoencoder_trainer.epochs}')\n\n\n\n def generate_samples(self, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n Z = self.get_latent_vector_and_classes(self.autoencoder.encoder, self.num_samples)#, self.dataloaders)\n generated_images = utils.generate_images_from_Z(Z, self.autoencoder.decoder, self.image_dimensions, title=\"Gen_AE_generated_images\")\n return generated_images\n\n def check_autoencoder_performance(self, verification_net, tolerance, images, labels=None, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.autoencoder.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n coverage = verification_net.check_class_coverage(\n data=images,\n tolerance=tolerance\n )\n print(f\"Coverage: {100 * coverage:.2f}%\")\n if labels is not None:\n #if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n correct_labels=labels,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")\n print(f\"Accuracy: {100 * accuracy:.2f}%\")\n else:\n if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")\n\n @staticmethod\n def get_latent_vector_and_classes(encoder, n_samples):\n \"\"\"\n samples a random distribution of the latent vectors, Z\n :param encoder: The encoder that produces the latent vectors\n :param n_samples: number of samples from Z\n :return: a random sample of Z from the standard normal distribution\n \"\"\"\n p = torch.distributions.Normal(torch.zeros(encoder.output_vector_size), torch.ones(encoder.output_vector_size))\n temp_tensor = torch.ones(n_samples)\n Z = p.sample(sample_shape=temp_tensor.shape) # Wow, so ugly, but my brain hurts now\n return Z\n\n @staticmethod\n def plot_autoencoder_training(autoencoder_trainer):\n plt.figure(figsize=(10, 8))\n plt.title('Autoencoder loss')\n visualisations.plot_metric(autoencoder_trainer.train_history['loss'], label='Autoencoder training loss',\n averaged_plot=True)\n visualisations.plot_metric(autoencoder_trainer.validation_history['loss'], label='Autoencoder validation loss',\n averaged_plot=False)\n # plt.ylim(bottom=0, top=1)\n plt.legend()\n plt.savefig(f'figures/autoencoder_{autoencoder_trainer.loss_function}_{autoencoder_trainer.epochs}_training.png')\n\n\n\ndef main():\n torch.manual_seed(0)\n \"\"\" GENERATIVE AUTOENCODER ROUTINE\"\"\"\n batch_size = 16\n data_object = StackedMNISTData(mode=DataMode.MONO_FLOAT_COMPLETE, default_batch_size=batch_size)\n #instantiate verification network\n net = VerificationNet(force_learn=False)\n net.train(generator=data_object, epochs=5) # gen=data_object, makes sure we test on the 
same type of data as the model was trained on\n verification_tolerance = 0.8 if data_object.channels == 1 else 0.5\n\n autoencoder_learning_rate = 0.0002\n autoencoder_loss_function = 'MSE' #'binary_cross_entropy' # AVAILABLE 'binary_cross_entropy'\n autoencoder_optimizer = 'adam'#'SGD'# # AVAILABLE 'SGD' # #\n autoencoder_epochs = 1 # Optimal for MNIST: 3\n\n num_samples = 2000\n latent_vector_size = 64 # recommended for MNIST between 16 and 64\n gen_name = 'Test_gen_AE'\n gen_ae_save_path = f'checkpoints/gen_AE/{gen_name}'\n gen_autoencoder = Generative_AE_Routine(\n data_object,\n autoencoder_learning_rate,\n autoencoder_loss_function,\n autoencoder_optimizer,\n autoencoder_epochs,\n\n latent_vector_size,\n batch_size,\n num_samples,\n gen_ae_save_path\n )\n gen_autoencoder.train_autoencoder()\n images, reconstructions, labels = gen_autoencoder.reconstruct_test_data()\n #Check quality of reconstructions\n gen_autoencoder.check_autoencoder_performance(net, verification_tolerance, reconstructions, labels)\n#\n ##Generate samples\n #generated_images = gen_autoencoder.generate_samples()\n#\n ##check quality of generated images\n #gen_autoencoder.check_autoencoder_performance(net, verification_tolerance, generated_images)\n#\n #\"\"\" ANOMALY DETECTOR AUTOENCODER ROUTINE\"\"\"\n #data_object = StackedMNISTData(mode=DataMode.MONO_FLOAT_MISSING, default_batch_size=batch_size)\n #number_anom_images_to_show = 16\n #anom_name = 'Test_anom_AE'\n #anom_ae_save_path = f'checkpoints/anom_AE/{anom_name}/'\n #anom_autoencoder = Generative_AE_Routine(\n # data_object,\n # autoencoder_learning_rate,\n # autoencoder_loss_function,\n # autoencoder_optimizer,\n # autoencoder_epochs,\n #\n # latent_vector_size,\n # batch_size,\n # num_samples,\n # anom_ae_save_path\n #)\n #anom_autoencoder.train_autoencoder()\n#\n #anom_autoencoder.anomaly_detection(number_anom_images_to_show)\nif __name__ == '__main__':\n main()", "from GenerativeModelling.VAE import VAE\nfrom GenerativeModelling.VAE_Encoder import Encoder\nfrom GenerativeModelling.VAE_Decoder import Decoder\nfrom GenerativeModelling.verification_net import VerificationNet\nfrom SemiSupervisedLearning import visualisations\nfrom GenerativeModelling.Trainer import Trainer\nfrom GenerativeModelling.stacked_mnist import StackedMNISTData, DataMode\nfrom GenerativeModelling import utils\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n\nimport pathlib\n\n\nclass VAE_Routine():\n def __init__(\n self,\n data,\n learning_rate: float,\n loss_function: str,\n optimizer: str,\n epochs: int,\n\n latent_vector_size: int,\n batch_size: int,\n num_samples: int,\n save_path: str\n ):\n self.data = utils.get_data_to_tensors(data, batch_size)\n self.image_dimensions = (data.test_images.shape[-1], data.test_images.shape[-2], data.test_images.shape[-3])\n self.num_samples = num_samples\n self.batch_size = batch_size\n self.enc_last_layer_dim = (8,10,10) #(32, 2, 2)#(8, 4, 4) #\n self.latent_vector_size = latent_vector_size\n\n\n\n self.encoder = Encoder(\n input_shape=self.image_dimensions,\n num_filters=32,\n last_conv_layer_dim=self.enc_last_layer_dim,\n output_vector_size=latent_vector_size * 2, # trying this first\n latent_vector_size= latent_vector_size # TODO check this\n ) # The encoder is no longer outputting to the latent vector but to the mean and variance layers\n\n self.decoder = Decoder(\n input_size=latent_vector_size,\n encoder_last_layer_dim=self.encoder.last_conv_layer_dim,\n hidden_filters=self.encoder.num_filters,\n 
output_size=self.image_dimensions)\n\n self.vae = VAE(self.encoder, self.decoder)\n\n self.vae_trainer = Trainer(\n batch_size=batch_size,\n lr=learning_rate,\n epochs=epochs,\n model=self.vae,\n data=self.data,\n loss_function=loss_function,\n optimizer=optimizer,\n early_stop_count=4,\n model_save_path=save_path,\n is_vae=True\n )\n\n def train_vae(self):\n\n self.vae_trainer.do_VAE_train()\n\n self.plot_vae_training(self.vae_trainer, self.enc_last_layer_dim)\n\n def reconstruct_test_data(self, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.vae.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n\n # selecting a fixed sample of the test data we like to visualize\n visualisation_data = self.data[1][:] #self.data[1][:12]\n images, reconstructions, labels = utils.make_vae_reconstructions(\n self.vae,\n visualisation_data,\n num_images=25,\n batch_size=self.batch_size,\n image_dimensions=self.image_dimensions,\n title=f'VAE_z_size:{self.latent_vector_size}_lr_{self.vae_trainer.lr}_epochs:{self.vae_trainer.epochs}'\n )\n # checking quality of reproduced images\n # Returned images are numpy arrays\n return images, reconstructions, labels\n\n def anomaly_detection(self, k, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.vae.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n\n # Calculate reconstruction loss (MSE) for test data\n # plot the k most anomalous images\n images, reconstructions, losses = self.vae_trainer.vae_detect_anomaly_by_loss()\n\n worst_indices = np.argsort(losses)[-1:-(k + 1):-1]\n print(\"Anomaly loss values:\", [losses[index] for index in worst_indices])\n anomalies = np.array([images[index] for index in worst_indices])\n visualisations.show_images_and_reconstructions(anomalies, f'VAE_Anomalies_latent_size:{self.latent_vector_size}_lr_{self.vae_trainer.lr}_epochs:{self.vae_trainer.epochs}')\n\n\n\n def generate_samples(self, data_object, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.vae.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n Z = self.sample_Z(self.vae.encoder, data_object, self.num_samples)\n generated_images = utils.generate_images_from_Z(\n Z,\n self.vae.decoder,\n self.image_dimensions,\n title=f'VAE_z_size:{self.latent_vector_size}_lr_{self.vae_trainer.lr}_epochs:{self.vae_trainer.epochs}'\n )\n return generated_images\n\n def check_vae_performance(self, verification_net, tolerance, images, labels=None, load_model_path=None):\n if load_model_path is not None:\n # self.vae_trainer.load_best_model() Does not return the model but sets the self.model in trainer to be best model\n # see if we can do:\n self.vae.load_state_dict(torch.load(pathlib.Path(load_model_path).joinpath(\"best.ckpt\")))\n print(f'Loaded model from {load_model_path}')\n\n coverage = verification_net.check_class_coverage(\n data=images,\n tolerance=tolerance\n )\n print(f\"Coverage: {100 * 
coverage:.2f}%\")\n if labels is not None:\n if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n correct_labels=labels,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")\n print(f\"Accuracy: {100 * accuracy:.2f}%\")\n else:\n if coverage != 0.0:\n predictability, accuracy = verification_net.check_predictability(\n data=images,\n tolerance=tolerance\n )\n print(f\"Predictability: {100 * predictability:.2f}%\")#\")\n\n\n @staticmethod\n def plot_vae_training(vae_trainer, enc_last_layer_dim):\n plt.figure(figsize=(10, 8))\n plt.title('ELBO loss')\n visualisations.plot_metric(vae_trainer.train_history['loss'], label='VAE training loss',\n averaged_plot=True)\n visualisations.plot_metric(vae_trainer.validation_history['loss'], label='VAE validation loss',\n averaged_plot=False)\n # plt.ylim(bottom=0, top=1)\n plt.legend()\n plt.savefig(\n f'figures/VAE_ll_dim:{enc_last_layer_dim}_lr:{vae_trainer.lr}_epochs:{vae_trainer.epochs}_training.png')\n plt.show()\n\n @staticmethod\n def sample_Z(encoder, data, n_samples):\n \"\"\"\n samples a random distribution of the latent vectors, Z, that is produced by the data examples\n :param encoder: The encoder that produces the latent vectors\n :param n_samples: number of samples from Z\n :return: a random sample of Z from the standard normal distribution\n \"\"\"\n epsilon = torch.distributions.Normal(torch.zeros(encoder.latent_vector_size), torch.ones(encoder.latent_vector_size))\n # Ugly fix to get sample in the shape I want\n temp_tensor = torch.ones(n_samples)\n # Inefficient quick-fix to make data batch in the shape we want\n #(train_data, test_data) = utils.get_data_to_tensors(data, batch_size=n_samples)\n\n # Reparametrization trick\n #x_hat = test_data[0][0]\n #mu, sigma = encoder(x_hat)\n # get samples Z = mean * std * epsilon\n Z = epsilon.sample(sample_shape=temp_tensor.shape) #* mu * sigma\n return Z\ndef main():\n torch.manual_seed(1)\n \"\"\" GENERATIVE VAE ROUTINE\"\"\"\n batch_size = 256\n data_object = StackedMNISTData(\n mode=DataMode.MONO_FLOAT_COMPLETE,\n default_batch_size=batch_size)\n net = VerificationNet(force_learn=False)\n net.train(\n generator=data_object,\n epochs=5) # gen=data_object, makes sure we test on the same type of data as the model was trained on\n verification_tolerance = 0.8 if data_object.channels == 1 else 0.5\n\n learning_rate = 1.0e-2\n loss_function = 'elbo'\n optimizer= 'adam'\n epochs = 1\n\n latent_vector_size = 128\n num_samples = 2000\n gen_name = 'Test_gen_VAE'\n gen_vae_save_path = f'checkpoints/gen_VAE/{gen_name}'\n vae_routine = VAE_Routine(\n data_object,\n learning_rate,\n loss_function,\n optimizer,\n epochs,\n#\n latent_vector_size,\n batch_size,\n num_samples,\n gen_vae_save_path\n )\n #vae_routine.train_vae()\n # Note, returned images, reconstructions and gen images are np arrays\n\n images, reconstructions, labels = vae_routine.reconstruct_test_data()\n ## Check quality of reconstructions:\n #print('CHECKING RECONSTRUCTED IMAGES QUALITY')\n print(f'Number of reconstructions: {len(reconstructions)}')\n vae_routine.check_vae_performance(net, verification_tolerance, reconstructions, labels)\n#\n#\n ## Check quality of generated images\n #print('CHECKING GENERATED IMAGES QUALITY')\n generated_images = vae_routine.generate_samples(data_object)\n print(f'Number of generated images: {len(generated_images)}')\n vae_routine.check_vae_performance(net, verification_tolerance, generated_images)\n\n \"\"\" ANOMALY DETECTOR 
VAE ROUTINE\"\"\"\n #data_object = StackedMNISTData(mode=DataMode.MONO_FLOAT_MISSING, default_batch_size=batch_size)\n #number_anom_images_to_show = 16\n #anom_name = 'Test_anom_VAE'\n #anom_vae_save_path = f'checkpoints/anom_VAE/{anom_name}'\n #anom_vae = VAE_Routine(\n # data_object,\n # learning_rate,\n # loss_function,\n # optimizer,\n # epochs,\n#\n # latent_vector_size,\n # batch_size,\n # num_samples,\n # anom_vae_save_path\n#\n #)\n #anom_vae.train_vae()\n #anom_vae.anomaly_detection(number_anom_images_to_show)\n\nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.pyplot.legend", "torch.ones", "matplotlib.pyplot.title", "torch.zeros", "torch.manual_seed", "matplotlib.pyplot.savefig", "numpy.argsort", "numpy.array", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "torch.ones", "matplotlib.pyplot.title", "torch.zeros", "torch.manual_seed", "matplotlib.pyplot.savefig", "numpy.argsort", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mlepori1/Representations_Of_Syntax
[ "7a09004a1e443618ee6b1645e54224766c3965f1" ]
[ "Natural_and_Artificial/MARCC/test_aug500_const_1.py" ]
[ "import sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\nimport numpy as np\nimport csv\nimport copy\n\nfrom torch import optim\nfrom torch.nn import BCELoss\nfrom torch.optim import Adam\nimport torch\n\nfrom random import shuffle\nimport random\n\nimport models\nimport pickle\n\n\nembed_matrix = pickle.load(open('./data/embed_matrix.pkl', 'rb'))\nword2idx = pickle.load(open('./data/word2idx.pkl', 'rb'))\n\nany_attractors = pickle.load(open('./data/final_any_attractors.pkl', 'rb'))\none_attractor = pickle.load(open('./data/final_one_attractor.pkl', 'rb'))\ntwo_attractors = pickle.load(open('./data/final_two_attractors.pkl', 'rb'))\nthree_attractors = pickle.load(open('./data/final_three_attractors.pkl', 'rb'))\nfour_attractors = pickle.load(open('./data/final_four_attractors.pkl', 'rb'))\nno_attractors = pickle.load(open('./data/final_no_attractors.pkl', 'rb'))\n\nmodel = models.TreeLSTMClassifier(100, 100, len(word2idx.keys()), 'constituency', pretrained_embeddings=embed_matrix)\nmodel.load_state_dict(torch.load('./augmented_models/aug_const_1_500_model'))\n\nprint(\"No Attractors: \" + str(len(no_attractors)))\nprint(\"Any Attractors: \" + str(len(any_attractors)))\nprint(\"One Attractor: \" + str(len(one_attractor)))\nprint(\"Two Attractors: \" + str(len(two_attractors)))\nprint(\"Three Attractors: \" + str(len(three_attractors)))\nprint(\"Four Attractors: \" + str(len(four_attractors)))\n\n\n############################ Test on No Attractors Test Set\n\nprint('Running on No Attractors Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in no_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print('Not Processed: ' + str(not_processed))\nprint('Accuracy on No Attractors: ' + str(correct/(len(no_attractors) - not_processed)))\n\n############################ Test on Attractors Test Set\n\nprint('Running on Any Attractors Test Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in any_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print('Not Processed: ' + str(not_processed))\nprint('Accuracy on Any Attractors Test: ' + str(correct/(len(any_attractors) - not_processed)))\n\n\n############################ Test on One Attractors Test Set\n\nprint('Running on One Attractor Test Set')\ncorrect = 0\n\nnot_processed = 0\nfor element in one_attractor:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\nprint('Accuracy on One Attractor Test: ' + str(correct/(len(one_attractor) - not_processed)))\n\n\n############################ Test on Two Attractors Test Set\n\nprint('Running on Two Attractors Test Set')\ncorrect = 0\nnot_processed 
= 0\n\nfor element in two_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(two_attractors) != 0:\n\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Two Attractors Test: ' + str(correct/(len(two_attractors) - not_processed)))\n\n############################ Test on Three Attractors Test Set\n\nprint('Running on Three Attractors Test Set')\ncorrect = 0\nnot_processed = 0\n\nfor element in three_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(three_attractors) != 0:\n\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Three Attractors Test: ' + str(correct/(len(three_attractors) - not_processed)))\n\n\n############################ Test on Four Attractors Test Set\n\nprint('Running on Four Attractors Test Set')\ncorrect = 0\nnot_processed = 0\n\nfor element in four_attractors:\n\n seq = element[0]\n const_tree = element[1]\n dep_tags = element[2]\n dep_tree = element[3]\n label = torch.FloatTensor(element[4])\n\n try:\n output = model(const_tree, dep_tree, dep_tags, seq)\n if (output > .5 and label == 1) or (output < .5 and label == 0):\n correct += 1\n except:\n not_processed += 1\n\nif len(four_attractors) != 0:\n if not_processed != 0:\n print(\"Not Processed: \" + str(not_processed))\n print('Accuracy on Four Attractors Test: ' + str(correct/(len(four_attractors) - not_processed)))" ]
[ [ "torch.FloatTensor", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VitaliyPavlyukov/AutoMLWhitebox
[ "4acd55624490707a7fbf036631533e29123bb1bd", "4acd55624490707a7fbf036631533e29123bb1bd" ]
[ "autowoe/lib/types_handler/types_handler.py", "autowoe/lib/autowoe.py" ]
[ "import collections\n\nimport pandas as pd\n\nfrom typing import Dict, Hashable, Optional, Any\nfrom copy import deepcopy\n\nfrom .features_checkers_handlers import dates_handler, dates_checker, cat_checker\n\n\nclass TypesHandler:\n \"\"\"\n Класс для автоматического определения типов признаков.\n Базовая имплементация порядка разработки:\n\n 0.\n 0.a) Парсим то, что указал юзер\n 0.b) Даты пасим С указанием сезонности (\"m\", \"d\", \"wd\", \"h\", \"min\")\n (месяц, день, день недели, час, минута)\n 1.\n Если стринга, то категория\n 2.\n Если отношение shape[1] к количеству уникальных значений >> 5, то категория\n \"\"\"\n\n def __init__(self,\n train: pd.DataFrame,\n public_features_type: Dict[Hashable, Any],\n max_bin_count: Dict[Hashable, Optional[int]] = None,\n features_monotone_constraints: Optional[dict] = None):\n \"\"\"\n\n Args:\n train:\n public_features_type:\n max_bin_count:\n features_monotone_constraints:\n \"\"\"\n self.__train = deepcopy(train)\n self.__public_features_type = deepcopy(public_features_type)\n self.__private_features_type = dict()\n\n if max_bin_count is None:\n max_bin_count = {}\n self.__max_bin_count = collections.defaultdict(lambda: None, max_bin_count)\n\n if features_monotone_constraints is None:\n features_monotone_constraints = {}\n self.__features_monotone_constraints = collections.defaultdict(lambda: \"0\", features_monotone_constraints)\n\n @property\n def train(self):\n \"\"\"\n Read only\n\n Return:\n\n \"\"\"\n return self.__train\n\n @property\n def public_features_type(self):\n \"\"\"\n Read only\n\n Return:\n\n \"\"\"\n return self.__public_features_type\n\n @property\n def private_features_type(self):\n \"\"\"\n Read only\n\n Returns:\n\n \"\"\"\n return self.__private_features_type\n\n @property\n def max_bin_count(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.__max_bin_count\n\n @property\n def features_monotone_constraints(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n return self.__features_monotone_constraints\n\n def __feature_handler(self, feature_name):\n \"\"\"\n\n Args:\n feature_name:\n\n Returns:\n\n \"\"\"\n if dates_checker(self.__train[feature_name]):\n new_features, feature_type = dates_handler(self.__train[feature_name])\n self.__public_features_type[feature_name] = feature_type\n for new_feature_name, new_feature in new_features:\n self.__train[new_feature_name] = new_feature\n self.__max_bin_count[new_feature_name] = self.max_bin_count[feature_name]\n self.__private_features_type[new_feature_name] = \"real\"\n self.__features_monotone_constraints[new_feature_name] = \\\n self.features_monotone_constraints[feature_name]\n\n elif cat_checker(self.__train[feature_name]):\n self.__public_features_type[feature_name] = \"cat\"\n self.__private_features_type[feature_name] = \"cat\"\n self.__features_monotone_constraints[feature_name] = \"1\"\n else:\n self.__public_features_type[feature_name] = \"real\"\n self.__private_features_type[feature_name] = \"real\"\n\n def transform(self):\n \"\"\"\n Основной метод данного класса.\n Если feature_type[feature] == None, то парсим тип признкака\n Иначе происходит обработка указанных типов.\n Возмоожные типы признаков:\n \"cat\"\n \"real\"\n (\"%Y%d%m\", (\"m\", \"d\", \"wd\", \"h\", \"min\"))\n\n Returns:\n\n \"\"\"\n for feature_name in self.public_features_type:\n if not self.public_features_type[feature_name]:\n self.__feature_handler(feature_name)\n elif isinstance(self.public_features_type[feature_name], tuple): # переданы данные для дат\n new_features, _ = 
dates_handler(self.train[feature_name], self.public_features_type[feature_name])\n for new_feature_name, new_feature in new_features:\n self.__train[new_feature_name] = new_feature\n self.__max_bin_count[new_feature_name] = self.max_bin_count[feature_name]\n self.__private_features_type[new_feature_name] = \"real\"\n self.__features_monotone_constraints[new_feature_name] = \\\n self.__features_monotone_constraints[feature_name]\n\n elif self.public_features_type[feature_name] == \"cat\":\n self.__private_features_type[feature_name] = \"cat\"\n self.__features_monotone_constraints[feature_name] = \"1\"\n\n elif self.public_features_type[feature_name] == \"real\":\n self.__private_features_type[feature_name] = \"real\"\n self.__train[feature_name] = pd.to_numeric(self.train[feature_name], errors=\"coerce\")\n\n else:\n raise ValueError(\"The specified data type is not supported\")\n\n return (self.train, self.public_features_type, self.private_features_type,\n self.max_bin_count, self.features_monotone_constraints)\n", "import collections\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom multiprocessing import Pool\nfrom typing import Union, Dict, List, Hashable, Optional, Sequence\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\n\nfrom .cat_encoding.cat_encoding import CatEncoding\nfrom .logging import get_logger, verbosity_to_loglevel\nfrom .optimizer.optimizer import TreeParamOptimizer\nfrom .pipelines.pipeline_homotopy import HTransform\nfrom .pipelines.pipeline_smallnans import SmallNans\nfrom .selectors.selector_first import nan_constant_selector, feature_imp_selector\nfrom .selectors.selector_last import Selector\nfrom .types_handler.types_handler import TypesHandler\nfrom .utilities.cv_split_f import cv_split_f\nfrom .utilities.refit import refit_reg, refit_simple\nfrom .utilities.sql import get_sql_inference_query\nfrom .woe.woe import WoE\n\nlogger = get_logger(__name__)\n\nSplitType = Optional[Union[np.ndarray, List[float], Dict[int, int]]]\n\n\ndef get_monotonic_constr(name: str, train: pd.DataFrame, target: str):\n df = train[[target, name]].dropna()\n try:\n auc = roc_auc_score(df[target].values, df[name].values)\n except (ValueError, TypeError):\n return '0'\n\n return str(int(np.sign(auc - 0.5)))\n\n\n_small_nan_set = {\"__NaN_0__\", \"__NaN_maxfreq__\", \"__NaN_maxp__\", \"__NaN_minp__\",\n \"__Small_0__\", \"__Small_maxfreq__\", \"__Small_maxp__\", \"__Small_minp__\"}\n\n_nan_set = {\"__NaN_0__\", \"__NaN__\", \"__NaN_maxfreq__\", \"__NaN_maxp__\", \"__NaN_minp__\"}\n\n\nclass AutoWoE:\n \"\"\"Implementation of Logistic regression with WoE transformation.\"\"\"\n\n @property\n def weights(self):\n return self._weights\n\n @property\n def intercept(self):\n return self._intercept\n\n @property\n def p_vals(self):\n return self._p_vals\n\n def __init__(self,\n interpreted_model: bool = True,\n monotonic: bool = False,\n max_bin_count: int = 5,\n select_type: Optional[int] = None,\n pearson_th: float = 0.9,\n auc_th: float = .505,\n vif_th: float = 5.,\n imp_th: float = 0.001,\n th_const: Union[int, float] = 0.005,\n force_single_split: bool = False,\n th_nan: Union[int, float] = 0.005,\n th_cat: Union[int, float] = 0.005,\n woe_diff_th: float = 0.01,\n min_bin_size: Union[int, float] = 0.01,\n min_bin_mults: Sequence[float] = (2, 4),\n min_gains_to_split: Sequence[float] = (0.0, 0.5, 1.0),\n auc_tol: float = 1e-4,\n cat_alpha: float = 1,\n cat_merge_to: str = \"to_woe_0\",\n nan_merge_to: str = 'to_woe_0',\n oof_woe: 
bool = False,\n n_folds: int = 6,\n n_jobs: int = 10,\n l1_grid_size: int = 20,\n l1_exp_scale: float = 4,\n imp_type: str = \"feature_imp\",\n regularized_refit: bool = True,\n p_val: float = 0.05,\n debug: bool = False,\n verbose: int = 2,\n **kwargs\n ):\n \"\"\"\n Initialization of the main hyperparameters related to the algorithm for interpretable model\n\n Args:\n interpreted_model: bool\n Model interpretability flag.\n monotonic: bool\n Global condition for monotonic constraints. If \"True\", then only\n monotonic binnings will be built. You can pass values to the .fit\n method that change this condition separately for each feature.\n max_bin_count: int\n Global limit for the number of bins. Can be specified for every\n feature in .fit\n select_type: None or int\n The type to specify the primary feature selection. If the type is an integer,\n then we select the number of features indicated by this number (with the best feature_importance).\n If the value is \"None\", we leave only features with feature_importance greater than 0.\n pearson_th: 0 < pearson_th < 1\n Threshold for feature selection by correlation. All features with\n the absolute value of correlation coefficient greater then\n pearson_th will be discarded.\n auc_th: .5 < auc_th < 1\n Threshold for feature selection by one-dimensional AUC. WoE with AUC < auc_th will\n be discarded.\n vif_th: vif_th > 0\n Threshold for feature selection by VIF. Features with VIF > vif_th\n are iteratively discarded one by one, then VIF is recalculated\n until all VIFs are less than vif_th.\n imp_th: real >= 0\n Threshold for feature selection by feature importance\n th_const:\n Threshold, which determines that the feature is constant.\n If the number of valid values is greater than the threshold, then\n the column is not constant. For float, the number of\n valid values will be calculated as the sample size * th_const\n force_single_split: bool\n In the tree parameters, you can set the minimum number of\n observations in the leaf. Thus, for some features, splitting for 2 beans at least will be impossible. If you specify that\n force_single_split = True, it means that 1 split will be created for the feature, if the minimum bin size is greater than th_const.\n th_nan: int >= 0\n Threshold, which determines that WoE values are calculated to NaN.\n th_cat: int >= 0\n Threshold, which determines which categories are small.\n woe_diff_th: float = 0.01\n The option to merge NaNs and rare categories with another bin,\n if the difference in WoE is less than woe_diff_th\n min_bin_size: int > 1, 0 < float < 1\n Minimum bin size when splitting.\n min_bin_mults: list of floats > 1\n If minimum bin size is specified, you can specify a list to check\n if large values work better, for example: [2, 4]\n min_gains_to_split: list of floats >= 0\n min_gain_to_split values that will be iterated to find the best split.\n auc_tol: 1e-5 <= auc_tol <=1e-2\n AUC tolerance. 
You can lower the auc_tol value from the maximum\n to make the model simpler.\n cat_alpha: float > 0\n Regularizer for category encoding.\n cat_merge_to: str\n The way of WoE values filling in the test sample for categories\n that are not in the training sample.\n Values - 'to_nan', 'to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'\n nan_merge_to: str\n The way of WoE values filling on the test sample for real NaNs,\n if they are not included in their group.\n Values - 'to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'\n oof_woe: bool\n Use OOF or standard encoding for WOE.\n n_folds: int\n Number of folds for feature selection / encoding, etc.\n n_jobs: int > 0\n Number of CPU cores to run in parallel.\n l1_base_step: real > 0\n Grid size in l1 regularization\n l1_exp_step: real > 1\n Grid scale in l1 regularization\n population_size: None, int > 0\n Feature selection type in the selector. If the value is \"None\" then L1 boost is used.\n If \"int\" is specified, then a standard step will be used for\n the number of random subsamples indicated by this value.\n Can be generalized to genetic algorithm.\n feature_groups_count: int > 0\n The number of groups in the genetic algorithm. Its effect is visible only when\n population_size > 0\n imp_type: str\n Feature importances type. Feature_imp and perm_imp are available.\n It is used to sort the features at the first and at the final\n stage of feature selection.\n regularized_refit: bool\n Use regularization at the time of model refit. Otherwise, we have\n a statistical model.\n p_val: 0 < p_val <= 1\n When training a statistical model, do backward selection\n until all p-values of the model's coefficient are less than p_val\n verbose: int >= 0\n verbosity level\n debug: bool\n Debug mode\n **kwargs:\n \"\"\"\n logger.setLevel(verbosity_to_loglevel(verbose))\n assert cat_merge_to in ['to_nan', 'to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'], \\\n \"Value for cat_merge_to is invalid. Valid are 'to_nan', 'to_small', 'to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'\"\n\n assert nan_merge_to in ['to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'], \\\n \"Value for nan_merge_to is invalid. 
Valid are 'to_woe_0', 'to_maxfreq', 'to_maxp', 'to_minp'\"\n\n self._params = {\n\n 'interpreted_model': interpreted_model,\n 'monotonic': monotonic,\n 'max_bin_count': max_bin_count,\n 'select_type': select_type,\n 'pearson_th': pearson_th,\n 'auc_th': auc_th,\n 'vif_th': vif_th,\n 'imp_th': imp_th,\n 'min_bin_mults': min_bin_mults,\n 'min_gains_to_split': min_gains_to_split,\n 'force_single_split': force_single_split,\n 'auc_tol': auc_tol,\n 'cat_alpha': cat_alpha,\n 'cat_merge_to': cat_merge_to,\n 'nan_merge_to': nan_merge_to,\n 'oof_woe': oof_woe,\n 'n_folds': n_folds,\n 'n_jobs': n_jobs,\n 'l1_grid_size': l1_grid_size,\n 'l1_exp_scale': l1_exp_scale,\n\n 'imp_type': imp_type,\n 'population_size': None,\n 'regularized_refit': regularized_refit,\n 'p_val': p_val,\n 'debug': debug,\n\n 'th_const': th_const,\n 'th_nan': th_nan,\n 'th_cat': th_cat,\n 'woe_diff_th': woe_diff_th,\n 'min_bin_size': min_bin_size\n\n }\n for deprecated_arg, new_arg in zip(['l1_base_step', 'l1_exp_step', 'population_size', 'feature_groups_count'],\n ['l1_grid_size', 'l1_exp_scale', None, None]):\n\n if deprecated_arg in kwargs:\n msg = 'Parameter {0} is deprecated.'.format(deprecated_arg)\n if new_arg is not None:\n msg = msg + ' Value will be set to {0} parameter, but exception will be raised in future.'.format(\n new_arg)\n self._params[new_arg] = kwargs[deprecated_arg]\n logger.warning(msg, DeprecationWarning, stacklevel=2)\n\n self.woe_dict = None\n self.train_df = None\n self.split_dict = None # словарь со сплитами для каждого признкака\n self.target = None # целевая переменная\n self.clf = None # модель лог регрессии\n self.features_fit = None # Признаки, которые прошли проверку Selector + информация о лучшей итерации Result\n self._cv_split = None # Словарь с индексами разбиения на train и test\n self._small_nans = None\n\n self._private_features_type = None\n self._public_features_type = None\n\n self._weights = None\n self._intercept = None\n self._p_vals = None\n\n self.feature_history = None\n\n @property\n def features_type(self):\n return self._public_features_type\n\n @property\n def private_features_type(self):\n return self._private_features_type\n\n def get_split(self, feature: Hashable):\n return self.woe_dict[feature].split\n\n def get_woe(self, feature_name: Hashable):\n if self.private_features_type[feature_name] == \"real\":\n split = self.woe_dict[feature_name].split.copy()\n woe = self.woe_dict[feature_name].cod_dict\n split = enumerate(np.hstack([split, [np.inf]]))\n split = OrderedDict(split)\n\n spec_val = set(woe.keys()) - set(split.keys())\n spec_val = OrderedDict((key, woe[key]) for key in spec_val)\n\n split = OrderedDict((split[key], value) for (key, value) in woe.items() if key in split)\n split, spec_val = list(split.items()), list(spec_val.items())\n\n borders, values = list(zip(*split))\n new_borders = list(zip([-np.inf] + list(borders[:-1]), borders))\n new_borders = [('{:.2f}'.format(x[0]), '{:.2f}'.format(x[1])) for x in new_borders]\n\n split = list(zip(new_borders, values)) + spec_val\n\n elif self.private_features_type[feature_name] == \"cat\":\n split = list(self.woe_dict[feature_name].cod_dict.items())\n else:\n raise ValueError(f\"Feature type {self.private_features_type[feature_name]} is not supported\")\n\n split = [(x[1], str(x[0])) for x in split]\n return pd.Series(*(zip(*split)))\n\n def _infer_params(self, train: pd.DataFrame,\n target_name: str,\n features_type: Optional[Dict[str, str]] = None,\n group_kf: Hashable = None,\n max_bin_count: Optional[Dict[str, 
int]] = None,\n features_monotone_constraints: Optional[Dict[str, str]] = None):\n \"\"\"\n\n Args:\n train:\n target_name:\n features_type:\n group_kf:\n max_bin_count:\n features_monotone_constraints:\n\n Returns:\n\n \"\"\"\n self.params = deepcopy(self._params)\n\n for k in ['th_const', 'th_nan', 'th_cat', 'min_bin_size']:\n val = self.params[k]\n self.params[k] = int(val * train.shape[0]) if 0 <= val < 1 else int(val)\n\n min_data_in_bin = [self.params['min_bin_size'], ]\n for m in self.params['min_bin_mults']:\n min_data_in_bin.append(int(m * self.params['min_bin_size']))\n\n self._tree_dict_opt = OrderedDict({\"min_data_in_leaf\": (self.params['min_bin_size'],),\n \"min_data_in_bin\": min_data_in_bin,\n \"min_gain_to_split\": self.params['min_gains_to_split']})\n\n # составим features_type\n self._features_type = features_type\n if self._features_type is None:\n self._features_type = {}\n\n assert target_name not in self._features_type, \"target_name in features_type!!!\"\n assert group_kf not in self._features_type, \"group_kf in features_type!!!\"\n\n droplist = [target_name]\n if group_kf is not None:\n droplist.append(group_kf)\n\n for col in train.columns.drop(droplist):\n if col not in self._features_type:\n self._features_type[col] = None\n\n # поработаем с монотонными ограничениями\n self.features_monotone_constraints = features_monotone_constraints\n if self.features_monotone_constraints is None:\n self.features_monotone_constraints = {}\n\n checklist = ['auto']\n if self.params['monotonic']:\n checklist.extend(['0', 0, None])\n\n for col in self._features_type:\n val = self.features_monotone_constraints.get(col)\n\n if val in checklist:\n new_val = get_monotonic_constr(col, train, target_name)\n elif val in ['0', 0, None]:\n new_val = '0'\n else:\n new_val = val\n\n self.features_monotone_constraints[col] = new_val\n\n # max_bin_count\n self.max_bin_count = max_bin_count\n if self.max_bin_count is None:\n self.max_bin_count = {}\n\n for col in self._features_type:\n if col not in self.max_bin_count:\n self.max_bin_count[col] = self.params['max_bin_count']\n\n def fit(self, train: pd.DataFrame,\n target_name: str,\n features_type: Optional[Dict[str, str]] = None,\n group_kf: Hashable = None,\n max_bin_count: Optional[Dict[str, int]] = None,\n features_monotone_constraints: Optional[Dict[str, str]] = None,\n validation: Optional[pd.DataFrame] = None):\n \"\"\"\n\n Args:\n train: pandas.DataFrame\n Training sample\n target_name: str\n Target variable's column name\n features_type: dict\n Dictionary with feature types,\n \"cat\" - categorical, \"real\" - real, \"date\" - for date\n group_kf:\n Column name for GroupKFold\n max_bin_count: dict\n Dictionary with feature name -> maximum bin quantity values\n features_monotone_constraints: dict\n Dictionary with monotonic constraints for features\n \"-1\" - the feature values decreases monotonically when the target variable's value increases\n \"0\" - no limitations. 
Switches to auto in case of monotonic = True\n \"1\" - the feature values monotonically increases when the target variable's value increases\n \"auto\" - the feature values monotonically changes.\n Not specified for categorical features.\n validation: pandas.DataFrame\n Additional validation sample used for model selection\n Currently supported:\n - feature selection by p-value\n\n Returns:\n\n \"\"\"\n self._infer_params(train, target_name, features_type, group_kf, max_bin_count, features_monotone_constraints)\n\n if group_kf:\n group_kf = train[group_kf].values\n types_handler = TypesHandler(train=train,\n public_features_type=self._features_type,\n max_bin_count=self.max_bin_count,\n features_monotone_constraints=self.features_monotone_constraints)\n train_, self._public_features_type, self._private_features_type, max_bin_count, features_monotone_constraints \\\n = types_handler.transform()\n del types_handler\n\n train_ = train_[[*self.private_features_type.keys(), target_name]]\n self.target = train_[target_name]\n self.feature_history = {key: None for key in self.private_features_type.keys()}\n # Отбрасывание колонок с нанами\n features_before = set(self._private_features_type.keys())\n train_, self._private_features_type = nan_constant_selector(train_, self.private_features_type,\n th_const=self.params['th_const'])\n features_after = set(self._private_features_type.keys())\n features_diff = features_before - features_after\n for feature in features_diff:\n self.feature_history[feature] = 'NaN values'\n # Первичный отсев по важности\n features_before = features_after\n train_, self._private_features_type = feature_imp_selector(train_, self.private_features_type, target_name,\n imp_th=self.params['imp_th'],\n imp_type=self.params['imp_type'],\n select_type=self.params['select_type'],\n process_num=self.params['n_jobs'])\n features_after = set(self._private_features_type.keys())\n features_diff = features_before - features_after\n for feature in features_diff:\n self.feature_history[feature] = 'Low importance'\n\n self._small_nans = SmallNans(th_nan=self.params['th_nan'], th_cat=self.params['th_cat'],\n cat_merge_to=self.params['cat_merge_to'],\n nan_merge_to=self.params['nan_merge_to']) # класс для обработки нанов\n\n train_, spec_values = self._small_nans.fit_transform(train=train_, features_type=self.private_features_type)\n\n self._cv_split = cv_split_f(train_, self.target, group_kf, n_splits=self.params['n_folds'])\n\n params_gen = ((x,\n deepcopy(train_[[x, target_name]]),\n features_monotone_constraints[x],\n max_bin_count[x], self.params['cat_alpha']) for x in self.private_features_type.keys())\n\n if self.params['n_jobs'] > 1:\n with Pool(self.params['n_jobs']) as pool:\n result = pool.starmap(self.feature_woe_transform, params_gen)\n else:\n result = []\n for params in params_gen:\n result.append(self.feature_woe_transform(*params))\n\n split_dict = dict(zip(self.private_features_type.keys(), result))\n split_dict = {key: split_dict[key] for key in split_dict if split_dict[key] is not None}\n\n features_before = features_after\n self._private_features_type = {x: self.private_features_type[x] for x in split_dict if\n x in split_dict.keys()}\n features_after = set(self._private_features_type.keys())\n features_diff = features_before - features_after\n for feature in features_diff:\n self.feature_history[feature] = 'Unable to WOE transform'\n\n # print(f\"{split_dict.keys()} to selector !!!!!\")\n logger.info(f\"{split_dict.keys()} to selector !!!!!\")\n self.split_dict = 
split_dict # набор пар признаки - границы бинов\n self.train_df = self._train_encoding(train_, spec_values, self.params['oof_woe'])\n\n logger.info(\"Feature selection...\")\n selector = Selector(interpreted_model=self.params['interpreted_model'],\n train=self.train_df,\n target=self.target,\n features_type=self.private_features_type,\n n_jobs=self.params['n_jobs'],\n cv_split=self._cv_split\n )\n\n best_features, self._sel_result = selector(pearson_th=self.params['pearson_th'],\n auc_th=self.params['auc_th'],\n vif_th=self.params['vif_th'],\n l1_grid_size=self.params['l1_grid_size'],\n l1_exp_scale=self.params['l1_exp_scale'],\n auc_tol=self.params['auc_tol'],\n feature_history=self.feature_history)\n\n # create validation data if it's defined and usefull\n valid_enc, valid_target = None, None\n if validation is not None and not self.params['regularized_refit']:\n valid_enc = self.test_encoding(validation, best_features)\n valid_target = validation[target_name]\n\n fit_result = self._clf_fit(self.train_df, best_features, self.feature_history, valid_enc, valid_target)\n\n self.features_fit = fit_result['features_fit']\n self._weights = fit_result['weights']\n self._intercept = fit_result['intercept']\n if 'b_var' in fit_result:\n self._b_var = fit_result['b_var']\n if 'p_vals' in fit_result:\n self._p_vals = fit_result['p_vals']\n\n if not self.params['debug']:\n del self.train_df\n del self.target\n\n def feature_woe_transform(self, feature_name: str, train_f: pd.DataFrame,\n features_monotone_constraints: str, max_bin_count: int,\n cat_alpha: float = 1.) -> SplitType:\n \"\"\"\n\n Args:\n feature_name:\n train_f:\n features_monotone_constraints:\n max_bin_count:\n cat_alpha:\n\n Returns:\n\n \"\"\"\n train_f = train_f.reset_index(drop=True)\n logger.info(f\"{feature_name} processing...\")\n target_name = train_f.columns[1]\n # Откидываем здесь закодированные маленькие категории/наны. 
Их не учитываем при определения бинов\n if np.issubdtype(train_f.dtypes[feature_name], np.number):\n nan_index = []\n else:\n sn_set = _small_nan_set if self.private_features_type[feature_name] == \"cat\" else _nan_set\n nan_index = train_f[feature_name].isin(sn_set)\n nan_index = np.where(nan_index.values)[0]\n\n cat_enc = None\n if self.private_features_type[feature_name] == \"cat\":\n cat_enc = CatEncoding(data=train_f)\n train_f = cat_enc(self._cv_split, nan_index, cat_alpha)\n\n train_f = train_f.iloc[np.setdiff1d(np.arange(train_f.shape[0]), nan_index), :]\n\n train_f = train_f.astype({feature_name: float, target_name: int})\n # нужный тип для lgb после нанов и маленьких категорий\n if train_f.shape[0] == 0: # случай, если кроме нанов и маленьких категорий ничего не осталось\n split = [-np.inf]\n if self.private_features_type[feature_name] == \"cat\":\n return cat_enc.mean_target_reverse(split)\n elif self.private_features_type[feature_name] == \"real\":\n return split\n else:\n raise ValueError(\"self.features_type[feature] is cat or real\")\n\n # подбор оптимальных параметров дерева\n tree_dict_opt = deepcopy(self._tree_dict_opt)\n if max_bin_count: # ограничение на число бинов\n\n leaves_range = tuple(range(2, max_bin_count + 1))\n tree_dict_opt = OrderedDict({**self._tree_dict_opt,\n **{\"num_leaves\": leaves_range,\n \"bin_construct_sample_cnt\": (int(1e8),)},\n })\n\n # Еще фича force_single_split ..\n if self.params['force_single_split']:\n min_size = train_f.shape[0] - train_f[feature_name].value_counts(dropna=False).values[0]\n if self.params['th_const'] < min_size < self.params['min_bin_size']:\n tree_dict_opt[\"min_data_in_leaf\"] = [min_size, ]\n tree_dict_opt[\"min_data_in_bin\"] = [3, ]\n tree_dict_opt[\"num_leaves\"] = [2, ]\n\n tree_opt = TreeParamOptimizer(data=train_f,\n n_folds=self.params['n_folds'],\n params_range=collections.OrderedDict(**tree_dict_opt,\n **{\"monotone_constraints\": (\n features_monotone_constraints,)}))\n tree_param = tree_opt(3)\n # значение monotone_constraints содержится в tree_params\n # подбор подходяшего сплита на бины\n htransform = HTransform(train_f[feature_name],\n train_f[target_name])\n split = htransform(tree_param)\n\n # Обратная операция к mean_target_encoding\n if self.private_features_type[feature_name] == \"cat\":\n return cat_enc.mean_target_reverse(split)\n elif self.private_features_type[feature_name] == \"real\":\n return split\n else:\n raise ValueError(\"self.features_type[feature] is cat or real\")\n\n def _train_encoding(self, train: pd.DataFrame,\n spec_values: Dict, # TODO: ref\n folds_codding: bool) -> pd.DataFrame:\n \"\"\"\n\n Args:\n train:\n spec_values:\n folds_codding:\n\n Returns:\n\n \"\"\"\n woe_dict = dict()\n woe_list = []\n for feature in self.private_features_type:\n woe = WoE(f_type=self.private_features_type[feature], split=self.split_dict[feature],\n woe_diff_th=self.params['woe_diff_th'])\n if folds_codding:\n df_cod = woe.fit_transform_cv(train[feature], self.target, spec_values=spec_values[feature],\n cv_index_split=self._cv_split)\n woe.fit(train[feature], self.target, spec_values=spec_values[feature])\n else:\n df_cod = woe.fit_transform(train[feature], self.target, spec_values=spec_values[feature])\n woe_dict[feature] = woe\n woe_list.append(df_cod)\n self.woe_dict = woe_dict\n train_tr = pd.concat(woe_list, axis=1)\n train_tr.columns = self.private_features_type.keys()\n return train_tr\n\n def _clf_fit(self, data_enc, features, feature_history=None, valid_enc=None, valid_target=None) -> 
dict:\n \"\"\"\n\n Args:\n data_enc:\n features:\n feature_history:\n valid_enc:\n valid_target:\n\n Returns:\n\n \"\"\"\n x_train, y_train = data_enc[features].values, self.target.values\n x_val, y_val = None, None\n p_vals = None\n\n result = dict()\n if self.params['regularized_refit']:\n w, i, neg = refit_reg(x_train, y_train,\n l1_grid_size=self.params['l1_grid_size'],\n l1_exp_scale=self.params['l1_exp_scale'],\n max_penalty=self._sel_result.reg_alpha,\n interp=self.params['interpreted_model'])\n else:\n if valid_enc is not None:\n x_val, y_val = valid_enc[features].values, valid_target.values\n\n w, i, neg, p_vals, b_var = refit_simple(x_train, y_train, interp=self.params['interpreted_model'],\n p_val=self.params['p_val'], x_val=x_val, y_val=y_val)\n\n result['b_var'] = b_var\n\n _feats = np.array(features)[neg]\n\n features_before = set(features)\n features_fit = pd.Series(w, _feats)\n result['features_fit'] = features_fit\n features_after = set(features_fit.index)\n features_diff = features_before - features_after\n if feature_history is not None:\n for feature in features_diff:\n feature_history[feature] = 'Pruned during regression refit'\n\n if not self.params['regularized_refit']:\n result['p_vals'] = pd.Series(p_vals, list(_feats) + ['Intercept_'])\n\n logger.info(features_fit)\n result['weights'] = w\n result['intercept'] = i\n return result\n\n def test_encoding(self, test: pd.DataFrame, feats: Optional[List[str]] = None) -> pd.DataFrame:\n \"\"\"\n WoE encoding on test dataset\n\n Args:\n test: pandas.DataFrame\n Тестовый датасет\n feats: list or None\n features names\n\n Returns:\n\n \"\"\"\n if feats is None:\n feats = list(self.features_fit.index)\n\n feats_to_get = deepcopy(feats)\n\n for feat in feats:\n parts = feat.split('__F__')\n if len(parts) > 1:\n feats_to_get.append('__F__'.join(parts[:-1]))\n feats_to_get = [x for x in list(set(feats_to_get)) if x in test.columns]\n\n types = {}\n for feat in feats_to_get:\n if feat in self._public_features_type:\n types[feat] = self._public_features_type[feat]\n\n types_handler = TypesHandler(train=test[feats_to_get], public_features_type=types)\n test_, _, _, _, _ = types_handler.transform()\n del types_handler\n\n woe_list = []\n test_, spec_values = self._small_nans.transform(test_, feats)\n # здесь дебажный принт\n logger.debug(spec_values)\n for feature in feats:\n df_cod = self.woe_dict[feature].transform(test_[feature], spec_values[feature])\n woe_list.append(df_cod)\n\n test_tr = pd.concat(woe_list, axis=1)\n test_tr.columns = feats\n return test_tr[feats]\n\n def predict_proba(self, test: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Make predictions for a test dataset\n\n Args:\n test: pd.DataFrame\n\n Returns:\n np.ndarray\n \"\"\"\n test_tr = self.test_encoding(test)\n prob = 1 / (1 + np.exp(-(np.dot(test_tr.values, self.weights) + self.intercept)))\n return prob\n\n def get_model_represenation(self):\n \"\"\"\n Get scorecard\n\n Returns:\n\n \"\"\"\n features = list(self.features_fit.index)\n result = dict()\n for feature in features:\n feature_data = dict()\n woe = self.woe_dict[feature]\n feature_data['f_type'] = woe.f_type\n\n if woe.f_type == 'real':\n feature_data['splits'] = [0 + round(float(x), 6) for x in woe.split]\n else:\n feature_data['cat_map'] = {str(k): int(v) for k, v in woe.split.items()}\n spec_vals = self._small_nans.cat_encoding[feature]\n feature_data['spec_cat'] = (spec_vals[0], spec_vals[2])\n\n feature_data['cod_dict'] = {int(k): (0 + round(float(v), 6))\n for k, v in woe.cod_dict.items()\n if 
type(k) is int or type(k) is float}\n\n feature_data['weight'] = float(self.features_fit[feature])\n feature_data['nan_value'] = self._small_nans.all_encoding[feature]\n feature_data['spec_cod'] = {k: (0 + round(float(v), 6))\n for k, v in woe.cod_dict.items()\n if type(k) is str}\n\n result[feature] = feature_data\n\n return {'features': result, 'intercept': float(self.intercept)}\n\n def get_sql_inference_query(self, table_name, round_digits=3, round_features=5, output_name='PROB', alias='WOE_TAB',\n bypass_encoded=True, template=None,\n nan_pattern_numbers=\"({0} IS NULL OR {0} = 'NaN')\",\n nan_pattern_category=\"({0} IS NULL OR LOWER(CAST({0} AS VARCHAR(50))) = 'nan')\",\n preprocessing=None) -> str:\n \"\"\"\n Get inference query for whitebox model\n\n Args:\n table_name: Source table name that should be passed into query\n round_digits: round woe and coefs to simplify query. Note: may be little accuracy decrease\n round_features: round features to simplify query. Note: may be little accuracy decrease\n output_name: name of output prediction feature\n alias: alias of woe_table in query\n bypass_encoded: add woe encoding to the result\n template: 'td' for teradata or None\n nan_pattern_numbers: string value representing how to check nulls for numbers in SQL.\n For ex. \"({0} IS NULL OR {0} = 'NaN')\"\n nan_pattern_category: string value representing how to check nulls for categories in SQL.\n preprocessing: due to possible difference in schemes between SQL database and csv file user may\n specify dict how to preprocess each feature. For ex. if feature Feat_0 was treated as integer by\n model, but is actually string in database schema, you may pass\n preprocessing = {'Feat_0': CAST({0} as INTEGER)}\n\n Returns:\n\n \"\"\"\n\n return get_sql_inference_query(self, table_name, round_digits, round_features, output_name, alias,\n bypass_encoded, template, nan_pattern_numbers, nan_pattern_category,\n preprocessing)\n" ]
[ [ "pandas.to_numeric" ], [ "sklearn.metrics.roc_auc_score", "pandas.concat", "numpy.hstack", "numpy.dot", "pandas.Series", "numpy.arange", "numpy.issubdtype", "numpy.sign", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
bombrun/GaiaLQSO
[ "b4d787a4d80732cbb5a3762c34298f2430dd0540" ]
[ "lens/sie/random.py" ]
[ "import numpy as np\nimport pandas as pd\nimport astropy.units as u\nimport healpy as hp\n\nfrom lens.sie.plot import *\n\ndef angle2pixel(ra_deg,dec_deg):\n \"\"\" return healpix index 12\"\"\"\n phi = ra_deg * np.pi / 180\n theta = np.pi/2 - (dec_deg * np.pi/180)\n return hp.ang2pix(4096,theta,phi,nest=True)\n\n\ndef lensedQSO(f,scale,w,y,dy,gy):\n \"\"\" to generate a lensed QSO\n f : SIE lens eliptisicity parameter\n scale : a scale parameter (TODO link it to some physical parameter of the lens)\n w : lens orientation\n y : source position relative to the lens\n dy : source proper motion relative to the lens\n gy : source magnitude (assume that the magnitude is defined as 2.5 log10(flux))\n \"\"\"\n # locations of lens images in the source plane\n xs,phis = sie.solve(f,y[0],y[1])\n \n # compute images position proper motion and magnitude \n ra = []\n dec = []\n pmra = []\n pmdec = []\n g = []\n R = np.array([[np.cos(w),np.sin(w)],[-np.sin(w),np.cos(w)]])\n for phi,x in zip(phis,xs) :\n dx = np.dot(R,np.dot(np.linalg.inv(sie.A(x,phi,f)),dy))\n ra.append(x*np.cos(phi+w)*scale)\n dec.append(x*np.sin(phi+w)*scale)\n pmra.append(dx[0]*scale)\n pmdec.append(dx[1]*scale)\n g.append(gy-2.5*np.log10(np.abs(sie.magnification(x,phi,f))))\n \n # set a pandas data frame to store the result\n res = pd.DataFrame()\n res['ra'] = ra\n res['dec'] = dec\n res['pmra'] = pmra\n res['pmdec'] = pmdec\n res['phot_g_mean_mag'] = g\n return res\n\ndef getSourceId(ra_rad,dec_rad):\n x = np.asarray(ra_rad)\n y = np.asarray(dec_rad)\n s=34359738368\n sourceid = angle2pixel(x*u.rad.to(u.deg),y*u.rad.to(u.deg))*s\n if x.size==1 :\n return sourceid + np.int64(np.random.uniform(0,s))\n else :\n return sourceid + np.int64(np.random.uniform(0,s,x.size))\n\ndef randomLQSO(verbose=False):\n \"\"\" a dummy random lensed QSO generator \"\"\"\n \n #scale \n scale = np.random.uniform(1,2)\n \n # lens parameter\n f = np.random.uniform()\n \n # relative source-lens position\n y = np.random.uniform(-0.5,0.5,2)\n \n # relative source-lens proper motion\n dy = np.random.normal(0,0.1,2)\n \n # source magnitude\n gy = np.random.uniform(18,20)\n \n # random lens orientation\n w = np.random.uniform(0,2*np.pi)\n \n # wrap the data\n data = f,scale,w,y,dy,gy\n \n # to visualise the lens\n if verbose :\n print(data)\n plotLensSourceImage(f,y[0],y[1])\n \n res = lensedQSO(*data)\n \n # sky location\n ra = np.random.uniform(0,2*np.pi)\n dec = np.random.uniform(-np.pi/2+0.1,np.pi/2-0.1) # a bit wrong as we exclude the pole\n while(np.abs(dec) < 10*u.deg.to(u.rad)) :\n dec = np.random.uniform(-np.pi/2+0.1,np.pi/2-0.1) # a bit wrong as we exclude the pole\n res['ra'] = ra + res.ra*u.arcsecond.to(u.rad)\n res['dec'] = dec + res.dec*u.arcsecond.to(u.rad)\n res['source_id'] = getSourceId(res.ra,res.dec)\n res.index=res.source_id\n res['qsoid'] = res.phot_g_mean_mag.idxmin()\n return res\n\ndef generateLQSO(n):\n \"\"\"return n random QSO in a pandas DataFrame\"\"\"\n res = [randomLQSO() for i in range(0,n)]\n return pd.concat(res)" ]
[ [ "pandas.concat", "numpy.abs", "numpy.asarray", "numpy.cos", "pandas.DataFrame", "numpy.sin", "numpy.random.normal", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
DongChengdongHangZhou/CycleGAN-tiff
[ "e13a4d702ac6ce3e13af4946a1bc6657c1a2089e" ]
[ "util/visualizer.py" ]
[ "import numpy as np\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\nimport tifffile as tiff\n\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\ndef save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = webpage.get_image_dir()\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n webpage.add_header(name)\n ims, txts, links = [], [], []\n\n for label, im_data in visuals.items():\n im = util.tensor2im(im_data)\n image_name = '%s_%s.png' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n util.save_image(im, save_path, aspect_ratio=aspect_ratio)\n ims.append(image_name)\n txts.append(label)\n links.append(image_name)\n webpage.add_images(ims, txts, links, width=width)\n\n\ndef save_images_test(save_dir,visuals, image_path):\n \"\"\"Save images to the disk.\n\n Parameters:\n webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details)\n visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs\n image_path (str) -- the string is used to create image paths\n aspect_ratio (float) -- the aspect ratio of saved images\n width (int) -- the images will be resized to width x width\n\n This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.\n \"\"\"\n image_dir = save_dir\n short_path = ntpath.basename(image_path[0])\n name = os.path.splitext(short_path)[0]\n\n for label, im_data in visuals.items():\n im = (im_data[0][0]).cpu().numpy()\n image_name = '%s_%s.tiff' % (name, label)\n save_path = os.path.join(image_dir, image_name)\n tiff.imsave(save_path,im)\n\n\n\nclass Visualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n self.display_id = opt.display_id\n self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)\n if not self.vis.check_connection():\n 
self.create_visdom_connections()\n\n if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' % self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. \\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n if self.display_id > 0: # show images in the browser using visdom\n ncols = self.ncols\n if ncols > 0: # show all the images in one visdom panel\n ncols = min(ncols, len(visuals))\n h, w = next(iter(visuals.values())).shape[:2]\n table_css = \"\"\"<style>\n table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}\n table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}\n </style>\"\"\" % (w, h) # create a table css\n # create a table of images.\n title = self.name\n label_html = ''\n label_html_row = ''\n images = []\n idx = 0\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n label_html_row += '<td>%s</td>' % label\n images.append(image_numpy.transpose([2, 0, 1]))\n idx += 1\n if idx % ncols == 0:\n label_html += '<tr>%s</tr>' % label_html_row\n label_html_row = ''\n white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255\n while idx % ncols != 0:\n images.append(white_image)\n label_html_row += '<td></td>'\n idx += 1\n if label_html_row != '':\n label_html += '<tr>%s</tr>' % label_html_row\n try:\n self.vis.images(images, nrow=ncols, win=self.display_id + 1,\n padding=2, opts=dict(title=title + ' images'))\n label_html = '<table>%s</table>' % label_html\n self.vis.text(table_css + label_html, win=self.display_id + 2,\n opts=dict(title=title + ' labels'))\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n else: # show each image in a separate visdom panel;\n idx = 1\n try:\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),\n win=self.display_id + idx)\n idx += 1\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 
os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))\n util.save_image(image_numpy, img_path)\n\n # update website\n webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)\n for n in range(epoch, 0, -1):\n webpage.add_header('epoch [%d]' % n)\n ims, txts, links = [], [], []\n\n for label, image_numpy in visuals.items():\n image_numpy = util.tensor2im(image)\n img_path = 'epoch%.3d_%s.png' % (n, label)\n ims.append(img_path)\n txts.append(label)\n links.append(img_path)\n webpage.add_images(ims, txts, links, width=self.win_size)\n webpage.save()\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message) # save the message\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pjgao/Deep-Forest
[ "0fdec38b671ababfcc3476807fe512aa993d4fd4" ]
[ "tests/test_buffer.py" ]
[ "import os\nimport pytest\nimport numpy as np\n\nfrom deepforest import _io as io\n\n\nopen_buffer = io.Buffer(use_buffer=True,\n buffer_dir=\"./\",\n store_est=True,\n store_pred=True,\n store_data=True)\n\n\nclose_buffer = io.Buffer(use_buffer=False)\n\nX = np.zeros((42, 42), dtype=np.uint8)\n\n\ndef test_buffer_name():\n name = open_buffer.name\n assert isinstance(name, str)\n\n name = close_buffer.name\n assert name is None\n\n\ndef test_store_data_close_buffer():\n \"\"\"When `store_data` is False, the buffer directly returns the array.\"\"\"\n ret = close_buffer.cache_data(0, X)\n assert isinstance(ret, np.ndarray)\n\n\ndef test_store_data_open_buffer():\n \"\"\"\n When `store_data` is True, the buffer returns the memmap object of the\n dumped array.\n \"\"\"\n layer_idx = 0\n ret = open_buffer.cache_data(layer_idx, X, is_training_data=True)\n assert isinstance(ret, np.memmap)\n assert os.path.exists(os.path.join(\n open_buffer.data_dir_, \"joblib_train_{}.mmap\".format(layer_idx)))\n\n ret = open_buffer.cache_data(layer_idx, X, is_training_data=False)\n assert isinstance(ret, np.memmap)\n assert os.path.exists(os.path.join(\n open_buffer.data_dir_, \"joblib_test_{}.mmap\".format(layer_idx)))\n\n\ndef test_load_estimator_missing():\n err_msg = \"Missing estimator in the path: unknown.est.\"\n with pytest.raises(FileNotFoundError, match=err_msg):\n open_buffer.load_estimator(\"unknown.est\")\n\n\ndef test_load_predictor_missing():\n err_msg = \"Missing predictor in the path: unknown.est.\"\n with pytest.raises(FileNotFoundError, match=err_msg):\n open_buffer.load_predictor(\"unknown.est\")\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qwertpi/techdiff-textgen
[ "fd7578a24e11b96d86a92d2935b6153b1bea73f8" ]
[ "train.py" ]
[ "from json import dump\nfrom math import ceil\nfrom random import randint\nimport string\n\nfrom keras.layers import Input, Dense, Embedding\n#uncoment if using CPU\n##from keras.layers import LSTM\n#comment out the line bellow if using CPU\nfrom keras.layers import CuDNNLSTM as LSTM\nfrom keras.models import Model, load_model\nfrom keras.metrics import categorical_accuracy\nfrom keras.utils import to_categorical, plot_model\nimport numpy as np\n\ndef DataGenerator(x_data, y_data, batch_size, lookback_length):\n '''\n A generator that yields batches of training x and y data\n :param x_data: list, the input data that batches should be drawn from\n :param y_data: list, the output data that batches should be drawn from\n :param batch_size: int, the number of datapoints that should be yielded in each batch\n :param lookback_length: int, the length that the model expects every datapoint to be\n :returns: numpy array, an x batch\n :returns: numpy array, a y batch\n '''\n indexes = np.arange(len(x_data))\n while True:\n batch_indexes = np.random.choice(indexes, batch_size)\n\n X = []\n Y = []\n i = 0\n for i in batch_indexes:\n curr_X = x_data[i]\n if len(curr_X) >= 1:\n #cuts off a random number of words from the start of the datapoint as a form of dropout\n curr_X = curr_X[randint(0, len(curr_X) - 1):]\n #padds with 0s until the datapoint is lookback_length long\n while len(curr_X) < lookback_length:\n curr_X.append(0)\n X.append(curr_X)\n Y.append(y_data[i])\n\n X = np.array(X)\n Y = np.array(Y)\n yield X, Y\n \n#this is what will be removed from words\npunctuation = list(string.punctuation)+[\" \"]+[\"\"]\n\nlines = []\nwith open(\"data.txt\", \"r\", encoding=\"ascii\", errors=\"ignore\") as f:\n for line in f.read().splitlines():\n curr_line = \"\"\n #we aren't intrested in blank lines\n if line != \"\":\n for word in line.split(\" \"):\n #theres a problem in the bash download pipeline that means the filenames get scattered through the data file\n if \".en\" not in word:\n for char in word:\n #removes puntuation characters\n if char not in string.punctuation:\n curr_line += char\n curr_line += \" \"\n lines.append(curr_line.lower())\n\n#generates a list of words which appear frequently enough to be tokenized\nvalid_words = []\nword_counts = {}\nfor line in lines:\n for word in line.split(\" \"):\n if word not in valid_words and word not in punctuation:\n try:\n word_counts[word] += 1\n #the threshold is currently set at 45 occurences over the entire file but this is by no means defiantely the best value\n if word_counts[word] > 45:\n valid_words.append(word)\n del word_counts[word]\n except KeyError:\n word_counts[word] = 1\n\n#how many words the model will take as input\n#I felt an input of 20 words struck a good balance but feel free to change\nmax_len = 20\n\nX = []\nY = []\n\nword_to_token = {}\n\n#generates the dictionary for word token lookups\ni = 2\nfor word in valid_words:\n word_to_token[word] = i\n i += 1\nword_count = max(word_to_token.values())\nprint(word_count)\n\ndef to_token(word):\n '''\n Takes a word and outputs the coresponding token\n :param word: string, the word to be tokenzied\n :returns: int, the token\n '''\n word = word.lower()\n if word in word_to_token:\n return word_to_token[word]\n return 1\n\n#generates the x and y data by tokenizing segments of each line\n#the best analogy for what this does is it slides a window of size max_len words along each line with a stride of 1\n#and then adds the tokenized contents of the winodw to the X list\n#and then adds the 
tokenized word after the end of the window to the Y list\nfor line in lines:\n line = line.split(\" \")\n try:\n i = 1\n j = -1*(max_len - 1)\n while True:\n y_tokenized = [to_token(line[i])]\n if y_tokenized != [1] and y_tokenized != [None]:\n tokenized = list(map(to_token, line[j:i]))\n X.append(tokenized)\n Y.append(y_tokenized)\n i += 1\n j += 1\n except IndexError:\n pass\n\n#makes the Y data one-hot encoded\nY = to_categorical(np.array(Y))\n\n#creates an inverse dictionary for going from token to word\ntoken_to_word = {}\nfor key, value in zip(word_to_token.keys(), word_to_token.values()):\n token_to_word[value] = key\n\n#saves each token dictionary to a json file\ndump(word_to_token, open(\"word_to_token.json\", 'w'))\ndump(token_to_word, open(\"token_to_word.json\", 'w'))\n\n#trys to resume training if a model file already exists\ntry:\n open(\"model.h5\").close()\n model = load_model(\"model.h5\")\nexcept FileNotFoundError:\n print(\"Creating new models\")\n inp = Input((max_len,))\n #embedding size is 2 times the cube root of the word count\n embedding = Embedding(word_count, 2*ceil(word_count**(1/3)))(inp)\n lstm = LSTM(512, return_sequences=True)(embedding)\n lstm = LSTM(256)(lstm)\n dense_out = Dense(Y.shape[-1], activation=\"softmax\")(lstm)\n model = Model(inp, dense_out)\n #mse is used beacuse we want to capture the probability distribution\n model.compile(\"adam\", \"mse\", metrics=[categorical_accuracy])\n plot_model(model, \"model.png\", show_shapes=True, expand_nested=True)\n\nbatch_size = 256\nepoch = 0\nnum_samples = len(X)\nDataGen = DataGenerator(X, Y, batch_size, 20)\ntarget_epoch = 0\n#I found training stagnated at around epoch 200\nwhile target_epoch < 250:\n x, y = next(DataGen)\n loss, acc = model.train_on_batch(x, y)\n #if we have gone past the epoch which we are lookign for\n if (epoch*batch_size)//num_samples > target_epoch:\n #gives a rough esitmate of the number of passes over the dataset\n print(\"Epoch\", (epoch*batch_size)//num_samples)\n print(f\"Accuracy: {acc} Loss: {loss}\")\n model.save(\"model.h5\")\n target_epoch += 10\n epoch += 1\n" ]
[ [ "numpy.array", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mithrillion/BiQA
[ "f61bea95521f5b2ffd838aa60aecaad568de6564" ]
[ "scripts/data_utils.py" ]
[ "import numpy as np\nimport re\nimport torch.utils.data as tud\nimport torch\nimport shutil\n\n\ndef get_word_ids(doc, rnn_encode=True, max_length=100,\n nr_unk=100, nr_var=600, rev_dic=None, relabel=True, ent_dict=None):\n queue = list(doc)\n X = np.zeros(max_length, dtype='int32')\n # M = np.zeros(max_length, dtype='int32')\n V = np.zeros(max_length, dtype='int32')\n words = []\n if ent_dict is None:\n ent_dict = {}\n k = 0\n while len(words) <= max_length and queue:\n word = queue.pop(0)\n if rnn_encode or (not word.is_punct and not word.is_space):\n words.append(word)\n words.sort()\n for j, token in enumerate(words):\n if token.text == '@placeholder':\n X[j] = 1\n V[j] = 1\n elif token.text[:7] == '@entity':\n # temporary dix\n # TODO: properly fix entity replacement\n try:\n num = int(re.search(r'\\d+', token.text[7:]).group(0))\n if 0 <= num < nr_var:\n if relabel:\n if num not in ent_dict.keys():\n ent_dict[num] = k\n k += 1\n X[j] = ent_dict[num] + 2\n V[j] = ent_dict[num] + 2\n else:\n X[j] = num + 2\n V[j] = num + 2\n except AttributeError:\n X[j] = (token.shape % nr_unk) + 2 + nr_var\n elif token.text in rev_dic.keys():\n X[j] = rev_dic[token.text] + nr_unk + nr_var + 2\n # M[j] = 1\n else:\n # X: [null; ph; vars; unks; vocab]\n X[j] = (token.shape % nr_unk) + 2 + nr_var\n if j >= max_length - 1:\n break\n return X, V, ent_dict\n\n\nclass QADataset(tud.Dataset):\n def __init__(self, data_df, nlp, rev_dic, relabel=True, lang_id=None):\n self.data_df = data_df\n self.nlp = nlp\n self.rev_dic = rev_dic\n self.relabel = relabel\n self.lang_id = lang_id\n\n def __len__(self):\n return self.data_df.shape[0]\n\n def __getitem__(self, i):\n\n story = self.nlp(self.data_df['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp(self.data_df['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if self.relabel:\n answer = ent_dict[int(re.search(r'\\d+', self.data_df['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df['answer'].iloc[i]).group(0))\n\n if self.lang_id is not None:\n return self.lang_id, s, q, s_len, q_len, s_var, q_var, answer\n else:\n return s, q, s_len, q_len, s_var, q_var, answer\n\n\nclass BiQADataset(tud.Dataset):\n def __init__(self, data_df_1, data_df_2, nlp_1, nlp_2, rev_dic_1, rev_dic_2, relabel=True, l2_supersample=5):\n self.data_df_1 = data_df_1\n self.data_df_2 = data_df_2\n self.nlp_1 = nlp_1\n self.nlp_2 = nlp_2\n self.rev_dic_1 = rev_dic_1\n self.rev_dic_2 = rev_dic_2\n self.relabel = relabel\n self.l2_supersample = l2_supersample\n\n def __len__(self):\n return self.data_df_1.shape[0] + self.data_df_2.shape[0] * self.l2_supersample\n\n def __getitem__(self, i):\n\n if i < self.data_df_1.shape[0]:\n story = self.nlp_1(self.data_df_1['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic_1, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp_1(self.data_df_1['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic_1, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if 
self.relabel:\n answer = ent_dict[int(re.search(r'\\d+', self.data_df_1['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df_1['answer'].iloc[i]).group(0))\n\n return 0, s, q, s_len, q_len, s_var, q_var, answer\n\n else:\n i = (i - self.data_df_1.shape[0]) % self.data_df_2.shape[0]\n story = self.nlp_2(self.data_df_2['story'].iloc[i].lower(), parse=False, tag=False, entity=False)\n s, s_var, ent_dict = get_word_ids(story, max_length=2000, rev_dic=self.rev_dic_2, relabel=self.relabel)\n s_len = np.sum(s != 0)\n\n question = self.nlp_2(self.data_df_2['question'].iloc[i].lower(), parse=False, tag=False, entity=False)\n q, q_var, ent_dict = get_word_ids(question, max_length=50, rev_dic=self.rev_dic_2, relabel=self.relabel,\n ent_dict=ent_dict)\n q_len = np.sum(q != 0)\n\n if self.relabel:\n answer = ent_dict[int(re.search(r'\\d+', self.data_df_2['answer'].iloc[i]).group(0))]\n else:\n answer = int(re.search(r'\\d+', self.data_df_2['answer'].iloc[i]).group(0))\n\n return 1, s, q, s_len, q_len, s_var, q_var, answer\n\n\ndef get_embeddings(f, nr_unk=100, nr_var=600, meta=None):\n if meta is None:\n nr_vector, ndim = f.readline().split(\" \")\n else:\n nr_vector, ndim = meta.split(\" \")\n nr_vector = int(nr_vector)\n ndim = int(ndim)\n vectors = np.zeros((nr_vector + nr_unk + nr_var + 2, ndim), dtype='float32')\n dic = dict()\n i = 0\n line = f.readline()\n while line:\n parts = line.split(\" \")\n if len(parts) != ndim + 1 and len(parts) != ndim + 2:\n print(line)\n raise ValueError(\"Vector size mismatch! Got {0}, expected {1} (+1)!\".\n format(len(parts), ndim + 1))\n else:\n word = parts[0]\n vec = np.array(parts[1: 1 + ndim]).astype(np.float32)\n vectors[i + nr_unk + nr_var + 2, :] = vec / np.linalg.norm(vec)\n dic[i] = word\n i += 1\n line = f.readline()\n rev_dic = {v: k for k, v, in dic.items()}\n return vectors, dic, rev_dic\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.en.packed.pth.tar',\n best_name='model_best.en.packed.pth.tar'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, best_name)\n\n\ndef sort_batch(batch, sort_ind=3, pack=True):\n if pack:\n _, orders = torch.sort(batch[sort_ind], dim=0, descending=True)\n return [x[orders] for x in batch]\n else:\n return batch\n\n" ]
[ [ "numpy.linalg.norm", "torch.sort", "numpy.array", "numpy.zeros", "numpy.sum", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dbseorms16/drnxgaze
[ "c7b84189c263456c648829bc399a5edb2ec17bb8" ]
[ "estimate_gaze_standalone.py" ]
[ "#!/usr/bin/env python\n\n# Licensed under Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode)\n\nfrom __future__ import print_function, division, absolute_import\n\nimport argparse\nimport os\nimport sys\nimport time\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom rt_gene.extract_landmarks_method_base import LandmarkMethodBase\nfrom rt_gene.gaze_tools import get_phi_theta_from_euler, limit_yaw\nfrom rt_gene.gaze_tools_standalone import euler_from_matrix\n\nfrom previous_state import PeopleState\n\n\n\n\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef load_camera_calibration(calibration_file):\n import yaml\n with open(calibration_file, 'r') as f:\n cal = yaml.safe_load(f)\n\n dist_coefficients = np.array(cal['distortion_coefficients']['data'], dtype='float32').reshape(1, 5)\n camera_matrix = np.array(cal['camera_matrix']['data'], dtype='float32').reshape(3, 3)\n\n return dist_coefficients, camera_matrix\n\n\ndef extract_eye_image_patches(subjects):\n for subject in subjects:\n le_c, re_c, leftcenter_coor, rightcenter_coor, _, _ = subject.get_eye_image_from_landmarks(subject, landmark_estimator.eye_image_size)\n subject.left_eye_color = le_c\n subject.right_eye_color = re_c\n subject.leftcenter_coor = leftcenter_coor\n subject.rightcenter_coor = rightcenter_coor\n\n\n\n\ndef init_previous(num,bbox_l_list):\n people_list.clear()\n\n for i in range(num):\n people_list.append(PeopleState(bbox_l_list[i]))\n\ndef append_people(faceboxes):\n for facebox in faceboxes:\n append_flag =True\n for people in people_list:\n if people.isTheSame(facebox[0]):\n append_flag = False\n break\n if append_flag:\n people_list.append(PeopleState(facebox[0]))\n\n\ndef del_people(faceboxes):\n i=0\n while i < len(people_list):\n del_flag = True\n for facebox in faceboxes:\n if people_list[i].isTheSame(facebox[0]):\n del_flag =False\n break\n if del_flag:\n del people_list[i]\n else:\n i+=1\n\ndef check_people(faceboxes):\n append_people(faceboxes)\n del_people(faceboxes)\n\ndef get_people(facebox_l):\n for idx in range(len(people_list)):\n if people_list[idx].isTheSame(facebox_l):\n return idx\n\n\n# head_theta =[]\n# head_phi =[]\n# gaze_theta = []\n# gaze_phi =[]\npeople_list =[]\nFPS = \"0\"\ngaze_error =[]\nheadpose_error =[]\n\nEVAL =False\nSHIFT =False\ndef estimate_gaze(base_name, color_img, dist_coefficients, camera_matrix ,label=False):\n global FPS\n\n\n # cv2.putText(color_img, \"FPS : \"+FPS, (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 2, cv2.LINE_AA)\n # start = time.time()\n\n #face box의 위치를 반환.(모든 대상 list로 반환) -[left_x, top_y, right_x, bottom_y]\n # faceboxes = landmark_estimator.get_face_bb(color_img)\n\n # if len(faceboxes) == 0:\n faceboxes = [[0,0,120,36]]\n # tqdm.write('Could not find faces in the image')\n # # if EVAL and not SHIFT:\n # # head_phi.append(-100)\n # # head_theta.append(-100)\n # # gaze_phi.append(-100)\n # # gaze_theta.append(-100)\n # return\n\n check_people(faceboxes)\n\n subjects = landmark_estimator.get_subjects_from_faceboxes(color_img, faceboxes)\n extract_eye_image_patches(subjects)\n\n input_r_list = []\n input_l_list = []\n input_head_list = []\n valid_subject_list = []\n\n people_count = 1;\n frame_img =color_img\n\n for idx, subject in enumerate(subjects):\n # people_idx = get_people(faceboxes[idx][0])\n # people_list[people_idx].set_bbox_l(faceboxes[idx][0])\n\n if subject.left_eye_color is None or 
subject.right_eye_color is None:\n tqdm.write('Failed to extract eye image patches')\n continue\n\n success, rotation_vector, _ = cv2.solvePnP(landmark_estimator.model_points,\n subject.landmarks.reshape(len(subject.landmarks), 1, 2),\n cameraMatrix=camera_matrix,\n distCoeffs=dist_coefficients, flags=cv2.SOLVEPNP_DLS)\n\n if not success:\n tqdm.write('Not able to extract head pose for subject {}'.format(idx))\n continue\n\n _rotation_matrix, _ = cv2.Rodrigues(rotation_vector)\n _rotation_matrix = np.matmul(_rotation_matrix, np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))\n _m = np.zeros((4, 4))\n _m[:3, :3] = _rotation_matrix\n _m[3, 3] = 1\n # Go from camera space to ROS space\n _camera_to_ros = [[0.0, 0.0, 1.0, 0.0],\n [-1.0, 0.0, 0.0, 0.0],\n [0.0, -1.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 1.0]]\n roll_pitch_yaw = list(euler_from_matrix(np.dot(_camera_to_ros, _m)))\n roll_pitch_yaw = limit_yaw(roll_pitch_yaw)\n\n phi_head, theta_head = get_phi_theta_from_euler(roll_pitch_yaw)\n # if EVAL:\n # head_phi.append(phi_head)\n # head_theta.append(theta_head)\n # face_image_resized = cv2.resize(subject.face_color, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)\n # head_pose_image = landmark_estimator.visualize_headpose_result(face_image_resized, (phi_head, theta_head))\n\n #color_image의 facebox에 headpose vector를 그림.\n if EVAL:\n head_pose_image, headpose_err = landmark_estimator.visualize_headpose_result(frame_img,faceboxes[idx], (phi_head, theta_head), people_list[people_idx],label)\n print(headpose_err)\n else:\n head_pose_image, headpose_err = landmark_estimator.visualize_headpose_result(frame_img,faceboxes[idx], (phi_head, theta_head), people_list[people_idx])\n\n frame_img = head_pose_image\n if EVAL:\n headpose_error.append(headpose_err)\n print(\"head pose error:\",headpose_err)\n\n if args.mode =='image':\n #show headpose\n # if args.vis_headpose:\n # plt.axis(\"off\")\n # plt.imshow(cv2.cvtColor(head_pose_image, cv2.COLOR_BGR2RGB))\n # plt.show()\n\n if args.save_headpose:\n cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0]+str(people_count) + '_headpose.jpg'), head_pose_image)\n people_count +=1\n #size 등 format 변경.\n input_r_list.append(gaze_estimator.input_from_image(subject.right_eye_color))\n input_l_list.append(gaze_estimator.input_from_image(subject.left_eye_color))\n input_head_list.append([theta_head, phi_head])\n valid_subject_list.append(idx)\n\n\n # if args.mode =='video':\n # # plt.axis(\"off\")\n # # plt.imshow(cv2.cvtColor(head_pose_image, cv2.COLOR_BGR2RGB))\n # # plt.show()\n # headpose_out_video.write(frame_img)\n\n if len(valid_subject_list) == 0:\n return\n\n # returns [subject : [gaze_pose]]\n gaze_est = gaze_estimator.estimate_gaze_twoeyes(inference_input_left_list=input_l_list,\n inference_input_right_list=input_r_list,\n\n inference_headpose_list=input_head_list)\n people_count = 1\n for subject_id, gaze, headpose in zip(valid_subject_list, gaze_est.tolist(), input_head_list):\n subject = subjects[subject_id]\n facebox = faceboxes[subject_id]\n people_idx = get_people(facebox[0])\n # Build visualizations\n # r_gaze_img = gaze_estimator.visualize_eye_result(subject.right_eye_color, gaze)\n # l_gaze_img = gaze_estimator.visualize_eye_result(subject.left_eye_color, gaze)\n # if EVAL:\n # gaze_theta.append(gaze[0])\n # gaze_phi.append(gaze[1])\n if EVAL:\n r_gaze_img, r_gaze_err = gaze_estimator.visualize_eye_result(frame_img, gaze, subject.leftcenter_coor, facebox,people_list[people_idx], \"gaze_r\", label)\n l_gaze_img, l_gaze_err = 
gaze_estimator.visualize_eye_result(r_gaze_img, gaze, subject.rightcenter_coor, facebox,people_list[people_idx], \"gaze_l\", label)\n else:\n r_gaze_img, r_gaze_err = gaze_estimator.visualize_eye_result(frame_img, gaze, subject.leftcenter_coor, facebox,people_list[people_idx], \"gaze_r\")\n l_gaze_img, l_gaze_err = gaze_estimator.visualize_eye_result(r_gaze_img, gaze, subject.rightcenter_coor, facebox,people_list[people_idx], \"gaze_l\")\n\n frame_img = l_gaze_img\n if EVAL:\n print(\"right gaze error:\",r_gaze_err)\n print(\"left gaze error:\",l_gaze_err)\n gaze_error.append(r_gaze_err)\n gaze_error.append(l_gaze_err)\n\n #show gaze image\n # if args.vis_gaze:\n # plt.axis(\"off\")\n # plt.imshow(cv2.cvtColor(s_gaze_img, cv2.COLOR_BGR2RGB))\n # plt.show()\n if args.mode =='image':\n if args.save_gaze:\n cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0]+str(people_count) + '_gaze.jpg'), frame_img)\n # cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_left.jpg'), subject.left_eye_color)\n # cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_right.jpg'), subject.right_eye_color)\n\n\n if args.save_estimate:\n with open(os.path.join(args.output_path, os.path.splitext(base_name)[0] + '_output.txt'), 'w+') as f:\n f.write(os.path.splitext(base_name)[0] + ', [' + str(headpose[1]) + ', ' + str(headpose[0]) + ']' +\n\n ', [' + str(gaze[1]) + ', ' + str(gaze[0]) + ']' + '\\n')\n people_count +=1\n if args.mode =='video':\n out_video.write(frame_img)\n # end = time.time()\n # delay_time = end-start\n # FPS = str(int(1/delay_time))\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Estimate gaze from images')\n parser.add_argument('im_path', type=str, default=os.path.abspath(os.path.join(script_path, './samples_gaze/')),\n nargs='?', help='Path to an image or a directory containing images')\n parser.add_argument('video_path', type=str, default=os.path.abspath(os.path.join(script_path, './samples_video/')),\n nargs='?', help='Path to an video or a directory containing videos')\n parser.add_argument('--calib-file', type=str, dest='calib_file', default=None, help='Camera calibration file')\n parser.add_argument('--vis-headpose', dest='vis_headpose', action='store_true', help='Display the head pose images')\n parser.add_argument('--no-vis-headpose', dest='vis_headpose', action='store_false', help='Do not display the head pose images')\n parser.add_argument('--save-headpose', dest='save_headpose', action='store_true', help='Save the head pose images')\n parser.add_argument('--no-save-headpose', dest='save_headpose', action='store_false', help='Do not save the head pose images')\n parser.add_argument('--vis-gaze', dest='vis_gaze', action='store_true', help='Display the gaze images')\n parser.add_argument('--no-vis-gaze', dest='vis_gaze', action='store_false', help='Do not display the gaze images')\n parser.add_argument('--save-gaze', dest='save_gaze', action='store_true', help='Save the gaze images')\n parser.add_argument('--save-estimate', dest='save_estimate', action='store_true', help='Save the predictions in a text file')\n parser.add_argument('--no-save-gaze', dest='save_gaze', action='store_false', help='Do not save the gaze images')\n parser.add_argument('--gaze_backend', choices=['tensorflow', 'pytorch'], default='pytorch')\n parser.add_argument('--mode', choices=['video', 'image'], default='image')\n parser.add_argument('--output_path', type=str, 
default=os.path.abspath(os.path.join(script_path, './samples_gaze/out')),\n help='Output directory for head pose and gaze images')\n parser.add_argument('--models', nargs='+', type=str, default=[os.path.abspath(os.path.join(script_path, '../model_nets/Model_allsubjects1.h5'))],\n help='List of gaze estimators')\n parser.add_argument('--device-id-facedetection', dest=\"device_id_facedetection\", type=str, default='cuda:0', help='Pytorch device id. Set to \"cpu:0\" to disable cuda')\n\n parser.set_defaults(vis_gaze=True)\n parser.set_defaults(save_gaze=True)\n parser.set_defaults(vis_headpose=False)\n parser.set_defaults(save_headpose=True)\n parser.set_defaults(save_estimate=False)\n\n args = parser.parse_args()\n\n\n image_path_list = []\n video_path_list = []\n\n\n if args.mode == 'image':\n if os.path.isfile(args.im_path):\n image_path_list.append(os.path.split(args.im_path)[1])\n args.im_path = os.path.split(args.im_path)[0]\n elif os.path.isdir(args.im_path):\n for image_file_name in sorted(os.listdir(args.im_path)):\n if image_file_name.endswith('.jpg') or image_file_name.endswith('.png'):\n if '_gaze' not in image_file_name and '_headpose' not in image_file_name:\n image_path_list.append(image_file_name)\n else:\n tqdm.write('Provide either a path to an image or a path to a directory containing images')\n sys.exit(1)\n else:\n args.output_path = os.path.abspath(os.path.join(script_path, './samples_video/out'))\n if os.path.isfile(args.video_path):\n video_path_list.append(os.path.split(args.video_path)[1])\n args.video_path_list = os.path.split(video_path_list)[0]\n elif os.path.isdir(args.video_path):\n for video_file_name in sorted(os.listdir(args.video_path)):\n if video_file_name.endswith('.mp4') or video_file_name.endswith('.avi'):\n if '_gaze' not in video_path_list and '_headpose' not in video_path_list:\n video_path_list.append(video_file_name)\n else:\n tqdm.write('Provide either a path to an video or a path to a directory containing videos')\n sys.exit(1)\n print(\"========================video list==================\")\n print(video_path_list)\n tqdm.write('Loading networks')\n landmark_estimator = LandmarkMethodBase(device_id_facedetection=args.device_id_facedetection,\n checkpoint_path_face=os.path.abspath(os.path.join(script_path, \"rt_gene/model_nets/SFD/s3fd_facedetector.pth\")),\n checkpoint_path_landmark=os.path.abspath(\n os.path.join(script_path, \"rt_gene/model_nets/phase1_wpdc_vdc.pth.tar\")),\n model_points_file=os.path.abspath(os.path.join(script_path, \"rt_gene/model_nets/face_model_68.txt\")))\n\n if args.gaze_backend == \"tensorflow\":\n from rt_gene.estimate_gaze_tensorflow import GazeEstimator\n\n gaze_estimator = GazeEstimator(\"/gpu:0\", args.models)\n elif args.gaze_backend == \"pytorch\":\n from rt_gene.estimate_gaze_pytorch import GazeEstimator\n\n gaze_estimator = GazeEstimator(\"cuda:0\", args.models)\n else:\n raise ValueError(\"Incorrect gaze_base backend, choices are: tensorflow or pytorch\")\n\n if not os.path.isdir(args.output_path):\n os.makedirs(args.output_path)\n\n if args.mode == 'image':\n for image_file_name in tqdm(image_path_list):\n tqdm.write('Estimate gaze on ' + image_file_name)\n image = cv2.imread(os.path.join(args.im_path, image_file_name))\n if image is None:\n tqdm.write('Could not load ' + image_file_name + ', skipping this image.')\n continue\n\n if args.calib_file is not None:\n _dist_coefficients, _camera_matrix = load_camera_calibration(args.calib_file)\n else:\n im_width, im_height = image.shape[1], image.shape[0]\n # 
tqdm.write('WARNING!!! You should provide the camera calibration file, otherwise you might get bad results. Using a crude approximation!')\n _dist_coefficients, _camera_matrix = np.zeros((1, 5)), np.array(\n [[im_height, 0.0, im_width / 2.0], [0.0, im_height, im_height / 2.0], [0.0, 0.0, 1.0]])\n\n estimate_gaze(image_file_name, image, _dist_coefficients, _camera_matrix)\n else:\n print(\"=-------------------------video path list--------------------\")\n print(video_path_list)\n\n allVideo_total_error = 0\n for video_file_name in tqdm(video_path_list):\n tqdm.write('Estimate gaze on ' + video_file_name)\n\n video = cv2.VideoCapture(os.path.join(args.video_path, video_file_name))\n width = video.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)\n fps = video.get(cv2.CAP_PROP_FPS)\n fourcc = cv2.VideoWriter_fourcc(*'DIVX') # 코덱 정의\n\n #head pose + gaze\n out_path =os.path.join(args.output_path, video_file_name)\n out_video = cv2.VideoWriter(out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n\n #head pose와 gaze를 각각 출력하고 싶을 때.\n # gaze_out_path = os.path.join(args.output_path, 'gaze_'+video_file_name)\n # headpose_out_path = os.path.join(args.output_path, 'headpose'+video_file_name)\n # gaze_out_video = cv2.VideoWriter(gaze_out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n # headpose_out_video = cv2.VideoWriter(headpose_out_path, fourcc, fps, (int(width), int(height))) # VideoWriter 객체 정의\n\n label = []\n if EVAL:\n label_path = \"s000_label_combined.txt\"\n if SHIFT:\n # Shift video name :Shift_{num}.mp4\n # 해당하는 라벨을 고정으로 넘김.\n image_name = video_file_name.split(\"_\")[1]\n image_name = image_name.split(\".\")[0]\n with open(label_path, \"r\") as f:\n for line in f:\n line = line.split(\",\")\n if line[0] != image_name:\n continue\n else:\n label = line\n print(f\"======= label found {line[0]} ======\")\n break\n\n else:\n f = open(label_path, \"r\")\n #header 제거\n f.readline()\n\n count=0\n while video.isOpened():\n ret, frame = video.read()\n if not ret:\n break\n #RT_GENE DataSet -> 한줄씩 읽어와 넘김.\n if EVAL:\n if not SHIFT:\n label = f.readline()\n label = label.split(\",\")\n\n for i in range(1,5):\n label[i] = float(label[i])\n\n\n count+=1\n print(\"frame\",count)\n\n # 대상의 수에 맞게 people_list 초기화\n if count == 1:\n num, bboxes_l = landmark_estimator.get_init_value(frame)\n init_previous(num, bboxes_l)\n if not ret:\n print(\"Error:: Frame Road Fail\")\n break\n\n\n if args.calib_file is not None:\n _dist_coefficients, _camera_matrix = load_camera_calibration(args.calib_file)\n\n else:\n im_width, im_height = frame.shape[1], frame.shape[0]\n # tqdm.write(\n # 'WARNING!!! You should provide the camera calibration file, otherwise you might get bad results. 
Using a crude approximation!')\n _dist_coefficients, _camera_matrix = np.zeros((1, 5)), np.array(\n [[im_height, 0.0, im_width / 2.0], [0.0, im_height, im_height / 2.0], [0.0, 0.0, 1.0]])\n print(label)\n estimate_gaze(video_file_name, frame, _dist_coefficients, _camera_matrix,label)\n\n if EVAL:\n average_headpose_err = 0\n average_gaze_err =0\n average_total_err = 0\n frame_num = len(headpose_error)\n\n for error in headpose_error:\n average_headpose_err+= error\n for error in gaze_error:\n average_gaze_err+= error\n\n average_headpose_err /=frame_num\n average_gaze_err/= (frame_num*2)\n total_error = (average_headpose_err +average_gaze_err) /2\n allVideo_total_error+= total_error\n\n print(\"==================Average Error=================\")\n\n print(\"frame :\",frame_num)\n print(\"Average Headpose Error :\",average_headpose_err)\n print(\"Average Gaze Error :\",average_gaze_err)\n print(\"Average Error(Total) :\", total_error)\n\n video.release()\n out_video.release()\n\n if EVAL:\n f.close()\n headpose_error.clear()\n gaze_error.clear()\n if EVAL:\n print(\"all Video Average Error :\",allVideo_total_error/len(video_path_list))\n\n\n\n\n\n # if EVAL:\n # label_path = \"s000_label_combined.txt\"\n # average_head_phi=0\n # average_head_theta =0\n # average_gaze_phi=0\n # average_gaze_theta=0\n #\n # frame_num = len(head_theta)\n # skip_frame =0\n #\n # image_name = video_file_name.split(\"_\")[1]\n # image_name = image_name.split(\".\")[0]\n\n # with open(label_path,\"r\") as f:\n # label=[]\n #\n # for line in f:\n # line = line.split(\",\")\n # if line[0] != image_name:\n # continue\n # else:\n # label = np.copy(line)\n # print(\"======= label found ======\")\n # break\n # for i in range(frame_num):\n # if (head_phi[i] == -100):\n # skip_frame += 1\n # continue\n # average_head_phi += abs(head_phi[i] - float(label[1].strip()))\n # average_head_theta += abs(head_phi[i] - float(label[2].strip()))\n # average_gaze_phi += abs(head_phi[i] - float(label[3].strip()))\n # average_gaze_theta += abs(head_phi[i] - float(label[4].strip()))\n\n\n\n # with open(label_path,\"r\") as f:\n # #header 제거\n # line = f.readline()\n # for i in range(frame_num):\n # line = f.readline()\n # if(head_phi[i] == -100):\n # skip_frame+=1\n # continue\n # line = line.split(\",\")\n #\n # average_head_phi += abs(head_phi[i] - float(line[1].strip()))\n # average_head_theta += abs(head_phi[i] - float(line[2].strip()))\n # average_gaze_phi += abs(head_phi[i] - float(line[3].strip()))\n # average_gaze_theta += abs(head_phi[i] - float(line[4].strip()))\n # average_head_phi /=frame_num\n # average_head_theta /=frame_num\n # average_gaze_phi /=frame_num\n # average_gaze_theta /=frame_num\n #\n # total_error = (average_head_theta + average_head_phi +average_gaze_theta +average_gaze_phi) /4\n #\n # print(\"============================= Average Error ==========================\")\n # print(\"evaluate frame :\",frame_num-skip_frame)\n # print(\"Head_phi Error =\",average_head_phi)\n # print(\"Head_theta Error =\",average_head_theta)\n # print(\"Gaze_phi Error =\",average_gaze_phi)\n # print(\"Gaze_theta Error =\",average_gaze_theta)\n # print(\"Total Error =\",total_error)\n\n\n\n\n\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mbmccoy/jax
[ "74346f464bc8369d81964305fcf05f95f43fb2d3", "74346f464bc8369d81964305fcf05f95f43fb2d3" ]
[ "jaxlib/pocketfft.py", "tests/custom_object_test.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# flatbuffers needs importlib.util but fails to import it itself.\nimport importlib.util # noqa: F401\nfrom typing import List\n\nimport jaxlib.mlir.ir as ir\nimport jaxlib.mlir.dialects.mhlo as mhlo\n\n\nfrom . import _pocketfft\nfrom . import pocketfft_flatbuffers_py_generated as pd\nimport numpy as np\n\nimport flatbuffers\nfrom jaxlib import xla_client\n\nfor _name, _value in _pocketfft.registrations().items():\n xla_client.register_custom_call_target(_name, _value, platform=\"cpu\")\n\nFftType = xla_client.FftType\n\nflatbuffers_version_2 = hasattr(flatbuffers, \"__version__\")\n\n\ndef _pocketfft_descriptor(shape: List[int], dtype, fft_type: FftType,\n fft_lengths: List[int]) -> bytes:\n n = len(shape)\n assert len(fft_lengths) >= 1\n assert len(fft_lengths) <= n, (fft_lengths, n)\n\n builder = flatbuffers.Builder(128)\n\n forward = fft_type in (FftType.FFT, FftType.RFFT)\n if fft_type == FftType.RFFT:\n pocketfft_type = pd.PocketFftType.R2C\n\n assert dtype in (np.float32, np.float64), dtype\n out_dtype = np.dtype(np.complex64 if dtype == np.float32 else np.complex128)\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.float32 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)\n out_shape = list(shape)\n out_shape[-1] = out_shape[-1] // 2 + 1\n\n elif fft_type == FftType.IRFFT:\n pocketfft_type = pd.PocketFftType.C2R\n assert np.issubdtype(dtype, np.complexfloating), dtype\n\n out_dtype = np.dtype(np.float32 if dtype == np.complex64 else np.float64)\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):-1] == fft_lengths[:-1]\n out_shape = list(shape)\n out_shape[-1] = fft_lengths[-1]\n assert (out_shape[-1] // 2 + 1) == shape[-1]\n else:\n pocketfft_type = pd.PocketFftType.C2C\n\n assert np.issubdtype(dtype, np.complexfloating), dtype\n out_dtype = dtype\n pocketfft_dtype = (\n pd.PocketFftDtype.COMPLEX64\n if dtype == np.complex64 else pd.PocketFftDtype.COMPLEX128)\n\n assert shape[-len(fft_lengths):] == fft_lengths, (shape, fft_lengths)\n out_shape = shape\n\n # PocketFft does not allow size 0 dimensions.\n if 0 in shape or 0 in out_shape:\n return b\"\", out_dtype, out_shape\n\n # Builds a PocketFftDescriptor flatbuffer. 
This descriptor is passed to the\n # C++ kernel to describe the FFT to perform.\n pd.PocketFftDescriptorStartShapeVector(builder, n)\n for d in reversed(shape if fft_type != FftType.IRFFT else out_shape):\n builder.PrependUint64(d)\n if flatbuffers_version_2:\n pocketfft_shape = builder.EndVector()\n else:\n pocketfft_shape = builder.EndVector(n)\n\n pd.PocketFftDescriptorStartStridesInVector(builder, n)\n stride = dtype.itemsize\n for d in reversed(shape):\n builder.PrependUint64(stride)\n stride *= d\n if flatbuffers_version_2:\n strides_in = builder.EndVector()\n else:\n strides_in = builder.EndVector(n)\n pd.PocketFftDescriptorStartStridesOutVector(builder, n)\n stride = out_dtype.itemsize\n for d in reversed(out_shape):\n builder.PrependUint64(stride)\n stride *= d\n if flatbuffers_version_2:\n strides_out = builder.EndVector()\n else:\n strides_out = builder.EndVector(n)\n\n pd.PocketFftDescriptorStartAxesVector(builder, len(fft_lengths))\n for d in range(len(fft_lengths)):\n builder.PrependUint32(n - d - 1)\n if flatbuffers_version_2:\n axes = builder.EndVector()\n else:\n axes = builder.EndVector(len(fft_lengths))\n\n scale = 1. if forward else (1. / np.prod(fft_lengths))\n pd.PocketFftDescriptorStart(builder)\n pd.PocketFftDescriptorAddDtype(builder, pocketfft_dtype)\n pd.PocketFftDescriptorAddFftType(builder, pocketfft_type)\n pd.PocketFftDescriptorAddShape(builder, pocketfft_shape)\n pd.PocketFftDescriptorAddStridesIn(builder, strides_in)\n pd.PocketFftDescriptorAddStridesOut(builder, strides_out)\n pd.PocketFftDescriptorAddAxes(builder, axes)\n pd.PocketFftDescriptorAddForward(builder, forward)\n pd.PocketFftDescriptorAddScale(builder, scale)\n descriptor = pd.PocketFftDescriptorEnd(builder)\n builder.Finish(descriptor)\n return builder.Output(), out_dtype, out_shape\n\n\ndef pocketfft_mhlo(a, dtype, *, fft_type: FftType, fft_lengths: List[int]):\n \"\"\"PocketFFT kernel for CPU.\"\"\"\n a_type = ir.RankedTensorType(a.type)\n n = len(a_type.shape)\n\n fft_lengths = list(fft_lengths)\n descriptor_bytes, out_dtype, out_shape = _pocketfft_descriptor(\n list(a_type.shape), dtype, fft_type, fft_lengths)\n\n if out_dtype == np.float32:\n out_type = ir.F32Type.get()\n elif out_dtype == np.float64:\n out_type = ir.F64Type.get()\n elif out_dtype == np.complex64:\n out_type = ir.ComplexType.get(ir.F32Type.get())\n elif out_dtype == np.complex128:\n out_type = ir.ComplexType.get(ir.F64Type.get())\n else:\n raise ValueError(f\"Unknown output type {out_dtype}\")\n\n if 0 in a_type.shape or 0 in out_shape:\n zero = mhlo.ConstOp(ir.RankedTensorType.get([], out_type),\n ir.DenseElementsAttr.get(np.array(0, dtype=out_dtype),\n type=out_type))\n return mhlo.BroadcastOp(\n ir.RankedTensorType.get(out_shape, out_type),\n zero,\n ir.DenseElementsAttr.get(np.asarray(out_shape, np.int64))).result\n\n u8_type = ir.IntegerType.get_unsigned(8)\n descriptor = mhlo.ConstOp(\n ir.RankedTensorType.get([len(descriptor_bytes)], u8_type),\n ir.DenseElementsAttr.get(np.frombuffer(descriptor_bytes, dtype=np.uint8),\n type=u8_type))\n layout = ir.DenseIntElementsAttr.get(np.arange(n - 1, -1, -1),\n type=ir.IndexType.get())\n return mhlo.CustomCallOp(\n [ir.RankedTensorType.get(out_shape, out_type)],\n [descriptor, a],\n call_target_name = ir.StringAttr.get(\"pocketfft\"),\n has_side_effect=ir.BoolAttr.get(False),\n backend_config=ir.StringAttr.get(\"\"),\n api_version=ir.IntegerAttr.get(ir.IntegerType.get_signless(32), 2),\n called_computations=ir.ArrayAttr.get([]),\n operand_layouts=ir.ArrayAttr.get([\n 
ir.DenseIntElementsAttr.get(np.array([0], np.int64),\n type=ir.IndexType.get()),\n layout,\n ]),\n result_layouts=ir.ArrayAttr.get([layout])).result\n", "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl.testing import absltest, parameterized\n\nimport numpy as np\n\nfrom jax._src import test_util as jtu\nimport jax.numpy as jnp\nfrom jax import core, jit, lax, make_jaxpr\nfrom jax._src import device_array\nfrom jax._src import dispatch\nfrom jax._src import dtypes\nfrom jax.interpreters import mlir\nfrom jax.interpreters import xla\nfrom jax._src.lib.mlir import ir\nfrom jax._src.lib import xla_bridge, xla_client\nxc = xla_client\nxb = xla_bridge\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n# TODO(jakevdp): use a setup/teardown method to populate and unpopulate all the\n# dictionaries associated with the following objects.\n\n# Define a sparse array data structure. The important feature here is that\n# it is a jaxpr object that is backed by two device buffers.\nclass SparseArray:\n \"\"\"Simple sparse COO array data structure.\"\"\"\n def __init__(self, aval, data, indices):\n self.aval = aval\n self.shape = aval.shape\n self.data = data\n self.indices = indices\n\n @property\n def index_dtype(self):\n return self.indices.dtype\n\n @property\n def dtype(self):\n return self.data.dtype\n\n @property\n def nnz(self):\n return self.data.shape[0]\n\n def __repr__(self):\n return repr(list((tuple(ind), d) for ind, d in zip(self.indices, self.data)))\n\n\nclass AbstractSparseArray(core.ShapedArray):\n __slots__ = ['index_dtype', 'nnz', 'data_aval', 'indices_aval']\n\n def __init__(self, shape, dtype, index_dtype, nnz, weak_type=False,\n named_shape=None):\n super().__init__(shape, dtypes.canonicalize_dtype(dtype))\n named_shape = {} if named_shape is None else named_shape\n self.index_dtype = index_dtype\n self.nnz = nnz\n self.data_aval = core.ShapedArray((nnz,), dtypes.canonicalize_dtype(dtype),\n weak_type, named_shape)\n self.indices_aval = core.ShapedArray(\n (nnz, len(shape)), dtypes.canonicalize_dtype(index_dtype),\n named_shape=named_shape)\n\n def update(self, shape=None, dtype=None, index_dtype=None, nnz=None,\n weak_type=None, named_shape=None):\n if shape is None:\n shape = self.shape\n if dtype is None:\n dtype = self.dtype\n if index_dtype is None:\n index_dtype = self.dtype\n if nnz is None:\n nnz = self.nnz\n if weak_type is None:\n weak_type = self.weak_type\n if named_shape is None:\n named_shape = self.named_shape\n return AbstractSparseArray(\n shape, dtype, index_dtype, nnz, weak_type, named_shape)\n\n def strip_weak_type(self):\n return self\n\n @core.aval_property\n def data(self):\n return sp_data_p.bind(self)\n\n @core.aval_property\n def indices(self):\n return sp_indices_p.bind(self)\n\nclass ConcreteSparseArray(AbstractSparseArray):\n pass\n\ndef sparse_array_result_handler(device, aval):\n def build_sparse_array(data_buf, indices_buf):\n data = device_array.make_device_array(aval.data_aval, device, 
data_buf)\n indices = device_array.make_device_array(aval.indices_aval, device, indices_buf)\n return SparseArray(aval, data, indices)\n return build_sparse_array\n\ndef sparse_array_shape_handler(a):\n return (\n xc.Shape.array_shape(a.data_aval.dtype, a.data_aval.shape),\n xc.Shape.array_shape(a.indices_aval.dtype, a.indices_aval.shape),\n )\n\ndef sparse_array_device_put_handler(a, device):\n return (\n xb.get_device_backend(device).buffer_from_pyval(a.data, device),\n xb.get_device_backend(device).buffer_from_pyval(a.indices, device)\n )\n\ncore.pytype_aval_mappings[SparseArray] = lambda x: x.aval\ncore.raise_to_shaped_mappings[AbstractSparseArray] = lambda aval, _: aval\nxla.pytype_aval_mappings[SparseArray] = lambda x: x.aval\nxla.canonicalize_dtype_handlers[SparseArray] = lambda x: x\ndispatch.device_put_handlers[SparseArray] = sparse_array_device_put_handler\ndispatch.result_handlers[AbstractSparseArray] = sparse_array_result_handler\ndispatch.num_buffers_handlers[AbstractSparseArray] = lambda _: 2\nxla.xla_shape_handlers[AbstractSparseArray] = sparse_array_shape_handler\n\ndef sparse_array_mlir_type_handler(a):\n return (\n ir.RankedTensorType.get(\n a.data_aval.shape, mlir.dtype_to_ir_type(a.data_aval.dtype)),\n ir.RankedTensorType.get(\n a.indices_aval.shape, mlir.dtype_to_ir_type(a.indices_aval.dtype)),\n )\n\nmlir.ir_type_handlers[AbstractSparseArray] = sparse_array_mlir_type_handler\n\nsp_indices_p = core.Primitive('sp_indices')\n\n@sp_indices_p.def_impl\ndef _sp_indices_impl(mat):\n return mat.indices\n\n@sp_indices_p.def_abstract_eval\ndef _sp_indices_abstract_eval(mat):\n return mat.indices_aval\n\n# Note: cannot use lower_fun to define attribute access primitives\n# because it leads to infinite recursion.\n\ndef _sp_indices_mhlo_lowering(ctx, data_and_indices):\n return [data_and_indices[1]]\n\nmlir.register_lowering(sp_indices_p, _sp_indices_mhlo_lowering)\n\nsp_data_p = core.Primitive('sp_data')\n\n@sp_data_p.def_impl\ndef _sp_data_impl(mat):\n return mat.data\n\n@sp_data_p.def_abstract_eval\ndef _sp_data_abstract_eval(mat):\n return mat.data_aval\n\n# Note: cannot use lower_fun to define attribute access primitives\n# because it leads to infinite recursion.\n\ndef _sp_data_mhlo_lowering(ctx, data_and_indices):\n return [data_and_indices[0]]\n\nmlir.register_lowering(sp_data_p, _sp_data_mhlo_lowering)\n\ndef identity(x):\n return identity_p.bind(x)\n\nidentity_p = core.Primitive('identity')\n\n@identity_p.def_impl\ndef _identity_impl(mat):\n return mat\n\n@identity_p.def_abstract_eval\ndef _identity_abstract_eval(mat):\n return AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)\n\nmlir.register_lowering(\n identity_p, mlir.lower_fun(_identity_impl, multiple_results=False))\n\ndef split(x):\n return split_p.bind(x)\n\nsplit_p = core.Primitive('split')\nsplit_p.multiple_results = True\n\n@split_p.def_impl\ndef _split_impl(mat):\n return mat, mat\n\n@split_p.def_abstract_eval\ndef _split_abstract_eval(mat):\n m = AbstractSparseArray(mat.shape, mat.dtype, mat.index_dtype, mat.nnz)\n return m, m\n\nmlir.register_lowering(\n split_p, mlir.lower_fun(_split_impl, multiple_results=True))\n\ndef make_sparse_array(rng, shape, dtype, nnz=0.2):\n mat = rng(shape, dtype)\n size = int(np.prod(shape))\n if 0 < nnz < 1:\n nnz = nnz * size\n nnz = int(nnz)\n if nnz == 0:\n mat = np.zeros_like(mat)\n elif nnz < size:\n # TODO(jakevdp): do we care about duplicates?\n cutoff = np.sort(mat.ravel())[nnz]\n mat[mat >= cutoff] = 0\n nz = (mat != 0)\n data = jnp.array(mat[nz])\n 
indices = jnp.array(np.where(nz)).T\n aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))\n return SparseArray(aval, data, indices)\n\ndef matvec(mat, v):\n v = jnp.asarray(v)\n assert v.ndim == 1\n assert len(mat.shape) == 2\n assert v.shape[0] == mat.shape[1]\n rows = mat.indices[:, 0]\n cols = mat.indices[:, 1]\n dv = mat.data * v[cols]\n return jnp.zeros(mat.shape[0], dtype=dv.dtype).at[rows].add(dv)\n\n\nclass Empty:\n def __init__(self, aval):\n self.aval = aval\n\nclass AbstractEmpty(core.AbstractValue):\n\n def join(self, other):\n assert isinstance(other, self.__class__), other\n return self\n\n def __hash__(self):\n return hash(())\n\n def __eq__(self, other):\n return isinstance(other, AbstractEmpty)\n\nclass ConcreteEmpty(AbstractEmpty):\n pass\n\n\ncore.pytype_aval_mappings[Empty] = lambda x: ConcreteEmpty()\ncore.raise_to_shaped_mappings[AbstractEmpty] = lambda aval, _: aval\nxla.pytype_aval_mappings[Empty] = lambda x: AbstractEmpty()\nxla.canonicalize_dtype_handlers[Empty] = lambda x: x\ndispatch.device_put_handlers[Empty] = lambda _, __: ()\ndispatch.result_handlers[AbstractEmpty] = lambda _, __: lambda: Empty(AbstractEmpty())\ndispatch.num_buffers_handlers[AbstractEmpty] = lambda _: 0\nxla.xla_shape_handlers[AbstractEmpty] = lambda _: ()\n\n\nclass CustomObjectTest(jtu.JaxTestCase):\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_compile={}_primitive={}\".format(compile, primitive),\n \"compile\": compile, \"primitive\": primitive}\n for primitive in [True, False]\n for compile in [True, False]))\n def testSparseIdentity(self, compile, primitive):\n f = identity if primitive else (lambda x: x)\n f = jit(f) if compile else f\n rng = jtu.rand_default(self.rng())\n M = make_sparse_array(rng, (10,), jnp.float32)\n M2 = f(M)\n\n jaxpr = make_jaxpr(f)(M).jaxpr\n core.check_jaxpr(jaxpr)\n\n self.assertEqual(M.dtype, M2.dtype)\n self.assertEqual(M.index_dtype, M2.index_dtype)\n self.assertAllClose(M.data, M2.data)\n self.assertAllClose(M.indices, M2.indices)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_compile={}\".format(compile),\n \"compile\": compile}\n for compile in [True, False]))\n def testSparseSplit(self, compile):\n f = jit(split) if compile else split\n rng = jtu.rand_default(self.rng())\n M = make_sparse_array(rng, (10,), jnp.float32)\n M2, M3 = f(M)\n\n jaxpr = make_jaxpr(f)(M).jaxpr\n core.check_jaxpr(jaxpr)\n\n for MM in M2, M3:\n self.assertEqual(M.dtype, MM.dtype)\n self.assertEqual(M.index_dtype, MM.index_dtype)\n self.assertArraysEqual(M.data, MM.data)\n self.assertArraysEqual(M.indices, MM.indices)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_compile={}_primitive={}\".format(compile, primitive),\n \"compile\": compile, \"primitive\": primitive}\n for primitive in [True, False]\n for compile in [True, False]))\n def testSparseLaxLoop(self, compile, primitive):\n rng = jtu.rand_default(self.rng())\n f = identity if primitive else (lambda x: x)\n f = jit(f) if compile else f\n body_fun = lambda _, A: f(A)\n M = make_sparse_array(rng, (10,), jnp.float32)\n lax.fori_loop(0, 10, body_fun, M)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_attr={}\".format(attr), \"attr\": attr}\n for attr in [\"data\", \"indices\"]))\n def testSparseAttrAccess(self, attr):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [make_sparse_array(rng, (10,), jnp.float32)]\n f = lambda x: getattr(x, attr)\n 
self._CompileAndCheck(f, args_maker)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n {\"testcase_name\": \"_{}\".format(\n jtu.format_shape_dtype_string(shape, dtype)),\n \"shape\": shape, \"dtype\": dtype}\n for shape in [(3, 3), (2, 6), (6, 2)]\n for dtype in jtu.dtypes.floating))\n def testSparseMatvec(self, shape, dtype):\n rng = jtu.rand_default(self.rng())\n args_maker = lambda: [make_sparse_array(rng, shape, dtype), rng(shape[-1:], dtype)]\n self._CompileAndCheck(matvec, args_maker)\n\n def testLowerToNothing(self):\n empty = Empty(AbstractEmpty())\n jaxpr = make_jaxpr(jit(lambda e: e))(empty).jaxpr\n core.check_jaxpr(jaxpr)\n\n # cannot return a unit, because CompileAndCheck assumes array output.\n testfunc = lambda e: None\n args_maker = lambda: [empty]\n self._CompileAndCheck(testfunc, args_maker)\n\n def testConstantHandler(self):\n def make_const_array():\n data = np.arange(3.0)\n indices = np.arange(3)[:, None]\n shape = (5,)\n aval = AbstractSparseArray(shape, data.dtype, indices.dtype, len(indices))\n return SparseArray(aval, data, indices)\n out1 = make_const_array()\n out2 = jit(make_const_array)()\n self.assertArraysEqual(out1.data, out2.data)\n self.assertArraysEqual(out1.indices, out2.indices)\n\n\nif __name__ == '__main__':\n absltest.main(testLoader=jtu.JaxTestLoader())\n" ]
[ [ "numpy.asarray", "numpy.arange", "numpy.issubdtype", "numpy.dtype", "numpy.frombuffer", "numpy.prod", "numpy.array" ], [ "numpy.arange", "numpy.zeros_like", "numpy.where", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Qiza-lyhm/mmcv-1
[ "362a90f8bfffe62d5802925944f540ed16b2731e", "362a90f8bfffe62d5802925944f540ed16b2731e" ]
[ "tests/test_ops/test_bbox.py", "mmcv/device/mlu/utils.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport pytest\nimport torch\n\nfrom mmcv.device.mlu import IS_MLU_AVAILABLE\nfrom mmcv.utils import IS_CUDA_AVAILABLE\n\n\nclass TestBBox(object):\n\n def _test_bbox_overlaps(self, device, dtype=torch.float):\n from mmcv.ops import bbox_overlaps\n b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0],\n [7.0, 7.0, 8.0, 8.0]]).to(device).type(dtype)\n b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]])\n out = bbox_overlaps(b1, b2, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0,\n 4.0]]).to(device).type(dtype)\n b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([0.33333334, 0.5])\n out = bbox_overlaps(b1, b2, aligned=True, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).to(device).type(dtype)\n b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0],\n [2.0, 0.0, 3.0, 3.0], [1.0, 0.0, 2.0,\n 3.0]]).to(device).type(dtype)\n should_output = np.array([0, 0.2, 0.5, 0.5])\n out = bbox_overlaps(b1, b2, offset=1)\n assert np.allclose(out.cpu().numpy(), should_output, 1e-2)\n\n @pytest.mark.parametrize('device', [\n pytest.param(\n 'cuda',\n marks=pytest.mark.skipif(\n not IS_CUDA_AVAILABLE, reason='requires CUDA support')),\n pytest.param(\n 'mlu',\n marks=pytest.mark.skipif(\n not IS_MLU_AVAILABLE, reason='requires MLU support'))\n ])\n def test_bbox_overlaps_float(self, device):\n self._test_bbox_overlaps(device, dtype=torch.float)\n\n @pytest.mark.parametrize('device', [\n pytest.param(\n 'cuda',\n marks=pytest.mark.skipif(\n not IS_CUDA_AVAILABLE, reason='requires CUDA support')),\n pytest.param(\n 'mlu',\n marks=pytest.mark.skipif(\n not IS_MLU_AVAILABLE, reason='requires MLU support'))\n ])\n def test_bbox_overlaps_half(self, device):\n self._test_bbox_overlaps(device, dtype=torch.half)\n", "# Copyright (c) OpenMMLab. All rights reserved.\ndef is_mlu_available():\n try:\n import torch\n return (hasattr(torch, 'is_mlu_available')\n and torch.is_mlu_available())\n except Exception:\n return False\n\n\nIS_MLU_AVAILABLE = is_mlu_available()\n" ]
[ [ "numpy.array", "torch.tensor" ], [ "torch.is_mlu_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZenanLin1999/FPGA_accerator_with_mnist_dataset
[ "1db3d698ebe3cf57050af9465e0b83ffef717d25" ]
[ "int16_version/tensorflow_mnist/mnist_int16.py" ]
[ "# -*- coding: utf-8 -*-\nimport input_data\nimport tensorflow as tf\nimport numpy as np\nfrom tf_fix import *\n\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nsess = tf.InteractiveSession()\n\nwith tf.name_scope('input'): \n\tx = tf.placeholder(\"float\", shape=[None, 784])\n\ty_ = tf.placeholder(\"float\", shape=[None, 10])\n\ndef weight_variable(shape):\n\tinitial = tf.truncated_normal(shape, stddev=0.1);\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_4x4(x):\n\treturn tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4,1], padding='SAME')\n\n#First Convolutional Layer\nwith tf.name_scope('1st_CNN'): \n\tW_conv1 = weight_variable([3, 3, 1, 32])\n\tx_image = tf.reshape(x, [-1,28,28,1])\n\th_conv1 = conv2d(x_image, W_conv1) #[28,28,32]\n\th_pool1 = max_pool_4x4(h_conv1) #[7,7,32]\n\n#Densely Connected Layer\nwith tf.name_scope('Densely_NN'): \n\tW_fc1 = weight_variable([ 7* 7* 32, 256])\n\th_pool2_flat = tf.reshape(h_pool1, [-1, 7* 7* 32])\n\th_fc1= tf.matmul(h_pool2_flat , W_fc1) # [256]\n\n#Dropout\nwith tf.name_scope('Dropout'):\n\tkeep_prob = tf.placeholder(\"float\")\n\th_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n#Readout Layer\nwith tf.name_scope('Softmax'):\n\tW_fc2 = weight_variable([256, 10])\n\th_fc2 = tf.matmul(h_fc1_drop, W_fc2)\n\ty_conv=tf.nn.softmax(h_fc2)\n\nwith tf.name_scope('Loss'):\n\tcross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))\n\nwith tf.name_scope('Train'):\n\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\t#train_step = tf.train.AdamOptimizer(5e-5).minimize(cross_entropy)\n\nwith tf.name_scope('Accuracy'):\n\tcorrect_prediction = tf.equal(tf.argmax(y_conv ,1), tf.argmax(y_,1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction , \"float\"))\n\ntf.initialize_all_variables().run()\n\nfor i in range(1000):\n\tbatch = mnist.train.next_batch(400);\n\tif i%200 == 0:\n\t\ttrain_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob:1.0});\n\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy));\n\ttrain_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob:0.5});\n\nprint(\"test accuracy %g\"%accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\nprint(\"=================================================\")\nf_cfg = open('./record/MNIST_LARGE_cfg.py', 'w')\n\nGet_Feature_Fraction_Part(x,\"img\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\nRecord_Weight(W_conv1,\"W_conv1\",f_cfg)\n#print(W_conv1.eval())\nGet_Feature_Fraction_Part(h_conv1,\"h_conv1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\nGet_Feature_Fraction_Part(h_pool1,\"h_pool1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\n\t\nRecord_Weight(tf.reshape(W_fc1,[7,7,32,256]),\"W_fc1\",f_cfg)\nGet_Feature_Fraction_Part(h_fc1,\"h_fc1\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\n\t\nRecord_Weight(tf.reshape(W_fc2,[1,1,256,10]),\"W_fc2\",f_cfg)\nGet_Feature_Fraction_Part(h_fc2,\"h_fc2\",{x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0},f_cfg)\t\t\n\nf_cfg.close();\nprint(\"=================================================\")\n\nsess.close()\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.Variable", "tensorflow.nn.max_pool", "tensorflow.reshape", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.initialize_all_variables", "tensorflow.name_scope", "tensorflow.train.AdamOptimizer", "tensorflow.log", "tensorflow.nn.dropout", "tensorflow.argmax", "tensorflow.nn.conv2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
granatumx/gbox-py
[ "b3e264a22bc6a041f2dd631d952eae29c0ecae21", "b3e264a22bc6a041f2dd631d952eae29c0ecae21" ]
[ "sample_coloring.py", "scanpy_normalization.py" ]
[ "#!/usr/bin/env python\n\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import ConvexHull\nfrom colour import Color\nfrom matplotlib.patches import Polygon\nimport statistics as st\n\nfrom granatum_sdk import Granatum\n\nCOLORS = [\"#3891ea\", \"#29ad19\", \"#ac2d58\", \"#db7580\", \"#ed2310\", \"#ca2dc2\", \"#5f7575\", \"#7cc1b5\", \"#c3bd78\", \"#4ffa24\"]\n\ndef main():\n gn = Granatum()\n sample_coords = gn.get_import(\"viz_data\")\n value = gn.get_import(\"value\")\n# print(value)\n coloring_type = gn.get_arg(\"coloring_type\")\n bounding_stdev = gn.get_arg(\"bounding_stdev\")\n\n coords = sample_coords.get(\"coords\")\n dim_names = sample_coords.get(\"dimNames\")\n\n df = pd.DataFrame(\n {\"x\": [a[0] for a in coords.values()], \"y\": [a[1] for a in coords.values()], \"value\": pd.Series(value)},\n index=coords.keys()\n )\n\n# print(df)\n\n if coloring_type == \"categorical\":\n uniq = df[\"value\"].unique();\n num = uniq.shape[0]\n COLORS2 = plt.get_cmap('gist_rainbow')\n carr = [0]*df.shape[0]\n listcats = list(df[\"value\"]) \n miny = min(list(df[\"y\"]))\n maxy = max(list(df[\"y\"]))\n scaley = (maxy-miny)/650\n print(\"Scaley = {}\".format(scaley))\n\n for i, cat in enumerate(df[\"value\"].unique()):\n dff = df[df[\"value\"] == cat]\n xs = list(dff[\"x\"])\n ys = list(dff[\"y\"])\n #avgx = sum(dff[\"x\"]) / len(dff[\"x\"]) \n #avgy = sum(dff[\"y\"]) / len(dff[\"y\"]) \n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=COLORS[i].hex_l, label=cat)\n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=[abs(hash(cat)) % 256]*len(dff[\"x\"]), cmap=COLORS2, label=cat)\n #plt.scatter(x=dff[\"x\"], y=dff[\"y\"], s=5000 / df.shape[0], c=abs(hash(cat)) % 256, cmap=COLORS2, label=cat)\n colorindex = abs(hash(cat)) % 256\n craw = COLORS2(colorindex/255.0)\n color = (craw[0], craw[1], craw[2], 0.2)\n whitetransparent = (1, 1, 1, 0.5)\n coloropaque = (craw[0], craw[1], craw[2], 1.0)\n if len(xs)>3:\n pts = list(zip(xs, ys))\n cent = np.mean(pts, axis=0)\n lengs = list(map(lambda p: math.sqrt((p[0]-cent[0])*(p[0]-cent[0])+(p[1]-cent[1])*(p[1]-cent[1])), pts))\n avgleng = st.mean(lengs)\n stdleng = st.stdev(lengs)*bounding_stdev\n rpts = []\n if(stdleng > 0.0):\n for j, ln in enumerate(lengs):\n if(ln - avgleng < stdleng):\n rpts.append(pts[j])\n pts = rpts\n cent = np.mean(pts, axis=0)\n hull = ConvexHull(pts)\n ptslist = []\n for pt in hull.simplices:\n ptslist.append(pts[pt[0]])\n ptslist.append(pts[pt[1]])\n ptslist.sort(key=lambda p: np.arctan2(p[1]-cent[1], p[0]-cent[0]))\n ptslist = ptslist[0::2]\n ptslist.insert(len(ptslist), ptslist[0])\n lowestpt = ptslist[0]\n for pt in ptslist:\n if(pt[1] < lowestpt[1]):\n lowestpt = pt\n poly = Polygon(1.1*(np.array(ptslist)-cent)+cent, facecolor=color)\n poly.set_capstyle('round')\n plt.gca().add_patch(poly)\n plt.text(lowestpt[0], lowestpt[1]-scaley*10, cat, fontsize=6, ha=\"center\", va=\"center\", color=\"black\", bbox=dict(boxstyle=\"round\",fc=whitetransparent,ec=coloropaque))\n for j,x in enumerate(listcats):\n if x == cat:\n carr[j] = int(abs(hash(cat)) % 256)\n \n plt.scatter(x=df[\"x\"], y=df[\"y\"], s=5000 / df.shape[0], c=carr, cmap=COLORS2)\n lgd = plt.legend(markerscale=6, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=5)\n#60 / (5000 / df.shape[0])\n elif coloring_type == \"continuous\":\n plt.scatter(x=df[\"x\"], y=df[\"y\"], s=5000 / df.shape[0], c=df[\"value\"], cmap=\"Reds\")\n plt.colorbar()\n\n plt.xlabel(dim_names[0])\n 
plt.ylabel(dim_names[1])\n # plt.tight_layout()\n\n gn.add_current_figure_to_results(\n \"Scatter-plot\",\n dpi=75,\n width=750,\n height=650,\n# savefig_kwargs={'bbox_extra_artists': (lgd,), 'bbox_inches': 'tight'}\n savefig_kwargs={'bbox_inches': 'tight'}\n )\n\n gn.commit()\n\n\nif __name__ == \"__main__\":\n main()\n", "from itertools import combinations\n\nimport multiprocessing\nimport scanpy.api as sc\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.preprocessing import quantile_transform\nfrom scipy.sparse import csc_matrix\n\nfrom granatum_sdk import Granatum\n\n# import pandas as pd\n# import seaborn as sns\n\n\nnans = np.array([np.nan, np.nan])\nzeros = np.array([0, 0])\n\n\ndef trim_extreme(x, a, b):\n low = np.percentile(x, a)\n high = np.percentile(x, b)\n filtered = x[(x > low) & (x < high)]\n return filtered.copy()\n\n\ndef make_plot(adata, log_trans=False):\n violin_data = []\n for cell in adata.X:\n filtered = cell.toarray().flatten()\n #filtered = trim_extreme(filtered, 5, 95)\n if log_trans:\n #cell = np.log1p(cell)\n filtered = np.log1p(filtered)\n if filtered.shape[0] == 0:\n #cell = zeros\n filtered = zeros\n\n violin_data.append(filtered)\n\n plt.figure()\n plt.boxplot(violin_data)\n plt.xlabel('Cells')\n plt.ylabel('Expression lvl (log transformed)')\n plt.tight_layout()\n\ndef quantile_normalization(mat):\n # double argsort for getting the corresponding ranks for\n # each element in the vector\n\n rank_mat = np.argsort(np.argsort(mat, 1), 1)\n medians = np.median(np.sort(mat, 1), 0)\n normalized = np.zeros_like(mat)\n\n for i in range(rank_mat.shape[0]):\n normalized[i, :] = medians[rank_mat[i, :]]\n\n # normalized = quantile_transform(mat, copy=False)\n\n #return normalized.tolist()\n return sc.AnnData(csc_matrix(normalized))\n\n\ndef main():\n gn = Granatum()\n\n adata = gn.ann_data_from_assay(gn.get_import('assay'))\n num_cells_to_sample = gn.get_arg('num_cells_to_sample')\n method = gn.get_arg('method')\n log_trans_when_plot = gn.get_arg('log_trans_when_plot')\n\n if num_cells_to_sample > adata.shape[0]:\n num_cells_to_sample = adata.shape[0]\n\n sampled_cells_idxs = np.sort(np.random.choice(adata.shape[0], num_cells_to_sample, replace=False))\n\n make_plot(adata[sampled_cells_idxs, :], log_trans=log_trans_when_plot)\n gn.add_current_figure_to_results(\n 'Before normalization: Each bar in the box plot represents one cell. Only cells between the 5th and 95th percentile are shown.',\n height=350,\n dpi=75 * 40 / max(40, num_cells_to_sample)\n )\n\n if method == 'quantile':\n adata = quantile_normalization(adata.X.toarray())\n elif method == 'scanpy':\n sc.pp.normalize_total(adata)\n else:\n raise ValueError()\n\n make_plot(adata[sampled_cells_idxs, :], log_trans=log_trans_when_plot)\n gn.add_current_figure_to_results(\n 'After normalization: Each bar in the box plot represents one cell. Only cells between the 5th and 95th percentile are shown.',\n height=350,\n dpi=75 * 40 / max(40, num_cells_to_sample)\n )\n\n gn.export_statically(gn.assay_from_ann_data(adata), 'Normalized assay')\n\n gn.commit()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "pandas.Series", "matplotlib.pyplot.scatter", "matplotlib.pyplot.get_cmap", "numpy.arctan2", "matplotlib.pyplot.colorbar", "numpy.mean", "scipy.spatial.ConvexHull", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ], [ "matplotlib.pyplot.boxplot", "scipy.sparse.csc_matrix", "matplotlib.pyplot.tight_layout", "numpy.random.choice", "numpy.percentile", "numpy.sort", "numpy.log1p", "matplotlib.pyplot.ylabel", "numpy.zeros_like", "numpy.argsort", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Bheshaj-Kumar/Transformer-Grapheme-to-Phoneme-Conversion
[ "cc1ff53498cf9d178e1880b5d074ec91559ac95a" ]
[ "model/new_models.py" ]
[ "import sys\nimport numpy as np\nimport tensorflow as tf\nfrom model.transformer_utils import create_encoder_padding_mask, create_mel_padding_mask, create_look_ahead_mask\n#from preprocessing.text import Pipeline\nfrom model.layers import PreBottleNeckDecoder, Encoder, Decoder, SpeakerModule\nfrom utils.losses import model_loss, crossentropy_loss\nimport data_utils \n\nclass Transformer(tf.keras.models.Model):\n\n def __init__(self,\n encoder_model_dimension : int,\n decoder_model_dimension : int,\n encoder_num_heads : int,\n decoder_num_heads : int,\n encoder_num_layers : int,\n decoder_num_layers : int,\n encoder_maximum_position_encoding : int,\n decoder_maximum_position_encoding : int,\n encoder_feed_forward_dimension : int,\n decoder_feed_forward_dimension : int,\n dropout_rate : int,\n encoder_vocab_size : int,\n decoder_vocab_size : int,\n debug : bool,\n diagonal_bandwidth_b : int,\n diagonal_rate_regul_coeff : float,\n layernorm : bool,\n Ldc : bool,\n buckets: list,\n training: bool,\n epoch_path: str,\n **kwargs\n ):\n \n super(Transformer, self).__init__(**kwargs)\n self.Ldc = Ldc\n self.buckets = buckets\n self.b = diagonal_bandwidth_b\n self.lamda = diagonal_rate_regul_coeff\n self.isTraining = training\n if self.isTraining:\n try:\n print(\"\\n\\n\")\n with open(epoch_path, 'r') as f:\n self.epoch = int(f.readlines()[0])\n print(\"Successfully loaded epoch count.\")\n except:\n print(\"Could not load epoch path : %s\" % epoch_path)\n print(\"Creating a file with default epoch = 1\")\n with open(epoch_path, 'w+') as f:\n f.write(str(1))\n print(\"Successfully created epoch file.\")\n self.epoch = 1\n self.epoch_path = epoch_path\n\n self.encoder = Encoder(d_model = encoder_model_dimension,\n num_heads = encoder_num_heads,\n num_layers = encoder_num_layers,\n dff = encoder_feed_forward_dimension,\n input_vocab_size = encoder_vocab_size, #change\n maximum_position_encoding = encoder_maximum_position_encoding,\n layernorm= layernorm,\n rate = dropout_rate,\n name = 'Encoder')\n\n self.decoder = Decoder(d_model = decoder_model_dimension,\n num_heads = decoder_num_heads,\n dff = decoder_feed_forward_dimension,\n maximum_position_encoding = decoder_maximum_position_encoding,\n output_vocab_size = decoder_vocab_size, #change\n num_layers = decoder_num_layers,\n rate = dropout_rate,\n name = 'Decoder')\n \n self.decoder_layers = decoder_num_layers\n self.regul_coeff = diagonal_rate_regul_coeff\n self.loss_bandwidth = diagonal_bandwidth_b\n self.decoder_model_dim = decoder_model_dimension\n self.linear = tf.keras.layers.Dense(decoder_vocab_size, name= 'linear')\n ## remaining ##\n self.training_input_signature = [\n tf.TensorSpec(shape=(None, None), dtype = tf.int32),\n tf.TensorSpec(shape=(None), dtype = tf.int64),\n tf.TensorSpec(shape=(None, None), dtype = tf.int32),\n tf.TensorSpec(shape=(None), dtype = tf.int64),\n\n ]\n\n self.forward_input_signature = [\n tf.TensorSpec(shape=(None, None), dtype= tf.int32),\n tf.TensorSpec(shape=(None, 1), dtype= tf.int32),\n #tf.TensorSpec(shape=(None, None, mel_channels), dtype = tf.float32)\n ]\n\n self.encoder_signature = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int32)\n ]\n\n self.decoder_signature = [\n tf.TensorSpec(shape=(None, None, encoder_model_dimension), dtype = tf.float32),\n tf.TensorSpec(shape=(None, None), dtype=tf.float32),\n tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32)\n ]\n\n self.debug = debug\n self._apply_all_signatures()\n\n @property\n def step(self):\n return int(self.optimizer.iterations)\n\n 
def _apply_signature(self, function, signature):\n if self.debug:\n return function\n else:\n return tf.function(input_signature=signature)(function)\n def increment_epoch(self):\n self.epoch += 1\n with open(self.epoch_path,'w') as f:\n f.write(str(self.epoch))\n\n def call(self):\n self._apply_all_signature()\n\n def _apply_all_signatures(self):\n #self.forward = self._apply_signature(self._forward, self.forward_input_signature)\n self.train_step = self._apply_signature(self._train_step, self.training_input_signature)\n self.val_step = self._apply_signature(self._val_step, self.training_input_signature)\n self.forward_encoder = self._apply_signature(self._forward_encoder, self.encoder_signature)\n self.forward_decoder = self._apply_signature(self._forward_decoder, self.decoder_signature)\n\n def _call_encoder(self, inputs, training):\n padding_mask = create_encoder_padding_mask(inputs)\n enc_input = inputs \n enc_output, attn_weights = self.encoder(enc_input,\n training= training,\n mask = padding_mask)\n return enc_output, padding_mask, attn_weights\n\n def _call_decoder(self, dec_input, enc_output, enc_padding_mask, training):\n dec_target_padding_mask = create_mel_padding_mask(dec_input)\n look_ahead_mask = create_look_ahead_mask(tf.shape(dec_input)[1])\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n dec_output, attention_weights = self.decoder(x = dec_input,\n enc_output = enc_output,\n training = training,\n look_ahead_mask = combined_mask,\n padding_mask = enc_padding_mask\n )\n linear = self.linear(dec_output) \n model_out = {'linear':linear, 'decoder_attention':attention_weights, 'decoder_output':dec_output}\n return model_out\n\n def _forward(self, inp, output): # not getting used\n model_out = self.__call__(inputs = inp,\n speaker_input = sp_id,\n targets = output,\n training = False) \n \n return model_out\n\n def _forward_encoder(self, inputs):\n return self._call_encoder(inputs, training = False)\n \n def _forward_decoder(self, encoder_output, targets, encoder_padding_mask):\n return self._call_decoder(targets, encoder_output, encoder_padding_mask, training = False) \n\n def _gta_forward(self, encoder_inputs, seq_len, decoder_inputs, seq_len_target, training):\n tar_inp = decoder_inputs[:,:-1]\n tar_real = decoder_inputs[:,1:]\n\n seq_len = int(tf.shape(tar_inp)[1]) \n\n with tf.GradientTape() as tape:\n model_out = self.__call__(inputs= encoder_inputs,\n targets = tar_inp,\n training = training)\n\n loss = model_loss(tar_real, \n model_out['linear']\n )\n model_out.update({'loss' : loss})\n model_out.update({'target': tar_inp}) \n return model_out, tape\n\n def _train_step(self, encoder_inputs, seq_len, _decoder_inputs, seq_len_target): \n model_out, tape = self._gta_forward(encoder_inputs, seq_len, _decoder_inputs, seq_len_target, training= True)\n gradients = tape.gradient(model_out['loss'], self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n return model_out\n\n def _val_step(self, encoder_inputs, seq_len, decoder_inputs, seq_len_target):\n model_out, _ = self._gta_forward(encoder_inputs, seq_len, decoder_inputs, seq_len_target, training= False)\n return model_out\n\n def _compile(self, optimizer):\n self.compile(loss = crossentropy_loss,\n optimizer = optimizer)\n\n def call(self, inputs, targets, training): \n encoder_output, encoder_padding_mask, encoder_attention = self._call_encoder(inputs, training=training) \n model_out = self._call_decoder(targets, encoder_output, encoder_padding_mask, 
training= training)\n model_out.update({'encoder_attention' : encoder_attention})\n return model_out\n\n def _compute_centroid(self, attention, win_c, s):\n attention = tf.reduce_mean(attention, axis=1)\n if s >= attention.shape[-1]:\n return win_c\n C_s = tf.cast(tf.reduce_sum(attention, 1).numpy()[:, s], tf.int32)\n if win_c + 3 * self.r >= C_s:\n return win_c + 3*self.r\n return win_c\n\n def predict(self, encoder_input, max_length = 20, verbose = True):\n #print(inp.shape)\n start_vec = tf.convert_to_tensor(tf.constant([data_utils.GO_ID]), dtype= tf.int32)\n inp = tf.cast(tf.expand_dims(encoder_inp, 0), tf.int32) \n output = tf.cast(tf.expand_dims(start_vec, 0), tf.int32)\n output_concat = tf.cast(tf.expand_dims(start_vec, 0), tf.int32) \n out_dict = {}\n encoder_output, encoder_padding_mask, encoder_attention = self.forward_encoder(encoder_inp) \n \n for i in range(max_length + 1):\n model_out = self.forward_decoder(encoder_output, output, encoder_padding_mask)\n output = tf.concat([output, model_out['mel_linear'][:1, -1:, :]], axis=-2) \n output_concat = tf.concat([tf.cast(output_concat, tf.int32), model_out['mel_linear'][:1, -1:, :]],\n axis=-2) ####### UNCLEAR -SELF.R ##########\n out_dict = {'linear': output_concat[0, 1:, :],\n 'decoder_attention': model_out['decoder_attention'],\n 'encoder_attention': encoder_attention}\n predictions = model_out['mel_linear'][:,-1:,:]\n prediction_id = tf.cast(tf.argmax(predictions, axis = -1), dtype= tf.int32)\n if verbose:\n sys.stdout.write(f'\\rpred word phoneme: {i}')\n if prediction_id == data_utils.EOS_ID:\n if verbose:\n print('Stopping')\n break\n \n return out_dict\n\n def set_constants(self, learning_rate: float= None):\n if learning_rate is not None:\n self.optimizer.lr.assign(learning_rate)\n\n def get_batch(self, data, bucket_id=None):\n \"\"\"Prepare minibatch from given data.\n Args:\n data: A list of datapoints (all from same bucket).\n bucket_id: Bucket ID of data. This is irrevelant for training but\n for evaluation we can limit the padding by the bucket size.\n Returns:\n Batched input IDs, input sequence length, output IDs & output\n sequence length\n \"\"\"\n #if not self.isTraining:\n # # During evaluation the bucket size limits the amount of padding\n # _, decoder_size = self.buckets[bucket_id]\n \n _, decoder_size = tf.cond(tf.math.equal(self.isTraining, False), lambda:self.buckets[bucket_id])\n encoder_inputs, decoder_inputs = [], []\n batch_size = len(data)\n\n #seq_len = np.zeros((batch_size), dtype=np.int64)\n #seq_len_target = np.zeros((batch_size), dtype=np.int64)\n\n seq_len = tf.zeros((batch_size), dtype=tf.int64)\n seq_len_target = tf.zeros((batch_size), dtype=tf.int64)\n \n for i, sample in enumerate(data):\n encoder_input, decoder_input = sample\n seq_len[i] = len(encoder_input)\n if not self.isTraining:\n seq_len_target[i] = decoder_size\n else:\n # 1 is added to output sequence length because the EOS token is\n # crucial to \"halt\" the decoder. Consider it the punctuation\n # mark of a English sentence. 
Both are necessary.\n seq_len_target[i] = len(decoder_input) + 1\n\n # Maximum input and output length which limit the padding till them\n max_len_source = max(seq_len)\n max_len_target = max(seq_len_target)\n\n for i, sample in enumerate(data):\n encoder_input, decoder_input = sample\n # Encoder inputs are padded and then reversed.\n encoder_pad_size = max_len_source - len(encoder_input)\n encoder_pad = [data_utils.PAD_ID] * encoder_pad_size\n # Encoder input is reversed - https://arxiv.org/abs/1409.3215\n #encoder_inputs.append(list(reversed(encoder_input)) + encoder_pad) \n\n encoder_inputs.append(encoder_input + encoder_pad) # removed reversed\n # 1 is added to decoder_input because GO_ID is considered a part of\n # decoder input. While EOS_ID is also added, it's really used by\n # the target tensor (self.tensor) in the core code above.\n decoder_pad_size = max_len_target - (len(decoder_input) + 1)\n decoder_inputs.append([data_utils.GO_ID] +\n decoder_input +\n [data_utils.EOS_ID] +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Both the id sequences are made time major via transpose\n encoder_inputs = np.asarray(encoder_inputs, dtype=np.int32)# changed transpose\n decoder_inputs = np.asarray(decoder_inputs, dtype=np.int32)# same\n return tf.convert_to_tensor(encoder_inputs, dtype= tf.int32), tf.convert_to_tensor(seq_len, dtype= tf.int64), tf.convert_to_tensor(decoder_inputs,dtype= tf.int32),tf.convert_to_tensor(seq_len_target, dtype= tf.int64)\n\n\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.concat", "tensorflow.zeros", "numpy.asarray", "tensorflow.keras.layers.Dense", "tensorflow.maximum", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.expand_dims", "tensorflow.cast", "tensorflow.reduce_sum", "tensorflow.math.equal", "tensorflow.function", "tensorflow.argmax", "tensorflow.TensorSpec", "tensorflow.GradientTape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13" ] } ]
cvanoort/differentiable-plasticity
[ "28c53765ed38f80fd5a5c49e3e62a0e6555eb669" ]
[ "maze/plotfigure.py" ]
[ "# Code for making a figure\n#\n# Copyright (c) 2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy import stats\n\ncolorz = ['r', 'b', 'g', 'c', 'm', 'y', 'orange', 'k']\n\ngroupnames = glob.glob('./loss_*rngseed_0.txt')\n# groupnames = glob.glob('./loss_*new*eplen_250*rngseed_0.txt')\n\nplt.rc('font', size=14)\n\n\ndef mavg(x, N):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / N\n\n\nplt.ion()\n# plt.figure(figsize=(5,4)) # Smaller figure = relative larger fonts\nplt.figure()\n\nallmedianls = []\nalllosses = []\nposcol = 0\nminminlen = 999999\nfor numgroup, groupname in enumerate(groupnames):\n if \"lstm\" in groupname:\n continue\n g = groupname[:-6] + \"*\"\n print(\"====\", groupname)\n fnames = glob.glob(g)\n fulllosses = []\n losses = []\n lgts = []\n for fn in fnames:\n z = np.loadtxt(fn)\n\n # For each run, we average the losses over K successive episodes - otherwise figure is unreadable due to noise!\n z = mavg(z, 10)\n\n z = z[::10] # Decimation - speed things up!\n\n z = z[:2001]\n\n if len(z) < 1000:\n print(fn)\n continue\n # z = z[:90]\n lgts.append(len(z))\n fulllosses.append(z)\n minlen = min(lgts)\n if minlen < minminlen:\n minminlen = minlen\n print(minlen)\n # if minlen < 1000:\n # continue\n for z in fulllosses:\n losses.append(z[:minlen])\n\n losses = np.array(losses)\n alllosses.append(losses)\n\n meanl = np.mean(losses, axis=0)\n stdl = np.std(losses, axis=0)\n # cil = stdl / np.sqrt(losses.shape[0]) * 1.96 # 95% confidence interval - assuming normality\n cil = stdl / np.sqrt(\n losses.shape[0]) * 2.5 # 95% confidence interval - approximated with the t-distribution for 7 d.f. 
(?)\n\n medianl = np.median(losses, axis=0)\n allmedianls.append(medianl)\n q1l = np.percentile(losses, 25, axis=0)\n q3l = np.percentile(losses, 75, axis=0)\n\n highl = np.max(losses, axis=0)\n lowl = np.min(losses, axis=0)\n # highl = meanl+stdl\n # lowl = meanl-stdl\n\n xx = range(len(meanl))\n\n # xticks and labels\n xt = range(0, len(meanl), 500)\n xtl = [str(10 * 10 * i) for i in\n xt] # Because of decimation above, and only every 10th loss is recorded in the files\n\n if \"plastic\" in groupname:\n lbl = \"Plastic\"\n elif \"rnn\" in groupname:\n lbl = \"Non-plastic\"\n\n # plt.plot(mavg(meanl, 100), label=g) #, color='blue')\n # plt.fill_between(xx, lowl, highl, alpha=.2)\n # plt.fill_between(xx, q1l, q3l, alpha=.1)\n # plt.plot(meanl) #, color='blue')\n ####plt.plot(mavg(medianl, 100), label=g) #, color='blue') # mavg changes the number of points !\n # plt.plot(mavg(q1l, 100), label=g, alpha=.3) #, color='blue')\n # plt.plot(mavg(q3l, 100), label=g, alpha=.3) #, color='blue')\n # plt.fill_between(xx, q1l, q3l, alpha=.2)\n # plt.plot(medianl, label=g) #, color='blue')\n\n AVGSIZE = 1\n\n xlen = len(mavg(q1l, AVGSIZE))\n plt.plot(mavg(medianl, AVGSIZE), color=colorz[poscol % len(colorz)],\n label=lbl) # mavg changes the number of points !\n plt.fill_between(range(xlen), mavg(q1l, AVGSIZE), mavg(q3l, AVGSIZE), alpha=.2, color=colorz[poscol % len(colorz)])\n\n # xlen = len(mavg(meanl, AVGSIZE))\n # plt.plot(mavg(meanl, AVGSIZE), label=g, color=colorz[poscol % len(colorz)]) # mavg changes the number of points !\n # plt.fill_between( range(xlen), mavg(meanl - cil, AVGSIZE), mavg(meanl + cil, AVGSIZE), alpha=.2, color=colorz[poscol % len(colorz)])\n\n poscol += 1\n\n # plt.fill_between( range(xlen), mavg(lowl, 100), mavg(highl, 100), alpha=.2, color=colorz[numgroup % len(colorz)])\n\n # plt.plot(mavg(losses[0], 1000), label=g, color=colorz[numgroup % len(colorz)])\n # for curve in losses[1:]:\n # plt.plot(mavg(curve, 1000), color=colorz[numgroup % len(colorz)])\n\nps = []\n# Adapt for varying lengths across groups\n# for n in range(0, alllosses[0].shape[1], 3):\nfor n in range(0, minminlen):\n ps.append(scipy.stats.ranksums(alllosses[0][:, n], alllosses[1][:, n]).pvalue)\nps = np.array(ps)\nnp.mean(ps[-500:] < .05)\nnp.mean(ps[-500:] < .01)\n\nplt.legend(loc='best', fontsize=14)\n# plt.xlabel('Loss (sum square diff. b/w final output and target)')\nplt.xlabel('Number of Episodes')\nplt.ylabel('Reward')\nplt.xticks(xt, xtl)\n# plt.tight_layout()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.min", "numpy.median", "matplotlib.pyplot.rc", "numpy.percentile", "numpy.max", "matplotlib.pyplot.ylabel", "numpy.std", "numpy.mean", "numpy.insert", "numpy.loadtxt", "scipy.stats.ranksums", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.ion", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
greenelab/phenoplier
[ "95f04b17f0b5227560fcf32ac0a85b2c5aa9001f", "95f04b17f0b5227560fcf32ac0a85b2c5aa9001f", "95f04b17f0b5227560fcf32ac0a85b2c5aa9001f", "95f04b17f0b5227560fcf32ac0a85b2c5aa9001f" ]
[ "nbs/13_consensus_clustering/py/030_03-analysis-coassociation.py", "nbs/99_manuscript/lvs/lv5/py/lv5-traits_assocs.py", "nbs/99_manuscript/lvs/lv844/py/lv844-pathways.py", "nbs/99_manuscript/lvs/lv844/py/lv844-cell_types.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# formats: ipynb,py//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# It analyzes how clusters of traits were grouped across the ensemble partitions. For example, a stable cluster (obtained from consensus partitions) of cardiovascular diseases can show that all traits were always grouped together across all partitions of the ensemble; another cluster might show that some traits were clustered more often than others, representing a less stable group of traits.\n\n# %% [markdown] tags=[]\n# # Modules loading\n\n# %% tags=[]\n# %load_ext autoreload\n# %autoreload 2\n\n# %% tags=[]\nfrom IPython.display import display\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom utils import generate_result_set_name\nimport conf\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %% tags=[]\nCONSENSUS_CLUSTERING_DIR = Path(\n conf.RESULTS[\"CLUSTERING_DIR\"], \"consensus_clustering\"\n).resolve()\n\ndisplay(CONSENSUS_CLUSTERING_DIR)\n\n# %% [markdown] tags=[]\n# ## Load data\n\n# %% tags=[]\nINPUT_SUBSET = \"umap\"\n\n# %% tags=[]\nINPUT_STEM = \"z_score_std-projection-smultixcan-efo_partial-mashr-zscores\"\n\n# %% tags=[]\nDR_OPTIONS = {\n \"n_components\": 5,\n \"metric\": \"euclidean\",\n \"n_neighbors\": 15,\n \"random_state\": 0,\n}\n\n# %% tags=[]\ninput_filepath = Path(\n conf.RESULTS[\"DATA_TRANSFORMATIONS_DIR\"],\n INPUT_SUBSET,\n generate_result_set_name(\n DR_OPTIONS, prefix=f\"{INPUT_SUBSET}-{INPUT_STEM}-\", suffix=\".pkl\"\n ),\n).resolve()\ndisplay(input_filepath)\n\nassert input_filepath.exists(), \"Input file does not exist\"\n\ninput_filepath_stem = input_filepath.stem\ndisplay(input_filepath_stem)\n\n# %% tags=[]\ndata_umap = pd.read_pickle(input_filepath)\n\n# %% tags=[]\ndata_umap.shape\n\n# %% tags=[]\ndata_umap.head()\n\n# %% [markdown] tags=[]\n# # Load best partitions\n\n# %% tags=[]\ninput_file = Path(CONSENSUS_CLUSTERING_DIR, \"best_partitions_by_k.pkl\").resolve()\ndisplay(input_file)\n\n# %% tags=[]\nbest_partitions = pd.read_pickle(input_file)\n\n# %% tags=[]\nbest_partitions.shape\n\n# %% tags=[]\nbest_partitions.head()\n\n# %% [markdown] tags=[]\n# # Load coassociation matrix\n\n# %% tags=[]\ninput_file = Path(CONSENSUS_CLUSTERING_DIR, \"ensemble_coassoc_matrix.npy\").resolve()\ndisplay(input_file)\n\n# %% tags=[]\ncoassoc_matrix = np.load(input_file)\n\n# %% tags=[]\ncoassoc_matrix = pd.DataFrame(\n data=1.0 - coassoc_matrix,\n index=data_umap.index.copy(),\n columns=data_umap.index.copy(),\n)\n\n# %% tags=[]\ncoassoc_matrix.shape\n\n# %% tags=[]\ncoassoc_matrix.head()\n\n# %% [markdown] tags=[]\n# The coassociation matrix shows the percentage of times a pair of traits was clustered together across the ensemble partitions.\n\n# %% [markdown] tags=[]\n# ## Stats\n\n# %% [markdown] tags=[]\n# Here I show some general stats of the coassociation matrix, useful to compare results below. 
For instance, if a pair of traits got clustered together 61% of the times, how strong is that?\n\n# %% tags=[]\ndf = coassoc_matrix.where(np.triu(np.ones(coassoc_matrix.shape)).astype(np.bool))\ndf = df.stack().reset_index()\n\ncoassoc_matrix_stats = df[0].describe(\n percentiles=[0.25, 0.50, 0.75, 0.80, 0.90, 0.95, 0.99]\n)\n\n# %% tags=[]\ncoassoc_matrix_stats.apply(str)\n\n# %% [markdown] tags=[]\n# On average, a pair of clusters appear together in 45% of the clusters in the ensemble (the median is 48%). That makes sense, since for some partitions the resolution (number of clusters) might not be enough to get smaller clusters.\n\n# %% [markdown] tags=[]\n# # Plot coassociation values\n\n# %% [markdown] tags=[]\n# ## Functions\n\n# %% tags=[]\nfrom IPython.display import HTML\n\n\n# %% tags=[]\ndef plot_cluster(data, partition, cluster_number, figsize=None):\n k = np.unique(partition).shape[0]\n\n display(HTML(f\"<h3>Cluster {k}.{cluster_number}</h3>\"))\n\n k_traits = data.loc[partition == cluster_number].index\n\n with sns.plotting_context(\"paper\"):\n f, ax = plt.subplots(figsize=figsize) # (figsize=(8, 8))\n\n display(\n sns.heatmap(\n data=coassoc_matrix.loc[k_traits, k_traits],\n vmin=coassoc_matrix_stats[\"50%\"],\n vmax=1.0,\n annot=True,\n fmt=\".2f\",\n square=True,\n )\n )\n\n\n# %% tags=[]\nk = 29\ndisplay(HTML(f\"<h2>k: {k}</h2>\"))\ndisplay(best_partitions.loc[k])\n\npart = best_partitions.loc[k, \"partition\"]\npart_stats = pd.Series(part).value_counts()\ndisplay(part_stats)\n\n# %% tags=[]\nplot_cluster(data_umap, part, 10)\n\n# %% [markdown] tags=[]\n# The plot above shows that these 8 keratometry measurements (such as 3mm weak meridian left) were always clustered together in all partitions of the ensemble, representing a very strong/stable grouping.\n\n# %% tags=[]\nplot_cluster(data_umap, part, 15, figsize=(10, 10))\n\n# %% [markdown] tags=[]\n# The \"heel bone mineral density\" cluster is not as strong as the keratometry one, since some trait pairs have a coassociation value of 0.89. However, 0.89 is quite higher than the 99 percentile of the coassociation values (which is 0.69).\n\n# %% tags=[]\n", "# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# formats: ipynb,py//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# Generates manubot tables for PhenomeXcan and eMERGE associations given an LV name (which is the only parameter that needs to be specified in the Settings section below).\n\n# %% [markdown] tags=[]\n# # Modules loading\n\n# %% tags=[]\n# %load_ext autoreload\n# %autoreload 2\n\n# %% tags=[]\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom entity import Trait\nimport conf\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %% tags=[\"parameters\"]\nLV_NAME = \"LV5\"\n\n# %%\nassert (\n conf.MANUSCRIPT[\"BASE_DIR\"] is not None\n), \"The manuscript directory was not configured\"\n\nOUTPUT_FILE_PATH = conf.MANUSCRIPT[\"CONTENT_DIR\"] / \"50.00.supplementary_material.md\"\ndisplay(OUTPUT_FILE_PATH)\nassert OUTPUT_FILE_PATH.exists()\n\n# %%\n# result_set is either phenomexcan or emerge\nLV_FILE_MARK_TEMPLATE = \"<!-- {lv}:{result_set}_traits_assocs:{position} -->\"\n\n# %%\nTABLE_CAPTION = \"Table: Significant trait associations of {lv_name} in {result_set_name}. 
{table_id}\"\n\n# %%\nTABLE_CAPTION_ID = \"#tbl:sup:{result_set}_assocs:{lv_name_lower_case}\"\n\n# %%\nRESULT_SET_NAMES = {\n \"phenomexcan\": \"PhenomeXcan\",\n \"emerge\": \"eMERGE\",\n}\n\n# %% [markdown] tags=[]\n# # Load data\n\n# %% [markdown]\n# ## PhenomeXcan LV-trait associations\n\n# %%\ninput_filepath = Path(conf.RESULTS[\"GLS\"] / \"gls_phenotypes-combined-phenomexcan.pkl\")\ndisplay(input_filepath)\n\n# %%\nphenomexcan_lv_trait_assocs = pd.read_pickle(input_filepath)\n\n# %%\nphenomexcan_lv_trait_assocs.shape\n\n# %%\nphenomexcan_lv_trait_assocs.head()\n\n# %% [markdown]\n# ## eMERGE LV-trait associations\n\n# %%\ninput_filepath = Path(conf.RESULTS[\"GLS\"] / \"gls_phenotypes-combined-emerge.pkl\")\ndisplay(input_filepath)\n\n# %%\nemerge_lv_trait_assocs = pd.read_pickle(input_filepath)\n\n# %%\nemerge_lv_trait_assocs.shape\n\n# %%\nemerge_lv_trait_assocs.head()\n\n# %% [markdown]\n# ## eMERGE traits info\n\n# %%\ninput_filepath = conf.EMERGE[\"DESC_FILE_WITH_SAMPLE_SIZE\"]\ndisplay(input_filepath)\n\n# %%\nemerge_traits_info = pd.read_csv(\n input_filepath,\n sep=\"\\t\",\n dtype={\"phecode\": str},\n usecols=[\n \"phecode\",\n \"phenotype\",\n \"category\",\n \"eMERGE_III_EUR_case\",\n \"eMERGE_III_EUR_control\",\n ],\n)\n\n# %%\nemerge_traits_info = emerge_traits_info.set_index(\"phecode\")\n\n# %%\nemerge_traits_info = emerge_traits_info.rename(\n columns={\n \"eMERGE_III_EUR_case\": \"eur_n_cases\",\n \"eMERGE_III_EUR_control\": \"eur_n_controls\",\n }\n)\n\n# %%\nemerge_traits_info.shape\n\n# %%\nemerge_traits_info.head()\n\n# %%\nassert emerge_traits_info.index.is_unique\n\n# %% [markdown]\n# # Trait associations\n\n\n# %% [markdown]\n# ## PhenomeXcan\n\n# %%\nfrom traits import SHORT_TRAIT_NAMES\n\n# %%\nresult_set = \"phenomexcan\"\n\n# %%\ndef get_trait_objs(phenotype_full_code):\n if Trait.is_efo_label(phenotype_full_code):\n traits = Trait.get_traits_from_efo(phenotype_full_code)\n else:\n traits = [Trait.get_trait(full_code=phenotype_full_code)]\n\n # sort by sample size\n return sorted(traits, key=lambda x: x.n_cases / x.n, reverse=True)\n\n\ndef get_trait_description(phenotype_full_code):\n traits = get_trait_objs(phenotype_full_code)\n\n desc = traits[0].description\n if desc in SHORT_TRAIT_NAMES:\n return SHORT_TRAIT_NAMES[desc]\n\n return desc\n\n\ndef get_trait_n(phenotype_full_code):\n traits = get_trait_objs(phenotype_full_code)\n\n return traits[0].n\n\n\ndef get_trait_n_cases(phenotype_full_code):\n traits = get_trait_objs(phenotype_full_code)\n\n return traits[0].n_cases\n\n\ndef num_to_int_str(num):\n if pd.isnull(num):\n return \"\"\n\n return f\"{num:,.0f}\"\n\n\ndef get_part_clust(row):\n return f\"{row.part_k} / {row.cluster_id}\"\n\n\n# %%\nlv_assocs = phenomexcan_lv_trait_assocs[\n (phenomexcan_lv_trait_assocs[\"lv\"] == LV_NAME)\n & (phenomexcan_lv_trait_assocs[\"fdr\"] < 0.05)\n].sort_values(\"fdr\")\n\n# %%\nwith pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None, \"display.max_colwidth\", None\n):\n display(lv_assocs)\n\n# %%\nlv_assocs = lv_assocs.assign(\n phenotype_desc=lv_assocs[\"phenotype\"].apply(get_trait_description)\n)\n\n# %%\nlv_assocs = lv_assocs.assign(n=lv_assocs[\"phenotype\"].apply(get_trait_n))\n\n# %%\nlv_assocs = lv_assocs.assign(n_cases=lv_assocs[\"phenotype\"].apply(get_trait_n_cases))\n\n# %%\nlv_assocs = lv_assocs.assign(coef=lv_assocs[\"coef\"].apply(lambda x: f\"{x:.3f}\"))\n\n# %%\nlv_assocs = lv_assocs.assign(\n fdr=lv_assocs[\"fdr\"].apply(lambda x: f\"{x:.2e}\".replace(\"-\", 
\"&#8209;\"))\n)\n\n# %%\nlv_assocs = lv_assocs.assign(n=lv_assocs[\"n\"].apply(num_to_int_str))\n\n# %%\nlv_assocs = lv_assocs.assign(n_cases=lv_assocs[\"n_cases\"].apply(num_to_int_str))\n\n# %%\nlv_assocs = lv_assocs.assign(part_clust=lv_assocs.apply(get_part_clust, axis=1))\n\n# %%\nlv_assocs = lv_assocs.drop(columns=[\"phenotype\"])\n\n# %%\nlv_assocs.shape\n\n# %%\nlv_assocs = lv_assocs[[\"phenotype_desc\", \"n\", \"n_cases\", \"part_clust\", \"fdr\"]]\n\n# %%\nlv_assocs = lv_assocs.rename(\n columns={\n \"part_clust\": \"Partition / cluster\",\n \"lv\": \"Latent variable (LV)\",\n # \"coef\": r\"$\\beta$\",\n \"fdr\": \"FDR\",\n \"phenotype_desc\": \"Trait description\",\n \"n\": \"Sample size\",\n \"n_cases\": \"Cases\",\n }\n)\n\n# %%\nwith pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None, \"display.max_colwidth\", None\n):\n display(lv_assocs)\n\n# %% [markdown]\n# ### Fill empty\n\n# %%\nif lv_assocs.shape[0] == 0:\n lv_assocs.loc[0, \"Trait description\"] = \"No significant associations\"\n lv_assocs = lv_assocs.fillna(\"\")\n\n# %% [markdown]\n# ### Save\n\n# %%\n# start\nlv_file_mark_start = LV_FILE_MARK_TEMPLATE.format(\n result_set=result_set, lv=LV_NAME, position=\"start\"\n)\ndisplay(lv_file_mark_start)\n\n# end\nlv_file_mark_end = LV_FILE_MARK_TEMPLATE.format(\n result_set=result_set, lv=LV_NAME, position=\"end\"\n)\ndisplay(lv_file_mark_end)\n\n# %%\nnew_content = lv_assocs.to_markdown(index=False, disable_numparse=True)\n\n# %%\n# add table caption\ntable_caption = TABLE_CAPTION.format(\n lv_name=LV_NAME,\n result_set_name=RESULT_SET_NAMES[result_set],\n table_id=\"{\"\n + TABLE_CAPTION_ID.format(result_set=result_set, lv_name_lower_case=LV_NAME.lower())\n + \"}\",\n)\ndisplay(table_caption)\n\n# %%\nnew_content += \"\\n\\n\" + table_caption\n\n# %%\nfull_new_content = (\n lv_file_mark_start + \"\\n\" + new_content.strip() + \"\\n\" + lv_file_mark_end\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"r\", encoding=\"utf8\") as f:\n file_content = f.read()\n\n# %%\nnew_file_content = re.sub(\n lv_file_mark_start + \".*?\" + lv_file_mark_end,\n full_new_content,\n file_content,\n flags=re.DOTALL,\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"w\", encoding=\"utf8\") as f:\n f.write(new_file_content) # .replace(\"\\beta\", r\"\\beta\"))\n\n# %% [markdown]\n# ## eMERGE\n\n# %%\nresult_set = \"emerge\"\n\n# %%\nTABLE_CAPTION = (\n \"Table: Trait associations of {lv_name} in {result_set_name}. 
{table_id}\"\n)\n\n# %%\nlv_assocs = emerge_lv_trait_assocs[\n (emerge_lv_trait_assocs[\"lv\"] == LV_NAME) & (emerge_lv_trait_assocs[\"fdr\"] < 0.10)\n].sort_values(\"fdr\")\n\n# %%\nwith pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None, \"display.max_colwidth\", None\n):\n display(lv_assocs)\n\n# %%\nlv_assocs = lv_assocs.assign(\n phenotype_desc=lv_assocs[\"phenotype\"].apply(\n lambda x: emerge_traits_info.loc[x, \"phenotype\"]\n )\n)\n\n# %%\nlv_assocs = lv_assocs.assign(\n n=lv_assocs[\"phenotype\"].apply(\n lambda x: emerge_traits_info.loc[x, [\"eur_n_cases\", \"eur_n_controls\"]].sum()\n )\n)\n\n# %%\nlv_assocs = lv_assocs.assign(\n n_cases=lv_assocs[\"phenotype\"].apply(\n lambda x: emerge_traits_info.loc[x, \"eur_n_cases\"]\n )\n)\n\n# %%\nlv_assocs = lv_assocs.assign(coef=lv_assocs[\"coef\"].apply(lambda x: f\"{x:.3f}\"))\n\n# %%\nlv_assocs = lv_assocs.assign(\n fdr=lv_assocs[\"fdr\"].apply(lambda x: f\"{x:.2e}\".replace(\"-\", \"&#8209;\"))\n)\n\n# %%\nlv_assocs = lv_assocs.assign(n=lv_assocs[\"n\"].apply(num_to_int_str))\n\n# %%\nlv_assocs = lv_assocs.assign(n_cases=lv_assocs[\"n_cases\"].apply(num_to_int_str))\n\n# %%\nlv_assocs = lv_assocs.rename(columns={\"phenotype\": \"phecode\"})\n\n# %%\nlv_assocs.shape\n\n# %%\nlv_assocs = lv_assocs[[\"phecode\", \"phenotype_desc\", \"n\", \"n_cases\", \"fdr\"]]\n\n# %%\nlv_assocs = lv_assocs.rename(\n columns={\n \"lv\": \"Latent variable (LV)\",\n # \"coef\": r\"$\\beta$\",\n \"fdr\": \"FDR\",\n \"phecode\": \"Phecode\",\n \"phenotype_desc\": \"Trait description\",\n \"n\": \"Sample size\",\n \"n_cases\": \"Cases\",\n }\n)\n\n# %%\nwith pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None, \"display.max_colwidth\", None\n):\n display(lv_assocs)\n\n# %% [markdown]\n# ### Fill empty\n\n# %%\nif lv_assocs.shape[0] == 0:\n lv_assocs.loc[0, \"Phecode\"] = \"No significant associations\"\n lv_assocs = lv_assocs.fillna(\"\")\n\n# %% [markdown]\n# ### Save\n\n# %%\n# start\nlv_file_mark_start = LV_FILE_MARK_TEMPLATE.format(\n result_set=result_set, lv=LV_NAME, position=\"start\"\n)\ndisplay(lv_file_mark_start)\n\n# end\nlv_file_mark_end = LV_FILE_MARK_TEMPLATE.format(\n result_set=result_set, lv=LV_NAME, position=\"end\"\n)\ndisplay(lv_file_mark_end)\n\n# %%\nnew_content = lv_assocs.to_markdown(index=False, disable_numparse=True)\n\n# %%\n# add table caption\ntable_caption = TABLE_CAPTION.format(\n lv_name=LV_NAME,\n result_set_name=RESULT_SET_NAMES[result_set],\n table_id=\"{\"\n + TABLE_CAPTION_ID.format(result_set=result_set, lv_name_lower_case=LV_NAME.lower())\n + \"}\",\n)\ndisplay(table_caption)\n\n# %%\nnew_content += \"\\n\\n\" + table_caption\n\n# %%\nfull_new_content = (\n lv_file_mark_start + \"\\n\" + new_content.strip() + \"\\n\" + lv_file_mark_end\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"r\", encoding=\"utf8\") as f:\n file_content = f.read()\n\n# %%\nnew_file_content = re.sub(\n lv_file_mark_start + \".*?\" + lv_file_mark_end,\n full_new_content,\n file_content,\n flags=re.DOTALL,\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"w\", encoding=\"utf8\") as f:\n f.write(new_file_content) # .replace(\"\\beta\", r\"\\beta\"))\n\n# %%\n", "# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# formats: ipynb,py//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] 
tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# Generates manubot tables for pathways enriched (from the MultiPLIER models) given an LV name (in Settings below).\n\n# %% [markdown] tags=[]\n# # Modules loading\n\n# %% tags=[]\n# %load_ext autoreload\n# %autoreload 2\n\n# %% tags=[]\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom entity import Trait\nimport conf\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %% tags=[\"parameters\"]\nLV_NAME = \"LV844\"\n\n# %%\nassert (\n conf.MANUSCRIPT[\"BASE_DIR\"] is not None\n), \"The manuscript directory was not configured\"\n\nOUTPUT_FILE_PATH = conf.MANUSCRIPT[\"CONTENT_DIR\"] / \"50.00.supplementary_material.md\"\ndisplay(OUTPUT_FILE_PATH)\nassert OUTPUT_FILE_PATH.exists()\n\n# %% [markdown] tags=[]\n# # Load MultiPLIER summary\n\n# %% tags=[]\nmultiplier_model_summary = pd.read_pickle(conf.MULTIPLIER[\"MODEL_SUMMARY_FILE\"])\n\n# %% tags=[]\nmultiplier_model_summary.shape\n\n# %% tags=[]\nmultiplier_model_summary.head()\n\n# %% [markdown]\n# # LV pathways\n\n# %%\nlv_pathways = multiplier_model_summary[\n multiplier_model_summary[\"LV index\"].isin((LV_NAME[2:],))\n & (\n (multiplier_model_summary[\"FDR\"] < 0.05)\n # | (multiplier_model_summary[\"AUC\"] >= 0.75)\n )\n]\n\n# %%\nlv_pathways.shape\n\n# %%\nlv_pathways = lv_pathways[[\"pathway\", \"AUC\", \"FDR\"]].sort_values(\"FDR\")\n\n# %%\nlv_pathways = lv_pathways.assign(AUC=lv_pathways[\"AUC\"].apply(lambda x: f\"{x:.2f}\"))\n\n# %%\nlv_pathways = lv_pathways.assign(FDR=lv_pathways[\"FDR\"].apply(lambda x: f\"{x:.2e}\"))\n\n# %%\nlv_pathways = lv_pathways.rename(\n columns={\n \"pathway\": \"Pathway\",\n }\n)\n\n# %%\nlv_pathways.head()\n\n# %% [markdown]\n# ## Split names\n\n# %%\nlv_pathways[\"Pathway\"] = lv_pathways[\"Pathway\"].apply(lambda x: \" \".join(x.split(\"_\")))\n\n# %%\nlv_pathways.head()\n\n# %% [markdown]\n# ## Fill empty\n\n# %%\nif lv_pathways.shape[0] == 0:\n lv_pathways.loc[0, \"Pathway\"] = \"No pathways significantly enriched\"\n lv_pathways = lv_pathways.fillna(\"\")\n\n# %% [markdown]\n# ## Save\n\n# %%\n# result_set is either phenomexcan or emerge\nLV_FILE_MARK_TEMPLATE = \"<!-- {lv}:multiplier_pathways:{position} -->\"\n\n# %%\nTABLE_CAPTION = (\n \"Table: Pathways aligned to {lv_name} from the MultiPLIER models. 
{table_id}\"\n)\n\n# %%\nTABLE_CAPTION_ID = \"#tbl:sup:multiplier_pathways:{lv_name_lower_case}\"\n\n# %%\n# start\nlv_file_mark_start = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position=\"start\")\ndisplay(lv_file_mark_start)\n\n# end\nlv_file_mark_end = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position=\"end\")\ndisplay(lv_file_mark_end)\n\n# %%\nnew_content = lv_pathways.to_markdown(index=False, disable_numparse=True)\n\n# %%\n# add table caption\ntable_caption = TABLE_CAPTION.format(\n lv_name=LV_NAME,\n table_id=\"{\" + TABLE_CAPTION_ID.format(lv_name_lower_case=LV_NAME.lower()) + \"}\",\n)\ndisplay(table_caption)\n\n# %%\nnew_content += \"\\n\\n\" + table_caption\n\n# %%\nfull_new_content = (\n lv_file_mark_start + \"\\n\" + new_content.strip() + \"\\n\" + lv_file_mark_end\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"r\", encoding=\"utf8\") as f:\n file_content = f.read()\n\n# %%\nnew_file_content = re.sub(\n lv_file_mark_start + \".*?\" + lv_file_mark_end,\n full_new_content,\n file_content,\n flags=re.DOTALL,\n)\n\n# %%\nwith open(OUTPUT_FILE_PATH, \"w\", encoding=\"utf8\") as f:\n f.write(new_file_content) # .replace(\"\\beta\", r\"\\beta\"))\n\n# %%\n", "# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: all,-execution,-papermill,-trusted\n# formats: ipynb,py//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.7.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Description\n\n# %% [markdown] tags=[]\n# Generates the figure for top cell types for a specified LV (in Settings section below).\n\n# %% [markdown] tags=[]\n# # Modules loading\n\n# %% tags=[]\n# %load_ext autoreload\n# %autoreload 2\n\n# %% tags=[]\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom data.recount2 import LVAnalysis\nfrom utils import chunker\nimport conf\n\n# %% [markdown] tags=[]\n# # Settings\n\n# %% tags=[\"parameters\"]\nLV_NAME = \"LV844\"\n\n# %%\nLV_AXIS_THRESHOLD = 3.0\nN_TOP_SAMPLES = 400\nN_TOP_ATTRS = 25\n\n# %%\nOUTPUT_FIGURES_DIR = Path(\n conf.MANUSCRIPT[\"FIGURES_DIR\"], \"lvs_analysis\", f\"{LV_NAME.lower()}\"\n).resolve()\ndisplay(OUTPUT_FIGURES_DIR)\nOUTPUT_FIGURES_DIR.mkdir(parents=True, exist_ok=True)\n\n# %%\nOUTPUT_CELL_TYPE_FILEPATH = OUTPUT_FIGURES_DIR / f\"{LV_NAME.lower()}-cell_types.svg\"\ndisplay(OUTPUT_CELL_TYPE_FILEPATH)\n\n# %% [markdown] tags=[]\n# # Load MultiPLIER summary\n\n# %% tags=[]\nmultiplier_model_summary = pd.read_pickle(conf.MULTIPLIER[\"MODEL_SUMMARY_FILE\"])\n\n# %% tags=[]\nmultiplier_model_summary.shape\n\n# %% tags=[]\nmultiplier_model_summary.head()\n\n# %% [markdown] tags=[]\n# # Load data\n\n# %% [markdown] tags=[]\n# ## Original data\n\n# %% tags=[]\nINPUT_SUBSET = \"z_score_std\"\n\n# %% tags=[]\nINPUT_STEM = \"projection-smultixcan-efo_partial-mashr-zscores\"\n\n# %% tags=[]\ninput_filepath = Path(\n conf.RESULTS[\"DATA_TRANSFORMATIONS_DIR\"],\n INPUT_SUBSET,\n f\"{INPUT_SUBSET}-{INPUT_STEM}.pkl\",\n).resolve()\ndisplay(input_filepath)\n\nassert input_filepath.exists(), \"Input file does not exist\"\n\ninput_filepath_stem = input_filepath.stem\ndisplay(input_filepath_stem)\n\n# %% tags=[]\ndata = pd.read_pickle(input_filepath)\n\n# %% tags=[]\ndata.shape\n\n# %% tags=[]\ndata.head()\n\n# %% [markdown]\n# ## LV data\n\n# %%\nlv_obj = LVAnalysis(LV_NAME, data)\n\n# %%\nmultiplier_model_summary[\n multiplier_model_summary[\"LV 
index\"].isin((LV_NAME[2:],))\n & (\n (multiplier_model_summary[\"FDR\"] < 0.05)\n | (multiplier_model_summary[\"AUC\"] >= 0.75)\n )\n]\n\n# %%\nlv_data = lv_obj.get_experiments_data()\n\n# %%\nlv_data.shape\n\n# %%\nlv_data.head()\n\n# %% [markdown]\n# # LV cell types analysis\n\n# %% [markdown]\n# ## Get top attributes\n\n# %%\nlv_attrs = lv_obj.get_attributes_variation_score()\ndisplay(lv_attrs.head(20))\n\n# %%\n# show those with cell type or tissue in their name\n_tmp = pd.Series(lv_attrs.index)\nlv_attrs[\n _tmp.str.match(\n \"(?:cell.+type$)|(?:tissue$)|(?:tissue.+type$)\",\n case=False,\n flags=re.IGNORECASE,\n ).values\n].sort_values(ascending=False)\n\n# %%\n_tmp = lv_data.loc[\n :,\n [\n \"cell type\",\n \"tissue\",\n \"tissue type\",\n LV_NAME,\n ],\n]\n\n# %%\n_tmp_seq = list(chunker(_tmp.sort_values(LV_NAME, ascending=False), 25))\n\n# %%\n_tmp_seq[0]\n\n# %%\n# what is there in these projects?\nlv_data.loc[[\"SRP027015\"]].dropna(how=\"all\", axis=1).sort_values(\n LV_NAME, ascending=False\n).sort_values(LV_NAME, ascending=False).head(10)\n\n# %%\nSELECTED_ATTRIBUTE = \"cell type\"\n\n# %%\n# it has to be in the order desired for filling nans in the SELECTED_ATTRIBUTE\nSECOND_ATTRIBUTES = [\"tissue\", \"tumor type\"]\n\n# %% [markdown]\n# ## Get plot data\n\n# %%\nplot_data = lv_data.loc[:, [SELECTED_ATTRIBUTE] + SECOND_ATTRIBUTES + [LV_NAME]]\n\n# %%\n# if blank/nan, fill cell type column with tissue content\n_new_column = plot_data[[SELECTED_ATTRIBUTE] + SECOND_ATTRIBUTES].fillna(\n method=\"backfill\", axis=1\n)[SELECTED_ATTRIBUTE]\nplot_data[SELECTED_ATTRIBUTE] = _new_column\nplot_data = plot_data.drop(columns=SECOND_ATTRIBUTES)\nplot_data = plot_data.fillna({SELECTED_ATTRIBUTE: \"NOT CATEGORIZED\"})\n# plot_data = plot_data.dropna(subset=[SELECTED_ATTRIBUTE])\n\n# %%\nplot_data = plot_data.sort_values(LV_NAME, ascending=False)\n\n# %%\nplot_data.head(20)\n\n# %% [markdown]\n# ## Customize x-axis values\n\n# %% [markdown]\n# When cell type values are not very clear, customize their names by looking at their specific studies to know exactly what the authors meant.\n\n# %%\nfinal_plot_data = plot_data.replace(\n {\n SELECTED_ATTRIBUTE: {\n \"mixture of U87 human glioma cells and WI-38 human lung fibroblast cells\": \"Glioma cells + lung fibroblast cells\",\n \"mixture of U87 human glioma cells and MCF10a human breast cancer cells\": \"Glioma cells + breast cancer cells\",\n }\n }\n)\n\n# %%\n# add also tissue information to these projects\n_srp_code = \"SRP050892\"\n_tmp = final_plot_data.loc[(_srp_code,)].apply(\n lambda x: \"Cell types from brain\"\n + f\" ({lv_data.loc[(_srp_code, x.name), 'screening chemicals']})\",\n axis=1,\n)\nfinal_plot_data.loc[(_srp_code, _tmp.index), SELECTED_ATTRIBUTE] = _tmp.values\n\n# %%\n# all samples from SRP049593 are fibroblasts\nfinal_plot_data[SELECTED_ATTRIBUTE] = final_plot_data.apply(\n lambda x: \"Myeloma cells\" if x.name[0] in (\"SRP027015\",) else x[\"cell type\"],\n axis=1,\n)\n\n# %%\n# take the top samples only\nfinal_plot_data = final_plot_data.sort_values(LV_NAME, ascending=False)[:N_TOP_SAMPLES]\n\n# %% [markdown]\n# ## Threshold LV values\n\n# %%\nfinal_plot_data.loc[\n final_plot_data[LV_NAME] > LV_AXIS_THRESHOLD, LV_NAME\n] = LV_AXIS_THRESHOLD\n\n# %% [markdown]\n# ## Delete samples with no tissue/cell type information\n\n# %%\nfinal_plot_data = final_plot_data[\n final_plot_data[SELECTED_ATTRIBUTE] != \"NOT CATEGORIZED\"\n]\n\n# %% [markdown]\n# ## Set x-axis order\n\n# %%\nattr_order = (\n 
final_plot_data.groupby(SELECTED_ATTRIBUTE)\n .max()\n .sort_values(LV_NAME, ascending=False)\n .index[:N_TOP_ATTRS]\n .tolist()\n)\n\n# %%\nlen(attr_order)\n\n# %%\nattr_order[:5]\n\n# %% [markdown]\n# ## Plot\n\n# %%\nwith sns.plotting_context(\"paper\", font_scale=1.5), sns.axes_style(\"whitegrid\"):\n sns.catplot(\n data=final_plot_data,\n y=LV_NAME,\n x=SELECTED_ATTRIBUTE,\n order=attr_order,\n kind=\"strip\",\n height=5,\n aspect=3,\n )\n plt.xticks(rotation=45, horizontalalignment=\"right\")\n\n plt.savefig(\n OUTPUT_CELL_TYPE_FILEPATH,\n bbox_inches=\"tight\",\n facecolor=\"white\",\n )\n\n# %% [markdown]\n# # Debug\n\n# %%\n# with pd.option_context(\n# \"display.max_rows\", None, \"display.max_columns\", None, \"display.max_colwidth\", None\n# ):\n# _tmp = final_plot_data[final_plot_data[SELECTED_ATTRIBUTE].str.contains(\"NOT CAT\")]\n# display(_tmp.head(20))\n\n# %%\n# # what is there in these projects?\n# lv_data.loc[[\"SRP050499\"]].dropna(how=\"all\", axis=1).sort_values(\n# LV_NAME, ascending=False\n# ).head(60)\n\n# %%\n" ]
[ [ "pandas.Series", "numpy.unique", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.load", "pandas.read_pickle" ], [ "pandas.option_context", "pandas.read_pickle", "pandas.read_csv", "pandas.isnull" ], [ "pandas.read_pickle" ], [ "matplotlib.pyplot.xticks", "pandas.read_pickle", "pandas.Series", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
tensorleap/tensorflow-onnx
[ "56f6070828928bbb0f30890b2229eec8b663213d", "56f6070828928bbb0f30890b2229eec8b663213d", "56f6070828928bbb0f30890b2229eec8b663213d", "56f6070828928bbb0f30890b2229eec8b663213d", "56f6070828928bbb0f30890b2229eec8b663213d", "56f6070828928bbb0f30890b2229eec8b663213d" ]
[ "tests/test_tf_shape_inference.py", "tests/test_cudnn_compatible_gru.py", "tests/test_tflite_utils.py", "tests/run_pretrained_models.py", "tf2onnxnightly/rewriter/conv2d_with_pad_rewriter.py", "tf2onnxnightly/convert.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit Tests for Tensorflow shape inference.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow.python.ops import init_ops\n\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom common import * # pylint: disable=wildcard-import, unused-wildcard-import\nfrom tf2onnxnightly import utils\nfrom tf2onnxnightly.tf_utils import get_tf_tensor_shape\nfrom tf2onnxnightly.shape_inference import infer_shape_for_graph\nfrom tf2onnxnightly.tf_loader import tf_reset_default_graph, tf_session, tf_placeholder, tf_optimize\n\n# pylint: disable=missing-docstring\n\n\nclass TFShapeInferenceTests(Tf2OnnxBackendTestBase):\n def _run_test_case(self, input_names_with_port, output_names_with_port):\n try:\n tf.compat.v1.disable_eager_execution()\n except: # pylint: disable=bare-except\n pass\n graph_def = None\n with tf_session() as sess:\n # freeze graph\n origin_graph = sess.graph\n variables_lib.global_variables_initializer().run()\n output_name_without_port = [n.split(':')[0] for n in output_names_with_port]\n graph_def = tf.graph_util.convert_variables_to_constants(\n sess, sess.graph_def,\n output_name_without_port\n )\n\n tf_reset_default_graph()\n tf.import_graph_def(graph_def, name='')\n\n # optimize graph\n graph_def = tf_optimize(input_names_with_port, output_names_with_port, sess.graph_def, True)\n\n with tf_session() as sess:\n if self.config.is_debug_mode:\n if not os.path.exists(self.test_data_directory):\n os.makedirs(self.test_data_directory)\n model_path = os.path.join(self.test_data_directory, self._testMethodName + \"_after_tf_optimize.pb\")\n utils.save_protobuf(model_path, graph_def)\n self.logger.debug(\"created file %s\", model_path)\n\n tf_reset_default_graph()\n tf.import_graph_def(graph_def, name='')\n\n with tf_session() as sess:\n inferred_graph = infer_shape_for_graph(sess.graph)\n # compare each operation\n for op in origin_graph.get_operations():\n inferred_op = None\n try:\n inferred_op = inferred_graph.get_operation_by_name(op.name)\n except KeyError:\n continue\n self._compare_shape_for_op(op, inferred_op)\n\n def _compare_shape_for_op(self, op1, op2):\n \"\"\"Align outputs of op2 to op1.\"\"\"\n for out1, out2 in zip(op1.outputs, op2.outputs):\n expected_shape = get_tf_tensor_shape(out1)\n if out1 is not None:\n actual_shape = get_tf_tensor_shape(out2)\n self.assertTrue(utils.are_shapes_compatible(expected_shape, actual_shape))\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_while_loop_with_ta_read_and_write(self):\n i = tf_placeholder(tf.int32, (), name=\"input_1\")\n inputs = tf_placeholder(tf.float32, (10,), name=\"input_2\")\n\n inputs_2 = tf.identity(inputs)\n input_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True).unstack(inputs_2)\n output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n\n c = lambda i, *_: tf.logical_and(tf.less(i, 10), i >= 0)\n\n def b(i, out_ta):\n new_i = tf.add(i, 1)\n x = input_ta.read(i)\n x = x + 3\n out_ta_new = out_ta.write(i, x)\n return new_i, out_ta_new\n\n i_final, out_final = tf.while_loop(c, b, [i, output_ta])\n _ = tf.identity(i_final, name=\"i\")\n _ = tf.identity(out_final.stack(), name=\"output_ta\")\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n\n output_names_with_port = [\"i:0\", 
\"output_ta:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_map_fn(self):\n def fn0(elem):\n res = elem + elem * elem\n return res\n\n def fn1(elem):\n res = elem[0] * elem[1] + elem[0]\n return res\n\n x_val = 100 * np.random.random_sample([2, 10]).astype(np.float32)\n y_val = 100 * np.random.random_sample([2, 10]).astype(np.float32)\n\n # test fn0\n x = tf_placeholder(tf.float32, shape=x_val.shape, name=\"input_0\")\n x_ = tf.identity(x)\n res_ = tf.map_fn(fn0, x_, dtype=tf.float32)\n _ = tf.identity(res_, name=\"output_0\")\n input_names_with_port = [\"input_0:0\"]\n output_names_with_port = [\"output_0:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n tf_reset_default_graph()\n\n # test fn1\n x = tf_placeholder(tf.float32, shape=x_val.shape, name=\"input_0\")\n y = tf_placeholder(tf.float32, shape=y_val.shape, name=\"input_1\")\n x_ = tf.identity(x)\n y_ = tf.identity(y)\n res_ = tf.map_fn(fn1, (x_, y_), dtype=tf.float32)\n _ = tf.identity(res_, name=\"output_0\")\n input_names_with_port = [\"input_0:0\", \"input_1:0\"]\n output_names_with_port = [\"output_0:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_bidrectional_attention_wrapper_lstm_encoder(self):\n size = 30\n time_step = 3\n input_size = 4\n attn_size = size\n batch_size = 9\n\n # shape [batch size, time step, size]\n # attention_state: usually the output of an RNN encoder.\n # This tensor should be shaped `[batch_size, max_time, ...]`\n encoder_time_step = time_step\n encoder_x_val = np.random.randn(encoder_time_step, input_size).astype('f')\n encoder_x_val = np.stack([encoder_x_val] * batch_size)\n encoder_x = tf_placeholder(tf.float32, encoder_x_val.shape, name=\"input_1\")\n encoder_cell = tf.nn.rnn_cell.LSTMCell(size)\n attention_states, _ = tf.nn.dynamic_rnn(encoder_cell, encoder_x, dtype=tf.float32)\n # [9, 3, 30], [9, 30]\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(attn_size,\n attention_states)\n\n match_input_fn = lambda curr_input, state: tf.concat([curr_input, state], axis=-1)\n cell = tf.nn.rnn_cell.LSTMCell(size)\n match_cell_fw = tf.contrib.seq2seq.AttentionWrapper(cell,\n attention_mechanism,\n attention_layer_size=attn_size,\n cell_input_fn=match_input_fn,\n output_attention=False)\n match_cell_bk = tf.contrib.seq2seq.AttentionWrapper(cell,\n attention_mechanism,\n attention_layer_size=attn_size,\n cell_input_fn=match_input_fn,\n output_attention=False)\n\n decoder_time_step = 6\n decoder_x_val = np.random.randn(decoder_time_step, batch_size, input_size).astype('f')\n\n decoder_x = tf_placeholder(tf.float32, decoder_x_val.shape, name=\"input_2\")\n seq_length = tf_placeholder(tf.int32, (batch_size), name=\"input_3\")\n (match_output_fw, match_output_bk), (match_state_fw, match_state_bk) = \\\n tf.nn.bidirectional_dynamic_rnn(cell_fw=match_cell_fw,\n cell_bw=match_cell_bk,\n inputs=decoder_x,\n sequence_length=tf.identity(seq_length),\n dtype=tf.float32,\n time_major=True)\n\n matched_output = tf.concat([match_output_fw, match_output_bk], axis=-1)\n matched_state = tf.concat([match_state_fw.cell_state, match_state_bk.cell_state], -1)\n\n _ = tf.identity(matched_output, name=\"output_0\")\n _ = tf.identity(matched_state, name=\"final_state\")\n\n input_names_with_port = [\"input_1:0\", \"input_2:0\", \"input_3:0\"]\n output_names_with_port 
= [\"output_0:0\", \"final_state:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_dynamic_decode_normal_stop(self):\n batch_size = 2\n num_units = 4\n vocab_size = 5\n embedding_size = 3\n go_token = 0\n end_token = 1\n\n embedding = tf.constant(np.ones([vocab_size, embedding_size], dtype=np.float32))\n state_val = np.reshape([np.ones([num_units], dtype=np.float32) * i for i in range(batch_size)],\n [batch_size, num_units])\n encoder_state = tf.nn.rnn_cell.LSTMStateTuple(state_val, state_val)\n\n cell_initializer = init_ops.constant_initializer(\n np.array([[-0.9592235, 0.42451382, 0.7437744, -0.54485345, -0.80763197,\n 0.19663906, -0.22738314, 0.7762785, 0.7464578, 0.27227187,\n 0.7661047, 0.3596425, -0.8528242, -0.89316916, -0.48946142,\n 0.87882376],\n [0.86586094, -0.75018406, 0.25992537, -0.69368935, 0.2515502,\n -0.26379275, 0.8954313, 0.5759742, -0.7753072, -0.4388857,\n 0.95751476, -0.82085776, -0.9467752, -0.37055635, -0.18570113,\n -0.86504984],\n [0.02305841, 0.3850248, 0.893692, -0.6866486, -0.83703446,\n -0.9828961, 0.3989377, -0.59993076, 0.5330808, 0.6916566,\n 0.98468065, -0.6047034, 0.10823512, 0.34599304, -0.7834821,\n -0.7852347],\n [0.81643987, 0.31507468, -0.51369476, -0.12273741, 0.9701307,\n -0.79669356, -0.34496522, -0.88750815, -0.17995334, 0.34707904,\n -0.09201193, 0.5363934, -0.87229705, -0.5073328, -0.95894027,\n 0.5481839],\n [-0.84093595, -0.2341497, -0.86047816, 0.43370056, -0.39073753,\n 0.37730122, 0.48026466, 0.3004985, -0.60727096, 0.9043884,\n -0.37619448, 0.22490788, -0.03739262, 0.61672115, 0.478899,\n -0.40780973],\n [0.31202435, -0.22045255, -0.6087918, 0.95115066, 0.00199413,\n -0.688287, -0.1103518, 0.4169519, 0.7913246, -0.9844644,\n -0.6193857, 0.38659644, -0.4726901, -0.44781208, -0.5174744,\n -0.605911],\n [0.66771054, 0.34912825, 0.22297978, -0.4990945, 0.24057317,\n -0.5540829, 0.92277217, 0.74939895, -0.35278273, -0.21587133,\n -0.28613377, -0.8794241, -0.40119147, 0.67175174, -0.22741508,\n 0.37898326]], dtype=np.float32))\n dense_initializer = init_ops.constant_initializer(\n np.array([[0.56177187, -0.6233454, 0.73997784, 0.35032558, 0.6479795],\n [0.6831174, -0.34233975, 0.39330363, 0.45177555, -0.49649096],\n [-0.98890066, 0.6175642, 0.09800482, -0.6721206, 0.48805737],\n [0.19671416, 0.2623148, 0.742548, 0.13555217, 0.56009054]], dtype=np.float32))\n\n cell = tf.nn.rnn_cell.LSTMCell(\n num_units=num_units,\n initializer=cell_initializer,\n state_is_tuple=True)\n\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding=embedding,\n start_tokens=tf.tile([go_token], [batch_size]),\n end_token=end_token)\n\n output_layer = tf.layers.Dense(vocab_size, kernel_initializer=dense_initializer)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=cell,\n helper=helper,\n initial_state=encoder_state,\n output_layer=output_layer)\n\n outputs, state, sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n maximum_iterations=6)\n\n _ = tf.identity(outputs.rnn_output, name=\"rnn_output\")\n _ = tf.identity(outputs.sample_id, name=\"sample_id\")\n _ = tf.identity(state, name=\"state\")\n _ = tf.identity(sequence_lengths, name=\"sequence_lengths\")\n\n output_names_with_port = [\n \"rnn_output:0\",\n # \"sample_id:0\", # incomplete type support for Transpose on onnxruntime 0.2.1\n \"state:0\",\n ]\n\n self._run_test_case([], output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to 
supported tf-2\")\n def test_while_loop_in_cond(self):\n x_val = np.array([1, 2, 3], dtype=np.float32)\n y_val = np.array([4, 5, 6], dtype=np.float32)\n x = tf_placeholder(tf.float32, x_val.shape, name=\"input_1\")\n y = tf_placeholder(tf.float32, y_val.shape, name=\"input_2\")\n\n def cond_graph():\n b = tf.constant(np.array([0], dtype=np.int32), dtype=tf.int32)\n # while_loop\n c = lambda y: tf.reduce_any(tf.less(y, 10))\n b = lambda i: tf.add(y, 1)\n return tf.while_loop(c, b, [y])\n\n res = tf.cond(x[0] < y[0], lambda: x, cond_graph, name=\"test_cond\")\n _ = tf.identity(res, name=\"output\")\n\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n output_names_with_port = [\"output:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n @check_tf_max_version(\"1.15\", \"_run_test_case needs to supported tf-2\")\n def test_cond_in_while_loop(self):\n i = tf.placeholder(tf.int32, (), name=\"input_1\")\n inputs = tf.placeholder(tf.float32, (10,), name=\"input_2\")\n\n inputs_2 = tf.identity(inputs)\n input_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True).unstack(inputs_2)\n output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n\n c = lambda i, *_: tf.logical_and(tf.less(i, 10), i >= 0)\n\n def b(i, out_ta):\n new_i = tf.add(i, 1)\n x = input_ta.read(i)\n x = tf.cond(x > 0, lambda: x - 1, lambda: x + 3)\n out_ta_new = out_ta.write(i, x)\n return new_i, out_ta_new\n\n i_final, out_final = tf.while_loop(c, b, [i, output_ta])\n _ = tf.identity(i_final, name=\"i\")\n _ = tf.identity(out_final.stack(), name=\"output_ta\")\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n\n output_names_with_port = [\"i:0\", \"output_ta:0\"]\n self._run_test_case(input_names_with_port, output_names_with_port)\n\n\nif __name__ == \"__main__\":\n unittest_main()\n", "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit Tests for gru.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom common import unittest_main, check_gru_count, check_tf_max_version, check_opset_after_tf_version\nfrom tf2onnxnightly.tf_loader import is_tf2\n\n\n# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,cell-var-from-loop\n\nif is_tf2():\n MultiRNNCell = tf.compat.v1.nn.rnn_cell.MultiRNNCell\n dynamic_rnn = tf.compat.v1.nn.dynamic_rnn\n bidirectional_dynamic_rnn = tf.compat.v1.nn.bidirectional_dynamic_rnn\nelse:\n GRUBlockCell = tf.contrib.rnn.GRUBlockCell\n MultiRNNCell = tf.contrib.rnn.MultiRNNCell\n CudnnCompatibleGRUCell = tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell\n dynamic_rnn = tf.nn.dynamic_rnn\n bidirectional_dynamic_rnn = tf.nn.bidirectional_dynamic_rnn\n\n\n# TODO: as a workaround, set batch_size to 1 for now to bypass a onnxruntime bug, revert it when the bug is fixed\nclass CudnnCompatibleGRUTests(Tf2OnnxBackendTestBase):\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n # no scope\n cell = CudnnCompatibleGRUCell(units)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n 
dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n input_names_with_port = [\"input_1:0\"]\n feed_dict = {\"input_1:0\": x_val}\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_multiple_dynamic_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n gru_output_list = []\n gru_cell_state_list = []\n # no scope\n cell = CudnnCompatibleGRUCell(units)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n gru_output_list.append(outputs)\n gru_cell_state_list.append(cell_state)\n\n # given scope\n cell = CudnnCompatibleGRUCell(units)\n with variable_scope.variable_scope(\"root1\") as scope:\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=[4],\n scope=scope)\n gru_output_list.append(outputs)\n gru_cell_state_list.append(cell_state)\n\n return tf.identity(gru_output_list, name=\"output\"), tf.identity(gru_cell_state_list, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06)\n # graph_validator=lambda g: check_gru_count(g, 2))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_seq_length_is_const(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=[5])\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_seq_length_is_not_const(self):\n for np_dtype in [np.int32, np.int64, np.float32]:\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n y_val = np.array([5], dtype=np_dtype)\n\n def func(x, seq_length):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32,\n sequence_length=tf.identity(seq_length))\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val, \"input_2:0\": y_val}\n input_names_with_port = [\"input_1:0\", \"input_2:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n 
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_placeholder_input(self):\n units = 5\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * 1)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32) # by default zero initializer is used\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_ch_zero_state_initializer(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n # no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n # defining initial state\n initial_state = cell.zero_state(batch_size, dtype=tf.float32)\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n initial_state=initial_state,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-03, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_random_weights(self):\n hidden_size = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n\n # no scope\n cell = CudnnCompatibleGRUCell(\n hidden_size,\n kernel_initializer=initializer)\n\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_single_dynamic_gru_random_weights2(self):\n hidden_size = 128\n batch_size = 1\n x_val = np.random.randn(1, 133).astype('f')\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(0.0, 1.0)\n # no scope\n cell = CudnnCompatibleGRUCell(\n hidden_size,\n kernel_initializer=initializer)\n\n outputs, cell_state = dynamic_rnn(\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, 
name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.01,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_gru_output_consumed_only(self):\n units = 5\n batch_size = 6\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n outputs, _ = dynamic_rnn(\n cell1,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, 0.0001,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_gru_state_consumed_only(self):\n units = 5\n batch_size = 6\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = tf.random_uniform_initializer(-1.0, 1.0)\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n\n _, cell_state = dynamic_rnn(\n cell1,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=0.0001, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru_output_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = 
CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, _ = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bigru_state_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell1 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n cell2 = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n _, cell_state = bidirectional_dynamic_rnn(\n cell1,\n cell2,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bidirectional_but_one_gru(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n initializer = init_ops.constant_initializer(0.5)\n\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units,\n kernel_initializer=initializer)\n outputs, cell_state = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\"), tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\", \"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def test_dynamic_bidirectional_but_one_gru_and_output_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n def func(x):\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units)\n outputs, _ = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(outputs, name=\"output\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"output:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n @check_opset_after_tf_version(\"1.15\", 10, \"might need ReverseV2\")\n @check_tf_max_version(\"1.15\", \"no CudnnCompatibleGRUCell in tf-2.x\")\n def 
test_dynamic_bidirectional_but_one_gru_and_state_consumed_only(self):\n units = 5\n batch_size = 1\n x_val = np.array([[1., 1.], [2., 2.], [3., 3.]], dtype=np.float32)\n x_val = np.stack([x_val] * batch_size)\n\n def func(x):\n\n # bigru, no scope\n cell = CudnnCompatibleGRUCell(\n units)\n _, cell_state = bidirectional_dynamic_rnn(\n cell,\n cell,\n x,\n dtype=tf.float32)\n\n return tf.identity(cell_state, name=\"cell_state\")\n\n feed_dict = {\"input_1:0\": x_val}\n input_names_with_port = [\"input_1:0\"]\n output_names_with_port = [\"cell_state:0\"]\n self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port, rtol=1e-3, atol=1e-06,\n graph_validator=lambda g: check_gru_count(g, 1))\n\n\nif __name__ == '__main__':\n unittest_main()\n", "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Unit Tests for TFLite utils.\"\"\"\n\nimport os\nimport tensorflow as tf\n\nfrom common import * # pylint: disable=wildcard-import,unused-wildcard-import\nfrom backend_test_base import Tf2OnnxBackendTestBase\nfrom tf2onnxnightly.tf_loader import from_function, tf_session\nfrom tf2onnxnightly.tflite_utils import read_tflite_model, parse_tflite_graph\n\n# pylint: disable=missing-docstring\n\n\nclass TFListUtilsTests(Tf2OnnxBackendTestBase):\n\n @check_tf_min_version(\"2.0\")\n def test_parse_tflite_graph(self):\n\n def func(a, b, c):\n alpha = tf.constant(1.1, dtype=tf.float32)\n beta = tf.constant(2.3, dtype=tf.float32)\n mul1 = tf.multiply(alpha, tf.matmul(a, b))\n mul2 = tf.multiply(beta, c)\n x_ = mul1 + mul2\n return tf.identity(x_, name=\"output\")\n\n inp_shapes = [[2, 3], [3, 1], [2, 1]]\n inp_dtypes = [tf.float32, tf.float32, tf.float32]\n names = ['a', 'b', 'c']\n names_with_port = ['a:0', 'b:0', 'c:0']\n output_names = ['output']\n output_names_with_port = ['output:0']\n\n input_tensors = [tf.TensorSpec(shape=s, dtype=d, name=n) for s, d, n in zip(inp_shapes, inp_dtypes, names)]\n\n concrete_func = tf.function(func, input_signature=tuple(input_tensors))\n concrete_func = concrete_func.get_concrete_function()\n graph_def = from_function(concrete_func,\n input_names=names_with_port,\n output_names=output_names_with_port)\n with tf_session() as sess:\n tf.import_graph_def(graph_def, name='')\n sess_inputs = [sess.graph.get_tensor_by_name(k) for k in names_with_port]\n sess_outputs = [sess.graph.get_tensor_by_name(n) for n in output_names_with_port]\n converter = tf.compat.v1.lite.TFLiteConverter.from_session(sess, sess_inputs, sess_outputs)\n\n tflite_model = converter.convert()\n tflite_path = os.path.join(self.test_data_directory, self._testMethodName + \".tflite\")\n dir_name = os.path.dirname(tflite_path)\n tflite_model = converter.convert()\n os.makedirs(dir_name, exist_ok=True)\n with open(tflite_path, 'wb') as f:\n f.write(tflite_model)\n\n tflite_graphs, opcodes_map, model, tensor_shapes = read_tflite_model(tflite_path)\n self.assertEqual(1, len(tflite_graphs))\n onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, inputs, outputs, _ = \\\n parse_tflite_graph(tflite_graphs[0], opcodes_map, model, tensor_shapes_override=tensor_shapes)\n self.assertEqual(2, op_cnt['MUL'])\n self.assertEqual(1, op_cnt['ADD'])\n self.assertEqual(1, op_cnt['FULLY_CONNECTED'])\n\n self.assertEqual(1, attr_cnt['WeightsFormat'])\n self.assertEqual(names, inputs)\n self.assertEqual(output_names, outputs)\n\n for name, shape, dtype in zip(names, inp_shapes, inp_dtypes):\n self.assertEqual(shape, output_shapes[name])\n self.assertEqual(dtype, dtypes[name])\n\n self.assertTrue(len(onnx_nodes) >= 
4)\n", "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"Tool to convert and test pre-trained tensorflow models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# pylint: disable=broad-except,logging-not-lazy,unused-argument,unnecessary-lambda,import-outside-toplevel\n# pylint: disable=wrong-import-position,too-many-nested-blocks\n\nimport argparse\nimport os\nimport re\nimport sys\nimport tarfile\nimport tempfile\nimport time\nimport zipfile\nimport random\nfrom collections import namedtuple\nfrom distutils.version import LooseVersion\n\n\nimport yaml\nimport numpy as np\nimport PIL.Image\nimport six\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nimport tensorflow as tf\n\n# contrib ops are registered only when the module is imported, the following import statement is needed,\n# otherwise tf runtime error will show up when the tf model is restored from pb file because of un-registered ops.\ntry:\n import tensorflow.contrib.rnn # pylint: disable=unused-import\nexcept: # pylint: disable=bare-except\n # not needed for tf-2.0\n pass\n\ntry:\n import tensorflow_text # pylint: disable=unused-import\nexcept ModuleNotFoundError:\n pass\n\nfrom tf2onnxnightly import tf_loader, logging, optimizer, utils, tf_utils, constants\nfrom tf2onnxnightly.tfonnx import process_tf_graph\nfrom tf2onnxnightly.tf_loader import tf_session, tf_reset_default_graph\nfrom tf2onnxnightly.graph import ExternalTensorStorage\n\nlogger = logging.getLogger(\"run_pretrained\")\n\nTEMP_DIR = os.path.join(utils.get_temp_directory(), \"run_pretrained\")\nPERFITER = 1000\n\n\ndef get_img(shape, path, dtype, should_scale=True):\n \"\"\"Get image as input.\"\"\"\n resize_to = shape[1:3]\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), path)\n img = PIL.Image.open(path)\n img = img.resize(resize_to, PIL.Image.ANTIALIAS)\n img_np = np.array(img).astype(dtype)\n img_np = np.stack([img_np] * shape[0], axis=0).reshape(shape)\n if should_scale:\n img_np = img_np / 255\n return img_np\n\n\ndef get_beach(shape):\n \"\"\"Get beach image as input.\"\"\"\n return get_img(shape, \"beach.jpg\", np.float32, should_scale=True)\n\n\ndef get_car(shape):\n \"\"\"Get car image as input.\"\"\"\n return get_img(shape, \"car.JPEG\", np.float32, should_scale=True)\n\n\ndef get_ade20k(shape):\n \"\"\"Get truck image from ade20k segmentation dataset.\"\"\"\n return get_img(shape, \"ade20k.jpg\", np.float32, should_scale=True)\n\n\ndef get_ade20k_uint8(shape):\n \"\"\"Get truck image from ade20k segmentation dataset.\"\"\"\n return get_img(shape, \"ade20k.jpg\", np.uint8, should_scale=False)\n\n\ndef get_random(shape):\n \"\"\"Get random input.\"\"\"\n np.random.seed(42)\n return np.random.sample(shape).astype(np.float32)\n\n\ndef get_random256(shape):\n \"\"\"Get random imput between 0 and 255.\"\"\"\n np.random.seed(42)\n return np.round(np.random.sample(shape) * 256).astype(np.float32)\n\n\ndef get_ramp(shape):\n \"\"\"Get ramp input.\"\"\"\n size = np.prod(shape)\n return np.linspace(1, size, size).reshape(shape).astype(np.float32)\n\n\ndef get_ones(shape):\n \"\"\"Get ones.\"\"\"\n return np.ones(shape).astype(np.float32)\n\ndef get_zeros(shape):\n \"\"\"Get zeros.\"\"\"\n return np.zeros(shape).astype(np.float32)\n\ndef get_zeros_int32(shape):\n \"\"\"Get zeros.\"\"\"\n return np.zeros(shape).astype(np.int32)\n\ndef get_zeros_int64(shape):\n \"\"\"Get zeros.\"\"\"\n return np.zeros(shape).astype(np.int64)\n\ndef 
get_ones_int32(shape):\n \"\"\"Get ones.\"\"\"\n return np.ones(shape).astype(np.int32)\n\ndef get_small_rand_int32(shape):\n \"\"\"Get random ints in range [1, 99]\"\"\"\n np.random.seed(42)\n return np.random.randint(low=1, high=100, size=shape, dtype=np.int32)\n\ndef get_zeros_then_ones(shape):\n \"\"\"Fill half the tensor with zeros and the rest with ones\"\"\"\n cnt = np.prod(shape)\n zeros_cnt = cnt // 2\n ones_cnt = cnt - zeros_cnt\n return np.concatenate((np.zeros(zeros_cnt, dtype=np.int32), np.ones(ones_cnt, dtype=np.int32))).reshape(shape)\n\ndef get_wav(shape):\n \"\"\"Get sound data.\"\"\"\n return np.sin(np.linspace(-np.pi, np.pi, shape[0]), dtype=np.float32)\n\ndef get_sentences(shape):\n \"\"\"Get sentences of shape\"\"\"\n words = \"the quick brown fox jumps over a lazy dog\".split(' ')\n random.seed(42)\n def get_sentence():\n length = random.randint(2, 7)\n return ' '.join(random.choice(words) for _ in range(length))\n return np.array([get_sentence() for _ in range(np.product(shape))]).reshape(shape)\n\n\n_INPUT_FUNC_MAPPING = {\n \"get_beach\": get_beach,\n \"get_car\": get_car,\n \"get_ade20k\": get_ade20k,\n \"get_ade20k_uint8\": get_ade20k_uint8,\n \"get_random\": get_random,\n \"get_random256\": get_random256,\n \"get_ramp\": get_ramp,\n \"get_ones\": get_ones,\n \"get_zeros\": get_zeros,\n \"get_wav\": get_wav,\n \"get_zeros_int32\": get_zeros_int32,\n \"get_zeros_int64\": get_zeros_int64,\n \"get_ones_int32\": get_ones_int32,\n \"get_small_rand_int32\": get_small_rand_int32,\n \"get_zeros_then_ones\": get_zeros_then_ones,\n \"get_sentences\": get_sentences,\n}\n\n\nOpsetConstraint = namedtuple(\"OpsetConstraint\", \"domain, min_version, max_version, excluded_version\")\n\n\nclass Test(object):\n \"\"\"Main Test class.\"\"\"\n\n cache_dir = None\n target = []\n\n def __init__(self, url, local, input_func, input_names, output_names,\n disabled=False, rtol=0.01, atol=1e-6, ptol=0, dequantize=False,\n check_only_shape=False, model_type=\"frozen\", force_input_shape=False,\n skip_tensorflow=False, opset_constraints=None, tf_min_version=None, tag=None,\n skip_conversion=False, converted_model=None, signature_def=None, concrete_function=None,\n large_model=False, structured_outputs=None, run_tf_frozen=None, use_custom_ops=False):\n self.url = url\n self.input_func = input_func\n self.local = local\n self.input_names = input_names\n self.output_names = output_names\n self.disabled = disabled\n self.large_model = large_model\n self.use_custom_ops = use_custom_ops\n if run_tf_frozen is None:\n run_tf_frozen = not self.large_model\n self.run_tf_frozen = run_tf_frozen\n self.structured_outputs = structured_outputs # Needed to determine output order for tf_function\n self.rtol = rtol\n self.atol = atol\n self.ptol = ptol\n self.dequantize = dequantize\n self.check_only_shape = check_only_shape\n self.perf = None\n self.tf_runtime = 0\n self.onnx_runtime = 0\n self.model_type = model_type\n self.tag = tag\n self.force_input_shape = force_input_shape\n self.skip_tensorflow = skip_tensorflow\n self.skip_conversion = skip_conversion\n self.converted_model = converted_model\n self.opset_constraints = opset_constraints\n self.tf_min_version = tf_min_version\n self.signatures = [signature_def] if signature_def else None\n self.concrete_function = concrete_function\n\n def make_input(self, v):\n \"\"\"Allows each input to specify its own function while defaulting to the input_get function\"\"\"\n if isinstance(v, dict):\n if \"input_get\" in v:\n return 
_INPUT_FUNC_MAPPING[v[\"input_get\"]](v[\"shape\"])\n if \"value\" in v:\n return np.array(v[\"value\"])\n return self.input_func(v)\n\n def download_model(self):\n \"\"\"Download model from url.\"\"\"\n cache_dir = Test.cache_dir\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n url = self.url\n if url.startswith(r'module://'):\n return self.download_from_module()\n k = url.rfind('/')\n fname = self.url[k + 1:]\n dir_name = fname + \"_dir\"\n ftype = None\n if url.endswith(\".tar.gz\") or url.endswith(\".tgz\"):\n ftype = 'tgz'\n dir_name = fname.replace(\".tar.gz\", \"\").replace(\".tgz\", \"\")\n elif url.endswith('.zip'):\n ftype = 'zip'\n dir_name = fname.replace(\".zip\", \"\")\n elif url.endswith('.tflite'):\n ftype = 'tflite'\n dir_name = fname.replace(\".tflite\", \"\")\n elif self.model_type == 'tflite':\n fname = self.local\n dir_name = fname.replace(\".tflite\", \"\") + \"_dir\"\n dir_name = os.path.join(cache_dir, dir_name)\n os.makedirs(dir_name, exist_ok=True)\n fpath = os.path.join(dir_name, fname)\n if not os.path.exists(fpath):\n utils.get_url(url, fpath)\n model_path = os.path.join(dir_name, self.local)\n if not os.path.exists(model_path) or self.local == \".\":\n if ftype == 'tgz':\n tar = tarfile.open(fpath)\n tar.extractall(dir_name)\n tar.close()\n elif ftype == 'zip':\n zip_ref = zipfile.ZipFile(fpath, 'r')\n zip_ref.extractall(dir_name)\n zip_ref.close()\n return fpath, dir_name\n\n def download_from_module(self):\n \"\"\"Download a model from a python module\"\"\"\n cache_dir = Test.cache_dir\n from importlib import import_module\n i = self.url.rfind('//')\n module, model_name = self.url[i + 2:].split('/')\n mod_object = import_module(module)\n model_class = getattr(mod_object, model_name)\n model = model_class()\n fpath = os.path.join(cache_dir, self.local)\n model.save(fpath)\n return fpath, cache_dir\n\n def run_tensorflow(self, sess, inputs):\n \"\"\"Run model on tensorflow so we have a reference output.\"\"\"\n feed_dict = {}\n for k, v in inputs.items():\n k = sess.graph.get_tensor_by_name(k)\n feed_dict[k] = v\n logger.info(\"Running TF\")\n result = sess.run(self.output_names, feed_dict=feed_dict)\n if self.perf:\n logger.info(\"Running TF perf\")\n start = time.time()\n for _ in range(PERFITER):\n _ = sess.run(self.output_names, feed_dict=feed_dict)\n self.tf_runtime = time.time() - start\n return result\n\n def to_onnx(self, tf_graph, opset=None, extra_opset=None, shape_override=None, input_names=None,\n const_node_values=None, initialized_tables=None, tflite_path=None, tensors_to_rename=None):\n \"\"\"Convert graph to tensorflow.\"\"\"\n if extra_opset is None:\n extra_opset = []\n if self.use_custom_ops:\n extra_opset.append(utils.make_opsetid(constants.CONTRIB_OPS_DOMAIN, 1))\n return process_tf_graph(tf_graph, continue_on_error=False, opset=opset,\n extra_opset=extra_opset, target=Test.target, shape_override=shape_override,\n input_names=input_names, output_names=self.output_names,\n const_node_values=const_node_values, initialized_tables=initialized_tables,\n tflite_path=tflite_path, dequantize=self.dequantize,\n tensors_to_rename=tensors_to_rename)\n\n def run_caffe2(self, name, model_proto, inputs):\n \"\"\"Run test again caffe2 backend.\"\"\"\n import caffe2.python.onnx.backend\n prepared_backend = caffe2.python.onnx.backend.prepare(model_proto)\n results = prepared_backend.run(inputs)\n if self.perf:\n start = time.time()\n for _ in range(PERFITER):\n _ = prepared_backend.run(inputs)\n self.onnx_runtime = time.time() - start\n 
return results\n\n def run_onnxruntime(self, name, model_proto, inputs, outputs, external_tensor_storage=None):\n \"\"\"Run test against onnxruntime backend.\"\"\"\n import onnxruntime as rt\n model_path = utils.save_onnx_model(TEMP_DIR, name, inputs, model_proto, include_test_data=True,\n as_text=utils.is_debug_mode(),\n external_tensor_storage=external_tensor_storage)\n logger.info(\"Model saved to %s\", model_path)\n if self.use_custom_ops:\n from ortcustomops import get_library_path\n opt = rt.SessionOptions()\n opt.register_custom_ops_library(get_library_path())\n m = rt.InferenceSession(model_path, opt)\n else:\n m = rt.InferenceSession(model_path)\n results = m.run(outputs, inputs)\n if self.perf:\n start = time.time()\n for _ in range(PERFITER):\n _ = m.run(outputs, inputs)\n self.onnx_runtime = time.time() - start\n return results\n\n @staticmethod\n def create_onnx_file(name, model_proto, inputs, outdir, external_tensor_storage=None):\n os.makedirs(outdir, exist_ok=True)\n if external_tensor_storage is None:\n model_path = os.path.join(outdir, name + \".onnx\")\n utils.save_protobuf(model_path, model_proto)\n else:\n model_path = os.path.join(outdir, name + \".zip\")\n utils.save_onnx_zip(model_path, model_proto, external_tensor_storage)\n logger.info(\"Created %s\", model_path)\n\n def run_test(self, name, backend=\"caffe2\", onnx_file=None, opset=None, extra_opset=None,\n perf=None, fold_const=None):\n \"\"\"Run complete test against backend.\"\"\"\n self.perf = perf\n\n # get the model\n if self.url:\n _, dir_name = self.download_model()\n logger.info(\"Downloaded to %s\", dir_name)\n model_path = os.path.join(dir_name, self.local) if self.local != \".\" else dir_name\n else:\n model_path = self.local\n\n logger.info(\"Load model from %s\", model_path)\n input_names = list(self.input_names.keys())\n initialized_tables = {}\n outputs = self.output_names\n tflite_path = None\n to_rename = None\n if self.model_type in [\"checkpoint\"]:\n graph_def, input_names, outputs = tf_loader.from_checkpoint(model_path, input_names, outputs)\n elif self.model_type in [\"saved_model\"]:\n loaded = tf_loader.from_saved_model(model_path, None, None, self.tag, self.signatures,\n self.concrete_function, self.large_model,\n return_concrete_func=not self.run_tf_frozen,\n return_initialized_tables=True, return_tensors_to_rename=True)\n if not self.run_tf_frozen:\n # Must maintain ref to imported since concrete_func uses weak refs\n # pylint: disable=unused-variable\n graph_def, input_names, outputs, concrete_func, imported, initialized_tables, to_rename = loaded\n else:\n graph_def, input_names, outputs, initialized_tables, to_rename = loaded\n elif self.model_type in [\"keras\"]:\n graph_def, input_names, outputs = tf_loader.from_keras(model_path, input_names, outputs)\n elif self.model_type in [\"tflite\"]:\n tflite_path = model_path\n graph_def = None\n else:\n graph_def, input_names, outputs = tf_loader.from_graphdef(model_path, input_names, outputs)\n\n if utils.is_debug_mode():\n utils.save_protobuf(os.path.join(TEMP_DIR, name + \"_after_tf_optimize.pb\"), graph_def)\n\n if tflite_path is not None:\n inputs = {}\n for k in input_names:\n v = self.input_names[k]\n inputs[k] = self.make_input(v)\n\n interpreter = tf.lite.Interpreter(tflite_path)\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n input_name_to_index = {n['name'].split(':')[0]: n['index'] for n in input_details}\n for k, v in inputs.items():\n 
interpreter.resize_tensor_input(input_name_to_index[k], v.shape)\n interpreter.allocate_tensors()\n def run_tflite():\n for k, v in inputs.items():\n interpreter.set_tensor(input_name_to_index[k], v)\n interpreter.invoke()\n result = [interpreter.get_tensor(output['index']) for output in output_details]\n return result\n tf_results = run_tflite()\n if self.perf:\n logger.info(\"Running TFLite perf\")\n start = time.time()\n for _ in range(PERFITER):\n _ = run_tflite()\n self.tf_runtime = time.time() - start\n logger.info(\"TFLite OK\")\n\n if not self.run_tf_frozen:\n inputs = {}\n for k in input_names:\n v = self.input_names[k]\n inputs[k.split(\":\")[0]] = tf.constant(self.make_input(v))\n tf_func = tf.function(concrete_func)\n logger.info(\"Running TF\")\n tf_results_d = tf_func(**inputs)\n # If there is only a single output a dict might not be returned\n if isinstance(tf_results_d, tf.Tensor):\n tf_results = [tf_results_d]\n else:\n tf_results = [tf_results_d[k] for k in sorted(tf_results_d.keys())]\n tf_results = [tf_res.numpy() for tf_res in tf_results]\n if self.perf:\n logger.info(\"Running TF perf\")\n start = time.time()\n for _ in range(PERFITER):\n _ = concrete_func(**inputs)\n self.tf_runtime = time.time() - start\n logger.info(\"TensorFlow OK\")\n\n shape_override = {}\n const_node_values = None\n tf_graph = None\n\n if graph_def is not None:\n inputs = {}\n tf_reset_default_graph()\n\n with tf.Graph().as_default() as tf_graph:\n from tf2onnxnightly.tf_utils import compress_graph_def\n if self.large_model:\n const_node_values = compress_graph_def(graph_def)\n tf.import_graph_def(graph_def, name='')\n\n with tf_session(graph=tf_graph) as sess:\n # create the input data\n for k in input_names:\n v = self.input_names[k]\n t = sess.graph.get_tensor_by_name(k)\n expected_dtype = tf.as_dtype(t.dtype).name\n if isinstance(v, six.text_type) and v.startswith(\"np.\"):\n np_value = eval(v) # pylint: disable=eval-used\n if expected_dtype != np_value.dtype:\n logger.warning(\"dtype mismatch for input %s: expected=%s, actual=%s\", k, expected_dtype,\n np_value.dtype)\n inputs[k] = np_value.astype(expected_dtype)\n else:\n if expected_dtype == \"string\":\n inputs[k] = self.make_input(v).astype(np.str).astype(np.object)\n else:\n inputs[k] = self.make_input(v).astype(expected_dtype)\n\n if self.force_input_shape:\n for k, v in inputs.items():\n shape_override[k] = list(v.shape)\n\n # run the model with tensorflow\n if self.skip_tensorflow:\n logger.info(\"TensorFlow SKIPPED\")\n elif self.run_tf_frozen:\n tf_results = self.run_tensorflow(sess, inputs)\n logger.info(\"TensorFlow OK\")\n tf_graph = sess.graph\n\n\n model_proto = None\n if self.skip_conversion:\n if self.large_model:\n external_tensor_storage = ExternalTensorStorage()\n model_proto = utils.model_proto_from_zip(self.converted_model, external_tensor_storage)\n else:\n external_tensor_storage = None\n model_proto = utils.model_proto_from_file(self.converted_model)\n logger.info(\"ONNX loaded from file\")\n else:\n try:\n # convert model to onnx\n onnx_graph = self.to_onnx(tf_graph, opset=opset, extra_opset=extra_opset,\n shape_override=shape_override, input_names=inputs.keys(),\n const_node_values=const_node_values,\n initialized_tables=initialized_tables, tflite_path=tflite_path,\n tensors_to_rename=to_rename)\n onnx_graph = optimizer.optimize_graph(onnx_graph)\n print(\"ONNX\", onnx_graph.dump_node_statistics())\n external_tensor_storage = ExternalTensorStorage() if self.large_model else None\n model_proto = 
onnx_graph.make_model(\"converted from tf2onnx\",\n external_tensor_storage=external_tensor_storage)\n logger.info(\"To_ONNX, OK\")\n if onnx_file:\n self.create_onnx_file(name, model_proto, inputs, onnx_file, external_tensor_storage)\n if self.converted_model:\n if self.large_model:\n utils.save_onnx_zip(self.converted_model, model_proto, external_tensor_storage)\n else:\n utils.save_protobuf(self.converted_model, model_proto)\n logger.info(\"Created %s\", self.converted_model)\n\n except Exception:\n logger.error(\"To_ONNX FAIL\", exc_info=1)\n return False\n\n try:\n onnx_results = None\n if backend == \"caffe2\":\n onnx_results = self.run_caffe2(name, model_proto, inputs)\n elif backend == \"onnxruntime\":\n if to_rename is None:\n struc_outputs = self.output_names\n else:\n struc_outputs = [to_rename.get(k, k) for k in self.output_names]\n onnx_results = self.run_onnxruntime(name, model_proto, inputs, struc_outputs, external_tensor_storage)\n else:\n raise ValueError(\"unknown backend\")\n logger.info(\"Run_ONNX OK\")\n\n try:\n if self.skip_tensorflow:\n logger.info(\"Results: skipped tensorflow\")\n else:\n if self.check_only_shape:\n for tf_res, onnx_res in zip(tf_results, onnx_results):\n np.testing.assert_array_equal(tf_res.shape, onnx_res.shape)\n else:\n for tf_res, onnx_res in zip(tf_results, onnx_results):\n good_cnt = np.count_nonzero(np.isclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol))\n bad_cnt = tf_res.size - good_cnt\n if bad_cnt > self.ptol / 100 * tf_res.size:\n # Prints a nice error message with stats\n np.testing.assert_allclose(tf_res, onnx_res, rtol=self.rtol, atol=self.atol)\n logger.info(\"Results: OK\")\n return True\n except Exception:\n logger.error(\"Results\", exc_info=1)\n\n except Exception:\n logger.error(\"Run_ONNX FAIL\", exc_info=1)\n\n return False\n\n def check_opset_constraints(self, opset, extra_opset=None):\n \"\"\" Return (condition, reason) tuple, condition is True if constraints are met. 
\"\"\"\n if not self.opset_constraints:\n return True, None\n\n opsets = {\"onnx\": opset}\n if extra_opset:\n for e in extra_opset:\n opsets[e.domain] = e.version\n\n for constraint in self.opset_constraints:\n domain = constraint.domain\n opset_version = opsets.get(domain)\n if not opset_version:\n return False, \"conversion requires opset {}\".format(domain)\n\n if constraint.min_version and opset_version < constraint.min_version:\n reason = \"conversion requires opset {} >= {}\".format(domain, constraint.min_version)\n return False, reason\n\n if constraint.max_version and opset_version > constraint.max_version:\n reason = \"conversion requires opset {} <= {}\".format(domain, constraint.max_version)\n return False, reason\n\n if constraint.excluded_version:\n if utils.is_list_or_tuple(constraint.excluded_version):\n skip = opset_version in constraint.excluded_version\n else:\n skip = opset_version == constraint.excluded_version\n if skip:\n reason = \"conversion requires opset {} != {}\".format(domain, constraint.excluded_version)\n return False, reason\n\n return True, None\n\n\ndef get_args():\n \"\"\"Parse commandline.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--cache\", default=os.path.join(tempfile.gettempdir(), 'pre-trained'),\n help=\"pre-trained models cache dir\")\n parser.add_argument(\"--config\", default=\"tests/run_pretrained_models.yaml\", help=\"yaml config to use\")\n parser.add_argument(\"--tests\", help=\"tests to run\")\n parser.add_argument(\"--target\", default=\"\", help=\"target platform\")\n parser.add_argument(\"--backend\", default=\"onnxruntime\",\n choices=[\"caffe2\", \"onnxruntime\"], help=\"backend to use\")\n parser.add_argument(\"--opset\", type=int, default=None, help=\"opset to use\")\n parser.add_argument(\"--extra_opset\", default=None,\n help=\"extra opset with format like domain:version, e.g. 
com.microsoft:1\")\n parser.add_argument(\"--skip_tf_tests\", help=\"skip non-tflite tests\", default=\"False\")\n parser.add_argument(\"--skip_tflite_tests\", help=\"skip tflite tests\", default=\"False\")\n parser.add_argument(\"--verbose\", \"-v\", help=\"verbose output, option is additive\", action=\"count\")\n parser.add_argument(\"--debug\", help=\"debug mode\", action=\"store_true\")\n parser.add_argument(\"--list\", help=\"list tests\", action=\"store_true\")\n parser.add_argument(\"--onnx-file\", help=\"create onnx file in directory\")\n parser.add_argument(\"--perf\", help=\"capture performance numbers\")\n parser.add_argument(\"--perfiter\", type=int, default=PERFITER, help=\"number of inferences for perf testing\")\n parser.add_argument(\"--fold_const\", help=\"enable tf constant_folding transformation before conversion\",\n action=\"store_true\")\n parser.add_argument(\"--include-disabled\", help=\"include disabled tests\", action=\"store_true\")\n args = parser.parse_args()\n\n args.target = args.target.split(\",\")\n args.skip_tf_tests = args.skip_tf_tests.upper() == \"TRUE\"\n args.skip_tflite_tests = args.skip_tflite_tests.upper() == \"TRUE\"\n if args.extra_opset:\n tokens = args.extra_opset.split(':')\n if len(tokens) != 2:\n raise ValueError(\"invalid extra_opset argument\")\n args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))]\n return args\n\n\ndef load_tests_from_yaml(path):\n \"\"\"Create test class from yaml file.\"\"\"\n path = os.path.abspath(path)\n base_dir = os.path.dirname(path)\n\n tests = {}\n config = yaml.safe_load(open(path, 'r').read())\n for name, settings in config.items():\n if name in tests:\n raise ValueError(\"Found duplicated test: {}\".format(name))\n\n # parse model and url, non-absolute local path is relative to yaml directory\n model = settings.get(\"model\")\n url = settings.get(\"url\")\n if not url and not os.path.isabs(model):\n model = os.path.join(base_dir, model)\n\n # parse input_get\n input_func = settings.get(\"input_get\")\n input_func = _INPUT_FUNC_MAPPING[input_func]\n\n # parse inputs, non-absolute npy file path for np.load is relative to yaml directory\n inputs = settings.get(\"inputs\")\n for k, v in list(inputs.items()):\n if isinstance(v, str):\n # assume at most 1 match\n matches = re.findall(r\"np\\.load\\((r?['\\\"].*?['\\\"])\", v)\n if matches:\n npy_path = matches[0].lstrip('r').strip(\"'\").strip('\"')\n if not os.path.isabs(npy_path):\n abs_npy_path = os.path.join(base_dir, npy_path)\n inputs[k] = v.replace(matches[0], \"r'{}'\".format(abs_npy_path))\n\n # parse opset_constraints\n opset_constraints = []\n section = settings.get(\"opset_constraints\")\n if section:\n for k, v in section.items():\n c = OpsetConstraint(k, min_version=v.get(\"min\"), max_version=v.get(\"max\"),\n excluded_version=v.get(\"excluded\"))\n opset_constraints.append(c)\n\n kwargs = {}\n for kw in [\"rtol\", \"atol\", \"ptol\", \"disabled\", \"check_only_shape\", \"model_type\", \"concrete_function\",\n \"skip_tensorflow\", \"force_input_shape\", \"tf_min_version\", \"tag\", \"skip_conversion\",\n \"converted_model\", \"signature_def\", \"large_model\", \"structured_outputs\", \"run_tf_frozen\",\n \"use_custom_ops\", \"dequantize\"]:\n if settings.get(kw) is not None:\n kwargs[kw] = settings[kw]\n\n test = Test(url, model, input_func, inputs, settings.get(\"outputs\"),\n opset_constraints=opset_constraints, **kwargs)\n tests[name] = test\n return tests\n\n\ndef main():\n global PERFITER\n args = get_args()\n 
logging.basicConfig(level=logging.get_verbosity_level(args.verbose))\n if args.debug:\n utils.set_debug_mode(True)\n\n Test.cache_dir = args.cache\n Test.target = args.target\n tests = load_tests_from_yaml(args.config)\n if args.list:\n logger.info(sorted(tests.keys()))\n return 0\n if args.tests:\n test_keys = args.tests.split(\",\")\n else:\n test_keys = list(tests.keys())\n\n failed = 0\n count = 0\n PERFITER = args.perfiter\n for test in test_keys:\n logger.info(\"===================================\")\n\n t = tests[test]\n if args.tests is None:\n if t.disabled and not args.include_disabled:\n logger.info(\"Skip %s: disabled\", test)\n continue\n\n if args.skip_tflite_tests and t.model_type == \"tflite\":\n logger.info(\"Skip %s: tflite test\", test)\n continue\n if args.skip_tf_tests and t.model_type != \"tflite\":\n logger.info(\"Skip %s: not tflite test\", test)\n continue\n\n condition, reason = t.check_opset_constraints(args.opset, args.extra_opset)\n if not condition:\n logger.info(\"Skip %s: %s\", test, reason)\n continue\n\n if t.tf_min_version:\n if tf_utils.get_tf_version() < LooseVersion(str(t.tf_min_version)):\n logger.info(\"Skip %s: %s %s\", test, \"Min TF version needed:\", t.tf_min_version)\n continue\n\n count += 1\n try:\n logger.info(\"Running %s\", test)\n ret = t.run_test(test, backend=args.backend, onnx_file=args.onnx_file,\n opset=args.opset, extra_opset=args.extra_opset, perf=args.perf,\n fold_const=args.fold_const)\n except Exception:\n logger.error(\"Failed to run %s\", test, exc_info=1)\n ret = None\n finally:\n if not utils.is_debug_mode():\n utils.delete_directory(TEMP_DIR)\n if not ret:\n failed += 1\n\n logger.info(\"===================================\")\n logger.info(\"RESULT: %s failed of %s, backend=%s\", failed, count, args.backend)\n\n if args.perf:\n with open(args.perf, \"w\") as f:\n f.write(\"test,tensorflow,onnx\\n\")\n for test in test_keys:\n t = tests[test]\n if t.perf:\n # Report perf in ms per inference\n f.write(\"{},{},{}\\n\".format(test, t.tf_runtime * 1000 / PERFITER, t.onnx_runtime * 1000 / PERFITER))\n return failed\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"\ntf2onnx.rewriter - rewrite tensorflow subgraph to onnx condv2 op with pad\n\"\"\"\n\nimport numpy as np\n\nfrom tf2onnxnightly import handler, logging\nfrom tf2onnxnightly.graph_matcher import OpTypePattern, GraphMatcher\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=missing-docstring\n\n\ndef rewrite_conv2d_with_pad(g, ops):\n pattern = \\\n OpTypePattern(\"Conv2D\", name=\"conv\", inputs=[\n OpTypePattern(\"Pad\", name=\"pad\"),\n OpTypePattern(\"*\")\n ])\n matcher = GraphMatcher(pattern)\n match_results = list(matcher.match_ops(ops))\n for match in match_results:\n conv = match.get_op(\"conv\")\n pad = match.get_op(\"pad\")\n paddings = pad.inputs[1]\n\n if not paddings.is_const():\n continue\n mode = pad.get_attr(\"mode\")\n if mode:\n mode = mode.s.decode(\"utf-8\").lower()\n if mode not in [None, \"constant\"] or len(pad.input) >= 3:\n continue\n # Conv2D already has a pad\n if conv.get_attr(\"padding\").s.decode(\"utf-8\") == \"SAME\":\n continue\n\n logger.debug(\"merge pad [%s] into conv [%s]\", pad.name, conv.name)\n paddings_val = np.array(paddings.get_tensor_value())\n # can't pad on batch or channel dimensions\n data_format = conv.get_attr(\"data_format\").s.decode(\"utf-8\")\n if data_format == \"NHWC\":\n if np.any(paddings_val[0]) or np.any(paddings_val[3]):\n continue\n 
paddings_val = paddings_val[1:3]\n else:\n if np.any(paddings_val[0]) or np.any(paddings_val[1]):\n continue\n paddings_val = paddings_val[2:4]\n\n paddings_val = paddings_val.transpose().flatten()\n g.replace_input(conv, conv.input[0], pad.input[0], 0)\n # convert Conv2D\n conv.type = \"Conv2D\"\n func, _ = handler.tf_op.find_effective_op(\"Conv2D\")\n func(g, conv)\n conv.skip_conversion = True\n conv.set_attr(\"auto_pad\", \"NOTSET\")\n conv.set_attr(\"pads\", paddings_val)\n return ops\n", "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"\npython -m tf2onnx.convert : api and commandline tool to convert a tensorflow model to onnx\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n# pylint: disable=unused-argument,unused-import,ungrouped-imports,wrong-import-position\n\nimport argparse\nimport os\nimport sys\nfrom distutils.version import LooseVersion\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = \"3\"\n\nimport tensorflow as tf\n\nfrom tf2onnxnightly.tfonnx import process_tf_graph\nfrom tf2onnxnightly import constants, logging, utils, optimizer\nfrom tf2onnxnightly import tf_loader\nfrom tf2onnxnightly.graph import ExternalTensorStorage\nfrom tf2onnxnightly.tf_utils import compress_graph_def\n\n\n\n# pylint: disable=unused-argument\n\n_HELP_TEXT = \"\"\"\nUsage Examples:\n\npython -m tf2onnx.convert --saved-model saved_model_dir --output model.onnx\npython -m tf2onnx.convert --input frozen_graph.pb --inputs X:0 --outputs output:0 --output model.onnx\npython -m tf2onnx.convert --checkpoint checkpoint.meta --inputs X:0 --outputs output:0 --output model.onnx\n\nFor help and additional information see:\n https://github.com/onnx/tensorflow-onnx\n\nIf you run into issues, open an issue here:\n https://github.com/onnx/tensorflow-onnx/issues\n\"\"\"\n\n\ndef get_args():\n \"\"\"Parse commandline.\"\"\"\n parser = argparse.ArgumentParser(description=\"Convert tensorflow graphs to ONNX.\",\n formatter_class=argparse.RawDescriptionHelpFormatter, epilog=_HELP_TEXT)\n parser.add_argument(\"--input\", help=\"input from graphdef\")\n parser.add_argument(\"--graphdef\", help=\"input from graphdef\")\n parser.add_argument(\"--saved-model\", help=\"input from saved model\")\n parser.add_argument(\"--tag\", help=\"tag to use for saved_model\")\n parser.add_argument(\"--signature_def\", help=\"signature_def from saved_model to use\")\n parser.add_argument(\"--concrete_function\", type=int, default=None,\n help=\"For TF2.x saved_model, index of func signature in __call__ (--signature_def is ignored)\")\n parser.add_argument(\"--checkpoint\", help=\"input from checkpoint\")\n parser.add_argument(\"--keras\", help=\"input from keras model\")\n parser.add_argument(\"--tflite\", help=\"input from tflite model\")\n parser.add_argument(\"--large_model\", help=\"use the large model format (for models > 2GB)\", action=\"store_true\")\n parser.add_argument(\"--output\", help=\"output model file\")\n parser.add_argument(\"--inputs\", help=\"model input_names (optional for saved_model, keras, and tflite)\")\n parser.add_argument(\"--outputs\", help=\"model output_names (optional for saved_model, keras, and tflite)\")\n parser.add_argument(\"--ignore_default\", help=\"comma-separated list of names of PlaceholderWithDefault \"\n \"ops to change into Placeholder ops\")\n parser.add_argument(\"--use_default\", help=\"comma-separated list of names of PlaceholderWithDefault ops to \"\n \"change into Identity ops using their default value\")\n 
parser.add_argument(\"--rename-inputs\", help=\"input names to use in final model (optional)\")\n parser.add_argument(\"--rename-outputs\", help=\"output names to use in final model (optional)\")\n parser.add_argument(\"--opset\", type=int, default=None, help=\"opset version to use for onnx domain\")\n parser.add_argument(\"--dequantize\", help=\"Remove quantization from model. Only supported for tflite currently.\",\n action=\"store_true\")\n parser.add_argument(\"--custom-ops\", help=\"comma-separated map of custom ops to domains in format OpName:domain\")\n parser.add_argument(\"--extra_opset\", default=None,\n help=\"extra opset with format like domain:version, e.g. com.microsoft:1\")\n parser.add_argument(\"--target\", default=\",\".join(constants.DEFAULT_TARGET), choices=constants.POSSIBLE_TARGETS,\n help=\"target platform\")\n parser.add_argument(\"--continue_on_error\", help=\"continue_on_error\", action=\"store_true\")\n parser.add_argument(\"--verbose\", \"-v\", help=\"verbose output, option is additive\", action=\"count\")\n parser.add_argument(\"--debug\", help=\"debug mode\", action=\"store_true\")\n parser.add_argument(\"--output_frozen_graph\", help=\"output frozen tf graph to file\")\n parser.add_argument(\"--fold_const\", help=\"Deprecated. Constant folding is always enabled.\",\n action=\"store_true\")\n # experimental\n parser.add_argument(\"--inputs-as-nchw\", help=\"transpose inputs as from nhwc to nchw\")\n args = parser.parse_args()\n\n args.shape_override = None\n if args.input:\n # for backward compativility\n args.graphdef = args.input\n if args.graphdef or args.checkpoint:\n if not args.inputs or not args.outputs:\n parser.error(\"graphdef and checkpoint models need to provide inputs and outputs\")\n if not any([args.graphdef, args.checkpoint, args.saved_model, args.keras, args.tflite]):\n parser.print_help()\n sys.exit(1)\n if args.inputs:\n args.inputs, args.shape_override = utils.split_nodename_and_shape(args.inputs)\n if args.outputs:\n args.outputs = args.outputs.split(\",\")\n if args.ignore_default:\n args.ignore_default = args.ignore_default.split(\",\")\n if args.use_default:\n args.use_default = args.use_default.split(\",\")\n if args.rename_outputs:\n args.rename_outputs = args.rename_outputs.split(\",\")\n if args.rename_inputs:\n args.rename_inputs = args.rename_inputs.split(\",\")\n if args.inputs_as_nchw:\n args.inputs_as_nchw = args.inputs_as_nchw.split(\",\")\n if args.target:\n args.target = args.target.split(\",\")\n if args.signature_def:\n args.signature_def = [args.signature_def]\n if args.dequantize:\n if not args.tflite:\n parser.error(\"dequantize flag is currently only supported for tflite\")\n if args.extra_opset:\n tokens = args.extra_opset.split(':')\n if len(tokens) != 2:\n parser.error(\"invalid extra_opset argument\")\n args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))]\n\n return args\n\n\ndef make_default_custom_op_handler(domain):\n def default_custom_op_handler(ctx, node, name, args):\n node.domain = domain\n return node\n return default_custom_op_handler\n\n\ndef _convert_common(frozen_graph, name=\"unknown\", large_model=False, output_path=None,\n output_frozen_graph=None, **kwargs):\n \"\"\"Common processing for conversion.\"\"\"\n\n model_proto = None\n external_tensor_storage = None\n const_node_values = None\n\n with tf.Graph().as_default() as tf_graph:\n if large_model:\n const_node_values = compress_graph_def(frozen_graph)\n external_tensor_storage = ExternalTensorStorage()\n if output_frozen_graph:\n 
utils.save_protobuf(output_frozen_graph, frozen_graph)\n if not kwargs.get(\"tflite_path\"):\n tf.import_graph_def(frozen_graph, name='')\n g = process_tf_graph(tf_graph, const_node_values=const_node_values, **kwargs)\n onnx_graph = optimizer.optimize_graph(g)\n model_proto = onnx_graph.make_model(\"converted from {}\".format(name),\n external_tensor_storage=external_tensor_storage)\n if output_path:\n if large_model:\n utils.save_onnx_zip(output_path, model_proto, external_tensor_storage)\n else:\n utils.save_protobuf(output_path, model_proto)\n\n return model_proto, external_tensor_storage\n\n\ndef main():\n args = get_args()\n logging.basicConfig(level=logging.get_verbosity_level(args.verbose))\n if args.debug:\n utils.set_debug_mode(True)\n\n logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)\n\n extra_opset = args.extra_opset or []\n tflite_path = None\n custom_ops = {}\n initialized_tables = None\n tensors_to_rename = {}\n if args.custom_ops:\n using_tf_opset = False\n for op in args.custom_ops.split(\",\"):\n if \":\" in op:\n op, domain = op.split(\":\")\n else:\n # default custom ops for tensorflow-onnx are in the \"tf\" namespace\n using_tf_opset = True\n domain = constants.TENSORFLOW_OPSET.domain\n custom_ops[op] = (make_default_custom_op_handler(domain), [])\n if using_tf_opset:\n extra_opset.append(constants.TENSORFLOW_OPSET)\n\n if any(opset.domain == constants.CONTRIB_OPS_DOMAIN for opset in extra_opset):\n try:\n import tensorflow_text # pylint: disable=import-outside-toplevel\n except ModuleNotFoundError:\n logger.warning(\"tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used.\")\n\n # get the frozen tensorflow model from graphdef, checkpoint or saved_model.\n graph_def = None\n inputs = None\n outputs = None\n model_path = None\n\n if args.graphdef:\n graph_def, inputs, outputs = tf_loader.from_graphdef(args.graphdef, args.inputs, args.outputs)\n model_path = args.graphdef\n if args.checkpoint:\n graph_def, inputs, outputs = tf_loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)\n model_path = args.checkpoint\n if args.saved_model:\n graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(\n args.saved_model, args.inputs, args.outputs, args.tag, args.signature_def, args.concrete_function,\n args.large_model, return_initialized_tables=True, return_tensors_to_rename=True)\n model_path = args.saved_model\n if args.keras:\n graph_def, inputs, outputs = tf_loader.from_keras(\n args.keras, args.inputs, args.outputs)\n model_path = args.keras\n if args.tflite:\n tflite_path = args.tflite\n model_path = tflite_path\n\n if args.verbose:\n logger.info(\"inputs: %s\", inputs)\n logger.info(\"outputs: %s\", outputs)\n\n if args.rename_inputs:\n tensors_to_rename.update(zip(inputs, args.rename_inputs))\n if args.rename_outputs:\n tensors_to_rename.update(zip(outputs, args.rename_outputs))\n\n with tf.device(\"/cpu:0\"):\n model_proto, _ = _convert_common(\n graph_def,\n name=model_path,\n continue_on_error=args.continue_on_error,\n target=args.target,\n opset=args.opset,\n custom_op_handlers=custom_ops,\n extra_opset=extra_opset,\n shape_override=args.shape_override,\n input_names=inputs,\n output_names=outputs,\n inputs_as_nchw=args.inputs_as_nchw,\n large_model=args.large_model,\n tensors_to_rename=tensors_to_rename,\n ignore_default=args.ignore_default,\n use_default=args.use_default,\n tflite_path=tflite_path,\n dequantize=args.dequantize,\n initialized_tables=initialized_tables,\n 
output_frozen_graph=args.output_frozen_graph,\n output_path=args.output)\n\n\n # write onnx graph\n logger.info(\"\")\n logger.info(\"Successfully converted TensorFlow model %s to ONNX\", model_path)\n\n logger.info(\"Model inputs: %s\", [n.name for n in model_proto.graph.input])\n logger.info(\"Model outputs: %s\", [n.name for n in model_proto.graph.output])\n if args.output:\n if args.large_model:\n logger.info(\"Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.\", args.output)\n else:\n logger.info(\"ONNX model is saved at %s\", args.output)\n else:\n logger.info(\"To export ONNX model to file, please run with `--output` option\")\n\n\ndef tensor_names_from_structed(concrete_func, input_names, output_names):\n tensors_to_rename = {}\n args, kwargs = concrete_func.structured_input_signature\n structured_inputs = [t.name for t in args if isinstance(t, tf.TensorSpec)] + sorted(kwargs.keys())\n tensors_to_rename.update(zip(input_names, structured_inputs))\n if isinstance(concrete_func.structured_outputs, dict):\n for k, v in concrete_func.structured_outputs.items():\n tensors_to_rename[v.name] = k\n return tensors_to_rename\n\n\ndef from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,\n custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None,\n target=None, large_model=False, output_path=None):\n \"\"\"Returns a ONNX model_proto for a tf.keras model.\n\n Args:\n model: the tf.keras model we want to convert\n input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input\n opset: the opset to be used for the ONNX model, default is the latest\n target: list of workarounds applied to help certain platforms\n custom_op_handlers: dictionary of custom ops handlers\n custom_rewriter: list of custom graph rewriters\n extra_opset: list of extra opset's, for example the opset's used by custom ops\n shape_override: dict with inputs that override the shapes given by tensorflow\n inputs_as_nchw: transpose inputs in list from nchw to nhwc\n large_model: use the ONNX external tensor storage format\n output_path: save model to output_path\n\n Returns:\n An ONNX model_proto and an external_tensor_storage dict.\n \"\"\"\n if LooseVersion(tf.__version__) < \"2.0\":\n raise NotImplementedError(\"from_keras requires tf-1.15 or newer\")\n\n from tensorflow.python.keras.saving import saving_utils as _saving_utils # pylint: disable=import-outside-toplevel\n\n # let tensorflow do the checking if model is a valid model\n function = _saving_utils.trace_model_call(model, input_signature)\n concrete_func = function.get_concrete_function(*input_signature)\n\n input_names = [input_tensor.name for input_tensor in concrete_func.inputs\n if input_tensor.dtype != tf.dtypes.resource]\n output_names = [output_tensor.name for output_tensor in concrete_func.outputs\n if output_tensor.dtype != tf.dtypes.resource]\n\n initialized_tables = None\n tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)\n\n with tf.device(\"/cpu:0\"):\n frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model)\n model_proto, external_tensor_storage = _convert_common(\n frozen_graph,\n name=model.name,\n continue_on_error=True,\n target=None,\n opset=opset,\n custom_op_handlers=custom_ops,\n extra_opset=extra_opset,\n shape_override=shape_override,\n input_names=input_names,\n output_names=output_names,\n inputs_as_nchw=inputs_as_nchw,\n 
large_model=large_model,\n tensors_to_rename=tensors_to_rename,\n initialized_tables=initialized_tables,\n output_path=output_path)\n\n return model_proto, external_tensor_storage\n\n\ndef from_function(function, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,\n custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None, target=None,\n large_model=False, output_path=None):\n \"\"\"Returns a ONNX model_proto for a tf.function.\n\n Args:\n function: the tf.function we want to convert\n input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input\n opset: the opset to be used for the ONNX model, default is the latest\n target: list of workarounds applied to help certain platforms\n custom_op_handlers: dictionary of custom ops handlers\n custom_rewriter: list of custom graph rewriters\n extra_opset: list of extra opset's, for example the opset's used by custom ops\n shape_override: dict with inputs that override the shapes given by tensorflow\n inputs_as_nchw: transpose inputs in list from nchw to nhwc\n large_model: use the ONNX external tensor storage format\n output_path: save model to output_path\n\n Returns:\n An ONNX model_proto and an external_tensor_storage dict.\n \"\"\"\n if LooseVersion(tf.__version__) < \"2.0\":\n raise NotImplementedError(\"from_keras requires tf-1.15 or newer\")\n\n concrete_func = function.get_concrete_function(*input_signature)\n\n input_names = [input_tensor.name for input_tensor in concrete_func.inputs\n if input_tensor.dtype != tf.dtypes.resource]\n output_names = [output_tensor.name for output_tensor in concrete_func.outputs\n if output_tensor.dtype != tf.dtypes.resource]\n\n initialized_tables = None\n tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)\n\n with tf.device(\"/cpu:0\"):\n frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model)\n model_proto, external_tensor_storage = _convert_common(\n frozen_graph,\n name=concrete_func.name,\n continue_on_error=True,\n target=None,\n opset=opset,\n custom_op_handlers=custom_ops,\n extra_opset=extra_opset,\n shape_override=shape_override,\n input_names=input_names,\n output_names=output_names,\n inputs_as_nchw=inputs_as_nchw,\n large_model=large_model,\n tensors_to_rename=tensors_to_rename,\n initialized_tables=initialized_tables,\n output_path=output_path)\n\n return model_proto, external_tensor_storage\n\n\ndef from_graph_def(graph_def, name=None, input_names=None, output_names=None, opset=None, custom_ops=None,\n custom_op_handlers=None, custom_rewriter=None, inputs_as_nchw=None, extra_opset=None,\n shape_override=None, target=None, large_model=False, tensors_to_rename=None, output_path=None):\n \"\"\"Returns a ONNX model_proto for a tensorflow graphdef.\n\n Args:\n graph_def: the graphdef we want to convert\n input_names: list of input names\n output_names: list of output names\n name: A name for the graph\n opset: the opset to be used for the ONNX model, default is the latest\n target: list of workarounds applied to help certain platforms\n custom_op_handlers: dictionary of custom ops handlers\n custom_rewriter: list of custom graph rewriters\n extra_opset: list of extra opset's, for example the opset's used by custom ops\n shape_override: dict with inputs that override the shapes given by tensorflow\n inputs_as_nchw: transpose inputs in list from nchw to nhwc\n large_model: use the ONNX external tensor storage format\n output_path: save 
model to output_path\n\n Returns:\n An ONNX model_proto and an external_tensor_storage dict.\n \"\"\"\n if not input_names:\n raise ValueError(\"input_names needs to be provided\")\n if not output_names:\n raise ValueError(\"output_names needs to be provided\")\n if not name:\n name = \"unknown\"\n initialized_tables = None\n\n with tf.device(\"/cpu:0\"):\n with tf.Graph().as_default() as tf_graph:\n with tf_loader.tf_session(graph=tf_graph) as sess:\n tf.import_graph_def(graph_def, name='')\n frozen_graph = tf_loader.freeze_session(sess, input_names=input_names, output_names=output_names)\n input_names = tf_loader.inputs_without_resource(sess, input_names)\n frozen_graph = tf_loader.tf_optimize(input_names, output_names, graph_def)\n\n model_proto, external_tensor_storage = _convert_common(\n frozen_graph,\n name=name,\n continue_on_error=True,\n target=None,\n opset=opset,\n custom_op_handlers=custom_ops,\n extra_opset=extra_opset,\n shape_override=shape_override,\n input_names=input_names,\n output_names=output_names,\n inputs_as_nchw=inputs_as_nchw,\n large_model=large_model,\n tensors_to_rename=tensors_to_rename,\n initialized_tables=initialized_tables,\n output_path=output_path)\n\n return model_proto, external_tensor_storage\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.cond", "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.nn.rnn_cell.LSTMStateTuple", "numpy.random.random_sample", "tensorflow.map_fn", "numpy.random.randn", "tensorflow.graph_util.convert_variables_to_constants", "tensorflow.import_graph_def", "tensorflow.while_loop", "numpy.stack", "tensorflow.add", "tensorflow.tile", "tensorflow.contrib.seq2seq.BasicDecoder", "tensorflow.less", "tensorflow.TensorArray", "tensorflow.identity", "tensorflow.layers.Dense", "tensorflow.placeholder", "tensorflow.contrib.seq2seq.AttentionWrapper", "tensorflow.compat.v1.disable_eager_execution", "numpy.array", "tensorflow.contrib.seq2seq.dynamic_decode", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.contrib.seq2seq.BahdanauAttention", "numpy.ones", "tensorflow.python.ops.variables.global_variables_initializer" ], [ "tensorflow.random_uniform_initializer", "tensorflow.identity", "numpy.stack", "numpy.random.randn", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.init_ops.constant_initializer", "numpy.array" ], [ "tensorflow.matmul", "tensorflow.multiply", "tensorflow.constant", "tensorflow.import_graph_def", "tensorflow.identity", "tensorflow.compat.v1.lite.TFLiteConverter.from_session", "tensorflow.TensorSpec" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "numpy.product", "numpy.linspace", "numpy.random.seed", "tensorflow.as_dtype", "tensorflow.lite.Interpreter", "numpy.isclose", "numpy.stack", "numpy.ones", "numpy.testing.assert_array_equal", "tensorflow.function", "numpy.prod", "numpy.random.sample", "numpy.array", "numpy.zeros", "numpy.testing.assert_allclose", "numpy.random.randint" ], [ "numpy.any" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.device", "tensorflow.python.keras.saving.saving_utils.trace_model_call" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cnheider/xgboost
[ "e7fbc8591fa7277ee4c474b7371c48c11b34cbde" ]
[ "tests/python/test_training_continuation.py" ]
[ "import xgboost as xgb\nimport testing as tm\nimport numpy as np\nimport unittest\n\nrng = np.random.RandomState(1337)\n\n\nclass TestTrainingContinuation(unittest.TestCase):\n num_parallel_tree = 3\n\n xgb_params_01 = {\n 'silent': 1,\n 'nthread': 1,\n }\n\n xgb_params_02 = {\n 'silent': 1,\n 'nthread': 1,\n 'num_parallel_tree': num_parallel_tree\n }\n\n xgb_params_03 = {\n 'silent': 1,\n 'nthread': 1,\n 'num_class': 5,\n 'num_parallel_tree': num_parallel_tree\n }\n\n def test_training_continuation(self):\n tm._skip_if_no_sklearn()\n from sklearn.datasets import load_digits\n from sklearn.metrics import mean_squared_error\n\n digits_2class = load_digits(2)\n digits_5class = load_digits(5)\n\n X_2class = digits_2class['data']\n y_2class = digits_2class['target']\n\n X_5class = digits_5class['data']\n y_5class = digits_5class['target']\n\n dtrain_2class = xgb.DMatrix(X_2class, label=y_2class)\n dtrain_5class = xgb.DMatrix(X_5class, label=y_5class)\n\n gbdt_01 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10)\n ntrees_01 = len(gbdt_01.get_dump())\n assert ntrees_01 == 10\n\n gbdt_02 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=0)\n gbdt_02.save_model('xgb_tc.model')\n\n gbdt_02a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=gbdt_02)\n gbdt_02b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=\"xgb_tc.model\")\n ntrees_02a = len(gbdt_02a.get_dump())\n ntrees_02b = len(gbdt_02b.get_dump())\n assert ntrees_02a == 10\n assert ntrees_02b == 10\n\n res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_02a.predict(dtrain_2class))\n assert res1 == res2\n\n res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_02b.predict(dtrain_2class))\n assert res1 == res2\n\n gbdt_03 = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=3)\n gbdt_03.save_model('xgb_tc.model')\n\n gbdt_03a = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=gbdt_03)\n gbdt_03b = xgb.train(self.xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=\"xgb_tc.model\")\n ntrees_03a = len(gbdt_03a.get_dump())\n ntrees_03b = len(gbdt_03b.get_dump())\n assert ntrees_03a == 10\n assert ntrees_03b == 10\n\n res1 = mean_squared_error(y_2class, gbdt_03a.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_03b.predict(dtrain_2class))\n assert res1 == res2\n\n gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=3)\n assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree\n\n res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))\n assert res1 == res2\n\n gbdt_04 = xgb.train(self.xgb_params_02, dtrain_2class, num_boost_round=7, xgb_model=gbdt_04)\n assert gbdt_04.best_ntree_limit == (gbdt_04.best_iteration + 1) * self.num_parallel_tree\n\n res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class))\n res2 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class, ntree_limit=gbdt_04.best_ntree_limit))\n assert res1 == res2\n\n gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=7)\n assert gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree\n gbdt_05 = xgb.train(self.xgb_params_03, dtrain_5class, num_boost_round=3, xgb_model=gbdt_05)\n assert 
gbdt_05.best_ntree_limit == (gbdt_05.best_iteration + 1) * self.num_parallel_tree\n\n res1 = gbdt_05.predict(dtrain_5class)\n res2 = gbdt_05.predict(dtrain_5class, ntree_limit=gbdt_05.best_ntree_limit)\n np.testing.assert_almost_equal(res1, res2)\n" ]
[ [ "numpy.testing.assert_almost_equal", "numpy.random.RandomState", "sklearn.datasets.load_digits" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
feldman4/NatureProtocols
[ "a0a6775b8edfc493ac6265b1844040c1ae29c33b" ]
[ "ops/ngs.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom glob import glob\nfrom natsort import natsorted\n\n# TODO: from ops.constants import *\nfrom . import utils\n\ndef load_hist(filename, threshold):\n try:\n return (pd.read_csv(filename, sep='\\s+', header=None)\n .rename(columns={0: 'count', 1: 'seq'})\n .query('count > @threshold')\n .assign(fraction=lambda x: x['count']/x['count'].sum())\n .assign(log10_fraction=lambda x: np.log10(x['fraction']))\n .assign(file=filename)\n )\n except pd.errors.EmptyDataError:\n return None\n\n\ndef load_sgRNA_hists(histogram_files, threshold=3):\n pat = '(?P<plate>T.)_(?P<well>(?P<row>.)(?P<col>..))_S'\n cols = ['dataset', 'plate', 'well', 'row', 'col', \n 'count', 'log10_fraction', 'fraction', 'sgRNA']\n arr = []\n for dataset, search in histogram_files.items():\n files = natsorted(glob(search))\n (pd.concat([load_hist(f, threshold) for f in files])\n .rename(columns={'seq': 'sgRNA'})\n .pipe(lambda x: pd.concat([x['file'].str.extract(pat), x], \n axis=1))\n .pipe(utils.cast_cols, int_cols=['col'])\n .drop(['file'], axis=1)\n .assign(dataset=dataset)\n [cols]\n .pipe(arr.append)\n )\n\n return pd.concat(arr)\n\ndef calc_stats(df_hist, df_design, extra_cols=[]):\n sample_cols = ['dataset', 'plate', 'well', 'subpool'] + extra_cols\n sizes = df_design.groupby('subpool').size()\n fractions = (df_hist\n .groupby(sample_cols)\n ['fraction'].sum()\n .apply('{0:.1%}'.format)\n )\n\n cols = {'NGS_count': 'sgRNA_detected', \n 'NGS_missing': 'sgRNA_missing', \n 'NGS_designed': 'sgRNA_designed'}\n\n final_cols = ['NGS_fraction', 'NGS_Q10', 'NGS_Q50', 'NGS_Q90', 'NGS_Q90_10',\n 'NGS_mean', 'NGS_std', 'NGS_max', 'NGS_min', 'sgRNA_designed', \n 'sgRNA_detected', 'sgRNA_missing']\n\n return (df_hist\n .groupby(sample_cols)['count']\n .describe(percentiles=[0.1, 0.5, 0.9])\n .rename(columns={'10%': 'Q10', \n '50%': 'Q50', \n '90%': 'Q90'})\n .join(sizes.rename('designed'), on='subpool')\n .assign(Q90_10=lambda x: x.eval('Q90 / Q10'))\n .assign(missing=lambda x: x.eval('designed - count').astype(int))\n .pipe(utils.cast_cols, int_cols=['count', 'max', 'min'])\n .join(fractions)\n .rename(columns=lambda x: 'NGS_' + x)\n .rename(columns=cols)\n [final_cols]\n .sort_values(['dataset', 'plate', 'well', 'sgRNA_detected'],\n ascending=[True, True, True, False])\n ) \n\n\ndef identify_pool(df_hist, df_design):\n cols = ['subpool', 'spots_per_oligo']\n return (df_hist\n .join(df_design.set_index('sgRNA')[cols], on='sgRNA')\n .pipe(add_design_rank, df_design)\n .sort_values(['dataset', 'plate', 'well', 'sgRNA', 'design_rank'])\n .groupby(['dataset', 'plate', 'well', 'sgRNA']).head(1)\n .sort_values(['dataset', 'plate', 'well', 'fraction'], \n ascending=[True, True, True, False])\n .assign(mapped=lambda x: 1 - x['subpool'].isnull())\n .assign(mapped_fraction=lambda x: x.eval('fraction * mapped')) \n )\n\n\ndef add_design_rank(df_hist, df_design):\n \"\"\"For one file\n \"\"\"\n a = df_design.groupby('subpool').size()\n b = df_hist.groupby('subpool').size()\n ranked = (((b / a) * np.log10(a))\n .dropna().sort_values(ascending=False))\n designs = {k: v for v, k in enumerate(list(ranked.index))}\n get_design = lambda x: designs.get(x, 1e10)\n return (df_hist.assign(design_rank=lambda x: \n x['subpool'].apply(get_design)))" ]
[ [ "pandas.concat", "numpy.log10", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
norfordb/groundmotion
[ "3f714894a34d9d37e1ac236f26b4366e25a05056", "3f714894a34d9d37e1ac236f26b4366e25a05056", "3f714894a34d9d37e1ac236f26b4366e25a05056" ]
[ "gmprocess/metrics/reduction/arias.py", "gmprocess/streamcollection.py", "gmprocess/report.py" ]
[ "# Third party imports\nimport numpy as np\nfrom scipy import integrate\n\n# Local imports\nfrom gmprocess.constants import GAL_TO_PCTG\nfrom gmprocess.metrics.reduction.reduction import Reduction\nfrom gmprocess.stationstream import StationStream\nfrom gmprocess.stationtrace import StationTrace\n\n\nclass Arias(Reduction):\n \"\"\"Class for calculation of arias intensity.\"\"\"\n def __init__(self, reduction_data, bandwidth=None, percentile=None,\n period=None, smoothing=None):\n \"\"\"\n Args:\n reduction_data (obspy.core.stream.Stream or numpy.ndarray): Intensity\n measurement component.\n percentile (float): Percentile for rotation calculations. Default\n is None.\n period (float): Period for smoothing (Fourier amplitude spectra)\n calculations. Default is None.\n smoothing (string): Smoothing type. Default is None.\n bandwidth (float): Bandwidth for the smoothing operation. Default\n is None.\n \"\"\"\n super().__init__(reduction_data, bandwidth=None, percentile=None,\n period=None, smoothing=None)\n self.arias_stream = None\n self.result = self.get_arias()\n\n\n def get_arias(self):\n \"\"\"\n Performs calculation of arias intensity.\n\n Returns:\n arias_intensities: Dictionary of arias intensity for each channel.\n \"\"\"\n arias_intensities = {}\n arias_stream = StationStream([])\n for trace in self.reduction_data:\n dt = trace.stats['delta']\n # convert from cm/s/s to m/s/s\n acc = trace.data * 0.01\n\n # Calculate Arias Intensity\n integrated_acc2 = integrate.cumtrapz(acc * acc, dx=dt)\n arias_intensity = integrated_acc2 * np.pi * GAL_TO_PCTG / 2\n channel = trace.stats.channel\n trace.stats.standard.units = 'veloc'\n trace.stats.npts = len(arias_intensity)\n arias_stream.append(StationTrace(arias_intensity, trace.stats))\n arias_intensities[channel] = np.abs(np.max(arias_intensity))\n self.arias_stream = arias_stream\n return arias_intensities\n", "\"\"\"\nModule for StreamCollection class.\n\nThis class functions as a list of StationStream objects, and enforces\nvarious rules, such as all traces within a stream are from the same station.\n\"\"\"\n\nimport re\nimport copy\nimport logging\nimport fnmatch\n\nfrom obspy import UTCDateTime\nfrom obspy.core.event import Origin\nfrom obspy.geodetics import gps2dist_azimuth\nimport pandas as pd\n\nfrom gmprocess.exception import GMProcessException\nfrom gmprocess.metrics.station_summary import StationSummary\nfrom gmprocess.stationtrace import REV_PROCESS_LEVELS\nfrom gmprocess.stationstream import StationStream\nfrom gmprocess.io.read_directory import directory_to_streams\nfrom gmprocess.config import get_config\n\n\nINDENT = 2\n\nDEFAULT_IMTS = ['PGA', 'PGV', 'SA(0.3)', 'SA(1.0)', 'SA(3.0)']\nDEFAULT_IMCS = ['GREATER_OF_TWO_HORIZONTALS', 'CHANNELS']\n\nNETWORKS_USING_LOCATION = ['RE']\n\n\nclass StreamCollection(object):\n \"\"\"\n A collection/list of StationStream objects.\n\n This is a list of StationStream objectss, where the constituent\n StationTraces are grouped such that:\n\n - All traces are from the same network/station.\n - Sample rates must match.\n - Units much match.\n\n TODO:\n - Check for and handle misaligned start times and end times.\n - Check units\n\n \"\"\"\n\n def __init__(self, streams=None, drop_non_free=True,\n handle_duplicates=True, max_dist_tolerance=None,\n process_level_preference=None, format_preference=None):\n \"\"\"\n Args:\n streams (list):\n List of StationStream objects.\n drop_non_free (bool):\n If True, drop non-free-field Streams from the collection.\n hande_duplicates (bool):\n If True, 
remove duplicate data from the collection.\n max_dist_tolerance (float):\n Maximum distance tolerance for determining whether two streams\n are at the same location (in meters).\n process_level_preference (list):\n A list containing 'V0', 'V1', 'V2', with the order determining\n which process level is the most preferred (most preferred goes\n first in the list).\n format_preference (list):\n A list continaing strings of the file source formats (found\n in gmprocess.io). Does not need to list all of the formats.\n Example: ['cosmos', 'dmg'] indicates that cosmos files are\n preferred over dmg files.\n \"\"\"\n\n # Some initial checks of input streams\n if not isinstance(streams, list):\n raise TypeError(\n 'streams must be a list of StationStream objects.')\n newstreams = []\n for s in streams:\n if not isinstance(s, StationStream):\n raise TypeError(\n 'streams must be a list of StationStream objects.')\n\n logging.debug(s.get_id())\n\n if drop_non_free:\n if s[0].free_field:\n newstreams.append(s)\n else:\n newstreams.append(s)\n\n self.streams = newstreams\n if handle_duplicates:\n if len(self.streams):\n self.__handle_duplicates(\n max_dist_tolerance,\n process_level_preference,\n format_preference)\n self.__group_by_net_sta_inst()\n self.validate()\n\n @property\n def n_passed(self):\n n_passed = 0\n for stream in self:\n if stream.passed:\n n_passed += 1\n return n_passed\n\n @property\n def n_failed(self):\n n = len(self.streams)\n return n - self.n_passed\n\n def validate(self):\n \"\"\"Some validation checks across streams.\n\n \"\"\"\n # If tag exists, it should be consistent across StationStreams\n all_labels = []\n for stream in self:\n if hasattr(stream, 'tag'):\n eventid, station, label = stream.tag.split('_')\n all_labels.append(label)\n else:\n all_labels.append(\"\")\n if len(set(all_labels)) > 1:\n raise GMProcessException(\n 'Only one label allowed within a StreamCollection.')\n\n def select_colocated(self, preference=[\"HN?\", \"BN?\", \"HH?\", \"BH?\"]):\n \"\"\"\n Detect colocated instruments and select the preferred instrument type.\n\n This uses the a list of the first two channel characters, given as\n 'preference' in the 'colocated' section of the config. 
The algorithm\n is:\n\n 1) Generate list of StationStreams that have the same station code.\n 2) For each colocated group, loop over the list of preferred\n instrument codes, select the first one that is encountered by\n labeling all others a failed.\n\n * If the preferred instrument type matches more than one\n StationStream, pick the first (hopefully this never happens).\n * If no StationStream matches any of the codes in the preferred\n list then label all as failed.\n\n Args:\n preference (list):\n List of strings indicating preferred instrument types.\n \"\"\"\n\n # Create a list of streams with matching id (combo of net and station).\n all_matches = []\n match_list = []\n for idx1, stream1 in enumerate(self):\n if idx1 in all_matches:\n continue\n matches = [idx1]\n net_sta = stream1.get_net_sta()\n for idx2, stream2 in enumerate(self):\n if idx1 != idx2 and idx1 not in all_matches:\n if (net_sta == stream2.get_net_sta()):\n matches.append(idx2)\n if len(matches) > 1:\n match_list.append(matches)\n all_matches.extend(matches)\n else:\n if matches[0] not in all_matches:\n match_list.append(matches)\n all_matches.extend(matches)\n\n for group in match_list:\n # Are there colocated instruments for this group?\n if len(group) > 1:\n # If so, loop over list of preferred instruments\n group_insts = [self[g].get_inst() for g in group]\n\n # Loop over preferred instruments\n no_match = True\n for pref in preference:\n # Is this instrument available in the group?\n r = re.compile(pref[0:2])\n inst_match = list(filter(r.match, group_insts))\n if len(inst_match):\n no_match = False\n # Select index; if more than one, we just take the\n # first one because we don't know any better\n keep = inst_match[0]\n\n # Label all non-selected streams in the group as failed\n to_fail = group_insts\n to_fail.remove(keep)\n for tf in to_fail:\n for st in self.select(instrument=tf):\n for tr in st:\n tr.fail(\n 'Colocated with %s instrument.' % keep\n )\n\n break\n if no_match:\n # Fail all Streams in group\n for g in group:\n for tr in self[g]:\n tr.fail(\n 'No instruments match entries in the '\n 'colocated instrument preference list for '\n 'this station.'\n )\n\n @classmethod\n def from_directory(cls, directory):\n \"\"\"\n Create a StreamCollection instance from a directory of data.\n\n Args:\n directory (str):\n Directory of ground motion files (streams) to be read.\n\n Returns:\n StreamCollection instance.\n \"\"\"\n streams, missed_files, errors = directory_to_streams(directory)\n\n # Might eventually want to include some of the missed files and\n # error info but don't have a sensible place to put it currently.\n return cls(streams)\n\n @classmethod\n def from_traces(cls, traces):\n \"\"\"\n Create a StreamCollection instance from a list of traces.\n\n Args:\n traces (list):\n List of StationTrace objects.\n\n Returns:\n StreamCollection instance.\n \"\"\"\n\n streams = [StationStream([tr]) for tr in traces]\n return cls(streams)\n\n def to_dataframe(self, origin, imcs=None, imts=None):\n \"\"\"Get a summary dataframe of streams.\n\n Note: The PGM columns underneath each channel will be variable\n depending on the units of the Stream being passed in (velocity\n sensors can only generate PGV) and on the imtlist passed in by\n user. 
Spectral acceleration columns will be formatted as SA(0.3)\n for 0.3 second spectral acceleration, for example.\n\n Args:\n directory (str):\n Directory of ground motion files (streams).\n origin_dict (obspy):\n Dictionary with the following keys:\n - id\n - magnitude\n - time (UTCDateTime object)\n - lon\n - lat\n - depth\n imcs (list):\n Strings designating desired components to create in table.\n imts (list):\n Strings designating desired PGMs to create in table.\n\n Returns:\n DataFrame: Pandas dataframe containing columns:\n - STATION Station code.\n - NAME Text description of station.\n - LOCATION Two character location code.\n - SOURCE Long form string containing source network.\n - NETWORK Short network code.\n - LAT Station latitude\n - LON Station longitude\n - DISTANCE Epicentral distance (km) (if epicentral\n lat/lon provided)\n - HN1 East-west channel (or H1) (multi-index with pgm columns):\n - PGA Peak ground acceleration (%g).\n - PGV Peak ground velocity (cm/s).\n - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).\n - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).\n - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).\n - HN2 North-south channel (or H2) (multi-index with pgm\n columns):\n - PGA Peak ground acceleration (%g).\n - PGV Peak ground velocity (cm/s).\n - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).\n - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).\n - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).\n - HNZ Vertical channel (or HZ) (multi-index with pgm columns):\n - PGA Peak ground acceleration (%g).\n - PGV Peak ground velocity (cm/s).\n - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).\n - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).\n - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).\n - GREATER_OF_TWO_HORIZONTALS (multi-index with pgm columns):\n - PGA Peak ground acceleration (%g).\n - PGV Peak ground velocity (cm/s).\n - SA(0.3) Pseudo-spectral acceleration at 0.3 seconds (%g).\n - SA(1.0) Pseudo-spectral acceleration at 1.0 seconds (%g).\n - SA(3.0) Pseudo-spectral acceleration at 3.0 seconds (%g).\n \"\"\"\n streams = self.streams\n # dept for an origin object should be stored in meters\n origin = Origin(resource_id=origin['id'], latitude=origin['lat'],\n longitude=origin['lon'], time=origin['time'],\n depth=origin['depth'] * 1000)\n\n if imcs is None:\n station_summary_imcs = DEFAULT_IMCS\n else:\n station_summary_imcs = imcs\n if imts is None:\n station_summary_imts = DEFAULT_IMTS\n else:\n station_summary_imts = imts\n\n if imcs is None:\n station_summary_imcs = DEFAULT_IMCS\n else:\n station_summary_imcs = imcs\n if imts is None:\n station_summary_imts = DEFAULT_IMTS\n else:\n station_summary_imts = imts\n\n subdfs = []\n for stream in streams:\n if not stream.passed:\n continue\n if len(stream) < 3:\n continue\n stream_summary = StationSummary.from_stream(\n stream, station_summary_imcs, station_summary_imts, origin)\n summary = stream_summary.summary\n subdfs += [summary]\n dataframe = pd.concat(subdfs, axis=0).reset_index(drop=True)\n\n return dataframe\n\n def __str__(self):\n \"\"\"\n String summary of the StreamCollection.\n \"\"\"\n summary = ''\n n = len(self.streams)\n summary += '%s StationStreams(s) in StreamCollection:\\n' % n\n summary += ' %s StationStreams(s) passed checks.\\n' % self.n_passed\n summary += ' %s StationStreams(s) failed checks.\\n' % self.n_failed\n return summary\n\n def describe(self):\n \"\"\"\n More verbose description of 
StreamCollection.\n \"\"\"\n summary = ''\n summary += str(len(self.streams)) + \\\n ' StationStreams(s) in StreamCollection:\\n'\n for stream in self:\n summary += stream.__str__(indent=INDENT) + '\\n'\n print(summary)\n\n def __len__(self):\n \"\"\"\n Length of StreamCollection is the number of constituent StationStreams.\n \"\"\"\n return len(self.streams)\n\n def __nonzero__(self):\n \"\"\"\n Nonzero if there are no StationStreams.\n \"\"\"\n return bool(len(self.traces))\n\n def __add__(self, other):\n \"\"\"\n Add two streams together means appending to list of streams.\n \"\"\"\n if not isinstance(other, StreamCollection):\n raise TypeError\n streams = self.streams + other.streams\n return self.__class__(streams)\n\n def __iter__(self):\n \"\"\"\n Iterator for StreamCollection iterates over constituent StationStreams.\n \"\"\"\n return list(self.streams).__iter__()\n\n def __setitem__(self, index, stream):\n \"\"\"\n __setitem__ method.\n \"\"\"\n self.streams.__setitem__(index, stream)\n\n def __getitem__(self, index):\n \"\"\"\n __getitem__ method.\n \"\"\"\n if isinstance(index, slice):\n return self.__class__(stream=self.streams.__getitem__(index))\n else:\n return self.streams.__getitem__(index)\n\n def __delitem__(self, index):\n \"\"\"\n __delitem__ method.\n \"\"\"\n return self.streams.__delitem__(index)\n\n def __getslice__(self, i, j, k=1):\n \"\"\"\n Getslice method.\n \"\"\"\n return self.__class__(streams=self.streams[max(0, i):max(0, j):k])\n\n def append(self, stream):\n \"\"\"\n Append a single StationStream object.\n\n Args:\n stream:\n A StationStream object.\n \"\"\"\n if isinstance(stream, StationStream):\n streams = self.streams + [stream]\n return self.__class__(streams)\n else:\n raise TypeError(\n 'Append only uspports adding a single StationStream.')\n\n def pop(self, index=(-1)):\n \"\"\"\n Remove and return the StationStream object specified by index from\n the StreamCollection.\n \"\"\"\n return self.streams.pop(index)\n\n def copy(self):\n \"\"\"\n Copy method.\n \"\"\"\n return copy.deepcopy(self)\n\n def select(self, network=None, station=None, instrument=None):\n \"\"\"\n Return a new StreamCollection with only those StationStreams\n that match the selection criteria.\n\n Based on obspy's `select` method for traces.\n\n Args:\n network (str):\n Network code.\n station (str):\n Station code.\n instrument (str):\n Instrument code; i.e., the first two characters of the\n channel.\n \"\"\"\n sel = []\n for st in self:\n inst = st.get_inst()\n net_sta = st.get_net_sta()\n net = net_sta.split('.')[0]\n sta = net_sta.split('.')[1]\n if network is not None:\n if not fnmatch.fnmatch(net.upper(), network.upper()):\n continue\n if station is not None:\n if not fnmatch.fnmatch(sta.upper(), station.upper()):\n continue\n if instrument is not None:\n if not fnmatch.fnmatch(inst.upper(), instrument.upper()):\n continue\n sel.append(st)\n return self.__class__(sel)\n\n def __group_by_net_sta_inst(self):\n\n trace_list = []\n stream_params = gather_stream_parameters(self.streams)\n for st in self.streams:\n for tr in st:\n trace_list.append(tr)\n\n # Create a list of traces with matching net, sta.\n all_matches = []\n match_list = []\n for idx1, trace1 in enumerate(trace_list):\n if idx1 in all_matches:\n continue\n matches = [idx1]\n network = trace1.stats['network']\n station = trace1.stats['station']\n free_field = trace1.free_field\n # For instrument, use first two characters of the channel\n inst = trace1.stats['channel'][0:2]\n for idx2, trace2 in 
enumerate(trace_list):\n if idx1 != idx2 and idx1 not in all_matches:\n if (\n network == trace2.stats['network']\n and station == trace2.stats['station']\n and inst == trace2.stats['channel'][0:2]\n and free_field == trace2.free_field\n ):\n matches.append(idx2)\n if len(matches) > 1:\n match_list.append(matches)\n all_matches.extend(matches)\n else:\n if matches[0] not in all_matches:\n match_list.append(matches)\n all_matches.extend(matches)\n\n grouped_streams = []\n for groups in match_list:\n grouped_trace_list = []\n for i in groups:\n grouped_trace_list.append(\n trace_list[i]\n )\n # some networks (e.g., Bureau of Reclamation, at the time of this\n # writing) use the location field to indicate different sensors at\n # (roughly) the same location. If we know this (as in the case of\n # BOR), we can use this to trim the stations into 3-channel\n # streams.\n streams = split_station(grouped_trace_list)\n streams = insert_stream_parameters(streams, stream_params)\n\n for st in streams:\n grouped_streams.append(st)\n\n self.streams = grouped_streams\n\n def __handle_duplicates(self, max_dist_tolerance,\n process_level_preference, format_preference):\n \"\"\"\n Removes duplicate data from the StreamCollection, based on the\n process level and format preferences.\n\n Args:\n max_dist_tolerance (float):\n Maximum distance tolerance for determining whether two streams\n are at the same location (in meters).\n process_level_preference (list):\n A list containing 'V0', 'V1', 'V2', with the order determining\n which process level is the most preferred (most preferred goes\n first in the list).\n format_preference (list):\n A list continaing strings of the file source formats (found\n in gmprocess.io). Does not need to list all of the formats.\n Example: ['cosmos', 'dmg'] indicates that cosmos files are\n preferred over dmg files.\n \"\"\"\n\n # If arguments are None, check the config\n # If not in the config, use the default values at top of the file\n if max_dist_tolerance is None:\n max_dist_tolerance = get_config('duplicate')['max_dist_tolerance']\n\n if process_level_preference is None:\n process_level_preference = \\\n get_config('duplicate')['process_level_preference']\n\n if format_preference is None:\n format_preference = get_config('duplicate')['format_preference']\n\n stream_params = gather_stream_parameters(self.streams)\n\n traces = []\n for st in self.streams:\n for tr in st:\n traces.append(tr)\n preferred_traces = []\n\n for tr_to_add in traces:\n is_duplicate = False\n for tr_pref in preferred_traces:\n if are_duplicates(tr_to_add, tr_pref, max_dist_tolerance):\n is_duplicate = True\n break\n\n if is_duplicate:\n if choose_preferred(\n tr_to_add, tr_pref,\n process_level_preference, format_preference) == tr_to_add:\n preferred_traces.remove(tr_pref)\n logging.info('Trace %s (%s) is a duplicate and '\n 'has been removed from the StreamCollection.'\n % (tr_pref.id,\n tr_pref.stats.standard.source_file))\n preferred_traces.append(tr_to_add)\n else:\n logging.info('Trace %s (%s) is a duplicate and '\n 'has been removed from the StreamCollection.'\n % (tr_to_add.id,\n tr_to_add.stats.standard.source_file))\n\n else:\n preferred_traces.append(tr_to_add)\n\n streams = [StationStream([tr]) for tr in preferred_traces]\n streams = insert_stream_parameters(streams, stream_params)\n self.streams = streams\n\n\ndef gather_stream_parameters(streams):\n \"\"\"\n Helper function for gathering the stream parameters into a datastructure\n and sticking the stream tag into the trace stats 
dictionaries.\n\n Args:\n streams (list): list of StationStream objects.\n\n Returns:\n dict. Dictionary of the stream parameters.\n \"\"\"\n stream_params = {}\n\n # Need to make sure that tag will be preserved; tag only really should\n # be created once a StreamCollection has been written to an ASDF file\n # and then read back in.\n for stream in streams:\n # we have stream-based metadata that we need to preserve\n if len(stream.parameters):\n stream_params[stream.get_id()] = stream.parameters\n\n # Tag is a StationStream attribute; If it does not exist, make it\n # an empty string\n if hasattr(stream, 'tag'):\n tag = stream.tag\n else:\n tag = \"\"\n # Since we have to deconstruct the stream groupings each time, we\n # need to stick the tag into the trace stats dictionary temporarily\n for trace in stream:\n tr = trace\n tr.stats.tag = tag\n\n return stream_params\n\n\ndef insert_stream_parameters(streams, stream_params):\n \"\"\"\n Helper function for inserting the stream parameters back to the streams.\n\n Args:\n streams (list): list of StationStream objects.\n stream_params (dict): Dictionary of stream parameters.\n\n Returns:\n list of StationStream objects with stream parameters.\n \"\"\"\n for st in streams:\n if len(st):\n sid = st.get_id()\n # put stream parameters back in\n if sid in stream_params:\n st.parameters = stream_params[sid].copy()\n\n # Put tag back as a stream attribute, assuming that the\n # tag has stayed the same through the grouping process\n if st[0].stats.tag:\n st.tag = st[0].stats.tag\n\n return streams\n\n\ndef split_station(grouped_trace_list):\n if grouped_trace_list[0].stats.network in NETWORKS_USING_LOCATION:\n streams_dict = {}\n for trace in grouped_trace_list:\n if trace.stats.location in streams_dict:\n streams_dict[trace.stats.location] += trace\n else:\n streams_dict[trace.stats.location] = \\\n StationStream(traces=[trace])\n streams = list(streams_dict.values())\n else:\n streams = [StationStream(traces=grouped_trace_list)]\n return streams\n\n\ndef are_duplicates(tr1, tr2, max_dist_tolerance):\n \"\"\"\n Determines whether two StationTraces are duplicates by checking the\n station, channel codes, and the distance between them.\n\n Args:\n tr1 (StationTrace):\n 1st trace.\n tr2 (StationTrace):\n 2nd trace.\n max_dist_tolerance (float):\n Maximum distance tolerance for determining whether two streams\n are at the same location (in meters).\n\n Returns:\n bool. True if traces are duplicates, False otherwise.\n \"\"\"\n\n # First, check if the ids match (net.sta.loc.cha)\n if tr1.id == tr2.id:\n return True\n # If not matching IDs, check the station, instrument code, and distance\n else:\n distance = gps2dist_azimuth(\n tr1.stats.coordinates.latitude, tr1.stats.coordinates.longitude,\n tr2.stats.coordinates.latitude, tr2.stats.coordinates.longitude)[0]\n if (tr1.stats.station == tr2.stats.station and\n tr1.stats.location == tr2.stats.location and\n tr1.stats.channel == tr2.stats.channel and\n distance < max_dist_tolerance):\n return True\n else:\n return False\n\n\ndef choose_preferred(tr1, tr2, process_level_preference, format_preference):\n \"\"\"\n Determines which trace is preferred. 
Returns the preferred the trace.\n\n Args:\n tr1 (StationTrace):\n 1st trace.\n tr2 (StationTrace):\n 2nd trace.\n process_level_preference (list):\n A list containing 'V0', 'V1', 'V2', with the order determining\n which process level is the most preferred (most preferred goes\n first in the list).\n format_preference (list):\n A list continaing strings of the file source formats (found\n in gmprocess.io). Does not need to list all of the formats.\n Example: ['cosmos', 'dmg'] indicates that cosmos files are\n preferred over dmg files.\n\n Returns:\n The preferred trace (StationTrace).\n \"\"\"\n\n tr1_pref = process_level_preference.index(\n REV_PROCESS_LEVELS[tr1.stats.standard.process_level])\n tr2_pref = process_level_preference.index(\n REV_PROCESS_LEVELS[tr2.stats.standard.process_level])\n\n if tr1_pref < tr2_pref:\n return tr1\n elif tr1_pref > tr2_pref:\n return tr2\n else:\n if (tr1.stats.standard.source_format in format_preference and\n tr2.stats.standard.source_format in format_preference):\n # Determine preferred format\n tr1_form_pref = format_preference.index(\n tr1.stats.standard.source_format)\n tr2_form_pref = format_preference.index(\n tr2.stats.standard.source_format)\n if tr1_form_pref < tr2_form_pref:\n return tr1\n elif tr1_form_pref > tr2_form_pref:\n return tr2\n else:\n if (tr1.stats.starttime == UTCDateTime(0) and\n tr2.stats.starttime != UTCDateTime(0)):\n return tr2\n elif (tr1.stats.starttime != UTCDateTime(0) and\n tr2.stats.starttime == UTCDateTime(0)):\n return tr1\n else:\n if tr1.stats.npts > tr2.stats.npts:\n return tr1\n elif tr2.stats.npts > tr1.stats.npts:\n return tr2\n else:\n if tr2.stats.sampling_rate > tr1.stats.sampling_rate:\n return tr2\n else:\n return tr1\n", "# stdlib imports\nimport os\nfrom shutil import which\nimport glob\n\n# third party imports\nimport numpy as np\nimport pandas as pd\nfrom impactutils.io.cmd import get_command_output\n\n# local imports\nimport gmprocess\nfrom gmprocess.config import get_config\n\nPREAMBLE = \"\"\"\n\\\\documentclass[9pt]{article}\n\\\\usepackage{helvet}\n\\\\renewcommand{\\\\familydefault}{\\\\sfdefault}\n\n\\\\usepackage{graphicx}\n\\\\usepackage{tikz}\n\n% grffile allows for multiple dots in image file name\n\\\\usepackage{grffile}\n\n% Turn off default page numbers\n% \\\\usepackage{nopageno}\n\n% Needed for table rules\n\\\\usepackage{booktabs}\n\n\\\\usepackage[english]{babel}\n\n\\\\usepackage[letterpaper, portrait]{geometry}\n\n\\\\geometry{\n left=0.75in,\n top=0.0in,\n total={7in,10.5in},\n includeheadfoot\n}\n\n\\setlength\\parindent{0pt}\n\n% Use custom headers\n\\\\usepackage{fancyhdr}\n\\\\pagestyle{fancy}\n\\\\fancyhf{}\n\\\\renewcommand{\\headrulewidth}{0pt}\n\\\\cfoot{\\\\thepage}\n%%\\\\lfoot{\\\\today}\n\n\\\\tikzstyle{box} = [\n draw=blue, fill=blue!20, thick,\n rectangle, rounded corners]\n\n\\\\begin{document}\n\"\"\"\n\nPOSTAMBLE = \"\"\"\n\\\\end{document}\n\"\"\"\n\nSTREAMBLOCK = \"\"\"\n\\\\begin{tikzpicture}[remember picture,overlay]\n \\\\draw[box] (0, 0.5) rectangle (9, 1.0) node[pos=.5]\n {\\\\normalsize [EVENT]};\n \\\\draw[box] (10, 0.5) rectangle (17, 1.0) node[pos=.5]\n {\\\\normalsize [STATION]};\n\\\\end{tikzpicture}\n\n\\\\includegraphics[height=5.75in]\n {[PLOTPATH]}\n\n\n\"\"\"\n\nTITLEBLOCK = \"\"\"\n\\\\begin{center}\n\n\\\\vfill\n\n\\\\large Summary Report\n\n\\\\vspace{1cm}\n\ngmprocess\n\n\\\\vspace{1cm}\n\nCode version: [VERSION]\n\n\\\\vspace{1cm}\n\n\\\\today\n\n\\\\vspace{1cm}\n\n\\\\includegraphics[width=0.9\\\\textwidth]\n 
{[MAPPATH]}\n\\\\end{center}\n\n\\\\vfill\n\n\\\\newpage\\n\\n\n\n\"\"\"\n\n\ndef build_report_latex(sc, directory, origin, config=None):\n \"\"\"\n Build latex summary report.\n\n Args:\n st (StreamCollection):\n StreamCollection of data.\n directory (str):\n Directory for saving report.\n origin (ScalarEvent):\n ScalarEvent object.\n config (dict):\n Config dictionary.\n Returns:\n tuple:\n - Name of pdf or latex report file created.\n - boolean indicating whether PDF creation was successful.\n\n \"\"\"\n # Need to get config to know where the plots are located\n if config is None:\n config = get_config()\n\n # Check if directory exists, and if not, create it.\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Initialize report string with PREAMBLE\n report = PREAMBLE\n timestr = origin.time.strftime('%m/%d/%Y %H:%M:%S')\n\n # Does the map exist?\n map_file = os.path.join(directory, 'stations_map.png')\n if os.path.isfile(map_file):\n TB = TITLEBLOCK.replace(\n '[MAPPATH]', 'stations_map.png'\n )\n TB = TB.replace(\n '[VERSION]', gmprocess.__version__\n )\n report += TB\n\n # Loop over each StationStream and append it's page to the report\n # do not include more than three.\n for st in sc:\n plot_path = os.path.join(\n 'plots', origin.id + '_' + st.get_id() + '.png')\n SB = STREAMBLOCK.replace('[PLOTPATH]', plot_path)\n SB = SB.replace(\n '[EVENT]', 'M %s - %s - %s'\n % (origin.magnitude, origin.id, timestr)\n )\n SB = SB.replace(\n '[STATION]', st.get_id()\n )\n report += SB\n\n prov_latex = get_prov_latex(st)\n\n report += prov_latex\n report += '\\n'\n if st[0].hasParameter('signal_split'):\n pick_method = st[0].getParameter('signal_split')['picker_type']\n report += 'Pick Method: %s\\n\\n' % str_for_latex(pick_method)\n if not st.passed:\n for tr in st:\n if tr.hasParameter('failure'):\n report += ('Failure reason: %s\\n\\n'\n % tr.getParameter('failure')['reason'])\n break\n report += '\\\\newpage\\n\\n'\n\n # Finish the latex file\n report += POSTAMBLE\n\n res = False\n # Do not save report if running tests\n if 'CALLED_FROM_PYTEST' not in os.environ:\n\n # Set working directory to be the event subdirectory\n current_directory = os.getcwd()\n os.chdir(directory)\n\n # File name relative to current location\n file_name = ('report_%s.tex' % (origin.id))\n\n # File name for printing out later relative base directory\n latex_file = os.path.join(directory, file_name)\n with open(file_name, 'w') as f:\n f.write(report)\n\n # Can we find pdflatex?\n try:\n pdflatex_bin = which('pdflatex')\n pdflatex_options = '-interaction=nonstopmode -halt-on-error'\n cmd = '%s %s %s' % (pdflatex_bin, pdflatex_options, file_name)\n res, stdout, stderr = get_command_output(cmd)\n report_file = latex_file\n if res:\n base, ext = os.path.splitext(file_name)\n pdf_file = base + '.pdf'\n if os.path.isfile(pdf_file):\n report_file = pdf_file\n auxfiles = glob.glob(base + '*')\n auxfiles.remove(pdf_file)\n for auxfile in auxfiles:\n os.remove(auxfile)\n else:\n res = False\n else:\n print('pdflatex output:')\n print(stdout.decode())\n print(stderr.decode())\n except Exception:\n report_file = None\n pass\n finally:\n os.chdir(current_directory)\n\n # make report file an absolute path\n report_file = os.path.join(directory, report_file)\n\n return (report_file, res)\n\n\ndef get_prov_latex(st):\n \"\"\"\n Construct a latex representation of a trace's provenance.\n\n Args:\n st (StationStream):\n StationStream of data.\n\n Returns:\n str: Latex tabular representation of provenance.\n \"\"\"\n # 
start by sorting the channel names\n channels = [tr.stats.channel for tr in st]\n channelidx = np.argsort(channels).tolist()\n columns = ['Process Step',\n 'Process Attribute']\n\n trace1 = st[channelidx.index(0)]\n df = pd.DataFrame(columns=columns)\n df = trace1.getProvDataFrame()\n mapper = {'Process Value': '%s Value' % trace1.stats.channel}\n df = df.rename(mapper=mapper, axis='columns')\n for i in channelidx[1:]:\n trace2 = st[i]\n trace2_frame = trace2.getProvDataFrame()\n df['%s Value' % trace2.stats.channel] = trace2_frame['Process Value']\n\n lastrow = None\n newdf = pd.DataFrame(columns=df.columns)\n for idx, row in df.iterrows():\n if lastrow is None:\n lastrow = row\n newdf = newdf.append(row, ignore_index=True)\n continue\n if row['Index'] == lastrow['Index']:\n row['Process Step'] = ''\n newdf = newdf.append(row, ignore_index=True)\n lastrow = row\n\n newdf = newdf.drop(labels='Index', axis='columns')\n prov_string = newdf.to_latex(index=False)\n prov_string = '\\\\tiny\\n' + prov_string\n return prov_string\n\n\ndef str_for_latex(string):\n \"\"\"\n Helper method to convert some strings that are problematic for latex.\n \"\"\"\n string = string.replace('_', '\\\\_')\n string = string.replace('$', '\\\\$')\n string = string.replace('&', '\\\\&')\n string = string.replace('%', '\\\\%')\n string = string.replace('#', '\\\\#')\n string = string.replace('}', '\\\\}')\n string = string.replace('{', '\\\\{')\n string = string.replace('~', '\\\\textasciitilde ')\n string = string.replace('^', '\\\\textasciicircum ')\n return string\n" ]
[ [ "numpy.max", "scipy.integrate.cumtrapz" ], [ "pandas.concat" ], [ "numpy.argsort", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]